diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md index 0c24155e6..2fea0120e 100644 --- a/.github/ISSUE_TEMPLATE/1_broken_site.md +++ b/.github/ISSUE_TEMPLATE/1_broken_site.md @@ -18,7 +18,7 @@ title: '' - [ ] I'm reporting a broken site support -- [ ] I've verified that I'm running youtube-dl version **2019.09.12.1** +- [ ] I've verified that I'm running youtube-dl version **2019.09.28** - [ ] I've checked that all provided URLs are alive and playable in a browser - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped - [ ] I've searched the bugtracker for similar issues including closed ones @@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v < [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 - [debug] youtube-dl version 2019.09.12.1 + [debug] youtube-dl version 2019.09.28 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md index 9babe0360..6116acc79 100644 --- a/.github/ISSUE_TEMPLATE/2_site_support_request.md +++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md @@ -19,7 +19,7 @@ labels: 'site-support-request' - [ ] I'm reporting a new site support request -- [ ] I've verified that I'm running youtube-dl version **2019.09.12.1** +- [ ] I've verified that I'm running youtube-dl version **2019.09.28** - [ ] I've checked that all provided URLs are alive and playable in a browser - [ ] I've checked that none of provided URLs violate any copyrights - [ ] I've searched the bugtracker for similar site support requests including closed ones diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md index 72322fe26..79d1a7f3c 100644 --- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md +++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md @@ -18,13 +18,13 @@ title: '' - [ ] I'm reporting a site feature request -- [ ] I've verified that I'm running youtube-dl version **2019.09.12.1** +- [ ] I've verified that I'm running youtube-dl version **2019.09.28** - [ ] I've searched the bugtracker for similar site feature requests including closed ones diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md index da7f2cf93..9bda3d440 100644 --- a/.github/ISSUE_TEMPLATE/4_bug_report.md +++ b/.github/ISSUE_TEMPLATE/4_bug_report.md @@ -18,7 +18,7 @@ title: '' - [ ] I'm reporting a broken site support issue -- [ ] I've verified that I'm running youtube-dl version **2019.09.12.1** +- [ ] I've verified that I'm running youtube-dl version **2019.09.28** - [ ] I've checked that all provided URLs are alive and playable in a browser - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped - [ ] I've searched the bugtracker for similar bug reports including closed ones @@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v < [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 - [debug] youtube-dl version 2019.09.12.1 + [debug] 
youtube-dl version 2019.09.28 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md index d41022b9f..581344917 100644 --- a/.github/ISSUE_TEMPLATE/5_feature_request.md +++ b/.github/ISSUE_TEMPLATE/5_feature_request.md @@ -19,13 +19,13 @@ labels: 'request' - [ ] I'm reporting a feature request -- [ ] I've verified that I'm running youtube-dl version **2019.09.12.1** +- [ ] I've verified that I'm running youtube-dl version **2019.09.28** - [ ] I've searched the bugtracker for similar feature requests including closed ones diff --git a/ChangeLog b/ChangeLog index 2aba02065..80681a9ae 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,34 @@ +version 2019.09.28 + +Core +* [YoutubeDL] Honour all --get-* options with --flat-playlist (#22493) + +Extractors +* [vk] Fix extraction (#22522) +* [heise] Fix kaltura embeds extraction (#22514) +* [ted] Check for resources validity and extract subtitled downloads (#22513) ++ [youtube] Add support for + owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya.b32.i2p (#22292) ++ [nhk] Add support for clips +* [nhk] Fix video extraction (#22249, #22353) +* [byutv] Fix extraction (#22070) ++ [openload] Add support for oload.online (#22304) ++ [youtube] Add support for invidious.drycat.fr (#22451) +* [jwplatfom] Do not match video URLs (#20596, #22148) +* [youtube:playlist] Unescape playlist uploader (#22483) ++ [bilibili] Add support audio albums and songs (#21094) ++ [instagram] Add support for tv URLs ++ [mixcloud] Allow uppercase letters in format URLs (#19280) +* [brightcove] Delegate all supported legacy URLs to new extractor (#11523, + #12842, #13912, #15669, #16303) +* [hotstar] Use native HLS downloader by default ++ [hotstar] Extract more formats (#22323) +* [9now] Fix extraction (#22361) +* [zdf] Bypass geo restriction ++ [tv4] Extract series metadata +* [tv4] Fix extraction (#22443) + + version 2019.09.12.1 Extractors diff --git a/docs/supportedsites.md b/docs/supportedsites.md index 18bddc138..35275278b 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -98,6 +98,8 @@ - **Bigflix** - **Bild**: Bild.de - **BiliBili** + - **BilibiliAudio** + - **BilibiliAudioAlbum** - **BioBioChileTV** - **BIQLE** - **BitChute** diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index 6a44bc7ba..c3d1407f9 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -852,8 +852,9 @@ class YoutubeDL(object): extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): - if self.params.get('forcejson', False): - self.to_stdout(json.dumps(ie_result)) + self.__forced_printings( + ie_result, self.prepare_filename(ie_result), + incomplete=True) return ie_result if result_type == 'video': @@ -1693,6 +1694,36 @@ class YoutubeDL(object): subs[lang] = f return subs + def __forced_printings(self, info_dict, filename, incomplete): + def print_mandatory(field): + if (self.params.get('force%s' % field, False) + and (not incomplete or info_dict.get(field) is not None)): + self.to_stdout(info_dict[field]) + + def print_optional(field): + if (self.params.get('force%s' % field, False) + and info_dict.get(field) is not None): + self.to_stdout(info_dict[field]) + + print_mandatory('title') + print_mandatory('id') + if self.params.get('forceurl', False) 
and not incomplete:
+            if info_dict.get('requested_formats') is not None:
+                for f in info_dict['requested_formats']:
+                    self.to_stdout(f['url'] + f.get('play_path', ''))
+            else:
+                # For RTMP URLs, also include the playpath
+                self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
+        print_optional('thumbnail')
+        print_optional('description')
+        if self.params.get('forcefilename', False) and filename is not None:
+            self.to_stdout(filename)
+        if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
+            self.to_stdout(formatSeconds(info_dict['duration']))
+        print_mandatory('format')
+        if self.params.get('forcejson', False):
+            self.to_stdout(json.dumps(info_dict))
+
     def process_info(self, info_dict):
         """Process a single resolved IE result."""
 
@@ -1703,9 +1734,8 @@ class YoutubeDL(object):
             if self._num_downloads >= int(max_downloads):
                 raise MaxDownloadsReached()
 
+        # TODO: backward compatibility, to be removed
         info_dict['fulltitle'] = info_dict['title']
-        if len(info_dict['title']) > 200:
-            info_dict['title'] = info_dict['title'][:197] + '...'
 
         if 'format' not in info_dict:
             info_dict['format'] = info_dict['ext']
@@ -1720,29 +1750,7 @@ class YoutubeDL(object):
         info_dict['_filename'] = filename = self.prepare_filename(info_dict)
 
         # Forced printings
-        if self.params.get('forcetitle', False):
-            self.to_stdout(info_dict['fulltitle'])
-        if self.params.get('forceid', False):
-            self.to_stdout(info_dict['id'])
-        if self.params.get('forceurl', False):
-            if info_dict.get('requested_formats') is not None:
-                for f in info_dict['requested_formats']:
-                    self.to_stdout(f['url'] + f.get('play_path', ''))
-            else:
-                # For RTMP URLs, also include the playpath
-                self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
-        if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
-            self.to_stdout(info_dict['thumbnail'])
-        if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
-            self.to_stdout(info_dict['description'])
-        if self.params.get('forcefilename', False) and filename is not None:
-            self.to_stdout(filename)
-        if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
-            self.to_stdout(formatSeconds(info_dict['duration']))
-        if self.params.get('forceformat', False):
-            self.to_stdout(info_dict['format'])
-        if self.params.get('forcejson', False):
-            self.to_stdout(json.dumps(info_dict))
+        self.__forced_printings(info_dict, filename, incomplete=False)
 
         # Do nothing else if in simulate mode
         if self.params.get('simulate', False):
diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py
index 3746671d3..80bd696e2 100644
--- a/youtube_dl/extractor/bilibili.py
+++ b/youtube_dl/extractor/bilibili.py
@@ -15,6 +15,7 @@ from ..utils import (
     float_or_none,
     parse_iso8601,
     smuggle_url,
+    str_or_none,
     strip_jsonp,
     unified_timestamp,
     unsmuggle_url,
@@ -306,3 +307,115 @@ class BiliBiliBangumiIE(InfoExtractor):
         return self.playlist_result(
             entries, bangumi_id, season_info.get('bangumi_title'),
             season_info.get('evaluate'))
+
+
+class BilibiliAudioBaseIE(InfoExtractor):
+    def _call_api(self, path, sid, query=None):
+        if not query:
+            query = {'sid': sid}
+        return self._download_json(
+            'https://www.bilibili.com/audio/music-service-c/web/' + path,
+            sid, query=query)['data']
+
+
+class BilibiliAudioIE(BilibiliAudioBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/au(?P<id>\d+)'
+    _TEST = {
+        'url': 'https://www.bilibili.com/audio/au1003142',
+        'md5': 'fec4987014ec94ef9e666d4d158ad03b',
+        'info_dict': {
+            'id': '1003142',
+            'ext': 'm4a',
+            'title': '【tsukimi】YELLOW / 神山羊',
+            'artist': 'tsukimi',
+            'comment_count': int,
+            'description': 'YELLOW的mp3版!',
+            'duration': 183,
+            'subtitles': {
+                'origin': [{
+                    'ext': 'lrc',
+                }],
+            },
+            'thumbnail': r're:^https?://.+\.jpg',
+            'timestamp': 1564836614,
+            'upload_date': '20190803',
+            'uploader': 'tsukimi-つきみぐー',
+            'view_count': int,
+        },
+    }
+
+    def _real_extract(self, url):
+        au_id = self._match_id(url)
+
+        play_data = self._call_api('url', au_id)
+        formats = [{
+            'url': play_data['cdns'][0],
+            'filesize': int_or_none(play_data.get('size')),
+        }]
+
+        song = self._call_api('song/info', au_id)
+        title = song['title']
+        statistic = song.get('statistic') or {}
+
+        subtitles = None
+        lyric = song.get('lyric')
+        if lyric:
+            subtitles = {
+                'origin': [{
+                    'url': lyric,
+                }]
+            }
+
+        return {
+            'id': au_id,
+            'title': title,
+            'formats': formats,
+            'artist': song.get('author'),
+            'comment_count': int_or_none(statistic.get('comment')),
+            'description': song.get('intro'),
+            'duration': int_or_none(song.get('duration')),
+            'subtitles': subtitles,
+            'thumbnail': song.get('cover'),
+            'timestamp': int_or_none(song.get('passtime')),
+            'uploader': song.get('uname'),
+            'view_count': int_or_none(statistic.get('play')),
+        }
+
+
+class BilibiliAudioAlbumIE(BilibiliAudioBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/am(?P<id>\d+)'
+    _TEST = {
+        'url': 'https://www.bilibili.com/audio/am10624',
+        'info_dict': {
+            'id': '10624',
+            'title': '每日新曲推荐(每日11:00更新)',
+            'description': '每天11:00更新,为你推送最新音乐',
+        },
+        'playlist_count': 19,
+    }
+
+    def _real_extract(self, url):
+        am_id = self._match_id(url)
+
+        songs = self._call_api(
+            'song/of-menu', am_id, {'sid': am_id, 'pn': 1, 'ps': 100})['data']
+
+        entries = []
+        for song in songs:
+            sid = str_or_none(song.get('id'))
+            if not sid:
+                continue
+            entries.append(self.url_result(
+                'https://www.bilibili.com/audio/au' + sid,
+                BilibiliAudioIE.ie_key(), sid))
+
+        if entries:
+            album_data = self._call_api('menu/info', am_id) or {}
+            album_title = album_data.get('title')
+            if album_title:
+                for entry in entries:
+                    entry['album'] = album_title
+                return self.playlist_result(
+                    entries, am_id, album_title, album_data.get('intro'))
+
+        return self.playlist_result(entries, am_id)
diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 58ec5c979..8e2f7217a 100644
--- a/youtube_dl/extractor/brightcove.py
+++ b/youtube_dl/extractor/brightcove.py
@@ -2,7 +2,6 @@
 from __future__ import unicode_literals
 
 import base64
-import json
 import re
 import struct
 
@@ -11,14 +10,12 @@ from .adobepass import AdobePassIE
 from ..compat import (
     compat_etree_fromstring,
     compat_parse_qs,
-    compat_str,
     compat_urllib_parse_urlparse,
     compat_urlparse,
     compat_xml_parse_error,
     compat_HTTPError,
 )
 from ..utils import (
-    determine_ext,
     ExtractorError,
     extract_attributes,
     find_xpath_attr,
@@ -27,18 +24,19 @@ from ..utils import (
     js_to_json,
     int_or_none,
     parse_iso8601,
+    smuggle_url,
     unescapeHTML,
     unsmuggle_url,
     update_url_query,
     clean_html,
     mimetype2ext,
+    UnsupportedError,
 )
 
 
 class BrightcoveLegacyIE(InfoExtractor):
     IE_NAME = 'brightcove:legacy'
     _VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
-    _FEDERATED_URL = 'http://c.brightcove.com/services/viewer/htmlFederated'
 
     _TESTS = [
         {
@@ -55,7 +53,8 @@ class BrightcoveLegacyIE(InfoExtractor):
                 'timestamp': 1368213670,
                 'upload_date': '20130510',
                 'uploader_id': '1589608506001',
-            }
+            },
+
'skip': 'The player has been deactivated by the content owner', }, { # From http://medianetwork.oracle.com/video/player/1785452137001 @@ -70,6 +69,7 @@ class BrightcoveLegacyIE(InfoExtractor): 'upload_date': '20120814', 'uploader_id': '1460825906', }, + 'skip': 'video not playable', }, { # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/ @@ -79,7 +79,7 @@ class BrightcoveLegacyIE(InfoExtractor): 'ext': 'mp4', 'title': 'This Bracelet Acts as a Personal Thermostat', 'description': 'md5:547b78c64f4112766ccf4e151c20b6a0', - 'uploader': 'Mashable', + # 'uploader': 'Mashable', 'timestamp': 1382041798, 'upload_date': '20131017', 'uploader_id': '1130468786001', @@ -124,6 +124,7 @@ class BrightcoveLegacyIE(InfoExtractor): 'id': '3550319591001', }, 'playlist_mincount': 7, + 'skip': 'Unsupported URL', }, { # playlist with 'playlistTab' (https://github.com/ytdl-org/youtube-dl/issues/9965) @@ -133,6 +134,7 @@ class BrightcoveLegacyIE(InfoExtractor): 'title': 'Lesson 08', }, 'playlist_mincount': 10, + 'skip': 'Unsupported URL', }, { # playerID inferred from bcpid @@ -141,12 +143,6 @@ class BrightcoveLegacyIE(InfoExtractor): 'only_matching': True, # Tested in GenericIE } ] - FLV_VCODECS = { - 1: 'SORENSON', - 2: 'ON2', - 3: 'H264', - 4: 'VP8', - } @classmethod def _build_brighcove_url(cls, object_str): @@ -238,7 +234,8 @@ class BrightcoveLegacyIE(InfoExtractor): @classmethod def _make_brightcove_url(cls, params): - return update_url_query(cls._FEDERATED_URL, params) + return update_url_query( + 'http://c.brightcove.com/services/viewer/htmlFederated', params) @classmethod def _extract_brightcove_url(cls, webpage): @@ -297,38 +294,12 @@ class BrightcoveLegacyIE(InfoExtractor): videoPlayer = query.get('@videoPlayer') if videoPlayer: # We set the original url as the default 'Referer' header - referer = smuggled_data.get('Referer', url) + referer = query.get('linkBaseURL', [None])[0] or smuggled_data.get('Referer', url) + video_id = videoPlayer[0] if 'playerID' not in query: mobj = re.search(r'/bcpid(\d+)', url) if mobj is not None: query['playerID'] = [mobj.group(1)] - return self._get_video_info( - videoPlayer[0], query, referer=referer) - elif 'playerKey' in query: - player_key = query['playerKey'] - return self._get_playlist_info(player_key[0]) - else: - raise ExtractorError( - 'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?', - expected=True) - - def _brightcove_new_url_result(self, publisher_id, video_id): - brightcove_new_url = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' % (publisher_id, video_id) - return self.url_result(brightcove_new_url, BrightcoveNewIE.ie_key(), video_id) - - def _get_video_info(self, video_id, query, referer=None): - headers = {} - linkBase = query.get('linkBaseURL') - if linkBase is not None: - referer = linkBase[0] - if referer is not None: - headers['Referer'] = referer - webpage = self._download_webpage(self._FEDERATED_URL, video_id, headers=headers, query=query) - - error_msg = self._html_search_regex( - r"

<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>
)+", webpage, - 'error message', default=None) - if error_msg is not None: publisher_id = query.get('publisherId') if publisher_id and publisher_id[0].isdigit(): publisher_id = publisher_id[0] @@ -339,6 +310,9 @@ class BrightcoveLegacyIE(InfoExtractor): else: player_id = query.get('playerID') if player_id and player_id[0].isdigit(): + headers = {} + if referer: + headers['Referer'] = referer player_page = self._download_webpage( 'http://link.brightcove.com/services/player/bcpid' + player_id[0], video_id, headers=headers, fatal=False) @@ -349,136 +323,16 @@ class BrightcoveLegacyIE(InfoExtractor): if player_key: enc_pub_id = player_key.split(',')[1].replace('~', '=') publisher_id = struct.unpack('>Q', base64.urlsafe_b64decode(enc_pub_id))[0] - if publisher_id: - return self._brightcove_new_url_result(publisher_id, video_id) - raise ExtractorError( - 'brightcove said: %s' % error_msg, expected=True) - - self.report_extraction(video_id) - info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json') - info = json.loads(info)['data'] - video_info = info['programmedContent']['videoPlayer']['mediaDTO'] - video_info['_youtubedl_adServerURL'] = info.get('adServerURL') - - return self._extract_video_info(video_info) - - def _get_playlist_info(self, player_key): - info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key - playlist_info = self._download_webpage( - info_url, player_key, 'Downloading playlist information') - - json_data = json.loads(playlist_info) - if 'videoList' in json_data: - playlist_info = json_data['videoList'] - playlist_dto = playlist_info['mediaCollectionDTO'] - elif 'playlistTabs' in json_data: - playlist_info = json_data['playlistTabs'] - playlist_dto = playlist_info['lineupListDTO']['playlistDTOs'][0] - else: - raise ExtractorError('Empty playlist') - - videos = [self._extract_video_info(video_info) for video_info in playlist_dto['videoDTOs']] - - return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'], - playlist_title=playlist_dto['displayName']) - - def _extract_video_info(self, video_info): - video_id = compat_str(video_info['id']) - publisher_id = video_info.get('publisherId') - info = { - 'id': video_id, - 'title': video_info['displayName'].strip(), - 'description': video_info.get('shortDescription'), - 'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'), - 'uploader': video_info.get('publisherName'), - 'uploader_id': compat_str(publisher_id) if publisher_id else None, - 'duration': float_or_none(video_info.get('length'), 1000), - 'timestamp': int_or_none(video_info.get('creationDate'), 1000), - } - - renditions = video_info.get('renditions', []) + video_info.get('IOSRenditions', []) - if renditions: - formats = [] - for rend in renditions: - url = rend['defaultURL'] - if not url: - continue - ext = None - if rend['remote']: - url_comp = compat_urllib_parse_urlparse(url) - if url_comp.path.endswith('.m3u8'): - formats.extend( - self._extract_m3u8_formats( - url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) - continue - elif 'akamaihd.net' in url_comp.netloc: - # This type of renditions are served through - # akamaihd.net, but they don't use f4m manifests - url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB' - ext = 'flv' - if ext is None: - ext = determine_ext(url) - tbr = int_or_none(rend.get('encodingRate'), 1000) - a_format = { - 'format_id': 'http%s' % ('-%s' % tbr if tbr else ''), - 
'url': url, - 'ext': ext, - 'filesize': int_or_none(rend.get('size')) or None, - 'tbr': tbr, - } - if rend.get('audioOnly'): - a_format.update({ - 'vcodec': 'none', - }) - else: - a_format.update({ - 'height': int_or_none(rend.get('frameHeight')), - 'width': int_or_none(rend.get('frameWidth')), - 'vcodec': rend.get('videoCodec'), - }) - - # m3u8 manifests with remote == false are media playlists - # Not calling _extract_m3u8_formats here to save network traffic - if ext == 'm3u8': - a_format.update({ - 'format_id': 'hls%s' % ('-%s' % tbr if tbr else ''), - 'ext': 'mp4', - 'protocol': 'm3u8_native', - }) - - formats.append(a_format) - self._sort_formats(formats) - info['formats'] = formats - elif video_info.get('FLVFullLengthURL') is not None: - info.update({ - 'url': video_info['FLVFullLengthURL'], - 'vcodec': self.FLV_VCODECS.get(video_info.get('FLVFullCodec')), - 'filesize': int_or_none(video_info.get('FLVFullSize')), - }) - - if self._downloader.params.get('include_ads', False): - adServerURL = video_info.get('_youtubedl_adServerURL') - if adServerURL: - ad_info = { - '_type': 'url', - 'url': adServerURL, - } - if 'url' in info: - return { - '_type': 'playlist', - 'title': info['title'], - 'entries': [ad_info, info], - } - else: - return ad_info - - if not info.get('url') and not info.get('formats'): - uploader_id = info.get('uploader_id') - if uploader_id: - info.update(self._brightcove_new_url_result(uploader_id, video_id)) - else: - raise ExtractorError('Unable to extract video url for %s' % video_id) - return info + if publisher_id: + brightcove_new_url = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' % (publisher_id, video_id) + if referer: + brightcove_new_url = smuggle_url(brightcove_new_url, {'referrer': referer}) + return self.url_result(brightcove_new_url, BrightcoveNewIE.ie_key(), video_id) + # TODO: figure out if it's possible to extract playlistId from playerKey + # elif 'playerKey' in query: + # player_key = query['playerKey'] + # return self._get_playlist_info(player_key[0]) + raise UnsupportedError(url) class BrightcoveNewIE(AdobePassIE): diff --git a/youtube_dl/extractor/byutv.py b/youtube_dl/extractor/byutv.py index 562c83af9..0b11bf11f 100644 --- a/youtube_dl/extractor/byutv.py +++ b/youtube_dl/extractor/byutv.py @@ -3,7 +3,12 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..utils import parse_duration +from ..utils import ( + determine_ext, + merge_dicts, + parse_duration, + url_or_none, +) class BYUtvIE(InfoExtractor): @@ -51,7 +56,7 @@ class BYUtvIE(InfoExtractor): video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id - info = self._download_json( + video = self._download_json( 'https://api.byutv.org/api3/catalog/getvideosforcontent', display_id, query={ 'contentid': video_id, @@ -62,7 +67,7 @@ class BYUtvIE(InfoExtractor): 'x-byutv-platformkey': 'xsaaw9c7y5', }) - ep = info.get('ooyalaVOD') + ep = video.get('ooyalaVOD') if ep: return { '_type': 'url_transparent', @@ -75,18 +80,38 @@ class BYUtvIE(InfoExtractor): 'thumbnail': ep.get('imageThumbnail'), } - ep = info['dvr'] - title = ep['title'] - formats = self._extract_m3u8_formats( - ep['videoUrl'], video_id, 'mp4', entry_protocol='m3u8_native', - m3u8_id='hls') + info = {} + formats = [] + for format_id, ep in video.items(): + if not isinstance(ep, dict): + continue + video_url = url_or_none(ep.get('videoUrl')) + if not video_url: + continue + ext = determine_ext(video_url) + if ext == 'm3u8': + 
formats.extend(self._extract_m3u8_formats(
+                    video_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id='hls', fatal=False))
+            elif ext == 'mpd':
+                formats.extend(self._extract_mpd_formats(
+                    video_url, video_id, mpd_id='dash', fatal=False))
+            else:
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                })
+            merge_dicts(info, {
+                'title': ep.get('title'),
+                'description': ep.get('description'),
+                'thumbnail': ep.get('imageThumbnail'),
+                'duration': parse_duration(ep.get('length')),
+            })
         self._sort_formats(formats)
-        return {
+
+        return merge_dicts(info, {
             'id': video_id,
             'display_id': display_id,
-            'title': title,
-            'description': ep.get('description'),
-            'thumbnail': ep.get('imageThumbnail'),
-            'duration': parse_duration(ep.get('length')),
+            'title': display_id,
             'formats': formats,
-        }
+        })
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 4adcae1e5..44120cae2 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -104,6 +104,8 @@ from .bild import BildIE
 from .bilibili import (
     BiliBiliIE,
     BiliBiliBangumiIE,
+    BilibiliAudioIE,
+    BilibiliAudioAlbumIE,
 )
 from .biobiochiletv import BioBioChileTVIE
 from .bitchute import (
diff --git a/youtube_dl/extractor/heise.py b/youtube_dl/extractor/heise.py
index d8a2f9d76..cbe564a3c 100644
--- a/youtube_dl/extractor/heise.py
+++ b/youtube_dl/extractor/heise.py
@@ -105,8 +105,7 @@ class HeiseIE(InfoExtractor):
             webpage, default=None) or self._html_search_meta(
             'description', webpage)
 
-        kaltura_url = KalturaIE._extract_url(webpage)
-        if kaltura_url:
+        def _make_kaltura_result(kaltura_url):
             return {
                 '_type': 'url_transparent',
                 'url': smuggle_url(kaltura_url, {'source_url': url}),
@@ -115,6 +114,16 @@
                 'description': description,
             }
 
+        kaltura_url = KalturaIE._extract_url(webpage)
+        if kaltura_url:
+            return _make_kaltura_result(kaltura_url)
+
+        kaltura_id = self._search_regex(
+            r'entry-id=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'kaltura id',
+            default=None, group='id')
+        if kaltura_id:
+            return _make_kaltura_result('kaltura:2238431:%s' % kaltura_id)
+
         yt_urls = YoutubeIE._extract_urls(webpage)
         if yt_urls:
             return self.playlist_from_matches(
diff --git a/youtube_dl/extractor/hotstar.py b/youtube_dl/extractor/hotstar.py
index 79d5bbb2e..f9f7c5a64 100644
--- a/youtube_dl/extractor/hotstar.py
+++ b/youtube_dl/extractor/hotstar.py
@@ -3,6 +3,7 @@ from __future__ import unicode_literals
 
 import hashlib
 import hmac
+import re
 import time
 import uuid
 
@@ -126,6 +127,8 @@ class HotStarIE(HotStarBaseIE):
             format_url = url_or_none(playback_set.get('playbackUrl'))
             if not format_url:
                 continue
+            format_url = re.sub(
+                r'(?<=//staragvod)(\d)', r'web\1', format_url)
             tags = str_or_none(playback_set.get('tagsCombination')) or ''
             if tags and 'encryption:plain' not in tags:
                 continue
@@ -133,7 +136,8 @@
             try:
                 if 'package:hls' in tags or ext == 'm3u8':
                     formats.extend(self._extract_m3u8_formats(
-                        format_url, video_id, 'mp4', m3u8_id='hls'))
+                        format_url, video_id, 'mp4',
+                        entry_protocol='m3u8_native', m3u8_id='hls'))
                 elif 'package:dash' in tags or ext == 'mpd':
                     formats.extend(self._extract_mpd_formats(
                         format_url, video_id, mpd_id='dash'))
diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py
index ffd87b55f..b061850a1 100644
--- a/youtube_dl/extractor/instagram.py
+++ b/youtube_dl/extractor/instagram.py
@@ -22,7 +22,7 @@ from ..utils import (
 
 
 class InstagramIE(InfoExtractor):
-    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
+    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/(?:p|tv)/(?P<id>[^/?#&]+))'
     _TESTS = [{
         'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
         'md5': '0d2da106a9d2631273e192b372806516',
@@ -92,6 +92,9 @@ class InstagramIE(InfoExtractor):
     }, {
         'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
         'only_matching': True,
+    }, {
+        'url': 'https://www.instagram.com/tv/aye83DjauH/',
+        'only_matching': True,
     }]
 
     @staticmethod
diff --git a/youtube_dl/extractor/jwplatform.py b/youtube_dl/extractor/jwplatform.py
index 647b905f1..2aabd98b5 100644
--- a/youtube_dl/extractor/jwplatform.py
+++ b/youtube_dl/extractor/jwplatform.py
@@ -7,7 +7,7 @@ from .common import InfoExtractor
 
 
 class JWPlatformIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://(?:content\.jwplatform|cdn\.jwplayer)\.com/(?:(?:feed|player|thumb|preview|video)s|jw6|v2/media)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
+    _VALID_URL = r'(?:https?://(?:content\.jwplatform|cdn\.jwplayer)\.com/(?:(?:feed|player|thumb|preview)s|jw6|v2/media)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
     _TESTS = [{
         'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js',
         'md5': 'fa8899fa601eb7c83a64e9d568bdf325',
diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py
index bcac13ec5..bf5353ef9 100644
--- a/youtube_dl/extractor/mixcloud.py
+++ b/youtube_dl/extractor/mixcloud.py
@@ -164,7 +164,7 @@ class MixcloudIE(InfoExtractor):
         def decrypt_url(f_url):
             for k in (key, 'IFYOUWANTTHEARTISTSTOGETPAIDDONOTDOWNLOADFROMMIXCLOUD'):
                 decrypted_url = self._decrypt_xor_cipher(k, f_url)
-                if re.search(r'^https?://[0-9a-z.]+/[0-9A-Za-z/.?=&_-]+$', decrypted_url):
+                if re.search(r'^https?://[0-9A-Za-z.]+/[0-9A-Za-z/.?=&_-]+$', decrypted_url):
                     return decrypted_url
 
         for url_key in ('url', 'hlsUrl', 'dashUrl'):
diff --git a/youtube_dl/extractor/nhk.py b/youtube_dl/extractor/nhk.py
index 241412f98..6a2c6cb7b 100644
--- a/youtube_dl/extractor/nhk.py
+++ b/youtube_dl/extractor/nhk.py
@@ -10,6 +10,18 @@ class NhkVodIE(InfoExtractor):
     # Content available only for a limited period of time. Visit
     # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
_TESTS = [{ + # clip + 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/', + 'md5': '256a1be14f48d960a7e61e2532d95ec3', + 'info_dict': { + 'id': 'a95j5iza', + 'ext': 'mp4', + 'title': "Dining with the Chef - Chef Saito's Family recipe: MENCHI-KATSU", + 'description': 'md5:5aee4a9f9d81c26281862382103b0ea5', + 'timestamp': 1565965194, + 'upload_date': '20190816', + }, + }, { 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/', 'only_matching': True, }, { @@ -19,7 +31,7 @@ class NhkVodIE(InfoExtractor): 'url': 'https://www3.nhk.or.jp/nhkworld/fr/ondemand/audio/plugin-20190404-1/', 'only_matching': True, }] - _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sodesdlist/v7/episode/%s/%s/all%s.json' + _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7/episode/%s/%s/all%s.json' def _real_extract(self, url): lang, m_type, episode_id = re.match(self._VALID_URL, url).groups() @@ -28,7 +40,10 @@ class NhkVodIE(InfoExtractor): is_video = m_type == 'video' episode = self._download_json( - self._API_URL_TEMPLATE % ('v' if is_video else 'r', episode_id, lang, '/all' if is_video else ''), + self._API_URL_TEMPLATE % ( + 'v' if is_video else 'r', + 'clip' if episode_id[:4] == '9999' else 'esd', + episode_id, lang, '/all' if is_video else ''), episode_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'][0] title = episode.get('sub_title_clean') or episode['sub_title'] @@ -60,8 +75,8 @@ class NhkVodIE(InfoExtractor): if is_video: info.update({ '_type': 'url_transparent', - 'ie_key': 'Ooyala', - 'url': 'ooyala:' + episode['vod_id'], + 'ie_key': 'Piksel', + 'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + episode['vod_id'], }) else: audio = episode['audio'] diff --git a/youtube_dl/extractor/ninenow.py b/youtube_dl/extractor/ninenow.py index f32f530f7..6157dc7c1 100644 --- a/youtube_dl/extractor/ninenow.py +++ b/youtube_dl/extractor/ninenow.py @@ -45,7 +45,11 @@ class NineNowIE(InfoExtractor): webpage = self._download_webpage(url, display_id) page_data = self._parse_json(self._search_regex( r'window\.__data\s*=\s*({.*?});', webpage, - 'page data'), display_id) + 'page data', default='{}'), display_id, fatal=False) + if not page_data: + page_data = self._parse_json(self._parse_json(self._search_regex( + r'window\.__data\s*=\s*JSON\.parse\s*\(\s*(".+?")\s*\)\s*;', + webpage, 'page data'), display_id), display_id) for kind in ('episode', 'clip'): current_key = page_data.get(kind, {}).get( diff --git a/youtube_dl/extractor/nonktube.py b/youtube_dl/extractor/nonktube.py index 63e58aae2..ca1424e06 100644 --- a/youtube_dl/extractor/nonktube.py +++ b/youtube_dl/extractor/nonktube.py @@ -25,9 +25,14 @@ class NonkTubeIE(NuevoBaseIE): def _real_extract(self, url): video_id = self._match_id(url) - info = self._extract_nuevo( - 'https://www.nonktube.com/media/nuevo/econfig.php?key=%s' - % video_id, video_id) + webpage = self._download_webpage(url, video_id) - info['age_limit'] = 18 + title = self._og_search_title(webpage) + info = self._parse_html5_media_entries(url, webpage, video_id)[0] + + info.update({ + 'id': video_id, + 'title': title, + 'age_limit': 18, + }) return info diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py index 679eaf6c3..1fe581780 100644 --- a/youtube_dl/extractor/openload.py +++ b/youtube_dl/extractor/openload.py @@ -246,7 +246,7 @@ class OpenloadIE(InfoExtractor): _DOMAINS = r''' (?: openload\.(?:co|io|link|pw)| - 
oload\.(?:tv|best|biz|stream|site|xyz|win|download|cloud|cc|icu|fun|club|info|press|pw|life|live|space|services|website|vip)|
+                        oload\.(?:tv|best|biz|stream|site|xyz|win|download|cloud|cc|icu|fun|club|info|online|press|pw|life|live|space|services|website|vip)|
                         oladblock\.(?:services|xyz|me)|openloed\.co
                     )
                 '''
@@ -362,6 +362,9 @@
     }, {
         'url': 'https://oload.services/embed/bs1NWj1dCag/',
         'only_matching': True,
+    }, {
+        'url': 'https://oload.online/f/W8o2UfN1vNY/',
+        'only_matching': True,
     }, {
         'url': 'https://oload.press/embed/drTBl1aOTvk/',
         'only_matching': True,
diff --git a/youtube_dl/extractor/piksel.py b/youtube_dl/extractor/piksel.py
index 401298cb8..88b6859b0 100644
--- a/youtube_dl/extractor/piksel.py
+++ b/youtube_dl/extractor/piksel.py
@@ -15,7 +15,7 @@ from ..utils import (
 
 
 class PikselIE(InfoExtractor):
-    _VALID_URL = r'https?://player\.piksel\.com/v/(?P<id>[a-z0-9]+)'
+    _VALID_URL = r'https?://player\.piksel\.com/v/(?:refid/[^/]+/prefid/)?(?P<id>[a-z0-9_]+)'
     _TESTS = [
         {
             'url': 'http://player.piksel.com/v/ums2867l',
@@ -40,6 +40,11 @@ class PikselIE(InfoExtractor):
                 'timestamp': 1486171129,
                 'upload_date': '20170204'
             }
+        },
+        {
+            # https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2019240/
+            'url': 'http://player.piksel.com/v/refid/nhkworld/prefid/nw_vod_v_en_2019_240_20190823233000_02_1566873477',
+            'only_matching': True,
         }
     ]
 
@@ -52,8 +57,11 @@ class PikselIE(InfoExtractor):
         return mobj.group('url')
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        video_id = self._search_regex(
+            r'data-de-program-uuid=[\'"]([a-z0-9]+)',
+            webpage, 'program uuid', default=display_id)
         app_token = self._search_regex([
             r'clientAPI\s*:\s*"([^"]+)"',
             r'data-de-api-key\s*=\s*"([^"]+)"
diff --git a/youtube_dl/extractor/ted.py b/youtube_dl/extractor/ted.py
index db5a4f44e..63e2455b2 100644
--- a/youtube_dl/extractor/ted.py
+++ b/youtube_dl/extractor/ted.py
@@ -182,20 +182,29 @@ class TEDIE(InfoExtractor):
 
         title = talk_info['title'].strip()
 
-        native_downloads = try_get(
-            talk_info,
-            (lambda x: x['downloads']['nativeDownloads'],
-             lambda x: x['nativeDownloads']),
-            dict) or {}
+        downloads = talk_info.get('downloads') or {}
+        native_downloads = downloads.get('nativeDownloads') or talk_info.get('nativeDownloads') or {}
 
         formats = [{
             'url': format_url,
             'format_id': format_id,
-            'format': format_id,
         } for (format_id, format_url) in native_downloads.items() if format_url is not None]
+
+        subtitled_downloads = downloads.get('subtitledDownloads') or {}
+        for lang, subtitled_download in subtitled_downloads.items():
+            for q in self._NATIVE_FORMATS:
+                q_url = subtitled_download.get(q)
+                if not q_url:
+                    continue
+                formats.append({
+                    'url': q_url,
+                    'format_id': '%s-%s' % (q, lang),
+                    'language': lang,
+                })
+
         if formats:
             for f in formats:
-                finfo = self._NATIVE_FORMATS.get(f['format_id'])
+                finfo = self._NATIVE_FORMATS.get(f['format_id'].split('-')[0])
                 if finfo:
                     f.update(finfo)
@@ -215,34 +224,7 @@
         http_url = None
         for format_id, resources in resources_.items():
-            if format_id == 'h264':
-                for resource in resources:
-                    h264_url = resource.get('file')
-                    if not h264_url:
-                        continue
-                    bitrate = int_or_none(resource.get('bitrate'))
-                    formats.append({
-                        'url': h264_url,
-                        'format_id': '%s-%sk' % (format_id, bitrate),
-                        'tbr': bitrate,
-                    })
-                    if re.search(r'\d+k', h264_url):
-                        http_url = h264_url
-            elif format_id ==
'rtmp': - streamer = talk_info.get('streamer') - if not streamer: - continue - for resource in resources: - formats.append({ - 'format_id': '%s-%s' % (format_id, resource.get('name')), - 'url': streamer, - 'play_path': resource['file'], - 'ext': 'flv', - 'width': int_or_none(resource.get('width')), - 'height': int_or_none(resource.get('height')), - 'tbr': int_or_none(resource.get('bitrate')), - }) - elif format_id == 'hls': + if format_id == 'hls': if not isinstance(resources, dict): continue stream_url = url_or_none(resources.get('stream')) @@ -251,6 +233,36 @@ class TEDIE(InfoExtractor): formats.extend(self._extract_m3u8_formats( stream_url, video_name, 'mp4', m3u8_id=format_id, fatal=False)) + else: + if not isinstance(resources, list): + continue + if format_id == 'h264': + for resource in resources: + h264_url = resource.get('file') + if not h264_url: + continue + bitrate = int_or_none(resource.get('bitrate')) + formats.append({ + 'url': h264_url, + 'format_id': '%s-%sk' % (format_id, bitrate), + 'tbr': bitrate, + }) + if re.search(r'\d+k', h264_url): + http_url = h264_url + elif format_id == 'rtmp': + streamer = talk_info.get('streamer') + if not streamer: + continue + for resource in resources: + formats.append({ + 'format_id': '%s-%s' % (format_id, resource.get('name')), + 'url': streamer, + 'play_path': resource['file'], + 'ext': 'flv', + 'width': int_or_none(resource.get('width')), + 'height': int_or_none(resource.get('height')), + 'tbr': int_or_none(resource.get('bitrate')), + }) m3u8_formats = list(filter( lambda f: f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none', diff --git a/youtube_dl/extractor/tv4.py b/youtube_dl/extractor/tv4.py index 51923e44a..a819d048c 100644 --- a/youtube_dl/extractor/tv4.py +++ b/youtube_dl/extractor/tv4.py @@ -72,8 +72,13 @@ class TV4IE(InfoExtractor): video_id = self._match_id(url) info = self._download_json( - 'http://www.tv4play.se/player/assets/%s.json' % video_id, - video_id, 'Downloading video info JSON') + 'https://playback-api.b17g.net/asset/%s' % video_id, + video_id, 'Downloading video info JSON', query={ + 'service': 'tv4', + 'device': 'browser', + 'protocol': 'hls,dash', + 'drm': 'widevine', + })['metadata'] title = info['title'] @@ -111,5 +116,9 @@ class TV4IE(InfoExtractor): 'timestamp': parse_iso8601(info.get('broadcast_date_time')), 'duration': int_or_none(info.get('duration')), 'thumbnail': info.get('image'), - 'is_live': info.get('is_live') is True, + 'is_live': info.get('isLive') is True, + 'series': info.get('seriesTitle'), + 'season_number': int_or_none(info.get('seasonNumber')), + 'episode': info.get('episodeTitle'), + 'episode_number': int_or_none(info.get('episodeNumber')), } diff --git a/youtube_dl/extractor/viewlift.py b/youtube_dl/extractor/viewlift.py index c43d1a1e8..391419d9e 100644 --- a/youtube_dl/extractor/viewlift.py +++ b/youtube_dl/extractor/viewlift.py @@ -13,11 +13,12 @@ from ..utils import ( js_to_json, parse_age_limit, parse_duration, + try_get, ) class ViewLiftBaseIE(InfoExtractor): - _DOMAINS_REGEX = r'(?:snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|(?:monumental|lax)sportsnetwork|vayafilm)\.com|hoichoi\.tv' + _DOMAINS_REGEX = r'(?:(?:main\.)?snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|(?:monumental|lax)sportsnetwork|vayafilm)\.com|hoichoi\.tv' class ViewLiftEmbedIE(ViewLiftBaseIE): @@ -113,7 +114,7 @@ class ViewLiftEmbedIE(ViewLiftBaseIE): class ViewLiftIE(ViewLiftBaseIE): - _VALID_URL = r'https?://(?:www\.)?(?P%s)/(?:films/title|show|(?:news/)?videos?)/(?P[^?#]+)' % 
ViewLiftBaseIE._DOMAINS_REGEX
+    _VALID_URL = r'https?://(?:www\.)?(?P<domain>%s)(?:/(?:films/title|show|(?:news/)?videos?))?/(?P<id>[^?#]+)' % ViewLiftBaseIE._DOMAINS_REGEX
     _TESTS = [{
         'url': 'http://www.snagfilms.com/films/title/lost_for_life',
         'md5': '19844f897b35af219773fd63bdec2942',
@@ -128,7 +129,7 @@ class ViewLiftIE(ViewLiftBaseIE):
             'categories': 'mincount:3',
             'age_limit': 14,
             'upload_date': '20150421',
-            'timestamp': 1429656819,
+            'timestamp': 1429656820,
         }
     }, {
         'url': 'http://www.snagfilms.com/show/the_world_cut_project/india',
@@ -141,10 +142,26 @@ class ViewLiftIE(ViewLiftBaseIE):
             'description': 'md5:5c168c5a8f4719c146aad2e0dfac6f5f',
             'thumbnail': r're:^https?://.*\.jpg',
             'duration': 979,
-            'categories': 'mincount:2',
             'timestamp': 1399478279,
             'upload_date': '20140507',
         }
+    }, {
+        'url': 'http://main.snagfilms.com/augie_alone/s_2_ep_12_love',
+        'info_dict': {
+            'id': '00000148-7b53-de26-a9fb-fbf306f70020',
+            'display_id': 'augie_alone/s_2_ep_12_love',
+            'ext': 'mp4',
+            'title': 'Augie, Alone:S. 2 Ep. 12 - Love',
+            'description': 'md5:db2a5c72d994f16a780c1eb353a8f403',
+            'thumbnail': r're:^https?://.*\.jpg',
+            'duration': 107,
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'http://main.snagfilms.com/films/title/the_freebie',
+        'only_matching': True,
     }, {
         # Film is not playable in your area.
        'url': 'http://www.snagfilms.com/films/title/inside_mecca',
@@ -181,7 +198,21 @@ class ViewLiftIE(ViewLiftBaseIE):
             gist = content_data['gist']
             film_id = gist['id']
             title = gist['title']
-            video_assets = content_data['streamingInfo']['videoAssets']
+            video_assets = try_get(
+                content_data, lambda x: x['streamingInfo']['videoAssets'], dict)
+            if not video_assets:
+                token = self._download_json(
+                    'https://prod-api.viewlift.com/identity/anonymous-token',
+                    film_id, 'Downloading authorization token',
+                    query={'site': 'snagfilms'})['authorizationToken']
+                video_assets = self._download_json(
+                    'https://prod-api.viewlift.com/entitlement/video/status',
+                    film_id, headers={
+                        'Authorization': token,
+                        'Referer': url,
+                    }, query={
+                        'id': film_id
+                    })['video']['streamingInfo']['videoAssets']
 
             formats = []
             mpeg_video_assets = video_assets.get('mpeg') or []
@@ -241,8 +272,9 @@ class ViewLiftIE(ViewLiftBaseIE):
                            if category.get('title')]
                 break
         else:
-            title = self._search_regex(
-                r'itemprop="title">([^<]+)<', webpage, 'title')
+            title = self._html_search_regex(
+                (r'itemprop="title">([^<]+)<',
+                 r'(?s)itemprop="title">(.+?)(.+?)', webpage, 'description', default=None) or self._og_search_description(webpage)
diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py
index f57ed2288..8b6dc0e24 100644
--- a/youtube_dl/extractor/vk.py
+++ b/youtube_dl/extractor/vk.py
@@ -403,8 +403,17 @@ class VKIE(VKBaseIE):
         data = self._parse_json(
             self._search_regex(
                 r'var\s+playerParams\s*=\s*({.+?})\s*;\s*\n', info_page,
-                'player params'),
-            video_id)['params'][0]
+                'player params', default='{}'),
+            video_id)
+        if data:
+            data = data['params'][0]
+
+        #