From e09965d550d8d76ea0c616cbb58800ee2249f15c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 23 Apr 2019 00:39:16 +0700
Subject: [PATCH 1/3] [soundcloud] Add support for new rendition and improve
 extraction (closes #20699)

---
 youtube_dl/extractor/soundcloud.py | 204 +++++++++++++++++++++--------
 1 file changed, 149 insertions(+), 55 deletions(-)

diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index 15da3496e..277c3c7b4 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -15,7 +15,12 @@ from ..compat import (
 )
 from ..utils import (
     ExtractorError,
+    float_or_none,
     int_or_none,
+    KNOWN_EXTENSIONS,
+    merge_dicts,
+    mimetype2ext,
+    str_or_none,
     try_get,
     unified_timestamp,
     update_url_query,
@@ -57,7 +62,7 @@ class SoundcloudIE(InfoExtractor):
             'uploader': 'E.T. ExTerrestrial Music',
             'timestamp': 1349920598,
             'upload_date': '20121011',
-            'duration': 143,
+            'duration': 143.216,
             'license': 'all-rights-reserved',
             'view_count': int,
             'like_count': int,
@@ -100,7 +105,7 @@ class SoundcloudIE(InfoExtractor):
             'uploader': 'jaimeMF',
             'timestamp': 1386604920,
             'upload_date': '20131209',
-            'duration': 9,
+            'duration': 9.927,
             'license': 'all-rights-reserved',
             'view_count': int,
             'like_count': int,
@@ -120,7 +125,7 @@ class SoundcloudIE(InfoExtractor):
             'uploader': 'jaimeMF',
             'timestamp': 1386604920,
             'upload_date': '20131209',
-            'duration': 9,
+            'duration': 9.927,
             'license': 'all-rights-reserved',
             'view_count': int,
             'like_count': int,
@@ -140,7 +145,7 @@ class SoundcloudIE(InfoExtractor):
             'uploader': 'oddsamples',
             'timestamp': 1389232924,
             'upload_date': '20140109',
-            'duration': 17,
+            'duration': 17.346,
             'license': 'cc-by-sa',
             'view_count': int,
             'like_count': int,
@@ -160,7 +165,7 @@ class SoundcloudIE(InfoExtractor):
             'uploader': 'Ori Uplift Music',
             'timestamp': 1504206263,
             'upload_date': '20170831',
-            'duration': 7449,
+            'duration': 7449.096,
             'license': 'all-rights-reserved',
             'view_count': int,
             'like_count': int,
@@ -180,7 +185,7 @@ class SoundcloudIE(InfoExtractor):
             'uploader': 'garyvee',
             'timestamp': 1488152409,
             'upload_date': '20170226',
-            'duration': 207,
+            'duration': 207.012,
             'thumbnail': r're:https?://.*\.jpg',
             'license': 'all-rights-reserved',
             'view_count': int,
@@ -192,9 +197,31 @@ class SoundcloudIE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        # not available via api.soundcloud.com/i1/tracks/id/streams
+        {
+            'url': 'https://soundcloud.com/giovannisarani/mezzo-valzer',
+            'md5': 'e22aecd2bc88e0e4e432d7dcc0a1abf7',
+            'info_dict': {
+                'id': '583011102',
+                'ext': 'mp3',
+                'title': 'Mezzo Valzer',
+                'description': 'md5:4138d582f81866a530317bae316e8b61',
+                'uploader': 'Giovanni Sarani',
+                'timestamp': 1551394171,
+                'upload_date': '20190228',
+                'duration': 180.157,
+                'thumbnail': r're:https?://.*\.jpg',
+                'license': 'all-rights-reserved',
+                'view_count': int,
+                'like_count': int,
+                'comment_count': int,
+                'repost_count': int,
+            },
+            'expected_warnings': ['Unable to download JSON metadata'],
+        }
     ]

-    _CLIENT_ID = 'NmW1FlPaiL94ueEu7oziOWjYEzZzQDcK'
+    _CLIENT_ID = 'FweeGBOOEOYJWLJN3oEyToGLKhmSz0I7'

     @staticmethod
     def _extract_urls(webpage):
@@ -202,10 +229,6 @@ class SoundcloudIE(InfoExtractor):
             r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1',
             webpage)]

-    def report_resolve(self, video_id):
-        """Report information extraction."""
-        self.to_screen('%s: Resolving id' % video_id)
-
     @classmethod
     def _resolv_url(cls, url):
         return 'https://api.soundcloud.com/resolve.json?url=' + url + '&client_id=' + cls._CLIENT_ID
@@ -224,6 +247,10 @@ class SoundcloudIE(InfoExtractor):
         def extract_count(key):
             return int_or_none(info.get('%s_count' % key))

+        like_count = extract_count('favoritings')
+        if like_count is None:
+            like_count = extract_count('likes')
+
         result = {
             'id': track_id,
             'uploader': username,
@@ -231,15 +258,17 @@ class SoundcloudIE(InfoExtractor):
             'title': title,
             'description': info.get('description'),
             'thumbnail': thumbnail,
-            'duration': int_or_none(info.get('duration'), 1000),
+            'duration': float_or_none(info.get('duration'), 1000),
             'webpage_url': info.get('permalink_url'),
             'license': info.get('license'),
             'view_count': extract_count('playback'),
-            'like_count': extract_count('favoritings'),
+            'like_count': like_count,
             'comment_count': extract_count('comment'),
             'repost_count': extract_count('reposts'),
             'genre': info.get('genre'),
         }
+
+        format_urls = set()
         formats = []
         query = {'client_id': self._CLIENT_ID}
         if secret_token is not None:
@@ -248,6 +277,7 @@ class SoundcloudIE(InfoExtractor):
             # We can build a direct link to the song
             format_url = update_url_query(
                 'https://api.soundcloud.com/tracks/%s/download' % track_id, query)
+            format_urls.add(format_url)
             formats.append({
                 'format_id': 'download',
                 'ext': info.get('original_format', 'mp3'),
@@ -256,44 +286,91 @@ class SoundcloudIE(InfoExtractor):
                 'preference': 10,
             })

-        # We have to retrieve the url
+        # Old API, does not work for some tracks (e.g.
+        # https://soundcloud.com/giovannisarani/mezzo-valzer)
         format_dict = self._download_json(
             'https://api.soundcloud.com/i1/tracks/%s/streams' % track_id,
-            track_id, 'Downloading track url', query=query)
+            track_id, 'Downloading track url', query=query, fatal=False)

-        for key, stream_url in format_dict.items():
-            ext, abr = 'mp3', None
-            mobj = re.search(r'_([^_]+)_(\d+)_url', key)
-            if mobj:
-                ext, abr = mobj.groups()
-                abr = int(abr)
-            if key.startswith('http'):
-                stream_formats = [{
-                    'format_id': key,
-                    'ext': ext,
-                    'url': stream_url,
-                }]
-            elif key.startswith('rtmp'):
-                # The url doesn't have an rtmp app, we have to extract the playpath
-                url, path = stream_url.split('mp3:', 1)
-                stream_formats = [{
-                    'format_id': key,
-                    'url': url,
-                    'play_path': 'mp3:' + path,
-                    'ext': 'flv',
-                }]
-            elif key.startswith('hls'):
-                stream_formats = self._extract_m3u8_formats(
-                    stream_url, track_id, ext, entry_protocol='m3u8_native',
-                    m3u8_id=key, fatal=False)
-            else:
+        if format_dict:
+            for key, stream_url in format_dict.items():
+                if stream_url in format_urls:
+                    continue
+                format_urls.add(stream_url)
+                ext, abr = 'mp3', None
+                mobj = re.search(r'_([^_]+)_(\d+)_url', key)
+                if mobj:
+                    ext, abr = mobj.groups()
+                    abr = int(abr)
+                if key.startswith('http'):
+                    stream_formats = [{
+                        'format_id': key,
+                        'ext': ext,
+                        'url': stream_url,
+                    }]
+                elif key.startswith('rtmp'):
+                    # The url doesn't have an rtmp app, we have to extract the playpath
+                    url, path = stream_url.split('mp3:', 1)
+                    stream_formats = [{
+                        'format_id': key,
+                        'url': url,
+                        'play_path': 'mp3:' + path,
+                        'ext': 'flv',
+                    }]
+                elif key.startswith('hls'):
+                    stream_formats = self._extract_m3u8_formats(
+                        stream_url, track_id, ext, entry_protocol='m3u8_native',
+                        m3u8_id=key, fatal=False)
+                else:
+                    continue
+
+                if abr:
+                    for f in stream_formats:
+                        f['abr'] = abr
+
+                formats.extend(stream_formats)
+
+        # New API
+        transcodings = try_get(
+            info, lambda x: x['media']['transcodings'], list) or []
+        for t in transcodings:
+            if not isinstance(t, dict):
                 continue
-
-            if abr:
-                for f in stream_formats:
-                    f['abr'] = abr
-
-            formats.extend(stream_formats)
+            format_url = url_or_none(t.get('url'))
+            if not format_url:
+                continue
+            stream = self._download_json(
+                update_url_query(format_url, query), track_id, fatal=False)
+            if not isinstance(stream, dict):
+                continue
+            stream_url = url_or_none(stream.get('url'))
+            if not stream_url:
+                continue
+            if stream_url in format_urls:
+                continue
+            format_urls.add(stream_url)
+            protocol = try_get(t, lambda x: x['format']['protocol'], compat_str)
+            if protocol != 'hls' and '/hls' in format_url:
+                protocol = 'hls'
+            ext = None
+            preset = str_or_none(t.get('preset'))
+            if preset:
+                ext = preset.split('_')[0]
+                if ext not in KNOWN_EXTENSIONS:
+                    mimetype = try_get(
+                        t, lambda x: x['format']['mime_type'], compat_str)
+                    ext = mimetype2ext(mimetype) or 'mp3'
+            format_id_list = []
+            if protocol:
+                format_id_list.append(protocol)
+            format_id_list.append(ext)
+            format_id = '_'.join(format_id_list)
+            formats.append({
+                'url': stream_url,
+                'format_id': format_id,
+                'ext': ext,
+                'protocol': 'm3u8_native' if protocol == 'hls' else 'http',
+            })

         if not formats:
             # We fallback to the stream_url in the original info, this
@@ -303,11 +380,11 @@ class SoundcloudIE(InfoExtractor):
                 'url': update_url_query(info['stream_url'], query),
                 'ext': 'mp3',
             })
+            self._check_formats(formats, track_id)

         for f in formats:
             f['vcodec'] = 'none'

-        self._check_formats(formats, track_id)
         self._sort_formats(formats)

         result['formats'] = formats
@@ -319,6 +396,7 @@ class SoundcloudIE(InfoExtractor):
             raise ExtractorError('Invalid URL: %s' % url)

         track_id = mobj.group('track_id')
+        new_info = {}

         if track_id is not None:
             info_json_url = 'https://api.soundcloud.com/tracks/' + track_id + '.json?client_id=' + self._CLIENT_ID
@@ -344,13 +422,31 @@ class SoundcloudIE(InfoExtractor):
             if token:
                 resolve_title += '/%s' % token

-            self.report_resolve(full_title)
+            webpage = self._download_webpage(url, full_title, fatal=False)
+            if webpage:
+                entries = self._parse_json(
+                    self._search_regex(
+                        r'var\s+c\s*=\s*(\[.+?\])\s*,\s*o\s*=Date\b', webpage,
+                        'data', default='[]'), full_title, fatal=False)
+                if entries:
+                    for e in entries:
+                        if not isinstance(e, dict):
+                            continue
+                        if e.get('id') != 67:
+                            continue
+                        data = try_get(e, lambda x: x['data'][0], dict)
+                        if data:
+                            new_info = data
+                            break
+            info_json_url = self._resolv_url(
+                'https://soundcloud.com/%s' % resolve_title)

-            url = 'https://soundcloud.com/%s' % resolve_title
-            info_json_url = self._resolv_url(url)
-        info = self._download_json(info_json_url, full_title, 'Downloading info JSON')
+        # Contains some additional info missing from new_info
+        info = self._download_json(
+            info_json_url, full_title, 'Downloading info JSON')

-        return self._extract_info_dict(info, full_title, secret_token=token)
+        return self._extract_info_dict(
+            merge_dicts(info, new_info), full_title, secret_token=token)


 class SoundcloudPlaylistBaseIE(SoundcloudIE):
@@ -396,8 +492,6 @@ class SoundcloudSetIE(SoundcloudPlaylistBaseIE):
             full_title += '/' + token
             url += '/' + token

-        self.report_resolve(full_title)
-
         resolv_url = self._resolv_url(url)
         info = self._download_json(resolv_url, full_title)


From 15be3eb5e526c232a36f5f73fd5e586572b06fc5 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 22 Apr 2019 20:31:09 +0100
Subject: [PATCH 2/3] [youtube] raise ExtractorError when no data available(#20737)

---
 youtube_dl/extractor/youtube.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 132572c88..67eceb5c4 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1671,6 +1671,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
                 video_webpage, 'unavailable message', default=None)

+        if not video_info:
+            unavailable_message = extract_unavailable_message()
+            if not unavailable_message:
+                unavailable_message = 'Unable to extract video data'
+            raise ExtractorError(
+                'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
+
         if 'token' not in video_info:
             if 'reason' in video_info:
                 if 'The uploader has not made this video available in your country.' in video_info['reason']:

From 1fa8893734154cd2144a966c89a9f3801103c6f0 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 22 Apr 2019 23:50:37 +0100
Subject: [PATCH 3/3] [vrv] add support for movie listings(closes #19229)

---
 youtube_dl/extractor/vrv.py | 38 +++++++++++++++++++++++++++++++------
 1 file changed, 32 insertions(+), 6 deletions(-)

diff --git a/youtube_dl/extractor/vrv.py b/youtube_dl/extractor/vrv.py
index c11da97de..33530fe8a 100644
--- a/youtube_dl/extractor/vrv.py
+++ b/youtube_dl/extractor/vrv.py
@@ -102,6 +102,15 @@ class VRVIE(VRVBaseIE):
             # m3u8 download
             'skip_download': True,
         },
+    }, {
+        # movie listing
+        'url': 'https://vrv.co/watch/G6NQXZ1J6/Lily-CAT',
+        'info_dict': {
+            'id': 'G6NQXZ1J6',
+            'title': 'Lily C.A.T',
+            'description': 'md5:988b031e7809a6aeb60968be4af7db07',
+        },
+        'playlist_count': 2,
     }]
     _NETRC_MACHINE = 'vrv'

@@ -123,23 +132,23 @@ class VRVIE(VRVBaseIE):
     def _extract_vrv_formats(self, url, video_id, stream_format, audio_lang, hardsub_lang):
         if not url or stream_format not in ('hls', 'dash'):
             return []
-        assert audio_lang or hardsub_lang
         stream_id_list = []
         if audio_lang:
             stream_id_list.append('audio-%s' % audio_lang)
         if hardsub_lang:
             stream_id_list.append('hardsub-%s' % hardsub_lang)
-        stream_id = '-'.join(stream_id_list)
-        format_id = '%s-%s' % (stream_format, stream_id)
+        format_id = stream_format
+        if stream_id_list:
+            format_id += '-' + '-'.join(stream_id_list)
         if stream_format == 'hls':
             adaptive_formats = self._extract_m3u8_formats(
                 url, video_id, 'mp4', m3u8_id=format_id,
-                note='Downloading %s m3u8 information' % stream_id,
+                note='Downloading %s information' % format_id,
                 fatal=False)
         elif stream_format == 'dash':
             adaptive_formats = self._extract_mpd_formats(
                 url, video_id, mpd_id=format_id,
-                note='Downloading %s MPD information' % stream_id,
+                note='Downloading %s information' % format_id,
                 fatal=False)
         if audio_lang:
             for f in adaptive_formats:
@@ -155,6 +164,23 @@ class VRVIE(VRVBaseIE):
         resource_path = object_data['__links__']['resource']['href']
         video_data = self._call_cms(resource_path, video_id, 'video')
         title = video_data['title']
+        description = video_data.get('description')
+
+        if video_data.get('__class__') == 'movie_listing':
+            items = self._call_cms(
+                video_data['__links__']['movie_listing/movies']['href'],
+                video_id, 'movie listing').get('items') or []
+            if len(items) != 1:
+                entries = []
+                for item in items:
+                    item_id = item.get('id')
+                    if not item_id:
+                        continue
+                    entries.append(self.url_result(
+                        'https://vrv.co/watch/' + item_id,
+                        self.ie_key(), item_id, item.get('title')))
+                return self.playlist_result(entries, video_id, title, description)
+            video_data = items[0]

         streams_path = video_data['__links__'].get('streams', {}).get('href')
         if not streams_path:
@@ -198,7 +224,7 @@ class VRVIE(VRVBaseIE):
             'formats': formats,
             'subtitles': subtitles,
             'thumbnails': thumbnails,
-            'description': video_data.get('description'),
+            'description': description,
             'duration': float_or_none(video_data.get('duration_ms'), 1000),
             'uploader_id': video_data.get('channel_id'),
             'series': video_data.get('series_title'),
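
The sketch below is not part of the patches above; it is a minimal standalone Python 3 illustration of the "new API" flow that PATCH 1/3 adds to SoundcloudIE: every entry in a track's media.transcodings carries its own URL, which has to be fetched with a client_id to obtain the final stream URL. CLIENT_ID is a placeholder, the helper names are made up for the example, and the api.soundcloud.com endpoints are assumed to still behave as they did when the patch was written.

    import json
    import urllib.parse
    import urllib.request

    CLIENT_ID = 'YOUR_CLIENT_ID'  # placeholder, not a real client id


    def _get_json(url):
        # youtube-dl uses _download_json(); plain urllib is enough for a sketch.
        with urllib.request.urlopen(url) as resp:
            return json.loads(resp.read().decode('utf-8'))


    def transcoding_streams(track_url, client_id=CLIENT_ID):
        # Resolve the track page to its JSON metadata via the same
        # /resolve.json endpoint the extractor uses.
        info = _get_json(
            'https://api.soundcloud.com/resolve.json?url=%s&client_id=%s'
            % (urllib.parse.quote(track_url, safe=''), client_id))
        # Each transcoding's own URL is queried (again with the client_id)
        # to get the final stream URL, mirroring the new-API branch added
        # to _extract_info_dict() in PATCH 1/3.
        for t in info.get('media', {}).get('transcodings') or []:
            t_url = t.get('url')
            if not t_url:
                continue
            sep = '&' if '?' in t_url else '?'
            stream = _get_json('%s%sclient_id=%s' % (t_url, sep, client_id))
            protocol = (t.get('format') or {}).get('protocol')
            yield protocol, t.get('preset'), stream.get('url')


    if __name__ == '__main__':
        for protocol, preset, stream_url in transcoding_streams(
                'https://soundcloud.com/giovannisarani/mezzo-valzer'):
            print(protocol, preset, stream_url)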