[BiliBili] fix all tests
parent c35bdd1b8e
commit 4f8d365d88
@@ -6,21 +6,11 @@ import re
 from datetime import datetime

 from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
     compat_urlparse,
 )
 from ..utils import (
     ExtractorError,
     int_or_none,
     float_or_none,
     parse_iso8601,
     smuggle_url,
     str_or_none,
     strip_jsonp,
     unified_timestamp,
     unsmuggle_url,
     urlencode_postdata,
 )

@@ -133,18 +123,18 @@ class BiliBiliIE(InfoExtractor):
             raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True)
         else:
             raise ExtractorError('Can\'t extract Bangumi episode ID')

     def _aid_to_bid(self, aid):
         '''
         convert bilibili avid to bvid
         '''

         api_url = 'http://api.bilibili.com/x/web-interface/view?aid=%s' % (aid, )
         js = self._download_json(api_url, aid, 'convert avid to bv id', 'convert failed')
         return js['data']['bvid']

-    def _real_extract(self, url):
-        url, smuggled_data = unsmuggle_url(url, {})
-
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
         # save the original video id
         original_video_id = video_id
         webpage = self._download_webpage(url, video_id)
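Note: the _aid_to_bid() helper above resolves BiliBili's legacy numeric av-id to the newer bv-id via the public web-interface API. Below is a minimal standalone sketch of the same lookup; only the endpoint and the data.bvid response field come from the patch, while the stdlib client and the sample aid are illustrative assumptions.

import json
from urllib.request import urlopen

def aid_to_bvid(aid):
    # Same web-interface endpoint the extractor queries.
    url = 'http://api.bilibili.com/x/web-interface/view?aid=%s' % aid
    with urlopen(url) as resp:
        # Response is JSON; the bv-id sits under data.bvid.
        return json.load(resp)['data']['bvid']

print(aid_to_bvid(170001))  # hypothetical av-id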
@@ -156,32 +146,20 @@ class BiliBiliIE(InfoExtractor):
         uploader_name = ''
         view_count = 0
         part_list = []
         upload_date = ''
         # normal video
         if re.match(r'^\d+$', video_id):
             video_id = self._aid_to_bid(video_id)
             self.to_screen("%s: convert to bvid %s" % (original_video_id, video_id))
         list_api_url = 'https://api.bilibili.com/x/web-interface/view/detail?bvid=%s' % (video_id, )
-        js = self._download_json(list_api_url, original_video_id, 'downloading video list', 'downloading video list failed', fatal=False)
-        if not js:
-            # try old method
-            cid = self._search_regex(
-                r'\bcid(?:["\']:|=)(\d+)', webpage, 'cid',
-                default=None
-            ) or compat_parse_qs(self._search_regex(
-                [r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
-                 r'EmbedPlayer\([^)]+,\s*\\"([^"]+)\\"\)',
-                 r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
-                webpage, 'player parameters'))['cid'][0]
-            part_list = [{'cid': cid, 'title': title}]
-        else:
-            # new method, get value from json
-            video_list = js['data']['View']['pages']
-            title = js.get('data').get('View').get('title')
-            thumbnail = js['data']['View']['pic']
-            uploader_id = js.get('data').get('Card').get('card').get('mid')
-            description = js.get('data').get('View').get('desc')
-            uploader_name = js.get('data').get('Card').get('card').get('name')
-            view_count = js.get('data').get('View').get('stat').get('view')
+        js = self._download_json(list_api_url, original_video_id, 'downloading video list', 'downloading video list failed', fatal=False)['data']
+        video_list = js['View']['pages']
+        title = js['View']['title']
+        thumbnail = js.get('View', {}).get('pic')
+        description = js.get('View', {}).get('desc')
+        view_count = js.get('View', {}).get('stat', {}).get('view')
+        uploader_id = js.get('Card', {}).get('card', {}).get('mid')
+        uploader_name = js.get('Card', {}).get('card', {}).get('name')
         self.to_screen("%s: video count: %d" % (original_video_id, len(video_list)))
         part_list = [{'cid': x['cid'], 'title': x['part']} for x in video_list]
         headers = {
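Note: the rewritten lookups above pass {} as the .get() default at every level. A bare chain such as js.get('data').get('View') returns None at the first missing key, so the next .get() raises AttributeError; defaulting to {} lets the chain fall through to a missing-value result instead, while required fields (pages, title) keep direct indexing so a malformed response still fails loudly. A toy illustration of the pattern (the safe_get helper and sample data are hypothetical, not part of the patch):

def safe_get(obj, *keys):
    # Generic form of js.get('View', {}).get('stat', {}).get('view').
    for key in keys:
        obj = obj.get(key, {}) if isinstance(obj, dict) else {}
    return obj or None  # falsy leaf values collapse to None in this toy

js = {'View': {'stat': {'view': 1234}}}
assert safe_get(js, 'View', 'stat', 'view') == 1234
assert safe_get(js, 'Card', 'card', 'mid') is None  # no AttributeError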
@@ -241,46 +219,33 @@ class BiliBiliIE(InfoExtractor):
                     'title': part_title
                 })
                 break

-        # timestamp = unified_timestamp(self._html_search_regex(
-        #     r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time',
-        #     default=None) or self._html_search_meta(
-        #     'uploadDate', webpage, 'timestamp', default=None))
-        # upload_date =

         if not title:
             title = self._html_search_regex(
                 ('<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
                  '(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
-                group='title')
+                group='title', fatal=False)
         if not timestamp:
             timestamp = self._html_search_regex(
-                (r'"pubdate":(?P<timestamp>\d+),'), webpage, 'title',
-                group='timestamp')
+                (r'"pubdate":(?P<timestamp>\d+),'), webpage, 'timestamp',
+                group='timestamp', fatal=False)
         if not uploader_id or not uploader_name:
             uploader_mobj = re.search(
                 r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>(?P<name>[^<]+)',
                 webpage)
             if uploader_mobj:
                 uploader_id = uploader_mobj.group('id')
                 uploader_name = uploader_mobj.group('name')
         if not uploader_id or not uploader_name:
-            # try again
-            uploader_name = self._html_search_meta(
-                'author', webpage, 'uploader', default=None)
+            uploader_id = self._html_search_regex(
+                r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"',
+                webpage, 'id',
+                group='id', fatal=False)
+            uploader_name = self._html_search_regex(
+                r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/\d+"[^>]*>(?P<name>[^<]+)',
+                webpage, 'name',
+                group='name', fatal=False)
         if not thumbnail:
-            thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
+            thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage, fatal=False)
         if not description:
-            description = self._html_search_meta('description', webpage)
-        timestamp = int(timestamp)
+            description = self._html_search_meta('description', webpage, fatal=False)
+        if timestamp:
+            timestamp = int_or_none(timestamp)
+            upload_date = datetime.fromtimestamp(timestamp).strftime('%Y%m%d')
-        view_count = int(view_count)
+        if view_count:
+            view_count = int_or_none(view_count)

         if len(entries) == 1:
             entry = entries[0]
             entry['uploader'] = uploader_name
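Note: the added guard converts the scraped "pubdate" value (epoch seconds) into youtube-dl's YYYYMMDD upload_date only when a timestamp was actually found, where the old code would crash on int(None). A minimal sketch of that conversion, mirroring the datetime call in the hunk above; the sample value is made up, and fromtimestamp() uses the local timezone:

from datetime import datetime

timestamp = 1577836800  # hypothetical "pubdate"
upload_date = datetime.fromtimestamp(timestamp).strftime('%Y%m%d') if timestamp else None
print(upload_date)  # e.g. '20200101', depending on timezone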
@@ -368,9 +333,8 @@ class BiliBiliBangumiIE(InfoExtractor):
             'Downloading bangumi info',
             'Downloading bangumi failed')['result']
         title = bangumi_info['season_title']
-        description = bangumi_info['evaluate']
-        view_count = bangumi_info['stat']['views']
-        episodes = bangumi_info['episodes']
+        description = bangumi_info.get('evaluate')
+        episodes = bangumi_info.get('episodes')
         self.to_screen('%s: episode count: %d' % (bangumi_id, len(episodes)))
         entries = []
         for idx, episode in enumerate(episodes, start=1):
@@ -383,7 +347,7 @@ class BiliBiliBangumiIE(InfoExtractor):
             # some videos are split into many fragments; collect them here
             formats = [{
                 'url': durl['url'],
-                'filesize': int_or_none(durl['size']),
+                'filesize': int_or_none(durl.get('size')),
             }]
             for backup_url in durl.get('backup_url', []):
                 formats.append({
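Note: switching durl['size'] to durl.get('size') means a fragment without a size reaches int_or_none() as None and yields None, rather than raising KeyError. A simplified stand-in showing that behaviour (the real youtube-dl helper also accepts scale/invscale arguments):

def int_or_none(v, default=None):
    try:
        return int(v)
    except (TypeError, ValueError):
        return default

assert int_or_none('1048576') == 1048576
assert int_or_none(None) is None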
@@ -402,9 +366,11 @@ class BiliBiliBangumiIE(InfoExtractor):
                 'id': '%s_%d_%d' % (bangumi_id, idx, fragment_idx),
                 'duration': float_or_none(durl.get('length'), 1000),
                 'formats': formats,
-                'title': episode['long_title']
+                'title': episode.get('long_title', '')
             })
         return self.playlist_result(entries, bangumi_id, title, description)


 class BiliBiliBangumiEpisodeIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?bilibili\.com/bangumi/play/[eE][pP](?P<id>\d+)'

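Note: float_or_none(durl.get('length'), 1000) divides by its scale argument, so a millisecond 'length' becomes a duration in seconds, and a missing value stays None. A simplified stand-in (the real utils helper additionally supports invscale and a default):

def float_or_none(v, scale=1):
    try:
        return float(v) / scale
    except (TypeError, ValueError):
        return None

assert float_or_none(212106, 1000) == 212.106  # ms -> seconds
assert float_or_none(None) is None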
@@ -463,6 +429,8 @@ class BiliBiliBangumiEpisodeIE(InfoExtractor):
         return self.url_result(
             'https://www.bilibili.com/bangumi/media/md%s' % bangumi_id,
             ie=BiliBiliBangumiIE.ie_key(), video_id=ep_id)


 class BilibiliAudioBaseIE(InfoExtractor):
     def _call_api(self, path, sid, query=None):
         if not query: