[BiliBili] fit all tests

This commit is contained in:
zwb 2020-05-18 11:05:49 +08:00
parent c35bdd1b8e
commit 4f8d365d88
1 changed file with 73 additions and 105 deletions


@@ -6,21 +6,11 @@ import re
from datetime import datetime
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
parse_iso8601,
smuggle_url,
str_or_none,
strip_jsonp,
unified_timestamp,
unsmuggle_url,
urlencode_postdata,
)
@@ -133,19 +123,19 @@ class BiliBiliIE(InfoExtractor):
raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True)
else:
raise ExtractorError('Can\'t extract Bangumi episode ID')
def _aid_to_bid(self, aid):
'''
convert a bilibili avid to its bvid
'''
api_url = 'http://api.bilibili.com/x/web-interface/view?aid=%s' %(aid, )
api_url = 'http://api.bilibili.com/x/web-interface/view?aid=%s' % (aid, )
js = self._download_json(api_url, aid, 'Converting avid to bvid', 'Conversion to bvid failed')
return js['data']['bvid']
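# Usage sketch (hypothetical values): for a numeric avid such as '170001',
# _aid_to_bid('170001') returns the matching 'BV...'-style id as reported by
# the web-interface API.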
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
# save the original video id
def _real_extract(self, url):
video_id = self._match_id(url)
# save the original video id
original_video_id = video_id
webpage = self._download_webpage(url, video_id)
title = ''
@@ -156,34 +146,22 @@ class BiliBiliIE(InfoExtractor):
uploader_name = ''
view_count = 0
part_list = []
upload_date = ''
# normal video
if re.match(r'^\d+$', video_id):
video_id = self._aid_to_bid(video_id)
self.to_screen("%s: convert to bvid %s"%(original_video_id, video_id))
list_api_url = 'https://api.bilibili.com/x/web-interface/view/detail?bvid=%s'%(video_id, )
js = self._download_json(list_api_url, original_video_id, 'downloading video list', 'downloading video list failed', fatal=False)
if not js:
# try old method
cid = self._search_regex(
r'\bcid(?:["\']:|=)(\d+)', webpage, 'cid',
default=None
) or compat_parse_qs(self._search_regex(
[r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
r'EmbedPlayer\([^)]+,\s*\\"([^"]+)\\"\)',
r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
webpage, 'player parameters'))['cid'][0]
part_list = [{'cid': cid, 'title': title}]
else:
# new method, get value from json
video_list = js['data']['View']['pages']
title = js.get('data').get('View').get('title')
thumbnail = js['data']['View']['pic']
uploader_id = js.get('data').get('Card').get('card').get('mid')
description = js.get('data').get('View').get('desc')
uploader_name = js.get('data').get('Card').get('card').get('name')
view_count = js.get('data').get('View').get('stat').get('view')
self.to_screen("%s: video count: %d"%(original_video_id, len(video_list)))
part_list = [{'cid': x['cid'], 'title': x['part']} for x in video_list]
self.to_screen("%s: convert to bvid %s" % (original_video_id, video_id))
list_api_url = 'https://api.bilibili.com/x/web-interface/view/detail?bvid=%s' % (video_id, )
js = self._download_json(list_api_url, original_video_id, 'downloading video list', 'downloading video list failed', fatal=False)['data']
video_list = js['View']['pages']
title = js['View']['title']
thumbnail = js.get('View', {}).get('pic')
description = js.get('View', {}).get('desc')
view_count = js.get('View', {}).get('stat', {}).get('view')
uploader_id = js.get('Card', {}).get('card', {}).get('mid')
uploader_name = js.get('Card', {}).get('card', {}).get('name')
self.to_screen("%s: video count: %d" % (original_video_id, len(video_list)))
part_list = [{'cid': x['cid'], 'title': x['part']} for x in video_list]
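# each part_list entry describes one page (part) of the video; its cid is the
# key used for the playurl request below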
headers = {
'Referer': url
}
@@ -193,14 +171,14 @@ class BiliBiliIE(InfoExtractor):
RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4')
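# renditions are tried in order; only the last attempt is allowed to be fatal
# (fatal=num == len(RENDITIONS)), so the mp4 request acts as a fallback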
for part_info in part_list:
# try to get video playback url, use
# try to get the video playback url for this part
for num, rendition in enumerate(RENDITIONS, start=1):
payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, part_info['cid'], rendition)
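# the playurl API expects sign=md5(payload + app secret) appended to the query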
sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()
video_info = self._download_json(
'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
original_video_id, note='Downloading video info for cid: %s'%(part_info['cid'], ),
original_video_id, note='Downloading video info for cid: %s' % (part_info['cid'], ),
headers=headers, fatal=num == len(RENDITIONS))
if not video_info:
@@ -214,7 +192,7 @@ class BiliBiliIE(InfoExtractor):
if len(part_list) == 1:
# if video only got one part, use video title instead of part title
part_title = title
for idx, durl in enumerate(video_info['durl'], start = 1):
for idx, durl in enumerate(video_info['durl'], start=1):
# some videos are split into multiple fragments; each durl entry is one fragment
formats = [{
'url': durl['url'],
@@ -233,54 +211,41 @@ class BiliBiliIE(InfoExtractor):
})
self._sort_formats(formats)
entries.append({
'id': '%s_%s_%s' % (original_video_id,part_info['cid'],idx),
'id': '%s_%s_%s' % (original_video_id, part_info['cid'], idx),
'duration': float_or_none(durl.get('length'), 1000),
'formats': formats,
'title': part_title
})
break
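# a rendition succeeded for this part, so stop trying further RENDITIONS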
# timestamp = unified_timestamp(self._html_search_regex(
# r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time',
# default=None) or self._html_search_meta(
# 'uploadDate', webpage, 'timestamp', default=None))
# upload_date =
if not title:
title = self._html_search_regex(
('<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
group='title')
group='title', fatal=False)
if not timestamp:
timestamp = self._html_search_regex(
(r'"pubdate":(?P<timestamp>\d+),'), webpage, 'title',
group='timestamp')
(r'"pubdate":(?P<timestamp>\d+),'), webpage, 'timestamp',
group='timestamp', fatal=False)
if not uploader_id or not uploader_name:
uploader_mobj = re.search(
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>(?P<name>[^<]+)',
webpage)
if uploader_mobj:
uploader_id = uploader_mobj.group('id')
uploader_name = uploader_mobj.group('name')
if not uploader_id or not uploader_name:
# try again
uploader_name = self._html_search_meta(
'author', webpage, 'uploader', default=None)
uploader_id = self._html_search_regex(
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"',
webpage, 'uploader id',
group='id', fatal=False)
uploader_name = self._html_search_regex(
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/\d+"[^>]*>(?P<name>[^<]+)',
webpage, 'uploader name',
group='name', fatal=False)
if not thumbnail:
thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage, fatal=False)
if not description:
description = self._html_search_meta('description', webpage)
timestamp = int(timestamp)
upload_date = datetime.fromtimestamp(timestamp).strftime('%Y%m%d')
view_count = int(view_count)
description = self._html_search_meta('description', webpage, fatal=False)
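# "pubdate" is a unix epoch timestamp; when present it is converted below to
# an upload_date of the form YYYYMMDD (local time via datetime.fromtimestamp)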
if timestamp:
timestamp = int_or_none(timestamp)
upload_date = datetime.fromtimestamp(timestamp).strftime('%Y%m%d')
if view_count:
view_count = int_or_none(view_count)
if len(entries) == 1:
entry = entries[0]
entry['uploader'] = uploader_name
@@ -321,37 +286,37 @@ class BiliBiliBangumiIE(InfoExtractor):
'info_dict': {
'id': '3814_1_1',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
},{
}, {
'info_dict': {
'id': '3814_1_2',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
},{
}, {
'info_dict': {
'id': '3814_1_3',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
},{
}, {
'info_dict': {
'id': '3814_1_4',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
},{
}, {
'info_dict': {
'id': '3814_1_5',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
}, {
'info_dict': {
'id': '3814_2_1',
'ext': 'flv',
'title' : '最后的魔法大战 后篇'
'title': '最后的魔法大战 后篇'
},
}
]
@@ -360,7 +325,7 @@ class BiliBiliBangumiIE(InfoExtractor):
def _real_extract(self, url):
headers = {
'Referer': url
}
}
bangumi_id = self._match_id(url)
bangumi_info = self._download_json(
'https://api.bilibili.com/pgc/view/web/season?season_id=%s' % (bangumi_id,),
@@ -368,9 +333,8 @@ class BiliBiliBangumiIE(InfoExtractor):
'Downloading bangumi info',
'Downloading bangumi failed')['result']
title = bangumi_info['season_title']
description = bangumi_info['evaluate']
view_count = bangumi_info['stat']['views']
episodes = bangumi_info['episodes']
description = bangumi_info.get('evaluate')
episodes = bangumi_info.get('episodes') or []
self.to_screen('%s: episode count: %d' % (bangumi_id, len(episodes)))
entries = []
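# one entry is produced per fragment of each episode, with ids of the form
# <bangumi_id>_<episode index>_<fragment index>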
for idx, episode in enumerate(episodes, start=1):
@@ -383,7 +347,7 @@ class BiliBiliBangumiIE(InfoExtractor):
# some videos are split into multiple fragments; each durl entry is one fragment
formats = [{
'url': durl['url'],
'filesize': int_or_none(durl['size']),
'filesize': int_or_none(durl.get('size')),
}]
for backup_url in durl.get('backup_url', []):
formats.append({
@@ -399,12 +363,14 @@ class BiliBiliBangumiIE(InfoExtractor):
self._sort_formats(formats)
entries.append({
'id': '%s_%d_%d' % (bangumi_id,idx, fragment_idx),
'id': '%s_%d_%d' % (bangumi_id, idx, fragment_idx),
'duration': float_or_none(durl.get('length'), 1000),
'formats': formats,
'title': episode['long_title']
'title': episode.get('long_title', '')
})
return self.playlist_result(entries, bangumi_id, title, description)
class BiliBiliBangumiEpisodeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?bilibili\.com/bangumi/play/[eE][pP](?P<id>\d+)'
@@ -421,37 +387,37 @@ class BiliBiliBangumiEpisodeIE(InfoExtractor):
'info_dict': {
'id': '3814_1_1',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
},{
}, {
'info_dict': {
'id': '3814_1_2',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
},{
}, {
'info_dict': {
'id': '3814_1_3',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
},{
}, {
'info_dict': {
'id': '3814_1_4',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
},{
}, {
'info_dict': {
'id': '3814_1_5',
'ext': 'flv',
'title' : '最后的魔法大战 前篇'
'title': '最后的魔法大战 前篇'
},
}, {
'info_dict': {
'id': '3814_2_1',
'ext': 'flv',
'title' : '最后的魔法大战 后篇'
'title': '最后的魔法大战 后篇'
},
}
]
@@ -459,10 +425,12 @@ class BiliBiliBangumiEpisodeIE(InfoExtractor):
def _real_extract(self, url):
ep_id = self._match_id(url)
bangumi_id = self._download_json('https://api.bilibili.com/pgc/view/web/season?ep_id=%s'%(ep_id, ), ep_id, 'Downloading bangumi info')['result']['media_id']
bangumi_id = self._download_json('https://api.bilibili.com/pgc/view/web/season?ep_id=%s' % (ep_id, ), ep_id, 'Downloading bangumi info')['result']['media_id']
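# the season API maps the ep id to its media_id; delegate extraction to
# BiliBiliBangumiIE via the bangumi/media/md<media_id> URL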
return self.url_result(
'https://www.bilibili.com/bangumi/media/md%s' % bangumi_id,
ie=BiliBiliBangumiIE.ie_key(), video_id=ep_id)
class BilibiliAudioBaseIE(InfoExtractor):
def _call_api(self, path, sid, query=None):
if not query: