1
0
mirror of https://codeberg.org/polarisfm/youtube-dl synced 2024-11-26 18:34:32 +01:00

Revert "[3sat] new extractor based on zdf extractor"

This reverts commit 3108be67eb.
This commit is contained in:
Matthias Roos 2019-07-19 20:47:26 +02:00
parent 3108be67eb
commit a23c8fa1a7

View File

@@ -1,234 +1,193 @@
# coding: utf-8
from __future__ import unicode_literals from __future__ import unicode_literals
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_str
from ..utils import ( from ..utils import (
determine_ext,
int_or_none, int_or_none,
NO_DEFAULT, unified_strdate,
orderedSet, xpath_text,
parse_codecs, determine_ext,
qualities, float_or_none,
try_get, ExtractorError,
unified_timestamp,
update_url_query,
url_or_none,
urljoin,
) )
class DreiSatIE(InfoExtractor):
    """Extractor for the legacy 3sat Mediathek (obj-id based URLs).

    Metadata comes from the 3sat XML service
    (``beitragsDetails?id=...``); formats are collected from the
    ``formitaeten`` nodes of that document, with SMIL/m3u8/f4m
    manifests expanded via the base-class helpers.
    """
    IE_NAME = '3sat'
    # The XML service geo-blocks some content to Germany.
    _GEO_COUNTRIES = ['DE']
    _VALID_URL = r'https?://(?:www\.)?3sat\.de/mediathek/(?:(?:index|mediathek)\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)'
    _TESTS = [
        {
            'url': 'http://www.3sat.de/mediathek/index.php?mode=play&obj=45918',
            'md5': 'be37228896d30a88f315b638900a026e',
            'info_dict': {
                'id': '45918',
                'ext': 'mp4',
                'title': 'Waidmannsheil',
                'description': 'md5:cce00ca1d70e21425e72c86a98a56817',
                'uploader': 'SCHWEIZWEIT',
                'uploader_id': '100000210',
                'upload_date': '20140913'
            },
            'params': {
                'skip_download': True,  # m3u8 downloads
            }
        },
        {
            'url': 'http://www.3sat.de/mediathek/mediathek.php?mode=play&obj=51066',
            'only_matching': True,
        },
    ]

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        """Override of the generic SMIL parser: 3sat SMILs carry RTMP
        connection data (protocols/host/app) in ``paramGroup`` elements
        that each ``video`` node references via its ``paramGroup``
        attribute.
        """
        # Collect the paramGroup blocks into {group_id: {name: value}}.
        param_groups = {}
        for param_group in smil.findall(self._xpath_ns('./head/paramGroup', namespace)):
            group_id = param_group.get(self._xpath_ns(
                'id', 'http://www.w3.org/XML/1998/namespace'))
            params = {}
            for param in param_group:
                params[param.get('name')] = param.get('value')
            param_groups[group_id] = params

        formats = []
        for video in smil.findall(self._xpath_ns('.//video', namespace)):
            src = video.get('src')
            if not src:
                continue
            # Prefer the bitrate embedded in the path ("..._1500k..."),
            # falling back to the declared system bitrate (in bit/s).
            bitrate = int_or_none(self._search_regex(r'_(\d+)k', src, 'bitrate', None)) or float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
            group_id = video.get('paramGroup')
            param_group = param_groups[group_id]
            # One format per advertised protocol (e.g. "rtmp,rtmpt").
            for proto in param_group['protocols'].split(','):
                formats.append({
                    'url': '%s://%s' % (proto, param_group['host']),
                    'app': param_group['app'],
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': '%s-%d' % (proto, bitrate),
                    'tbr': bitrate,
                })
        self._sort_formats(formats)
        return formats

    def extract_from_xml_url(self, video_id, xml_url):
        """Download the beitragsDetails XML for *video_id* and build the
        info dict (title, formats, thumbnails, uploader metadata).

        Raises ExtractorError when the service reports a non-ok status.
        """
        doc = self._download_xml(
            xml_url, video_id,
            note='Downloading video info',
            errnote='Failed to download video info')

        status_code = xpath_text(doc, './status/statuscode')
        if status_code and status_code != 'ok':
            if status_code == 'notVisibleAnymore':
                message = 'Video %s is not available' % video_id
            else:
                message = '%s returned error: %s' % (self.IE_NAME, status_code)
            raise ExtractorError(message, expected=True)

        title = xpath_text(doc, './/information/title', 'title', True)

        urls = []
        formats = []
        for fnode in doc.findall('.//formitaeten/formitaet'):
            video_url = xpath_text(fnode, 'url')
            if not video_url or video_url in urls:
                continue
            urls.append(video_url)

            # Skip placeholder URLs and geo-fenced static variants.
            is_available = 'http://www.metafilegenerator' not in video_url
            geoloced = 'static_geoloced_online' in video_url
            if not is_available or geoloced:
                continue

            # basetype encodes vcodec_acodec_container_proto_index_indexproto.
            format_id = fnode.attrib['basetype']
            format_m = re.match(r'''(?x)
                (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
                (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
            ''', format_id)

            ext = determine_ext(video_url, None) or format_m.group('container')

            if ext == 'meta':
                continue
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    video_url, video_id, fatal=False))
            elif ext == 'm3u8':
                # the certificates are misconfigured (see
                # https://github.com/ytdl-org/youtube-dl/issues/8665)
                if video_url.startswith('https://'):
                    continue
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id=format_id, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    video_url, video_id, f4m_id=format_id, fatal=False))
            else:
                quality = xpath_text(fnode, './quality')
                if quality:
                    format_id += '-' + quality

                abr = int_or_none(xpath_text(fnode, './audioBitrate'), 1000)
                vbr = int_or_none(xpath_text(fnode, './videoBitrate'), 1000)

                tbr = int_or_none(self._search_regex(
                    r'_(\d+)k', video_url, 'bitrate', None))
                # Derive the audio bitrate when only total+video are known.
                if tbr and vbr and not abr:
                    abr = tbr - vbr

                formats.append({
                    'format_id': format_id,
                    'url': video_url,
                    'ext': ext,
                    'acodec': format_m.group('acodec'),
                    'vcodec': format_m.group('vcodec'),
                    'abr': abr,
                    'vbr': vbr,
                    'tbr': tbr,
                    'width': int_or_none(xpath_text(fnode, './width')),
                    'height': int_or_none(xpath_text(fnode, './height')),
                    'filesize': int_or_none(xpath_text(fnode, './filesize')),
                    'protocol': format_m.group('proto').lower(),
                })

        # No usable formats plus a geolocation flag means geo-blocking.
        geolocation = xpath_text(doc, './/details/geolocation')
        if not formats and geolocation and geolocation != 'none':
            self.raise_geo_restricted(countries=self._GEO_COUNTRIES)

        self._sort_formats(formats)

        thumbnails = []
        for node in doc.findall('.//teaserimages/teaserimage'):
            thumbnail_url = node.text
            if not thumbnail_url:
                continue
            thumbnail = {
                'url': thumbnail_url,
            }
            # The "key" attribute is a WxH size hint (e.g. "644x363").
            thumbnail_key = node.get('key')
            if thumbnail_key:
                m = re.match('^([0-9]+)x([0-9]+)$', thumbnail_key)
                if m:
                    thumbnail['width'] = int(m.group(1))
                    thumbnail['height'] = int(m.group(2))
            thumbnails.append(thumbnail)

        upload_date = unified_strdate(xpath_text(doc, './/details/airtime'))

        return {
            'id': video_id,
            'title': title,
            'description': xpath_text(doc, './/information/detail'),
            'duration': int_or_none(xpath_text(doc, './/details/lengthSec')),
            'thumbnails': thumbnails,
            'uploader': xpath_text(doc, './/details/originChannelTitle'),
            'uploader_id': xpath_text(doc, './/details/originChannelId'),
            'upload_date': upload_date,
            'formats': formats,
        }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?id=%s' % video_id
        return self.extract_from_xml_url(video_id, details_url)