Mirror of https://codeberg.org/polarisfm/youtube-dl (synced 2024-11-29 11:44:33 +01:00)

Commit dbacb6f30c: Merge branch 'master' into fubo

.github/ISSUE_TEMPLATE/1_broken_site.md (vendored, 6 changed lines)
@@ -18,7 +18,7 @@ title: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->

 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2020.05.08
+[debug] youtube-dl version 2020.07.28
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
.github/ISSUE_TEMPLATE/2_site_support_request.md (vendored)
@@ -19,7 +19,7 @@ labels: 'site-support-request'

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->

 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
.github/ISSUE_TEMPLATE/3_site_feature_request.md (vendored)
@@ -18,13 +18,13 @@ title: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->

 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones


.github/ISSUE_TEMPLATE/4_bug_report.md (vendored, 6 changed lines)
@@ -18,7 +18,7 @@ title: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->

 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2020.05.08
+[debug] youtube-dl version 2020.07.28
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
.github/ISSUE_TEMPLATE/5_feature_request.md (vendored, 4 changed lines)
@@ -19,13 +19,13 @@ labels: 'request'

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->

 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones


@@ -153,7 +153,7 @@ After you have ensured this site is distributing its content legally, you can fo
 5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
 7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
-8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](http://flake8.pycqa.org/en/latest/index.html#quickstart):
+8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):

 $ flake8 youtube_dl/extractor/yourextractor.py

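The contributing steps above refer to the `_TEST`/`_TESTS` layout and the `_real_extract` flow introduced in the earlier steps of the guide, which are not part of this hunk. For orientation only, a minimal sketch in that style is shown below; it is not part of this commit, and `YourExtractorIE`, the yourextractor.com URLs, the md5 placeholder and the title regex are illustrative assumptions:

# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://yourextractor.com/watch/42',
        'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        # only_matching entries are checked against _VALID_URL but never downloaded
        'url': 'https://yourextractor.com/watch/43',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # TODO: more properties (see youtube_dl/extractor/common.py)
        title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')

        return {
            'id': video_id,
            'title': title,
            'description': self._og_search_description(webpage, default=None),
        }

With the second entry present, `_TEST` has already been renamed to `_TESTS` as step 6 describes, and the `only_matching` entry is not counted as a download test.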
ChangeLog (70 changed lines)
@@ -1,3 +1,73 @@
+version 2020.07.28
+
+Extractors
+* [youtube] Fix sigfunc name extraction (#26134, #26135, #26136, #26137)
+* [youtube] Improve description extraction (#25937, #25980)
+* [wistia] Restrict embed regular expression (#25969)
+* [youtube] Prevent excess HTTP 301 (#25786)
++ [youtube:playlists] Extend URL regular expression (#25810)
++ [bellmedia] Add support for cp24.com clip URLs (#25764)
+* [brightcove] Improve embed detection (#25674)
+
+
+version 2020.06.16.1
+
+Extractors
+* [youtube] Force old layout (#25682, #25683, #25680, #25686)
+* [youtube] Fix categories and improve tags extraction
+
+
+version 2020.06.16
+
+Extractors
+* [youtube] Fix uploader id and uploader URL extraction
+* [youtube] Improve view count extraction
+* [youtube] Fix upload date extraction (#25677)
+* [youtube] Fix thumbnails extraction (#25676)
+* [youtube] Fix playlist and feed extraction (#25675)
++ [facebook] Add support for single-video ID links
++ [youtube] Extract chapters from JSON (#24819)
++ [kaltura] Add support for multiple embeds on a webpage (#25523)
+
+
+version 2020.06.06
+
+Extractors
+* [tele5] Bypass geo restriction
++ [jwplatform] Add support for bypass geo restriction
+* [tele5] Prefer jwplatform over nexx (#25533)
+* [twitch:stream] Expect 400 and 410 HTTP errors from API
+* [twitch:stream] Fix extraction (#25528)
+* [twitch] Fix thumbnails extraction (#25531)
++ [twitch] Pass v5 Accept HTTP header (#25531)
+* [brightcove] Fix subtitles extraction (#25540)
++ [malltv] Add support for sk.mall.tv (#25445)
+* [periscope] Fix untitled broadcasts (#25482)
+* [jwplatform] Improve embeds extraction (#25467)
+
+
+version 2020.05.29
+
+Core
+* [postprocessor/ffmpeg] Embed series metadata with --add-metadata
+* [utils] Fix file permissions in write_json_file (#12471, #25122)
+
+Extractors
+* [ard:beta] Extend URL regular expression (#25405)
++ [youtube] Add support for more invidious instances (#25417)
+* [giantbomb] Extend URL regular expression (#25222)
+* [ard] Improve URL regular expression (#25134, #25198)
+* [redtube] Improve formats extraction and extract m3u8 formats (#25311,
+  #25321)
+* [indavideo] Switch to HTTPS for API request (#25191)
+* [redtube] Improve title extraction (#25208)
+* [vimeo] Improve format extraction and sorting (#25285)
+* [soundcloud] Reduce API playlist page limit (#25274)
++ [youtube] Add support for yewtu.be (#25226)
+* [mailru] Fix extraction (#24530, #25239)
+* [bellator] Fix mgid extraction (#25195)
+
+
 version 2020.05.08

 Core
test/test_youtube_chapters.py
@@ -267,7 +267,7 @@ class TestYoutubeChapters(unittest.TestCase):
         for description, duration, expected_chapters in self._TEST_CASES:
             ie = YoutubeIE()
             expect_value(
-                self, ie._extract_chapters(description, duration),
+                self, ie._extract_chapters_from_description(description, duration),
                 expected_chapters, None)


@ -249,7 +249,7 @@ class ARDMediathekIE(ARDMediathekBaseIE):
|
||||
|
||||
|
||||
class ARDIE(InfoExtractor):
|
||||
_VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
|
||||
_VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos(?:extern)?/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
|
||||
_TESTS = [{
|
||||
# available till 14.02.2019
|
||||
'url': 'http://www.daserste.de/information/talk/maischberger/videos/das-groko-drama-zerlegen-sich-die-volksparteien-video-102.html',
|
||||
@ -263,6 +263,9 @@ class ARDIE(InfoExtractor):
|
||||
'upload_date': '20180214',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.daserste.de/information/reportage-dokumentation/erlebnis-erde/videosextern/woelfe-und-herdenschutzhunde-ungleiche-brueder-102.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
|
||||
'only_matching': True,
|
||||
@ -310,9 +313,9 @@ class ARDIE(InfoExtractor):
|
||||
|
||||
|
||||
class ARDBetaMediathekIE(ARDMediathekBaseIE):
|
||||
_VALID_URL = r'https://(?:beta|www)\.ardmediathek\.de/(?P<client>[^/]+)/(?:player|live)/(?P<video_id>[a-zA-Z0-9]+)(?:/(?P<display_id>[^/?#]+))?'
|
||||
_VALID_URL = r'https://(?:(?:beta|www)\.)?ardmediathek\.de/(?P<client>[^/]+)/(?:player|live|video)/(?P<display_id>(?:[^/]+/)*)(?P<video_id>[a-zA-Z0-9]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://beta.ardmediathek.de/ard/player/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE/die-robuste-roswita',
|
||||
'url': 'https://ardmediathek.de/ard/video/die-robuste-roswita/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE',
|
||||
'md5': 'dfdc87d2e7e09d073d5a80770a9ce88f',
|
||||
'info_dict': {
|
||||
'display_id': 'die-robuste-roswita',
|
||||
@ -325,6 +328,15 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
|
||||
'upload_date': '20191222',
|
||||
'ext': 'mp4',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://beta.ardmediathek.de/ard/video/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://ardmediathek.de/ard/video/saartalk/saartalk-gesellschaftsgift-haltung-gegen-hass/sr-fernsehen/Y3JpZDovL3NyLW9ubGluZS5kZS9TVF84MTY4MA/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.ardmediathek.de/ard/video/trailer/private-eyes-s01-e01/one/Y3JpZDovL3dkci5kZS9CZWl0cmFnLTE1MTgwYzczLWNiMTEtNGNkMS1iMjUyLTg5MGYzOWQxZmQ1YQ/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.ardmediathek.de/ard/player/Y3JpZDovL3N3ci5kZS9hZXgvbzEwNzE5MTU/',
|
||||
'only_matching': True,
|
||||
@ -336,7 +348,11 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('video_id')
|
||||
display_id = mobj.group('display_id') or video_id
|
||||
display_id = mobj.group('display_id')
|
||||
if display_id:
|
||||
display_id = display_id.rstrip('/')
|
||||
if not display_id:
|
||||
display_id = video_id
|
||||
|
||||
player_page = self._download_json(
|
||||
'https://api.ardmediathek.de/public-gateway',
|
||||
|
@ -25,8 +25,8 @@ class BellMediaIE(InfoExtractor):
|
||||
etalk|
|
||||
marilyn
|
||||
)\.ca|
|
||||
much\.com
|
||||
)/.*?(?:\bvid(?:eoid)?=|-vid|~|%7E|/(?:episode)?)(?P<id>[0-9]{6,})'''
|
||||
(?:much|cp24)\.com
|
||||
)/.*?(?:\b(?:vid(?:eoid)?|clipId)=|-vid|~|%7E|/(?:episode)?)(?P<id>[0-9]{6,})'''
|
||||
_TESTS = [{
|
||||
'url': 'https://www.bnnbloomberg.ca/video/david-cockfield-s-top-picks~1403070',
|
||||
'md5': '36d3ef559cfe8af8efe15922cd3ce950',
|
||||
@ -62,6 +62,9 @@ class BellMediaIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'http://www.etalk.ca/video?videoid=663455',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.cp24.com/video?clipId=1982548',
|
||||
'only_matching': True,
|
||||
}]
|
||||
_DOMAINS = {
|
||||
'thecomedynetwork': 'comedy',
|
||||
|
@ -3,10 +3,11 @@ from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from .vk import VKIE
|
||||
from ..utils import (
|
||||
HEADRequest,
|
||||
int_or_none,
|
||||
from ..compat import (
|
||||
compat_b64decode,
|
||||
compat_urllib_parse_unquote,
|
||||
)
|
||||
from ..utils import int_or_none
|
||||
|
||||
|
||||
class BIQLEIE(InfoExtractor):
|
||||
@ -47,9 +48,16 @@ class BIQLEIE(InfoExtractor):
|
||||
if VKIE.suitable(embed_url):
|
||||
return self.url_result(embed_url, VKIE.ie_key(), video_id)
|
||||
|
||||
self._request_webpage(
|
||||
HEADRequest(embed_url), video_id, headers={'Referer': url})
|
||||
video_id, sig, _, access_token = self._get_cookies(embed_url)['video_ext'].value.split('%3A')
|
||||
embed_page = self._download_webpage(
|
||||
embed_url, video_id, headers={'Referer': url})
|
||||
video_ext = self._get_cookies(embed_url).get('video_ext')
|
||||
if video_ext:
|
||||
video_ext = compat_urllib_parse_unquote(video_ext.value)
|
||||
if not video_ext:
|
||||
video_ext = compat_b64decode(self._search_regex(
|
||||
r'video_ext\s*:\s*[\'"]([A-Za-z0-9+/=]+)',
|
||||
embed_page, 'video_ext')).decode()
|
||||
video_id, sig, _, access_token = video_ext.split(':')
|
||||
item = self._download_json(
|
||||
'https://api.vk.com/method/video.get', video_id,
|
||||
headers={'User-Agent': 'okhttp/3.4.1'}, query={
|
||||
|
@ -5,32 +5,34 @@ import base64
|
||||
import re
|
||||
import struct
|
||||
|
||||
from .common import InfoExtractor
|
||||
from .adobepass import AdobePassIE
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_etree_fromstring,
|
||||
compat_HTTPError,
|
||||
compat_parse_qs,
|
||||
compat_urllib_parse_urlparse,
|
||||
compat_urlparse,
|
||||
compat_xml_parse_error,
|
||||
compat_HTTPError,
|
||||
)
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
clean_html,
|
||||
extract_attributes,
|
||||
ExtractorError,
|
||||
find_xpath_attr,
|
||||
fix_xml_ampersands,
|
||||
float_or_none,
|
||||
js_to_json,
|
||||
int_or_none,
|
||||
js_to_json,
|
||||
mimetype2ext,
|
||||
parse_iso8601,
|
||||
smuggle_url,
|
||||
str_or_none,
|
||||
unescapeHTML,
|
||||
unsmuggle_url,
|
||||
update_url_query,
|
||||
clean_html,
|
||||
mimetype2ext,
|
||||
UnsupportedError,
|
||||
update_url_query,
|
||||
url_or_none,
|
||||
)
|
||||
|
||||
|
||||
@ -424,7 +426,7 @@ class BrightcoveNewIE(AdobePassIE):
|
||||
# [2] looks like:
|
||||
for video, script_tag, account_id, player_id, embed in re.findall(
|
||||
r'''(?isx)
|
||||
(<video\s+[^>]*\bdata-video-id\s*=\s*['"]?[^>]+>)
|
||||
(<video(?:-js)?\s+[^>]*\bdata-video-id\s*=\s*['"]?[^>]+>)
|
||||
(?:.*?
|
||||
(<script[^>]+
|
||||
src=["\'](?:https?:)?//players\.brightcove\.net/
|
||||
@ -553,9 +555,15 @@ class BrightcoveNewIE(AdobePassIE):
|
||||
|
||||
subtitles = {}
|
||||
for text_track in json_data.get('text_tracks', []):
|
||||
if text_track.get('src'):
|
||||
subtitles.setdefault(text_track.get('srclang'), []).append({
|
||||
'url': text_track['src'],
|
||||
if text_track.get('kind') != 'captions':
|
||||
continue
|
||||
text_track_url = url_or_none(text_track.get('src'))
|
||||
if not text_track_url:
|
||||
continue
|
||||
lang = (str_or_none(text_track.get('srclang'))
|
||||
or str_or_none(text_track.get('label')) or 'en').lower()
|
||||
subtitles.setdefault(lang, []).append({
|
||||
'url': text_track_url,
|
||||
})
|
||||
|
||||
is_live = False
|
||||
|
@ -466,15 +466,18 @@ class FacebookIE(InfoExtractor):
|
||||
return info_dict
|
||||
|
||||
if '/posts/' in url:
|
||||
video_id_json = self._search_regex(
|
||||
r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])', webpage, 'video ids', group='ids',
|
||||
default='')
|
||||
if video_id_json:
|
||||
entries = [
|
||||
self.url_result('facebook:%s' % vid, FacebookIE.ie_key())
|
||||
for vid in self._parse_json(
|
||||
self._search_regex(
|
||||
r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
|
||||
webpage, 'video ids', group='ids'),
|
||||
video_id)]
|
||||
|
||||
for vid in self._parse_json(video_id_json, video_id)]
|
||||
return self.playlist_result(entries, video_id)
|
||||
|
||||
# Single Video?
|
||||
video_id = self._search_regex(r'video_id:\s*"([0-9]+)"', webpage, 'single video id')
|
||||
return self.url_result('facebook:%s' % video_id, FacebookIE.ie_key())
|
||||
else:
|
||||
_, info_dict = self._extract_from_url(
|
||||
self._VIDEO_PAGE_TEMPLATE % video_id,
|
||||
|
@ -1708,6 +1708,15 @@ class GenericIE(InfoExtractor):
|
||||
},
|
||||
'add_ie': ['Kaltura'],
|
||||
},
|
||||
{
|
||||
# multiple kaltura embeds, nsfw
|
||||
'url': 'https://www.quartier-rouge.be/prive/femmes/kamila-avec-video-jaime-sadomie.html',
|
||||
'info_dict': {
|
||||
'id': 'kamila-avec-video-jaime-sadomie',
|
||||
'title': "Kamila avec vídeo “J'aime sadomie”",
|
||||
},
|
||||
'playlist_count': 8,
|
||||
},
|
||||
{
|
||||
# Non-standard Vimeo embed
|
||||
'url': 'https://openclassrooms.com/courses/understanding-the-web',
|
||||
@ -2844,9 +2853,12 @@ class GenericIE(InfoExtractor):
|
||||
return self.url_result(mobj.group('url'), 'Zapiks')
|
||||
|
||||
# Look for Kaltura embeds
|
||||
kaltura_url = KalturaIE._extract_url(webpage)
|
||||
if kaltura_url:
|
||||
return self.url_result(smuggle_url(kaltura_url, {'source_url': url}), KalturaIE.ie_key())
|
||||
kaltura_urls = KalturaIE._extract_urls(webpage)
|
||||
if kaltura_urls:
|
||||
return self.playlist_from_matches(
|
||||
kaltura_urls, video_id, video_title,
|
||||
getter=lambda x: smuggle_url(x, {'source_url': url}),
|
||||
ie=KalturaIE.ie_key())
|
||||
|
||||
# Look for EaglePlatform embeds
|
||||
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
|
||||
|
youtube_dl/extractor/giantbomb.py
@@ -13,10 +13,10 @@ from ..utils import (


 class GiantBombIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/videos/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/(?:videos|shows)/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
+    _TESTS = [{
         'url': 'http://www.giantbomb.com/videos/quick-look-destiny-the-dark-below/2300-9782/',
-        'md5': 'c8ea694254a59246a42831155dec57ac',
+        'md5': '132f5a803e7e0ab0e274d84bda1e77ae',
         'info_dict': {
             'id': '2300-9782',
             'display_id': 'quick-look-destiny-the-dark-below',
@@ -26,7 +26,10 @@ class GiantBombIE(InfoExtractor):
             'duration': 2399,
             'thumbnail': r're:^https?://.*\.jpg$',
         }
-    }
+    }, {
+        'url': 'https://www.giantbomb.com/shows/ben-stranding/2970-20212',
+        'only_matching': True,
+    }]

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
youtube_dl/extractor/indavideo.py
@@ -58,7 +58,7 @@ class IndavideoEmbedIE(InfoExtractor):
         video_id = self._match_id(url)

         video = self._download_json(
-            'http://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % video_id,
+            'https://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % video_id,
             video_id)['data']

         title = video['title']
@ -4,6 +4,7 @@ from __future__ import unicode_literals
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import unsmuggle_url
|
||||
|
||||
|
||||
class JWPlatformIE(InfoExtractor):
|
||||
@ -32,10 +33,14 @@ class JWPlatformIE(InfoExtractor):
|
||||
@staticmethod
|
||||
def _extract_urls(webpage):
|
||||
return re.findall(
|
||||
r'<(?:script|iframe)[^>]+?src=["\']((?:https?:)?//content\.jwplatform\.com/players/[a-zA-Z0-9]{8})',
|
||||
r'<(?:script|iframe)[^>]+?src=["\']((?:https?:)?//(?:content\.jwplatform|cdn\.jwplayer)\.com/players/[a-zA-Z0-9]{8})',
|
||||
webpage)
|
||||
|
||||
def _real_extract(self, url):
|
||||
url, smuggled_data = unsmuggle_url(url, {})
|
||||
self._initialize_geo_bypass({
|
||||
'countries': smuggled_data.get('geo_countries'),
|
||||
})
|
||||
video_id = self._match_id(url)
|
||||
json_data = self._download_json('https://cdn.jwplayer.com/v2/media/' + video_id, video_id)
|
||||
return self._parse_jwplayer_data(json_data, video_id)
|
||||
|
@ -113,9 +113,14 @@ class KalturaIE(InfoExtractor):
|
||||
|
||||
@staticmethod
|
||||
def _extract_url(webpage):
|
||||
urls = KalturaIE._extract_urls(webpage)
|
||||
return urls[0] if urls else None
|
||||
|
||||
@staticmethod
|
||||
def _extract_urls(webpage):
|
||||
# Embed codes: https://knowledge.kaltura.com/embedding-kaltura-media-players-your-site
|
||||
mobj = (
|
||||
re.search(
|
||||
finditer = (
|
||||
re.finditer(
|
||||
r"""(?xs)
|
||||
kWidget\.(?:thumb)?[Ee]mbed\(
|
||||
\{.*?
|
||||
@ -124,7 +129,7 @@ class KalturaIE(InfoExtractor):
|
||||
(?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s*
|
||||
(?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\})
|
||||
""", webpage)
|
||||
or re.search(
|
||||
or re.finditer(
|
||||
r'''(?xs)
|
||||
(?P<q1>["'])
|
||||
(?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)*
|
||||
@ -138,7 +143,7 @@ class KalturaIE(InfoExtractor):
|
||||
)
|
||||
(?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3)
|
||||
''', webpage)
|
||||
or re.search(
|
||||
or re.finditer(
|
||||
r'''(?xs)
|
||||
<(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
|
||||
(?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
|
||||
@ -148,7 +153,8 @@ class KalturaIE(InfoExtractor):
|
||||
(?P=q1)
|
||||
''', webpage)
|
||||
)
|
||||
if mobj:
|
||||
urls = []
|
||||
for mobj in finditer:
|
||||
embed_info = mobj.groupdict()
|
||||
for k, v in embed_info.items():
|
||||
if v:
|
||||
@ -160,7 +166,8 @@ class KalturaIE(InfoExtractor):
|
||||
webpage)
|
||||
if service_mobj:
|
||||
url = smuggle_url(url, {'service_url': service_mobj.group('id')})
|
||||
return url
|
||||
urls.append(url)
|
||||
return urls
|
||||
|
||||
def _kaltura_api_call(self, video_id, actions, service_url=None, *args, **kwargs):
|
||||
params = actions[0]
|
||||
|
youtube_dl/extractor/malltv.py
@@ -8,7 +8,7 @@ from ..utils import merge_dicts


 class MallTVIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?mall\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:(?:www|sk)\.)?mall\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'https://www.mall.tv/18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice',
         'md5': '1c4a37f080e1f3023103a7b43458e518',
@@ -26,6 +26,9 @@ class MallTVIE(InfoExtractor):
     }, {
         'url': 'https://www.mall.tv/kdo-to-plati/18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice',
         'only_matching': True,
+    }, {
+        'url': 'https://sk.mall.tv/gejmhaus/reklamacia-nehreje-vyrobnik-tepla-alebo-spekacka',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
youtube_dl/extractor/periscope.py
@@ -18,7 +18,7 @@ class PeriscopeBaseIE(InfoExtractor):
             item_id, query=query)

     def _parse_broadcast_data(self, broadcast, video_id):
-        title = broadcast['status']
+        title = broadcast.get('status') or 'Periscope Broadcast'
         uploader = broadcast.get('user_display_name') or broadcast.get('username')
         title = '%s - %s' % (uploader, title) if uploader else title
         is_live = broadcast.get('state').lower() == 'running'
@ -4,6 +4,7 @@ import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
merge_dicts,
|
||||
@ -57,7 +58,7 @@ class RedTubeIE(InfoExtractor):
|
||||
|
||||
if not info.get('title'):
|
||||
info['title'] = self._html_search_regex(
|
||||
(r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
|
||||
(r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
|
||||
r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
|
||||
webpage, 'title', group='title',
|
||||
default=None) or self._og_search_title(webpage)
|
||||
@ -77,7 +78,7 @@ class RedTubeIE(InfoExtractor):
|
||||
})
|
||||
medias = self._parse_json(
|
||||
self._search_regex(
|
||||
r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
|
||||
r'mediaDefinition["\']?\s*:\s*(\[.+?}\s*\])', webpage,
|
||||
'media definitions', default='{}'),
|
||||
video_id, fatal=False)
|
||||
if medias and isinstance(medias, list):
|
||||
@ -85,6 +86,12 @@ class RedTubeIE(InfoExtractor):
|
||||
format_url = url_or_none(media.get('videoUrl'))
|
||||
if not format_url:
|
||||
continue
|
||||
if media.get('format') == 'hls' or determine_ext(format_url) == 'm3u8':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
format_url, video_id, 'mp4',
|
||||
entry_protocol='m3u8_native', m3u8_id='hls',
|
||||
fatal=False))
|
||||
continue
|
||||
format_id = media.get('quality')
|
||||
formats.append({
|
||||
'url': format_url,
|
||||
|
@ -6,18 +6,16 @@ import re
|
||||
from .common import InfoExtractor
|
||||
from .jwplatform import JWPlatformIE
|
||||
from .nexx import NexxIE
|
||||
from ..compat import (
|
||||
compat_str,
|
||||
compat_urlparse,
|
||||
)
|
||||
from ..compat import compat_urlparse
|
||||
from ..utils import (
|
||||
NO_DEFAULT,
|
||||
try_get,
|
||||
smuggle_url,
|
||||
)
|
||||
|
||||
|
||||
class Tele5IE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?tele5\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
||||
_GEO_COUNTRIES = ['DE']
|
||||
_TESTS = [{
|
||||
'url': 'https://www.tele5.de/mediathek/filme-online/videos?vid=1549416',
|
||||
'info_dict': {
|
||||
@ -30,6 +28,21 @@ class Tele5IE(InfoExtractor):
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
# jwplatform, nexx unavailable
|
||||
'url': 'https://www.tele5.de/filme/ghoul-das-geheimnis-des-friedhofmonsters/',
|
||||
'info_dict': {
|
||||
'id': 'WJuiOlUp',
|
||||
'ext': 'mp4',
|
||||
'upload_date': '20200603',
|
||||
'timestamp': 1591214400,
|
||||
'title': 'Ghoul - Das Geheimnis des Friedhofmonsters',
|
||||
'description': 'md5:42002af1d887ff3d5b2b3ca1f8137d97',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
'add_ie': [JWPlatformIE.ie_key()],
|
||||
}, {
|
||||
'url': 'https://www.tele5.de/kalkofes-mattscheibe/video-clips/politik-und-gesellschaft?ve_id=1551191',
|
||||
'only_matching': True,
|
||||
@ -88,15 +101,8 @@ class Tele5IE(InfoExtractor):
|
||||
if not jwplatform_id:
|
||||
jwplatform_id = extract_id(JWPLATFORM_ID_RE, 'jwplatform id')
|
||||
|
||||
media = self._download_json(
|
||||
'https://cdn.jwplayer.com/v2/media/' + jwplatform_id,
|
||||
display_id)
|
||||
nexx_id = try_get(
|
||||
media, lambda x: x['playlist'][0]['nexx_id'], compat_str)
|
||||
|
||||
if nexx_id:
|
||||
return nexx_result(nexx_id)
|
||||
|
||||
return self.url_result(
|
||||
'jwplatform:%s' % jwplatform_id, ie=JWPlatformIE.ie_key(),
|
||||
video_id=jwplatform_id)
|
||||
smuggle_url(
|
||||
'jwplatform:%s' % jwplatform_id,
|
||||
{'geo_countries': self._GEO_COUNTRIES}),
|
||||
ie=JWPlatformIE.ie_key(), video_id=jwplatform_id)
|
||||
|
@ -21,6 +21,8 @@ from ..utils import (
|
||||
orderedSet,
|
||||
parse_duration,
|
||||
parse_iso8601,
|
||||
qualities,
|
||||
str_or_none,
|
||||
try_get,
|
||||
unified_timestamp,
|
||||
update_url_query,
|
||||
@ -50,8 +52,14 @@ class TwitchBaseIE(InfoExtractor):
|
||||
|
||||
def _call_api(self, path, item_id, *args, **kwargs):
|
||||
headers = kwargs.get('headers', {}).copy()
|
||||
headers['Client-ID'] = self._CLIENT_ID
|
||||
kwargs['headers'] = headers
|
||||
headers.update({
|
||||
'Accept': 'application/vnd.twitchtv.v5+json; charset=UTF-8',
|
||||
'Client-ID': self._CLIENT_ID,
|
||||
})
|
||||
kwargs.update({
|
||||
'headers': headers,
|
||||
'expected_status': (400, 410),
|
||||
})
|
||||
response = self._download_json(
|
||||
'%s/%s' % (self._API_BASE, path), item_id,
|
||||
*args, **compat_kwargs(kwargs))
|
||||
@ -186,12 +194,27 @@ class TwitchItemBaseIE(TwitchBaseIE):
|
||||
is_live = False
|
||||
else:
|
||||
is_live = None
|
||||
_QUALITIES = ('small', 'medium', 'large')
|
||||
quality_key = qualities(_QUALITIES)
|
||||
thumbnails = []
|
||||
preview = info.get('preview')
|
||||
if isinstance(preview, dict):
|
||||
for thumbnail_id, thumbnail_url in preview.items():
|
||||
thumbnail_url = url_or_none(thumbnail_url)
|
||||
if not thumbnail_url:
|
||||
continue
|
||||
if thumbnail_id not in _QUALITIES:
|
||||
continue
|
||||
thumbnails.append({
|
||||
'url': thumbnail_url,
|
||||
'preference': quality_key(thumbnail_id),
|
||||
})
|
||||
return {
|
||||
'id': info['_id'],
|
||||
'title': info.get('title') or 'Untitled Broadcast',
|
||||
'description': info.get('description'),
|
||||
'duration': int_or_none(info.get('length')),
|
||||
'thumbnail': info.get('preview'),
|
||||
'thumbnails': thumbnails,
|
||||
'uploader': info.get('channel', {}).get('display_name'),
|
||||
'uploader_id': info.get('channel', {}).get('name'),
|
||||
'timestamp': parse_iso8601(info.get('recorded_at')),
|
||||
@ -572,10 +595,18 @@ class TwitchStreamIE(TwitchBaseIE):
|
||||
else super(TwitchStreamIE, cls).suitable(url))
|
||||
|
||||
def _real_extract(self, url):
|
||||
channel_id = self._match_id(url)
|
||||
channel_name = self._match_id(url)
|
||||
|
||||
access_token = self._call_api(
|
||||
'api/channels/%s/access_token' % channel_name, channel_name,
|
||||
'Downloading access token JSON')
|
||||
|
||||
token = access_token['token']
|
||||
channel_id = compat_str(self._parse_json(
|
||||
token, channel_name)['channel_id'])
|
||||
|
||||
stream = self._call_api(
|
||||
'kraken/streams/%s?stream_type=all' % channel_id.lower(),
|
||||
'kraken/streams/%s?stream_type=all' % channel_id,
|
||||
channel_id, 'Downloading stream JSON').get('stream')
|
||||
|
||||
if not stream:
|
||||
@ -585,11 +616,9 @@ class TwitchStreamIE(TwitchBaseIE):
|
||||
# (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON) that will lead to constructing
|
||||
# an invalid m3u8 URL. Working around by use of original channel name from stream
|
||||
# JSON and fallback to lowercase if it's not available.
|
||||
channel_id = stream.get('channel', {}).get('name') or channel_id.lower()
|
||||
|
||||
access_token = self._call_api(
|
||||
'api/channels/%s/access_token' % channel_id, channel_id,
|
||||
'Downloading channel access token')
|
||||
channel_name = try_get(
|
||||
stream, lambda x: x['channel']['name'],
|
||||
compat_str) or channel_name.lower()
|
||||
|
||||
query = {
|
||||
'allow_source': 'true',
|
||||
@ -600,11 +629,11 @@ class TwitchStreamIE(TwitchBaseIE):
|
||||
'playlist_include_framerate': 'true',
|
||||
'segment_preference': '4',
|
||||
'sig': access_token['sig'].encode('utf-8'),
|
||||
'token': access_token['token'].encode('utf-8'),
|
||||
'token': token.encode('utf-8'),
|
||||
}
|
||||
formats = self._extract_m3u8_formats(
|
||||
'%s/api/channel/hls/%s.m3u8?%s'
|
||||
% (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
|
||||
% (self._USHER_BASE, channel_name, compat_urllib_parse_urlencode(query)),
|
||||
channel_id, 'mp4')
|
||||
self._prefer_source(formats)
|
||||
|
||||
@ -627,8 +656,8 @@ class TwitchStreamIE(TwitchBaseIE):
|
||||
})
|
||||
|
||||
return {
|
||||
'id': compat_str(stream['_id']),
|
||||
'display_id': channel_id,
|
||||
'id': str_or_none(stream.get('_id')) or channel_id,
|
||||
'display_id': channel_name,
|
||||
'title': title,
|
||||
'description': description,
|
||||
'thumbnails': thumbnails,
|
||||
|
@ -578,6 +578,18 @@ class TwitterBroadcastIE(TwitterBaseIE, PeriscopeBaseIE):
|
||||
IE_NAME = 'twitter:broadcast'
|
||||
_VALID_URL = TwitterBaseIE._BASE_REGEX + r'i/broadcasts/(?P<id>[0-9a-zA-Z]{13})'
|
||||
|
||||
_TEST = {
|
||||
# untitled Periscope video
|
||||
'url': 'https://twitter.com/i/broadcasts/1yNGaQLWpejGj',
|
||||
'info_dict': {
|
||||
'id': '1yNGaQLWpejGj',
|
||||
'ext': 'mp4',
|
||||
'title': 'Andrea May Sahouri - Periscope Broadcast',
|
||||
'uploader': 'Andrea May Sahouri',
|
||||
'uploader_id': '1PXEdBZWpGwKe',
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
broadcast_id = self._match_id(url)
|
||||
broadcast = self._call_api(
|
||||
|
youtube_dl/extractor/wistia.py
@@ -56,7 +56,7 @@ class WistiaIE(InfoExtractor):
             urls.append(unescapeHTML(match.group('url')))
         for match in re.finditer(
                 r'''(?sx)
-                    <div[^>]+class=(["']).*?\bwistia_async_(?P<id>[a-z0-9]{10})\b.*?\2
+                    <div[^>]+class=(["'])(?:(?!\1).)*?\bwistia_async_(?P<id>[a-z0-9]{10})\b(?:(?!\1).)*?\1
                 ''', webpage):
             urls.append('wistia:%s' % match.group('id'))
         for match in re.finditer(r'(?:data-wistia-?id=["\']|Wistia\.embed\(["\']|id=["\']wistia_)(?P<id>[a-z0-9]{10})', webpage):
@ -20,13 +20,13 @@ from ..utils import (
|
||||
|
||||
|
||||
class XHamsterIE(InfoExtractor):
|
||||
_DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster[27]\.com)'
|
||||
_DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster\d+\.com)'
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://
|
||||
(?:.+?\.)?%s/
|
||||
(?:
|
||||
movies/(?P<id>\d+)/(?P<display_id>[^/]*)\.html|
|
||||
videos/(?P<display_id_2>[^/]*)-(?P<id_2>\d+)
|
||||
movies/(?P<id>[\dA-Za-z]+)/(?P<display_id>[^/]*)\.html|
|
||||
videos/(?P<display_id_2>[^/]*)-(?P<id_2>[\dA-Za-z]+)
|
||||
)
|
||||
''' % _DOMAINS
|
||||
_TESTS = [{
|
||||
@ -99,12 +99,21 @@ class XHamsterIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'https://xhamster2.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://xhamster11.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://xhamster26.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://de.xhamster.com/videos/skinny-girl-fucks-herself-hard-in-the-forest-xhnBJZx',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
@ -129,7 +138,7 @@ class XHamsterIE(InfoExtractor):
|
||||
|
||||
initials = self._parse_json(
|
||||
self._search_regex(
|
||||
r'window\.initials\s*=\s*({.+?})\s*;\s*\n', webpage, 'initials',
|
||||
r'window\.initials\s*=\s*({.+?})\s*;', webpage, 'initials',
|
||||
default='{}'),
|
||||
video_id, fatal=False)
|
||||
if initials:
|
||||
|
@ -70,9 +70,14 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
|
||||
|
||||
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
|
||||
|
||||
_YOUTUBE_CLIENT_HEADERS = {
|
||||
'x-youtube-client-name': '1',
|
||||
'x-youtube-client-version': '1.20200609.04.02',
|
||||
}
|
||||
|
||||
def _set_language(self):
|
||||
self._set_cookie(
|
||||
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
|
||||
'.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
|
||||
# YouTube sets the expire time to about two months
|
||||
expire_time=time.time() + 2 * 30 * 24 * 3600)
|
||||
|
||||
@ -298,10 +303,11 @@ class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
|
||||
# Downloading page may result in intermittent 5xx HTTP error
|
||||
# that is usually worked around with a retry
|
||||
more = self._download_json(
|
||||
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
|
||||
'https://www.youtube.com/%s' % mobj.group('more'), playlist_id,
|
||||
'Downloading page #%s%s'
|
||||
% (page_num, ' (retry #%d)' % count if count else ''),
|
||||
transform_source=uppercase_escape)
|
||||
transform_source=uppercase_escape,
|
||||
headers=self._YOUTUBE_CLIENT_HEADERS)
|
||||
break
|
||||
except ExtractorError as e:
|
||||
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
|
||||
@ -391,6 +397,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
(?:www\.)?yewtu\.be/|
|
||||
(?:www\.)?yt\.elukerio\.org/|
|
||||
(?:www\.)?yt\.lelux\.fi/|
|
||||
(?:www\.)?invidious\.ggc-project\.de/|
|
||||
(?:www\.)?yt\.maisputain\.ovh/|
|
||||
(?:www\.)?invidious\.13ad\.de/|
|
||||
(?:www\.)?invidious\.toot\.koeln/|
|
||||
(?:www\.)?invidious\.fdn\.fr/|
|
||||
(?:www\.)?watch\.nettohikari\.com/|
|
||||
(?:www\.)?kgg2m7yk5aybusll\.onion/|
|
||||
(?:www\.)?qklhadlycap4cnod\.onion/|
|
||||
(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
|
||||
@ -398,6 +410,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
|
||||
(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
|
||||
(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
|
||||
(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
|
||||
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
|
||||
(?:.*?\#/)? # handle anchor (#/) redirect urls
|
||||
(?: # the various things that can precede the ID:
|
||||
@ -1371,7 +1384,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
funcname = self._search_regex(
|
||||
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
|
||||
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
|
||||
r'\b(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
|
||||
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
|
||||
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
|
||||
# Obsolete patterns
|
||||
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
|
||||
@ -1645,8 +1658,63 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
video_id = mobj.group(2)
|
||||
return video_id
|
||||
|
||||
def _extract_chapters_from_json(self, webpage, video_id, duration):
|
||||
if not webpage:
|
||||
return
|
||||
player = self._parse_json(
|
||||
self._search_regex(
|
||||
r'RELATED_PLAYER_ARGS["\']\s*:\s*({.+})\s*,?\s*\n', webpage,
|
||||
'player args', default='{}'),
|
||||
video_id, fatal=False)
|
||||
if not player or not isinstance(player, dict):
|
||||
return
|
||||
watch_next_response = player.get('watch_next_response')
|
||||
if not isinstance(watch_next_response, compat_str):
|
||||
return
|
||||
response = self._parse_json(watch_next_response, video_id, fatal=False)
|
||||
if not response or not isinstance(response, dict):
|
||||
return
|
||||
chapters_list = try_get(
|
||||
response,
|
||||
lambda x: x['playerOverlays']
|
||||
['playerOverlayRenderer']
|
||||
['decoratedPlayerBarRenderer']
|
||||
['decoratedPlayerBarRenderer']
|
||||
['playerBar']
|
||||
['chapteredPlayerBarRenderer']
|
||||
['chapters'],
|
||||
list)
|
||||
if not chapters_list:
|
||||
return
|
||||
|
||||
def chapter_time(chapter):
|
||||
return float_or_none(
|
||||
try_get(
|
||||
chapter,
|
||||
lambda x: x['chapterRenderer']['timeRangeStartMillis'],
|
||||
int),
|
||||
scale=1000)
|
||||
chapters = []
|
||||
for next_num, chapter in enumerate(chapters_list, start=1):
|
||||
start_time = chapter_time(chapter)
|
||||
if start_time is None:
|
||||
continue
|
||||
end_time = (chapter_time(chapters_list[next_num])
|
||||
if next_num < len(chapters_list) else duration)
|
||||
if end_time is None:
|
||||
continue
|
||||
title = try_get(
|
||||
chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
|
||||
compat_str)
|
||||
chapters.append({
|
||||
'start_time': start_time,
|
||||
'end_time': end_time,
|
||||
'title': title,
|
||||
})
|
||||
return chapters
|
||||
|
||||
@staticmethod
|
||||
def _extract_chapters(description, duration):
|
||||
def _extract_chapters_from_description(description, duration):
|
||||
if not description:
|
||||
return None
|
||||
chapter_lines = re.findall(
|
||||
@ -1680,6 +1748,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
})
|
||||
return chapters
|
||||
|
||||
def _extract_chapters(self, webpage, description, video_id, duration):
|
||||
return (self._extract_chapters_from_json(webpage, video_id, duration)
|
||||
or self._extract_chapters_from_description(description, duration))
|
||||
|
||||
def _real_extract(self, url):
|
||||
url, smuggled_data = unsmuggle_url(url, {})
|
||||
|
||||
@ -1826,6 +1898,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
video_details = try_get(
|
||||
player_response, lambda x: x['videoDetails'], dict) or {}
|
||||
|
||||
microformat = try_get(
|
||||
player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
|
||||
|
||||
video_title = video_info.get('title', [None])[0] or video_details.get('title')
|
||||
if not video_title:
|
||||
self._downloader.report_warning('Unable to extract video title')
|
||||
@ -1855,7 +1930,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
''', replace_url, video_description)
|
||||
video_description = clean_html(video_description)
|
||||
else:
|
||||
video_description = self._html_search_meta('description', video_webpage) or video_details.get('shortDescription')
|
||||
video_description = video_details.get('shortDescription') or self._html_search_meta('description', video_webpage)
|
||||
|
||||
if not smuggled_data.get('force_singlefeed', False):
|
||||
if not self._downloader.params.get('noplaylist'):
|
||||
@ -1903,6 +1978,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
view_count = extract_view_count(video_info)
|
||||
if view_count is None and video_details:
|
||||
view_count = int_or_none(video_details.get('viewCount'))
|
||||
if view_count is None and microformat:
|
||||
view_count = int_or_none(microformat.get('viewCount'))
|
||||
|
||||
if is_live is None:
|
||||
is_live = bool_or_none(video_details.get('isLive'))
|
||||
@ -2154,7 +2231,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
video_uploader_id = mobj.group('uploader_id')
|
||||
video_uploader_url = mobj.group('uploader_url')
|
||||
else:
|
||||
self._downloader.report_warning('unable to extract uploader nickname')
|
||||
owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
|
||||
if owner_profile_url:
|
||||
video_uploader_id = self._search_regex(
|
||||
r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
|
||||
default=None)
|
||||
video_uploader_url = owner_profile_url
|
||||
|
||||
channel_id = (
|
||||
str_or_none(video_details.get('channelId'))
|
||||
@ -2165,17 +2247,33 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
video_webpage, 'channel id', default=None, group='id'))
|
||||
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
|
||||
|
||||
# thumbnail image
|
||||
thumbnails = []
|
||||
thumbnails_list = try_get(
|
||||
video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
|
||||
for t in thumbnails_list:
|
||||
if not isinstance(t, dict):
|
||||
continue
|
||||
thumbnail_url = url_or_none(t.get('url'))
|
||||
if not thumbnail_url:
|
||||
continue
|
||||
thumbnails.append({
|
||||
'url': thumbnail_url,
|
||||
'width': int_or_none(t.get('width')),
|
||||
'height': int_or_none(t.get('height')),
|
||||
})
|
||||
|
||||
if not thumbnails:
|
||||
video_thumbnail = None
|
||||
# We try first to get a high quality image:
|
||||
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
|
||||
video_webpage, re.DOTALL)
|
||||
if m_thumb is not None:
|
||||
video_thumbnail = m_thumb.group(1)
|
||||
elif 'thumbnail_url' not in video_info:
|
||||
self._downloader.report_warning('unable to extract video thumbnail')
|
||||
video_thumbnail = None
|
||||
else: # don't panic if we can't find it
|
||||
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
|
||||
thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
|
||||
if thumbnail_url:
|
||||
video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
|
||||
if video_thumbnail:
|
||||
thumbnails.append({'url': video_thumbnail})
|
||||
|
||||
# upload date
|
||||
upload_date = self._html_search_meta(
|
||||
@ -2185,6 +2283,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
[r'(?s)id="eow-date.*?>(.*?)</span>',
|
||||
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
|
||||
video_webpage, 'upload date', default=None)
|
||||
if not upload_date:
|
||||
upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
|
||||
upload_date = unified_strdate(upload_date)
|
||||
|
||||
video_license = self._html_search_regex(
|
||||
@ -2256,17 +2356,21 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
m_cat_container = self._search_regex(
|
||||
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
|
||||
video_webpage, 'categories', default=None)
|
||||
category = None
|
||||
if m_cat_container:
|
||||
category = self._html_search_regex(
|
||||
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
|
||||
default=None)
|
||||
if not category:
|
||||
category = try_get(
|
||||
microformat, lambda x: x['category'], compat_str)
|
||||
video_categories = None if category is None else [category]
|
||||
else:
|
||||
video_categories = None
|
||||
|
||||
video_tags = [
|
||||
unescapeHTML(m.group('content'))
|
||||
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
|
||||
if not video_tags:
|
||||
video_tags = try_get(video_details, lambda x: x['keywords'], list)
|
||||
|
||||
def _extract_count(count_name):
|
||||
return str_to_int(self._search_regex(
|
||||
@ -2317,7 +2421,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
errnote='Unable to download video annotations', fatal=False,
|
||||
data=urlencode_postdata({xsrf_field_name: xsrf_token}))
|
||||
|
||||
chapters = self._extract_chapters(description_original, video_duration)
|
||||
chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
|
||||
|
||||
# Look for the DASH manifest
|
||||
if self._downloader.params.get('youtube_include_dash_manifest', True):
|
||||
@ -2408,7 +2512,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||
'creator': video_creator or artist,
|
||||
'title': video_title,
|
||||
'alt_title': video_alt_title or track,
|
||||
'thumbnail': video_thumbnail,
|
||||
'thumbnails': thumbnails,
|
||||
'description': video_description,
|
||||
'categories': video_categories,
|
||||
'tags': video_tags,
|
||||
@ -2672,7 +2776,7 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
|
||||
ids = []
|
||||
last_id = playlist_id[-11:]
|
||||
for n in itertools.count(1):
|
||||
url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
|
||||
url = 'https://www.youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
|
||||
webpage = self._download_webpage(
|
||||
url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
|
||||
new_ids = orderedSet(re.findall(
|
||||
@ -3012,7 +3116,7 @@ class YoutubeLiveIE(YoutubeBaseInfoExtractor):
|
||||
|
||||
class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
|
||||
IE_DESC = 'YouTube.com user/channel playlists'
|
||||
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
|
||||
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel|c)/(?P<id>[^/]+)/playlists'
|
||||
IE_NAME = 'youtube:playlists'
|
||||
|
||||
_TESTS = [{
|
||||
@ -3038,6 +3142,9 @@ class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
|
||||
'title': 'Chem Player',
|
||||
},
|
||||
'skip': 'Blocked',
|
||||
}, {
|
||||
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
|
||||
@ -3182,9 +3289,10 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
|
||||
break
|
||||
|
||||
more = self._download_json(
|
||||
'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
|
||||
'https://www.youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
|
||||
'Downloading page #%s' % page_num,
|
||||
transform_source=uppercase_escape)
|
||||
transform_source=uppercase_escape,
|
||||
headers=self._YOUTUBE_CLIENT_HEADERS)
|
||||
content_html = more['content_html']
|
||||
more_widget_html = more['load_more_widget_html']
|
||||
|
||||
|
youtube_dl/postprocessor/ffmpeg.py
@@ -447,6 +447,13 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
                         metadata[meta_f] = info[info_f]
                     break

+        # See [1-4] for some info on media metadata/metadata supported
+        # by ffmpeg.
+        # 1. https://kdenlive.org/en/project/adding-meta-data-to-mp4-video/
+        # 2. https://wiki.multimedia.cx/index.php/FFmpeg_Metadata
+        # 3. https://kodi.wiki/view/Video_file_tagging
+        # 4. http://atomicparsley.sourceforge.net/mpeg-4files.html
+
         add('title', ('track', 'title'))
         add('date', 'upload_date')
         add(('description', 'comment'), 'description')
@@ -457,6 +464,10 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
         add('album')
         add('album_artist')
         add('disc', 'disc_number')
+        add('show', 'series')
+        add('season_number')
+        add('episode_id', ('episode', 'episode_id'))
+        add('episode_sort', 'episode_number')

         if not metadata:
             self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
youtube_dl/utils.py
@@ -1837,6 +1837,12 @@ def write_json_file(obj, fn):
                 os.unlink(fn)
             except OSError:
                 pass
+        try:
+            mask = os.umask(0)
+            os.umask(mask)
+            os.chmod(tf.name, 0o666 & ~mask)
+        except OSError:
+            pass
         os.rename(tf.name, fn)
     except Exception:
         try:
youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals

-__version__ = '2020.05.08'
+__version__ = '2020.07.28'