mirror of https://codeberg.org/polarisfm/youtube-dl synced 2024-12-02 13:27:56 +01:00
J. Randall Owens 2019-06-11 16:31:20 +01:00
commit a6062ea104
92 changed files with 1152 additions and 1117 deletions

.github/ISSUE_TEMPLATE/1_broken_site.md

@@ -18,7 +18,7 @@ title: ''
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.04.30. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.06.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2019.04.30**
+- [ ] I've verified that I'm running youtube-dl version **2019.06.08**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2019.04.30
+[debug] youtube-dl version 2019.06.08
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}

.github/ISSUE_TEMPLATE/2_site_support_request.md

@@ -19,7 +19,7 @@ labels: 'site-support-request'
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.04.30. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.06.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2019.04.30**
+- [ ] I've verified that I'm running youtube-dl version **2019.06.08**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones

.github/ISSUE_TEMPLATE/3_site_feature_request.md

@@ -18,13 +18,13 @@ title: ''
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.04.30. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.06.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2019.04.30**
+- [ ] I've verified that I'm running youtube-dl version **2019.06.08**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones

.github/ISSUE_TEMPLATE/4_bug_report.md

@@ -18,7 +18,7 @@ title: ''
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.04.30. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.06.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2019.04.30**
+- [ ] I've verified that I'm running youtube-dl version **2019.06.08**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2019.04.30
+[debug] youtube-dl version 2019.06.08
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}

.github/ISSUE_TEMPLATE/5_feature_request.md

@@ -19,13 +19,13 @@ labels: 'request'
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.04.30. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.06.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2019.04.30**
+- [ ] I've verified that I'm running youtube-dl version **2019.06.08**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones

.travis.yml

@@ -9,6 +9,7 @@ python:
   - "3.6"
   - "pypy"
   - "pypy3"
+dist: trusty
 env:
   - YTDL_TEST_SET=core
   - YTDL_TEST_SET=download

ChangeLog

@@ -1,3 +1,83 @@
+version 2019.06.08
+
+Core
+* [downloader/common] Improve rate limit (#21301)
+* [utils] Improve strip_or_none
+* [extractor/common] Strip src attribute for HTML5 entries code (#18485,
+  #21169)
+
+Extractors
+* [ted] Fix playlist extraction (#20844, #21032)
+* [vlive:playlist] Fix video extraction when no playlist is found (#20590)
++ [vlive] Add CH+ support (#16887, #21209)
++ [openload] Add support for oload.website (#21329)
++ [tvnow] Extract HD formats (#21201)
++ [redbulltv] Add support for rrn:content URLs (#21297)
+* [youtube] Fix average rating extraction (#21304)
++ [bitchute] Extract HTML5 formats (#21306)
+* [cbsnews] Fix extraction (#9659, #15397)
+* [vvvvid] Relax URL regular expression (#21299)
++ [prosiebensat1] Add support for new API (#21272)
++ [vrv] Extract adaptive_hls formats (#21243)
+* [viki] Switch to HTTPS (#21001)
+* [LiveLeak] Check if the original videos exist (#21206, #21208)
+* [rtp] Fix extraction (#15099)
+* [youtube] Improve DRM protected videos detection (#1774)
++ [srgssrplay] Add support for popupvideoplayer URLs (#21155)
++ [24video] Add support for porno.24video.net (#21194)
++ [24video] Add support for 24video.site (#21193)
+- [pornflip] Remove extractor
+- [criterion] Remove extractor (#21195)
+* [pornhub] Use HTTPS (#21061)
+* [bitchute] Fix uploader extraction (#21076)
+* [streamcloud] Reduce waiting time to 6 seconds (#21092)
+- [novamov] Remove extractors (#21077)
++ [openload] Add support for oload.press (#21135)
+* [vivo] Fix extraction (#18906, #19217)
+
+
+version 2019.05.20
+
+Core
++ [extractor/common] Move workaround for applying first Set-Cookie header
+  into a separate _apply_first_set_cookie_header method
+
+Extractors
+* [safari] Fix authentication (#21090)
+* [vk] Use _apply_first_set_cookie_header
+* [vrt] Fix extraction (#20527)
++ [canvas] Add support for vrtnieuws and sporza site ids and extract
+  AES HLS formats
++ [vrv] Extract captions (#19238)
+* [tele5] Improve video id extraction
+* [tele5] Relax URL regular expression (#21020, #21063)
+* [svtplay] Update API URL (#21075)
++ [yahoo:gyao] Add X-User-Agent header to dam proxy requests (#21071)
+
+
+version 2019.05.11
+
+Core
+* [utils] Transliterate "þ" as "th" (#20897)
+
+Extractors
++ [cloudflarestream] Add support for videodelivery.net (#21049)
++ [byutv] Add support for DVR videos (#20574, #20676)
++ [gfycat] Add support for URLs with tags (#20696, #20731)
++ [openload] Add support for verystream.com (#20701, #20967)
+* [youtube] Use sp field value for signature field name (#18841, #18927,
+  #21028)
++ [yahoo:gyao] Extend URL regular expression (#21008)
+* [youtube] Fix channel id extraction (#20982, #21003)
++ [sky] Add support for news.sky.com (#13055)
++ [youtube:entrylistbase] Retry on 5xx HTTP errors (#20965)
++ [francetvinfo] Extend video id extraction (#20619, #20740)
+* [4tube] Update token hosts (#20918)
+* [hotstar] Move to API v2 (#20931)
+* [fox] Fix API error handling under python 2 (#20925)
++ [redbulltv] Extend URL regular expression (#20922)
+
+
 version 2019.04.30
 
 Extractors

devscripts/check-porn.py

@@ -45,12 +45,12 @@ for test in gettestcases():
     RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST)

-    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict'] or
-                   test['info_dict']['age_limit'] != 18):
+    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict']
+                   or test['info_dict']['age_limit'] != 18):
         print('\nPotential missing age_limit check: {0}'.format(test['name']))

-    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict'] and
-                         test['info_dict']['age_limit'] == 18):
+    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict']
+                         and test['info_dict']['age_limit'] == 18):
         print('\nPotential false negative: {0}'.format(test['name']))

     else:

docs/supportedsites.md

@@ -78,7 +78,6 @@
 - **AudioBoom**
 - **audiomack**
 - **audiomack:album**
-- **auroravid**: AuroraVid
 - **AWAAN**
 - **awaan:live**
 - **awaan:season**
@@ -150,6 +149,7 @@
 - **CBSInteractive**
 - **CBSLocal**
 - **cbsnews**: CBS News
+- **cbsnews:embed**
 - **cbsnews:livevideo**: CBS News Live Videos
 - **CBSSports**
 - **CCMA**
@@ -174,7 +174,6 @@
 - **Clipsyndicate**
 - **CloserToTruth**
 - **CloudflareStream**
-- **cloudtime**: CloudTime
 - **Cloudy**
 - **Clubic**
 - **Clyp**
@@ -194,7 +193,6 @@
 - **Coub**
 - **Cracked**
 - **Crackle**
-- **Criterion**
 - **CrooksAndLiars**
 - **crunchyroll**
 - **crunchyroll:playlist**
@@ -609,7 +607,6 @@
 - **nowness**
 - **nowness:playlist**
 - **nowness:series**
-- **nowvideo**: NowVideo
 - **Noz**
 - **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
 - **npo.nl:live**
@@ -693,7 +690,6 @@
 - **PopcornTV**
 - **PornCom**
 - **PornerBros**
-- **PornFlip**
 - **PornHd**
 - **PornHub**: PornHub and Thumbzilla
 - **PornHubPlaylist**
@@ -734,6 +730,7 @@
 - **RBMARadio**
 - **RDS**: RDS.ca
 - **RedBullTV**
+- **RedBullTVRrnContent**
 - **Reddit**
 - **RedditR**
 - **RedTube**
@@ -805,6 +802,7 @@
 - **ShowRoomLive**
 - **Sina**
 - **SkylineWebcams**
+- **SkyNews**
 - **skynewsarabia:article**
 - **skynewsarabia:video**
 - **SkySports**
@@ -999,6 +997,7 @@
 - **Vbox7**
 - **VeeHD**
 - **Veoh**
+- **verystream**
 - **Vessel**
 - **Vesti**: Вести.Ru
 - **Vevo**
@@ -1022,7 +1021,6 @@
 - **videomore:video**
 - **VideoPremium**
 - **VideoPress**
-- **videoweed**: VideoWeed
 - **Vidio**
 - **VidLii**
 - **vidme**
@@ -1069,7 +1067,7 @@
 - **VoxMediaVolume**
 - **vpro**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
 - **Vrak**
-- **VRT**: deredactie.be, sporza.be, cobra.be and cobra.canvas.be
+- **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
 - **VrtNU**: VrtNU.be
 - **vrv**
 - **vrv:series**
@@ -1099,7 +1097,6 @@
 - **Weibo**
 - **WeiboMobile**
 - **WeiqiTV**: WQTV
-- **wholecloud**: WholeCloud
 - **Wimp**
 - **Wistia**
 - **wnl**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl

setup.cfg

@@ -3,4 +3,4 @@ universal = True

 [flake8]
 exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,devscripts/lazy_load_template.py,devscripts/make_issue_template.py,setup.py,build,.git,venv
-ignore = E402,E501,E731,E741
+ignore = E402,E501,E731,E741,W503
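Ignoring W503 (line break before binary operator) is what permits the repo-wide reflow in the hunks below, which move `and`/`or`/`+` from the end of a continuation line to the start of the next one. A minimal illustration with made-up names:

    # Previously preferred (W504 side): the operator ends the line
    total = (first_part +
             second_part)

    # Newly preferred (would trigger W503 if it were not ignored): the operator starts the line
    total = (first_part
             + second_part)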

test/test_aes.py

@@ -44,16 +44,16 @@ class TestAES(unittest.TestCase):
     def test_decrypt_text(self):
         password = intlist_to_bytes(self.key).decode('utf-8')
         encrypted = base64.b64encode(
-            intlist_to_bytes(self.iv[:8]) +
-            b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
+            intlist_to_bytes(self.iv[:8])
+            + b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
         ).decode('utf-8')
         decrypted = (aes_decrypt_text(encrypted, password, 16))
         self.assertEqual(decrypted, self.secret_msg)

         password = intlist_to_bytes(self.key).decode('utf-8')
         encrypted = base64.b64encode(
-            intlist_to_bytes(self.iv[:8]) +
-            b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
+            intlist_to_bytes(self.iv[:8])
+            + b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
         ).decode('utf-8')
         decrypted = (aes_decrypt_text(encrypted, password, 32))
         self.assertEqual(decrypted, self.secret_msg)

test/test_swfinterp.py

@@ -34,8 +34,8 @@ def _make_testfunc(testfile):
     def test_func(self):
         as_file = os.path.join(TEST_DIR, testfile)
         swf_file = os.path.join(TEST_DIR, test_id + '.swf')
-        if ((not os.path.exists(swf_file)) or
-                os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
+        if ((not os.path.exists(swf_file))
+                or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
             # Recompile
             try:
                 subprocess.check_call([

test/test_utils.py

@@ -73,6 +73,7 @@ from youtube_dl.utils import (
     smuggle_url,
     str_to_int,
     strip_jsonp,
+    strip_or_none,
     timeconvert,
     unescapeHTML,
     unified_strdate,
@@ -183,7 +184,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(sanitize_filename(
             'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
-            'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYPssaaaaaaaeceeeeiiiionooooooooeuuuuuypy')
+            'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy')

     def test_sanitize_ids(self):
         self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
@@ -752,6 +753,18 @@ class TestUtil(unittest.TestCase):
         d = json.loads(stripped)
         self.assertEqual(d, {'status': 'success'})

+    def test_strip_or_none(self):
+        self.assertEqual(strip_or_none(' abc'), 'abc')
+        self.assertEqual(strip_or_none('abc '), 'abc')
+        self.assertEqual(strip_or_none(' abc '), 'abc')
+        self.assertEqual(strip_or_none('\tabc\t'), 'abc')
+        self.assertEqual(strip_or_none('\n\tabc\n\t'), 'abc')
+        self.assertEqual(strip_or_none('abc'), 'abc')
+        self.assertEqual(strip_or_none(''), '')
+        self.assertEqual(strip_or_none(None), None)
+        self.assertEqual(strip_or_none(42), None)
+        self.assertEqual(strip_or_none([]), None)
+
     def test_uppercase_escape(self):
         self.assertEqual(uppercase_escape(''), '')
         self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
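The new tests pin down the improved strip_or_none semantics: strings are stripped, while non-string inputs (42, [], None) now yield None instead of raising. A minimal sketch of an implementation consistent with these tests (the real helper lives in youtube_dl/utils.py and uses compat_str rather than str):

    def strip_or_none(v, default=None):
        # Strip surrounding whitespace from strings; anything else maps to default
        return v.strip() if isinstance(v, str) else default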

youtube_dl/YoutubeDL.py

@@ -400,9 +400,9 @@ class YoutubeDL(object):
             else:
                 raise

-        if (sys.platform != 'win32' and
-                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
-                not params.get('restrictfilenames', False)):
+        if (sys.platform != 'win32'
+                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
+                and not params.get('restrictfilenames', False)):
             # Unicode filesystem API will throw errors (#1474, #13027)
             self.report_warning(
                 'Assuming --restrict-filenames since file system encoding '
@@ -440,9 +440,9 @@ class YoutubeDL(object):
                 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
             if idxs:
                 correct_argv = (
-                    ['youtube-dl'] +
-                    [a for i, a in enumerate(argv) if i not in idxs] +
-                    ['--'] + [argv[i] for i in idxs]
+                    ['youtube-dl']
+                    + [a for i, a in enumerate(argv) if i not in idxs]
+                    + ['--'] + [argv[i] for i in idxs]
                 )
                 self.report_warning(
                     'Long argument string detected. '
@@ -850,8 +850,8 @@ class YoutubeDL(object):
         if result_type in ('url', 'url_transparent'):
             ie_result['url'] = sanitize_url(ie_result['url'])
         extract_flat = self.params.get('extract_flat', False)
-        if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
-                extract_flat is True):
+        if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
+                or extract_flat is True):
             if self.params.get('forcejson', False):
                 self.to_stdout(json.dumps(ie_result))
             return ie_result
@@ -1619,9 +1619,9 @@ class YoutubeDL(object):
                 # https://github.com/ytdl-org/youtube-dl/issues/10083).
                 incomplete_formats = (
                     # All formats are video-only or
-                    all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
+                    all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
                     # all formats are audio-only
-                    all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
+                    or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))

                 ctx = {
                     'formats': formats,
@@ -1947,8 +1947,8 @@ class YoutubeDL(object):
             else:
                 assert fixup_policy in ('ignore', 'never')

-        if (info_dict.get('requested_formats') is None and
-                info_dict.get('container') == 'm4a_dash'):
+        if (info_dict.get('requested_formats') is None
+                and info_dict.get('container') == 'm4a_dash'):
             if fixup_policy == 'warn':
                 self.report_warning(
                     '%s: writing DASH m4a. '
@@ -1967,9 +1967,9 @@ class YoutubeDL(object):
             else:
                 assert fixup_policy in ('ignore', 'never')

-        if (info_dict.get('protocol') == 'm3u8_native' or
-                info_dict.get('protocol') == 'm3u8' and
-                self.params.get('hls_prefer_native')):
+        if (info_dict.get('protocol') == 'm3u8_native'
+                or info_dict.get('protocol') == 'm3u8'
+                and self.params.get('hls_prefer_native')):
             if fixup_policy == 'warn':
                 self.report_warning('%s: malformed AAC bitstream detected.' % (
                     info_dict['id']))
@@ -1995,10 +1995,10 @@ class YoutubeDL(object):
     def download(self, url_list):
         """Download a given list of URLs."""
         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
-        if (len(url_list) > 1 and
-                outtmpl != '-' and
-                '%' not in outtmpl and
-                self.params.get('max_downloads') != 1):
+        if (len(url_list) > 1
+                and outtmpl != '-'
+                and '%' not in outtmpl
+                and self.params.get('max_downloads') != 1):
             raise SameFileError(outtmpl)

         for url in url_list:
@@ -2143,8 +2143,8 @@ class YoutubeDL(object):
             if res:
                 res += ', '
             res += '%s container' % fdict['container']
-        if (fdict.get('vcodec') is not None and
-                fdict.get('vcodec') != 'none'):
+        if (fdict.get('vcodec') is not None
+                and fdict.get('vcodec') != 'none'):
             if res:
                 res += ', '
             res += fdict['vcodec']
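These reflows are behavior-preserving because Python's `and` binds tighter than `or`, so moving the operator to the next line never regroups the expression. For instance, the m3u8 fixup condition above parses the same way in both spellings (illustrative values):

    protocol, hls_prefer_native = 'm3u8', True
    # Parses as: protocol == 'm3u8_native' or (protocol == 'm3u8' and hls_prefer_native)
    result = (protocol == 'm3u8_native'
              or protocol == 'm3u8'
              and hls_prefer_native)
    assert result is True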

youtube_dl/__init__.py

@@ -230,14 +230,14 @@ def _real_main(argv=None):
     if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True

-    outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
-               (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
-               (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
-               (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
-               (opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
-               (opts.useid and '%(id)s.%(ext)s') or
-               (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
-               DEFAULT_OUTTMPL)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
+               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
+               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
+               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
+               or (opts.useid and '%(id)s.%(ext)s')
+               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
+               or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
         parser.error('Cannot download a video and extract audio into the same'
                      ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
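The outtmpl chain is the classic pre-ternary and/or idiom: each parenthesized clause yields its template string only when its options are set, and `or` returns the first truthy result. A stripped-down illustration with hypothetical option values:

    opts_outtmpl = None
    usetitle = True
    outtmpl = ((opts_outtmpl is not None and opts_outtmpl)
               or (usetitle and '%(title)s-%(id)s.%(ext)s')
               or 'fallback.%(ext)s')
    assert outtmpl == '%(title)s-%(id)s.%(ext)s'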

youtube_dl/compat.py

@@ -2649,9 +2649,9 @@ else:
     try:
         args = shlex.split('中文')
-        assert (isinstance(args, list) and
-                isinstance(args[0], compat_str) and
-                args[0] == '中文')
+        assert (isinstance(args, list)
+                and isinstance(args[0], compat_str)
+                and args[0] == '中文')
         compat_shlex_split = shlex.split
     except (AssertionError, UnicodeEncodeError):
         # Working around shlex issue with unicode strings on some python 2

youtube_dl/downloader/common.py

@@ -176,7 +176,9 @@ class FileDownloader(object):
                 return
             speed = float(byte_counter) / elapsed
             if speed > rate_limit:
-                time.sleep(max((byte_counter // rate_limit) - elapsed, 0))
+                sleep_time = float(byte_counter) / rate_limit - elapsed
+                if sleep_time > 0:
+                    time.sleep(sleep_time)

     def temp_name(self, filename):
         """Returns a temporary filename for the given filename."""
@@ -330,15 +332,15 @@ class FileDownloader(object):
         """

        nooverwrites_and_exists = (
-            self.params.get('nooverwrites', False) and
-            os.path.exists(encodeFilename(filename))
+            self.params.get('nooverwrites', False)
+            and os.path.exists(encodeFilename(filename))
         )

         if not hasattr(filename, 'write'):
             continuedl_and_exists = (
-                self.params.get('continuedl', True) and
-                os.path.isfile(encodeFilename(filename)) and
-                not self.params.get('nopart', False)
+                self.params.get('continuedl', True)
+                and os.path.isfile(encodeFilename(filename))
+                and not self.params.get('nopart', False)
             )

             # Check file already present
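The rate-limit fix (#21301) replaces floor division with true division: `byte_counter // rate_limit` truncated the target elapsed time to whole seconds, so the computed sleep could be up to a second too short and throttled downloads could overshoot the limit. Worked through with made-up numbers:

    byte_counter, rate_limit, elapsed = 3000000, 2000000, 0.5

    # Old: 3000000 // 2000000 == 1, so we slept max(1 - 0.5, 0) == 0.5 s
    old_sleep = max((byte_counter // rate_limit) - elapsed, 0)

    # New: 3000000 / 2000000 == 1.5, so we sleep the full remaining 1.0 s
    new_sleep = float(byte_counter) / rate_limit - elapsed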

youtube_dl/downloader/f4m.py

@@ -238,8 +238,8 @@ def write_metadata_tag(stream, metadata):

 def remove_encrypted_media(media):
-    return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
-                                 'drmAdditionalHeaderSetId' not in e.attrib,
+    return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib
+                                 and 'drmAdditionalHeaderSetId' not in e.attrib,
                        media))
@@ -267,8 +267,8 @@ class F4mFD(FragmentFD):
         media = doc.findall(_add_ns('media'))
         if not media:
             self.report_error('No media found')
-        for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
-                  doc.findall(_add_ns('drmAdditionalHeaderSet'))):
+        for e in (doc.findall(_add_ns('drmAdditionalHeader'))
+                  + doc.findall(_add_ns('drmAdditionalHeaderSet'))):
             # If id attribute is missing it's valid for all media nodes
             # without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
             if 'id' not in e.attrib:

youtube_dl/downloader/fragment.py

@@ -219,8 +219,8 @@ class FragmentFD(FileDownloader):
             frag_total_bytes = s.get('total_bytes') or 0
             if not ctx['live']:
                 estimated_size = (
-                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes) /
-                    (state['fragment_index'] + 1) * total_frags)
+                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
+                    / (state['fragment_index'] + 1) * total_frags)
                 state['total_bytes_estimate'] = estimated_size

             if s['status'] == 'finished':
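The reflowed expression still computes the average bytes per fragment so far, scaled up to the total fragment count. With illustrative numbers:

    downloaded, frag_bytes, done, total = 10000000, 500000, 20, 100
    # (10000000 + 500000) / 21 * 100 == 50000000, i.e. roughly 50 MB estimated
    estimate = (downloaded + frag_bytes) / (done + 1) * total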

youtube_dl/downloader/hls.py

@@ -76,12 +76,12 @@ class HlsFD(FragmentFD):
             return fd.real_download(filename, info_dict)

         def is_ad_fragment_start(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s or
-                    s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
+            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
+                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))

         def is_ad_fragment_end(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s or
-                    s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
+            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
+                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))

         media_frags = 0
         ad_frags = 0
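Again the regrouping is safe since `and` binds tighter than `or`. Playlist comment lines these predicates would classify, with hypothetical attribute payloads:

    is_ad_fragment_start('#ANVATO-SEGMENT-INFO:field=1,type=ad')    # True
    is_ad_fragment_start('#UPLYNK-SEGMENT:abc123,ad')               # True
    is_ad_fragment_end('#ANVATO-SEGMENT-INFO:field=1,type=master')  # True
    is_ad_fragment_end('#UPLYNK-SEGMENT:abc123,segment')            # True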

youtube_dl/downloader/http.py

@@ -46,8 +46,8 @@ class HttpFD(FileDownloader):
         is_test = self.params.get('test', False)
         chunk_size = self._TEST_FILE_SIZE if is_test else (
-            info_dict.get('downloader_options', {}).get('http_chunk_size') or
-            self.params.get('http_chunk_size') or 0)
+            info_dict.get('downloader_options', {}).get('http_chunk_size')
+            or self.params.get('http_chunk_size') or 0)

         ctx.open_mode = 'wb'
         ctx.resume_len = 0
@@ -123,11 +123,11 @@ class HttpFD(FileDownloader):
                     content_len = int_or_none(content_range_m.group(3))
                     accept_content_len = (
                         # Non-chunked download
-                        not ctx.chunk_size or
+                        not ctx.chunk_size
                         # Chunked download and requested piece or
                         # its part is promised to be served
-                        content_range_end == range_end or
-                        content_len < range_end)
+                        or content_range_end == range_end
+                        or content_len < range_end)
                     if accept_content_len:
                         ctx.data_len = content_len
                         return
@@ -152,8 +152,8 @@ class HttpFD(FileDownloader):
                     raise
             else:
                 # Examine the reported length
-                if (content_length is not None and
-                        (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
+                if (content_length is not None
+                        and (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
                     # The file had already been fully downloaded.
                     # Explanation to the above condition: in issue #175 it was revealed that
                     # YouTube sometimes adds or removes a few bytes from the end of the file,

youtube_dl/extractor/addanime.py

@@ -59,9 +59,9 @@ class AddAnimeIE(InfoExtractor):
             parsed_url = compat_urllib_parse_urlparse(url)
             av_val = av_res + len(parsed_url.netloc)
             confirm_url = (
-                parsed_url.scheme + '://' + parsed_url.netloc +
-                action + '?' +
-                compat_urllib_parse_urlencode({
+                parsed_url.scheme + '://' + parsed_url.netloc
+                + action + '?'
+                + compat_urllib_parse_urlencode({
                     'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
             self._download_webpage(
                 confirm_url, video_id,

youtube_dl/extractor/biqle.py

@@ -42,7 +42,7 @@ class BIQLEIE(InfoExtractor):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         embed_url = self._proto_relative_url(self._search_regex(
-            r'<iframe.+?src="((?:https?:)?//daxab\.com/[^"]+)".*?></iframe>',
+            r'<iframe.+?src="((?:https?:)?//(?:daxab\.com|dxb\.to|[^/]+/player)/[^"]+)".*?></iframe>',
             webpage, 'embed url'))
         if VKIE.suitable(embed_url):
             return self.url_result(embed_url, VKIE.ie_key(), video_id)
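The widened regex accepts the daxab.com player plus mirrors: dxb.to and any host serving the embed under a /player path. A quick check of what the new pattern captures (the URLs are hypothetical):

    import re

    pattern = r'<iframe.+?src="((?:https?:)?//(?:daxab\.com|dxb\.to|[^/]+/player)/[^"]+)".*?></iframe>'
    for html in ('<iframe src="https://daxab.com/abc"></iframe>',
                 '<iframe src="https://dxb.to/abc"></iframe>',
                 '<iframe src="https://mirror.example/player/abc"></iframe>'):
        print(re.search(pattern, html).group(1))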

youtube_dl/extractor/bitchute.py

@@ -55,6 +55,11 @@ class BitChuteIE(InfoExtractor):
         formats = [
             {'url': format_url}
             for format_url in orderedSet(format_urls)]
+
+        if not formats:
+            formats = self._parse_html5_media_entries(
+                url, webpage, video_id)[0]['formats']
+
         self._check_formats(formats, video_id)
         self._sort_formats(formats)
@@ -65,8 +70,9 @@ class BitChuteIE(InfoExtractor):
             webpage, default=None) or self._html_search_meta(
             'twitter:image:src', webpage, 'thumbnail')
         uploader = self._html_search_regex(
-            r'(?s)<p\b[^>]+\bclass=["\']video-author[^>]+>(.+?)</p>', webpage,
-            'uploader', fatal=False)
+            (r'(?s)<div class=["\']channel-banner.*?<p\b[^>]+\bclass=["\']name[^>]+>(.+?)</p>',
+             r'(?s)<p\b[^>]+\bclass=["\']video-author[^>]+>(.+?)</p>'),
+            webpage, 'uploader', fatal=False)

         return {
             'id': video_id,
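When no format URLs can be scraped from the page scripts, the extractor now falls back to whatever the page's own <video>/<source> markup declares (#21306). A sketch of what the fallback consumes, with hypothetical markup:

    # _parse_html5_media_entries returns one entry per <video>/<audio> tag;
    # the extractor takes the formats of the first entry.
    webpage = '<video><source src="https://example.com/clip.mp4" type="video/mp4"></video>'
    formats = self._parse_html5_media_entries(url, webpage, video_id)[0]['formats']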

youtube_dl/extractor/blinkx.py

@@ -32,8 +32,8 @@ class BlinkxIE(InfoExtractor):
         video_id = self._match_id(url)
         display_id = video_id[:8]

-        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&' +
-                   'video=%s' % video_id)
+        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&'
+                   + 'video=%s' % video_id)
         data_json = self._download_webpage(api_url, display_id)
         data = json.loads(data_json)['api']['results'][0]
         duration = None

youtube_dl/extractor/byutv.py

@@ -3,11 +3,13 @@ from __future__ import unicode_literals

 import re

 from .common import InfoExtractor
+from ..utils import parse_duration


 class BYUtvIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?byutv\.org/(?:watch|player)/(?!event/)(?P<id>[0-9a-f-]+)(?:/(?P<display_id>[^/?#&]+))?'
     _TESTS = [{
+        # ooyalaVOD
         'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
         'info_dict': {
             'id': 'ZvanRocTpW-G5_yZFeltTAMv6jxOU9KH',
@@ -22,6 +24,20 @@ class BYUtvIE(InfoExtractor):
             'skip_download': True,
         },
         'add_ie': ['Ooyala'],
+    }, {
+        # dvr
+        'url': 'https://www.byutv.org/player/8f1dab9b-b243-47c8-b525-3e2d021a3451/byu-softball-pacific-vs-byu-41219---game-2',
+        'info_dict': {
+            'id': '8f1dab9b-b243-47c8-b525-3e2d021a3451',
+            'display_id': 'byu-softball-pacific-vs-byu-41219---game-2',
+            'ext': 'mp4',
+            'title': 'Pacific vs. BYU (4/12/19)',
+            'description': 'md5:1ac7b57cb9a78015910a4834790ce1f3',
+            'duration': 11645,
+        },
+        'params': {
+            'skip_download': True
+        },
     }, {
         'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d',
         'only_matching': True,
@@ -35,17 +51,19 @@ class BYUtvIE(InfoExtractor):
         video_id = mobj.group('id')
         display_id = mobj.group('display_id') or video_id

-        ep = self._download_json(
-            'https://api.byutv.org/api3/catalog/getvideosforcontent', video_id,
-            query={
+        info = self._download_json(
+            'https://api.byutv.org/api3/catalog/getvideosforcontent',
+            display_id, query={
                 'contentid': video_id,
                 'channel': 'byutv',
                 'x-byutv-context': 'web$US',
             }, headers={
                 'x-byutv-context': 'web$US',
                 'x-byutv-platformkey': 'xsaaw9c7y5',
-            })['ooyalaVOD']
-
-        return {
-            '_type': 'url_transparent',
-            'ie_key': 'Ooyala',
+            })
+
+        ep = info.get('ooyalaVOD')
+        if ep:
+            return {
+                '_type': 'url_transparent',
+                'ie_key': 'Ooyala',
@@ -56,3 +74,19 @@ class BYUtvIE(InfoExtractor):
-            'description': ep.get('description'),
-            'thumbnail': ep.get('imageThumbnail'),
-        }
+                'description': ep.get('description'),
+                'thumbnail': ep.get('imageThumbnail'),
+            }
+
+        ep = info['dvr']
+        title = ep['title']
+        formats = self._extract_m3u8_formats(
+            ep['videoUrl'], video_id, 'mp4', entry_protocol='m3u8_native',
+            m3u8_id='hls')
+        self._sort_formats(formats)
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': ep.get('description'),
+            'thumbnail': ep.get('imageThumbnail'),
+            'duration': parse_duration(ep.get('length')),
+            'formats': formats,
+        }

youtube_dl/extractor/canvas.py

@@ -17,7 +17,7 @@ from ..utils import (

 class CanvasIE(InfoExtractor):
-    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrtvideo)/assets/(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza)/assets/(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
         'md5': '90139b746a0a9bd7bb631283f6e2a64e',
@@ -35,6 +35,10 @@ class CanvasIE(InfoExtractor):
         'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
         'only_matching': True,
     }]
+    _HLS_ENTRY_PROTOCOLS_MAP = {
+        'HLS': 'm3u8_native',
+        'HLS_AES': 'm3u8',
+    }

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -52,9 +56,9 @@ class CanvasIE(InfoExtractor):
             format_url, format_type = target.get('url'), target.get('type')
             if not format_url or not format_type:
                 continue
-            if format_type == 'HLS':
+            if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
                 formats.extend(self._extract_m3u8_formats(
-                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
                     m3u8_id=format_type, fatal=False))
             elif format_type == 'HDS':
                 formats.extend(self._extract_f4m_formats(
youtube_dl/extractor/cbs.py

@@ -69,7 +69,7 @@ class CBSIE(CBSBaseIE):
         last_e = None
         for item in items_data.findall('.//item'):
             asset_type = xpath_text(item, 'assetType')
-            if not asset_type or asset_type in asset_types or asset_type in ('HLS_FPS', 'DASH_CENC'):
+            if not asset_type or asset_type in asset_types or 'HLS_FPS' in asset_type or 'DASH_CENC' in asset_type:
                 continue
             asset_types.append(asset_type)
             query = {

youtube_dl/extractor/cbsnews.py

@@ -1,40 +1,62 @@
 # coding: utf-8
 from __future__ import unicode_literals

+import re
+import zlib
+
 from .common import InfoExtractor
 from .cbs import CBSIE
+from ..compat import (
+    compat_b64decode,
+    compat_urllib_parse_unquote,
+)
 from ..utils import (
     parse_duration,
 )


+class CBSNewsEmbedIE(CBSIE):
+    IE_NAME = 'cbsnews:embed'
+    _VALID_URL = r'https?://(?:www\.)?cbsnews\.com/embed/video[^#]*#(?P<id>.+)'
+    _TESTS = [{
+        'url': 'https://www.cbsnews.com/embed/video/?v=1.c9b5b61492913d6660db0b2f03579ef25e86307a#1Vb7b9s2EP5XBAHbT6Gt98PAMKTJ0se6LVjWYWtdGBR1stlIpEBSTtwi%2F%2FvuJNkNhmHdGxgM2NL57vjd6zt%2B8PngdN%2Fyg79qeGvhzN%2FLGrS%2F%2BuBLB531V28%2B%2BO7Qg7%2Fy97r2z3xZ42NW8yLhDbA0S0KWlHnIijwKWJBHZZnHBa8Cgbpdf%2F89NM9Hi9fXifhpr8sr%2FlP848tn%2BTdXycX25zh4cdX%2FvHl6PmmPqnWQv9w8Ed%2B9GjYRim07bFEqdG%2BZVHuwTm65A7bVRrYtR5lAyMox7pigF6W4k%2By91mjspGsJ%2BwVae4%2BsvdnaO1p73HkXs%2FVisUDTGm7R8IcdnOROeq%2B19qT1amhA1VJtPenoTUgrtfKc9m7Rq8dP7nnjwOB7wg7ADdNt7VX64DWAWlKhPtmDEq22g4GF99x6Dk9E8OSsankHXqPNKDxC%2FdK7MLKTircTDgsI3mmj4OBdSq64dy7fd1x577RU1rt4cvMtOaulFYOd%2FLewRWvDO9lIgXFpZSnkZmjbv5SxKTPoQXClFbpsf%2Fhbbpzs0IB3vb8KkyzJQ%2BywOAgCrMpgRrz%2BKk4fvb7kFbR4XJCu0gAdtNO7woCwZTu%2BBUs9bam%2Fds71drVerpeisgrubLjAB4nnOSkWQnfr5W6o1ku5Xpr1MgrCbL0M0vUyDtfLLK15WiYp47xKWSLyjFVpwVmVJSLIoCjSOFkv3W7oKsVliwZJcB9nwXpZ5GEQQwY8jNKqKCBrgjTLeFxgdCIpazojDgnRtn43J6kG7nZ6cAbxh0EeFFk4%2B1u867cY5u4344n%2FxXjCqAjucdTHgLKojNKmSfO8KRsOFY%2FzKEYCKEJBzv90QA9nfm9gL%2BHulaFqUkz9ULUYxl62B3U%2FRVNLA8IhggaPycOoBuwOCESciDQVSSUgiOMsROB%2FhKfwCKOzEk%2B4k6rWd4uuT%2FwTDz7K7t3d3WLO8ISD95jSPQbayBacthbz86XVgxHwhex5zawzgDOmtp%2F3GPcXn0VXHdSS029%2Fj99UC%2FwJUvyKQ%2FzKyixIEVlYJOn4RxxuaH43Ty9fbJ5OObykHH435XAzJTHeOF4hhEUXD8URe%2FQ%2FBT%2BMpf8d5GN02Ox%2FfiGsl7TA7POu1xZ5%2BbTzcAVKMe48mqcC21hkacVEVScM26liVVBnrKkC4CLKyzAvHu0lhEaTKMFwI3a4SN9MsrfYzdBLq2vkwRD1gVviLT8kY9h2CHH6Y%2Bix6609weFtey4ESp60WtyeWMy%2BsmBuhsoKIyuoT%2Bq2R%2FrW5qi3g%2FvzS2j40DoixDP8%2BKP0yUdpXJ4l6Vla%2Bg9vce%2BC4yM5YlUcbA%2F0jLKdpmTwvsdN5z88nAIe08%2F0HgxeG1iv%2B6Hlhjh7uiW0SDzYNI92L401uha3JKYk268UVRzdOzNQvAaJqoXzAc80dAV440NZ1WVVAAMRYQ2KrGJFmDUsq8saWSnjvIj8t78y%2FRa3JRnbHVfyFpfwoDiGpPgjzekyUiKNlU3OMlwuLMmzgvEojllYVE2Z1HhImvsnk%2BuhusTEoB21PAtSFodeFK3iYhXEH9WOG2%2FkOE833sfeG%2Ff5cfHtEFNXgYes0%2FXj7aGivUgJ9XpusCtoNcNYVVnJVrrDo0OmJAutHCpuZul4W9lLcfy7BnuLPT02%2ByXsCTk%2B9zhzswIN04YueNSK%2BPtM0jS88QdLqSLJDTLsuGZJNolm2yO0PXh3UPnz9Ix5bfIAqxPjvETQsDCEiPG4QbqNyhBZISxybLnZYCrW5H3Axp690%2F0BJdXtDZ5ITuM4xj3f4oUHGzc5JeJmZKpp%2FjwKh4wMV%2FV1yx3emLoR0MwbG4K%2F%2BZgVep3PnzXGDHZ6a3i%2Fk%2BJrONDN13%2Bnq6tBTYk4o7cLGhBtqCC4KwacGHpEVuoH5JNro%2FE6JfE6d5RydbiR76k%2BW5wioDHBIjw1euhHjUGRB0y5A97KoaPx6MlL%2BwgboUVtUFRI%2FLemgTpdtF59ii7pab08kuPcfWzs0l%2FRI5takWnFpka0zOgWRtYcuf9aIxZMxlwr6IiGpsb6j2DQUXPl%2FimXI599Ev7fWjoPD78A',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        item = self._parse_json(zlib.decompress(compat_b64decode(
+            compat_urllib_parse_unquote(self._match_id(url))),
+            -zlib.MAX_WBITS), None)['video']['items'][0]
+        return self._extract_video_info(item['mpxRefId'], 'cbsnews')
+
+
 class CBSNewsIE(CBSIE):
     IE_NAME = 'cbsnews'
     IE_DESC = 'CBS News'

-    _VALID_URL = r'https?://(?:www\.)?cbsnews\.com/(?:news|videos)/(?P<id>[\da-z_-]+)'
+    _VALID_URL = r'https?://(?:www\.)?cbsnews\.com/(?:news|video)/(?P<id>[\da-z_-]+)'

     _TESTS = [
         {
             # 60 minutes
             'url': 'http://www.cbsnews.com/news/artificial-intelligence-positioned-to-be-a-game-changer/',
             'info_dict': {
-                'id': '_B6Ga3VJrI4iQNKsir_cdFo9Re_YJHE_',
-                'ext': 'mp4',
-                'title': 'Artificial Intelligence',
-                'description': 'md5:8818145f9974431e0fb58a1b8d69613c',
+                'id': 'Y_nf_aEg6WwO9OLAq0MpKaPgfnBUxfW4',
+                'ext': 'flv',
+                'title': 'Artificial Intelligence, real-life applications',
+                'description': 'md5:a7aaf27f1b4777244de8b0b442289304',
                 'thumbnail': r're:^https?://.*\.jpg$',
-                'duration': 1606,
+                'duration': 317,
                 'uploader': 'CBSI-NEW',
-                'timestamp': 1498431900,
-                'upload_date': '20170625',
+                'timestamp': 1476046464,
+                'upload_date': '20161009',
             },
             'params': {
-                # m3u8 download
+                # rtmp download
                 'skip_download': True,
             },
         },
         {
-            'url': 'http://www.cbsnews.com/videos/fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack/',
+            'url': 'https://www.cbsnews.com/video/fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack/',
             'info_dict': {
                 'id': 'SNJBOYzXiWBOvaLsdzwH8fmtP1SCd91Y',
                 'ext': 'mp4',
@@ -60,37 +82,29 @@ class CBSNewsIE(CBSIE):
             # 48 hours
             'url': 'http://www.cbsnews.com/news/maria-ridulph-murder-will-the-nations-oldest-cold-case-to-go-to-trial-ever-get-solved/',
             'info_dict': {
-                'id': 'QpM5BJjBVEAUFi7ydR9LusS69DPLqPJ1',
-                'ext': 'mp4',
                 'title': 'Cold as Ice',
-                'description': 'Can a childhood memory of a friend\'s murder solve a 1957 cold case? "48 Hours" correspondent Erin Moriarty has the latest.',
-                'upload_date': '20170604',
-                'timestamp': 1496538000,
-                'uploader': 'CBSI-NEW',
-            },
-            'params': {
-                'skip_download': True,
+                'description': 'Can a childhood memory solve the 1957 murder of 7-year-old Maria Ridulph?',
             },
+            'playlist_mincount': 7,
         },
     ]

     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        display_id = self._match_id(url)

-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage(url, display_id)

-        video_info = self._parse_json(self._html_search_regex(
-            r'(?:<ul class="media-list items" id="media-related-items"[^>]*><li data-video-info|<div id="cbsNewsVideoPlayer" data-video-player-options)=\'({.+?})\'',
-            webpage, 'video JSON info', default='{}'), video_id, fatal=False)
-
-        if video_info:
-            item = video_info['item'] if 'item' in video_info else video_info
-        else:
-            state = self._parse_json(self._search_regex(
-                r'data-cbsvideoui-options=(["\'])(?P<json>{.+?})\1', webpage,
-                'playlist JSON info', group='json'), video_id)['state']
-            item = state['playlist'][state['pid']]
+        entries = []
+        for embed_url in re.findall(r'<iframe[^>]+data-src="(https?://(?:www\.)?cbsnews\.com/embed/video/[^#]*#[^"]+)"', webpage):
+            entries.append(self.url_result(embed_url, CBSNewsEmbedIE.ie_key()))
+        if entries:
+            return self.playlist_result(
+                entries, playlist_title=self._html_search_meta(['og:title', 'twitter:title'], webpage),
+                playlist_description=self._html_search_meta(['og:description', 'twitter:description', 'description'], webpage))
+
+        item = self._parse_json(self._html_search_regex(
+            r'CBSNEWS\.defaultPayload\s*=\s*({.+})',
+            webpage, 'video JSON info'), display_id)['items'][0]

         return self._extract_video_info(item['mpxRefId'], 'cbsnews')
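The embed URL carries its whole video description in the fragment: percent-encoded, base64-encoded, raw-DEFLATE-compressed JSON. A standalone sketch of the same decoding pipeline the new extractor performs (Python 3 spelling of the compat helpers):

    import base64
    import json
    import zlib
    from urllib.parse import unquote

    def decode_embed_fragment(fragment):
        raw = base64.b64decode(unquote(fragment))
        # -zlib.MAX_WBITS selects a raw DEFLATE stream with no zlib header
        return json.loads(zlib.decompress(raw, -zlib.MAX_WBITS))

    # item = decode_embed_fragment(url.split('#', 1)[1])['video']['items'][0]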

youtube_dl/extractor/cloudflarestream.py

@@ -10,8 +10,8 @@ class CloudflareStreamIE(InfoExtractor):
     _VALID_URL = r'''(?x)
                     https?://
                         (?:
-                            (?:watch\.)?cloudflarestream\.com/|
-                            embed\.cloudflarestream\.com/embed/[^/]+\.js\?.*?\bvideo=
+                            (?:watch\.)?(?:cloudflarestream\.com|videodelivery\.net)/|
+                            embed\.(?:cloudflarestream\.com|videodelivery\.net)/embed/[^/]+\.js\?.*?\bvideo=
                         )
                         (?P<id>[\da-f]+)
                     '''
@@ -31,6 +31,9 @@ class CloudflareStreamIE(InfoExtractor):
     }, {
         'url': 'https://cloudflarestream.com/31c9291ab41fac05471db4e73aa11717/manifest/video.mpd',
         'only_matching': True,
+    }, {
+        'url': 'https://embed.videodelivery.net/embed/r4xu.fla9.latest.js?video=81d80727f3022488598f68d323c1ad5e',
+        'only_matching': True,
     }]

     @staticmethod
@@ -38,7 +41,7 @@ class CloudflareStreamIE(InfoExtractor):
         return [
             mobj.group('url')
             for mobj in re.finditer(
-                r'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.cloudflarestream\.com/embed/[^/]+\.js\?.*?\bvideo=[\da-f]+?.*?)\1',
+                r'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.(?:cloudflarestream\.com|videodelivery\.net)/embed/[^/]+\.js\?.*?\bvideo=[\da-f]+?.*?)\1',
                 webpage)]

     def _real_extract(self, url):
youtube_dl/extractor/common.py

@@ -67,6 +67,7 @@ from ..utils import (
     sanitized_Request,
     sanitize_filename,
     str_or_none,
+    strip_or_none,
     unescapeHTML,
     unified_strdate,
     unified_timestamp,
@@ -542,11 +543,11 @@ class InfoExtractor(object):
             raise ExtractorError('An extractor error has occurred.', cause=e)

     def __maybe_fake_ip_and_retry(self, countries):
-        if (not self._downloader.params.get('geo_bypass_country', None) and
-                self._GEO_BYPASS and
-                self._downloader.params.get('geo_bypass', True) and
-                not self._x_forwarded_for_ip and
-                countries):
+        if (not self._downloader.params.get('geo_bypass_country', None)
+                and self._GEO_BYPASS
+                and self._downloader.params.get('geo_bypass', True)
+                and not self._x_forwarded_for_ip
+                and countries):
             country_code = random.choice(countries)
             self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
             if self._x_forwarded_for_ip:
@@ -682,8 +683,8 @@ class InfoExtractor(object):
     def __check_blocked(self, content):
         first_block = content[:512]
-        if ('<title>Access to this site is blocked</title>' in content and
-                'Websense' in first_block):
+        if ('<title>Access to this site is blocked</title>' in content
+                and 'Websense' in first_block):
             msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
             blocked_iframe = self._html_search_regex(
                 r'<iframe src="([^"]+)"', content,
@@ -701,8 +702,8 @@ class InfoExtractor(object):
             if block_msg:
                 msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
             raise ExtractorError(msg, expected=True)
-        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
-                'blocklist.rkn.gov.ru' in content):
+        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
+                and 'blocklist.rkn.gov.ru' in content):
             raise ExtractorError(
                 'Access to this webpage has been blocked by decision of the Russian government. '
                 'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
@@ -1709,8 +1710,8 @@ class InfoExtractor(object):
                     continue
             else:
                 tbr = float_or_none(
-                    last_stream_inf.get('AVERAGE-BANDWIDTH') or
-                    last_stream_inf.get('BANDWIDTH'), scale=1000)
+                    last_stream_inf.get('AVERAGE-BANDWIDTH')
+                    or last_stream_inf.get('BANDWIDTH'), scale=1000)
                 format_id = []
                 if m3u8_id:
                     format_id.append(m3u8_id)
@@ -2480,7 +2481,7 @@ class InfoExtractor(object):
            'subtitles': {},
         }
         media_attributes = extract_attributes(media_tag)
-        src = media_attributes.get('src')
+        src = strip_or_none(media_attributes.get('src'))
         if src:
             _, formats = _media_formats(src, media_type)
             media_info['formats'].extend(formats)
@@ -2490,7 +2491,7 @@ class InfoExtractor(object):
             s_attr = extract_attributes(source_tag)
             # data-video-src and data-src are non standard but seen
             # several times in the wild
-            src = dict_get(s_attr, ('src', 'data-video-src', 'data-src'))
+            src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
             if not src:
                 continue
             f = parse_content_type(s_attr.get('type'))
@@ -2504,8 +2505,8 @@ class InfoExtractor(object):
                 if str_or_none(s_attr.get(lbl))
             ]
             width = int_or_none(s_attr.get('width'))
-            height = (int_or_none(s_attr.get('height')) or
-                      int_or_none(s_attr.get('res')))
+            height = (int_or_none(s_attr.get('height'))
+                      or int_or_none(s_attr.get('res')))
             if not width or not height:
                 for lbl in labels:
                     resolution = parse_resolution(lbl)
@@ -2533,7 +2534,7 @@ class InfoExtractor(object):
             track_attributes = extract_attributes(track_tag)
             kind = track_attributes.get('kind')
             if not kind or kind in ('subtitles', 'captions'):
-                src = track_attributes.get('src')
+                src = strip_or_none(track_attributes.get('src'))
                 if not src:
                     continue
                 lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
@@ -2817,6 +2818,33 @@ class InfoExtractor(object):
         self._downloader.cookiejar.add_cookie_header(req)
         return compat_cookies.SimpleCookie(req.get_header('Cookie'))
+    def _apply_first_set_cookie_header(self, url_handle, cookie):
+        """
+        Apply the first Set-Cookie header instead of the last. Experimental.
+
+        Some sites (e.g. [1-3]) may serve two cookies under the same name
+        in the Set-Cookie header and expect the first (old) one to be set
+        rather than the second (new). However, per RFC 6265 the newer cookie
+        is the one that actually ends up in the cookie store. Work around
+        this by manually resetting the cookie to the first one.
+
+        1. https://new.vk.com/
+        2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
+        3. https://learning.oreilly.com/
+        """
+        for header, cookies in url_handle.headers.items():
+            if header.lower() != 'set-cookie':
+                continue
+            if sys.version_info[0] >= 3:
+                cookies = cookies.encode('iso-8859-1')
+            cookies = cookies.decode('utf-8')
+            cookie_value = re.search(
+                r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
+            if cookie_value:
+                value, domain = cookie_value.groups()
+                self._set_cookie(domain, cookie, value)
+                break
     def get_testcases(self, include_onlymatching=False):
         t = getattr(self, '_TEST', None)
         if t:
@@ -2847,8 +2875,8 @@ class InfoExtractor(object):
         return not any_restricted

     def extract_subtitles(self, *args, **kwargs):
-        if (self._downloader.params.get('writesubtitles', False) or
-                self._downloader.params.get('listsubtitles')):
+        if (self._downloader.params.get('writesubtitles', False)
+                or self._downloader.params.get('listsubtitles')):
             return self._get_subtitles(*args, **kwargs)
         return {}

@@ -2873,8 +2901,8 @@ class InfoExtractor(object):
         return ret

     def extract_automatic_captions(self, *args, **kwargs):
-        if (self._downloader.params.get('writeautomaticsub', False) or
-                self._downloader.params.get('listsubtitles')):
+        if (self._downloader.params.get('writeautomaticsub', False)
+                or self._downloader.params.get('listsubtitles')):
             return self._get_automatic_captions(*args, **kwargs)
         return {}

@@ -2882,9 +2910,9 @@ class InfoExtractor(object):
         raise NotImplementedError('This method must be implemented by subclasses')

     def mark_watched(self, *args, **kwargs):
-        if (self._downloader.params.get('mark_watched', False) and
-                (self._get_login_info()[0] is not None or
-                 self._downloader.params.get('cookiefile') is not None)):
+        if (self._downloader.params.get('mark_watched', False)
+                and (self._get_login_info()[0] is not None
+                     or self._downloader.params.get('cookiefile') is not None)):
             self._mark_watched(*args, **kwargs)

     def _mark_watched(self, *args, **kwargs):
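As an aside, the first-Set-Cookie behaviour that `_apply_first_set_cookie_header` works around is easy to reproduce outside youtube-dl. A minimal standalone sketch (the header list and cookie name below are illustrative, not part of the commit):

import re

def first_cookie_value(headers, name):
    # Return (value, domain) of the FIRST Set-Cookie header carrying the
    # given cookie name, mirroring the regex used by the helper above.
    for header, value in headers:
        if header.lower() != 'set-cookie':
            continue
        m = re.search(
            r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % re.escape(name), value)
        if m:
            return m.groups()
    return None

headers = [
    ('Set-Cookie', 'groot_sessionid=old; Domain=.oreilly.com; Path=/'),
    ('Set-Cookie', 'groot_sessionid=new; Domain=.oreilly.com; Path=/'),
]
print(first_cookie_value(headers, 'groot_sessionid'))  # ('old', '.oreilly.com')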


@@ -1,39 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-from .common import InfoExtractor
-
-
-class CriterionIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?criterion\.com/films/(?P<id>[0-9]+)-.+'
-    _TEST = {
-        'url': 'http://www.criterion.com/films/184-le-samourai',
-        'md5': 'bc51beba55685509883a9a7830919ec3',
-        'info_dict': {
-            'id': '184',
-            'ext': 'mp4',
-            'title': 'Le Samouraï',
-            'description': 'md5:a2b4b116326558149bef81f76dcbb93f',
-            'thumbnail': r're:^https?://.*\.jpg$',
-        }
-    }
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-
-        final_url = self._search_regex(
-            r'so\.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url')
-        title = self._og_search_title(webpage)
-        description = self._html_search_meta('description', webpage)
-        thumbnail = self._search_regex(
-            r'so\.addVariable\("thumbnailURL", "(.+?)"\)\;',
-            webpage, 'thumbnail url')
-
-        return {
-            'id': video_id,
-            'url': final_url,
-            'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
-        }


@@ -45,8 +45,8 @@ class DailyMailIE(InfoExtractor):
         sources_url = (try_get(
             video_data,
             (lambda x: x['plugins']['sources']['url'],
-             lambda x: x['sources']['url']), compat_str) or
-            'http://www.dailymail.co.uk/api/player/%s/video-sources.json' % video_id)
+             lambda x: x['sources']['url']), compat_str)
+            or 'http://www.dailymail.co.uk/api/player/%s/video-sources.json' % video_id)
         video_sources = self._download_json(sources_url, video_id)
         body = video_sources.get('body')


@@ -70,8 +70,8 @@ class DctpTvIE(InfoExtractor):
             endpoint = next(
                 server['endpoint']
                 for server in servers
-                if url_or_none(server.get('endpoint')) and
-                'cloudfront' in server['endpoint'])
+                if url_or_none(server.get('endpoint'))
+                and 'cloudfront' in server['endpoint'])
         else:
             endpoint = 'rtmpe://s2pqqn4u96e4j8.cloudfront.net/cfx/st/'


@@ -82,8 +82,8 @@ class ExpressenIE(InfoExtractor):
         title = info.get('titleRaw') or data['title']
         description = info.get('descriptionRaw')
         thumbnail = info.get('socialMediaImage') or data.get('image')
-        duration = int_or_none(info.get('videoTotalSecondsDuration') or
-                               data.get('totalSecondsDuration'))
+        duration = int_or_none(info.get('videoTotalSecondsDuration')
+                               or data.get('totalSecondsDuration'))
         timestamp = unified_timestamp(info.get('publishDate'))

         return {


@@ -173,6 +173,7 @@ from .cbs import CBSIE
 from .cbslocal import CBSLocalIE
 from .cbsinteractive import CBSInteractiveIE
 from .cbsnews import (
+    CBSNewsEmbedIE,
     CBSNewsIE,
     CBSNewsLiveVideoIE,
 )
@@ -240,7 +241,6 @@ from .condenast import CondeNastIE
 from .corus import CorusIE
 from .cracked import CrackedIE
 from .crackle import CrackleIE
-from .criterion import CriterionIE
 from .crooksandliars import CrooksAndLiarsIE
 from .crunchyroll import (
     CrunchyrollIE,
@@ -772,13 +772,6 @@ from .nova import (
     NovaEmbedIE,
     NovaIE,
 )
-from .novamov import (
-    AuroraVidIE,
-    CloudTimeIE,
-    NowVideoIE,
-    VideoWeedIE,
-    WholeCloudIE,
-)
 from .nowness import (
     NownessIE,
     NownessPlaylistIE,
@@ -833,7 +826,10 @@ from .ooyala import (
     OoyalaIE,
     OoyalaExternalIE,
 )
-from .openload import OpenloadIE
+from .openload import (
+    OpenloadIE,
+    VerystreamIE,
+)
 from .ora import OraTVIE
 from .orf import (
     ORFTVthekIE,
@@ -893,7 +889,6 @@ from .polskieradio import (
 from .popcorntv import PopcornTVIE
 from .porn91 import Porn91IE
 from .porncom import PornComIE
-from .pornflip import PornFlipIE
 from .pornhd import PornHdIE
 from .pornhub import (
     PornHubIE,
@@ -943,7 +938,10 @@ from .raywenderlich import (
 )
 from .rbmaradio import RBMARadioIE
 from .rds import RDSIE
-from .redbulltv import RedBullTVIE
+from .redbulltv import (
+    RedBullTVIE,
+    RedBullTVRrnContentIE,
+)
 from .reddit import (
     RedditIE,
     RedditRIE,


@@ -94,8 +94,8 @@ class FrontendMastersPageBaseIE(FrontendMastersBaseIE):
             chapter_number = None
             index = lesson.get('index')
             element_index = lesson.get('elementIndex')
-            if (isinstance(index, int) and isinstance(element_index, int) and
-                    index < element_index):
+            if (isinstance(index, int) and isinstance(element_index, int)
+                    and index < element_index):
                 chapter_number = element_index - index
             chapter = (chapters[chapter_number - 1]
                        if chapter_number - 1 < len(chapters) else None)


@@ -89,7 +89,10 @@ from .piksel import PikselIE
 from .videa import VideaIE
 from .twentymin import TwentyMinutenIE
 from .ustream import UstreamIE
-from .openload import OpenloadIE
+from .openload import (
+    OpenloadIE,
+    VerystreamIE,
+)
 from .videopress import VideoPressIE
 from .rutube import RutubeIE
 from .limelight import LimelightBaseIE
@@ -2546,11 +2549,11 @@ class GenericIE(InfoExtractor):
             return self.url_result(mobj.group('url'))

         # Look for Ooyala videos
-        mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
-                re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
-                re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
-                re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
-                re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
+        mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
+                or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
+                or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
+                or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
+                or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
         if mobj is not None:
             embed_token = self._search_regex(
                 r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
@@ -2580,19 +2583,6 @@ class GenericIE(InfoExtractor):
         if mobj is not None:
             return self.url_result(mobj.group(1), 'Mpora')

-        # Look for embedded NovaMov-based player
-        mobj = re.search(
-            r'''(?x)<(?:pagespeed_)?iframe[^>]+?src=(["\'])
-                    (?P<url>http://(?:(?:embed|www)\.)?
-                        (?:novamov\.com|
-                           nowvideo\.(?:ch|sx|eu|at|ag|co)|
-                           videoweed\.(?:es|com)|
-                           movshare\.(?:net|sx|ag)|
-                           divxstage\.(?:eu|net|ch|co|at|ag))
-                        /embed\.php.+?)\1''', webpage)
-        if mobj is not None:
-            return self.url_result(mobj.group('url'))
-
         # Look for embedded Facebook player
         facebook_urls = FacebookIE._extract_urls(webpage)
         if facebook_urls:
@@ -3017,6 +3007,12 @@ class GenericIE(InfoExtractor):
             return self.playlist_from_matches(
                 openload_urls, video_id, video_title, ie=OpenloadIE.ie_key())

+        # Look for Verystream embeds
+        verystream_urls = VerystreamIE._extract_urls(webpage)
+        if verystream_urls:
+            return self.playlist_from_matches(
+                verystream_urls, video_id, video_title, ie=VerystreamIE.ie_key())
+
         # Look for VideoPress embeds
         videopress_urls = VideoPressIE._extract_urls(webpage)
         if videopress_urls:
@@ -3212,8 +3208,8 @@ class GenericIE(InfoExtractor):
                 else:
                     formats.append({
                         'url': src,
-                        'ext': (mimetype2ext(src_type) or
-                                ext if ext in KNOWN_EXTENSIONS else 'mp4'),
+                        'ext': (mimetype2ext(src_type)
+                                or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
                     })
             if formats:
                 self._sort_formats(formats)


@@ -11,7 +11,7 @@ from ..utils import (

 class GfycatIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?gfycat\.com/(?:ifr/|gifs/detail/)?(?P<id>[^/?#]+)'
+    _VALID_URL = r'https?://(?:www\.)?gfycat\.com/(?:ifr/|gifs/detail/)?(?P<id>[^-/?#]+)'
     _TESTS = [{
         'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',
         'info_dict': {
@@ -47,6 +47,9 @@ class GfycatIE(InfoExtractor):
     }, {
         'url': 'https://gfycat.com/gifs/detail/UnconsciousLankyIvorygull',
         'only_matching': True
+    }, {
+        'url': 'https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball',
+        'only_matching': True
     }]

     def _real_extract(self, url):
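The tightened `_VALID_URL` above makes the id stop at the first `-`, so the slug suffix in new-style Gfycat URLs is ignored. A quick standalone check (regex copied from the diff, URL from the new test case):

import re

url = 'https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball'
m = re.match(
    r'https?://(?:www\.)?gfycat\.com/(?:ifr/|gifs/detail/)?(?P<id>[^-/?#]+)',
    url)
print(m.group('id'))  # acceptablehappygoluckyharborporpoise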


@@ -155,8 +155,8 @@ class HeiseIE(InfoExtractor):
         'id': video_id,
         'title': title,
         'description': description,
-        'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or
-                      self._og_search_thumbnail(webpage)),
+        'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image')
+                      or self._og_search_thumbnail(webpage)),
         'timestamp': parse_iso8601(
             self._html_search_meta('date', webpage)),
         'formats': formats,


@@ -58,8 +58,8 @@ class HitboxIE(InfoExtractor):
         title = video_meta.get('media_status')
         alt_title = video_meta.get('media_title')
         description = clean_html(
-            video_meta.get('media_description') or
-            video_meta.get('media_description_md'))
+            video_meta.get('media_description')
+            or video_meta.get('media_description_md'))
         duration = float_or_none(video_meta.get('media_duration'))
         uploader = video_meta.get('media_user_name')
         views = int_or_none(video_meta.get('media_views'))


@@ -47,8 +47,8 @@ class HitRecordIE(InfoExtractor):
             tags = [
                 t['text']
                 for t in tags_list
-                if isinstance(t, dict) and t.get('text') and
-                isinstance(t['text'], compat_str)]
+                if isinstance(t, dict) and t.get('text')
+                and isinstance(t['text'], compat_str)]

         return {
             'id': video_id,


@@ -77,13 +77,13 @@ class HKETVIE(InfoExtractor):

         title = (
             self._html_search_meta(
-                ('ed_title', 'search.ed_title'), webpage, default=None) or
-            self._search_regex(
+                ('ed_title', 'search.ed_title'), webpage, default=None)
+            or self._search_regex(
                 r'data-favorite_title_(?:eng|chi)=(["\'])(?P<id>(?:(?!\1).)+)\1',
-                webpage, 'title', default=None, group='url') or
-            self._html_search_regex(
-                r'<h1>([^<]+)</h1>', webpage, 'title', default=None) or
-            self._og_search_title(webpage)
+                webpage, 'title', default=None, group='url')
+            or self._html_search_regex(
+                r'<h1>([^<]+)</h1>', webpage, 'title', default=None)
+            or self._og_search_title(webpage)
         )

         file_id = self._search_regex(


@@ -60,8 +60,8 @@ class HRTiBaseIE(InfoExtractor):
             language=self._APP_LANGUAGE,
             application_id=self._APP_PUBLICATION_ID)

-        self._login_url = (modules['user']['resources']['login']['uri'] +
-                           '/format/json').format(session_id=self._session_id)
+        self._login_url = (modules['user']['resources']['login']['uri']
+                           + '/format/json').format(session_id=self._session_id)

         self._logout_url = modules['user']['resources']['logout']['uri']


@@ -122,9 +122,9 @@ class InfoQIE(BokeCCBaseIE):
             formats = self._extract_bokecc_formats(webpage, video_id)
         else:
             formats = (
-                self._extract_rtmp_video(webpage) +
-                self._extract_http_video(webpage) +
-                self._extract_http_audio(webpage, video_id))
+                self._extract_rtmp_video(webpage)
+                + self._extract_http_video(webpage)
+                + self._extract_http_audio(webpage, video_id))

         self._sort_formats(formats)


@@ -383,9 +383,9 @@ class IqiyiIE(InfoExtractor):
             self._sleep(5, video_id)

         self._sort_formats(formats)
-        title = (get_element_by_id('widget-videotitle', webpage) or
-                 clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage)) or
-                 self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))
+        title = (get_element_by_id('widget-videotitle', webpage)
+                 or clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage))
+                 or self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))

         return {
             'id': video_id,


@@ -77,10 +77,10 @@ class ITVIE(InfoExtractor):
             return etree.SubElement(element, _add_ns(name))

         production_id = (
-            params.get('data-video-autoplay-id') or
-            '%s#001' % (
-                params.get('data-video-episode-id') or
-                video_id.replace('a', '/')))
+            params.get('data-video-autoplay-id')
+            or '%s#001' % (
+                params.get('data-video-episode-id')
+                or video_id.replace('a', '/')))

         req_env = etree.Element(_add_ns('soapenv:Envelope'))
         _add_sub_element(req_env, 'soapenv:Header')


@@ -118,8 +118,8 @@ class KalturaIE(InfoExtractor):
                     (?P<q2>['"])_?(?P<partner_id>(?:(?!(?P=q2)).)+)(?P=q2),.*?
                     (?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s*
                     (?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\})
-                """, webpage) or
-            re.search(
+                """, webpage)
+            or re.search(
                 r'''(?xs)
                     (?P<q1>["'])
                     (?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)*
@@ -132,8 +132,8 @@ class KalturaIE(InfoExtractor):
                     \[\s*(?P<q2_1>["'])entry_?[Ii]d(?P=q2_1)\s*\]\s*=\s*
                 )
                 (?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3)
-                ''', webpage) or
-            re.search(
+                ''', webpage)
+            or re.search(
                 r'''(?xs)
                     <(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
                     (?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)


@@ -47,8 +47,8 @@ class KarriereVideosIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)

-        title = (self._html_search_meta('title', webpage, default=None) or
-                 self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
+        title = (self._html_search_meta('title', webpage, default=None)
+                 or self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))

         video_id = self._search_regex(
             r'/config/video/(.+?)\.xml', webpage, 'video id')


@@ -82,6 +82,10 @@ class LiveLeakIE(InfoExtractor):
     }, {
         'url': 'https://www.liveleak.com/view?t=HvHi_1523016227',
         'only_matching': True,
+    }, {
+        # No original video
+        'url': 'https://www.liveleak.com/view?t=C26ZZ_1558612804',
+        'only_matching': True,
     }]

     @staticmethod
@@ -134,8 +138,10 @@ class LiveLeakIE(InfoExtractor):
                 orig_url = re.sub(r'\.mp4\.[^.]+', '', a_format['url'])
                 if a_format['url'] != orig_url:
                     format_id = a_format.get('format_id')
+                    format_id = 'original' + ('-' + format_id if format_id else '')
+                    if self._is_valid_url(orig_url, video_id, format_id):
                         formats.append({
-                            'format_id': 'original' + ('-' + format_id if format_id else ''),
+                            'format_id': format_id,
                             'url': orig_url,
                             'preference': 1,
                         })
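The `format_id` expression above is easy to misread because of the inline conditional; restated as a tiny standalone helper (hypothetical name, not part of the commit):

def original_format_id(format_id):
    # 'original' plus an optional '-<base format id>' suffix
    return 'original' + ('-' + format_id if format_id else '')

assert original_format_id('mp4-1080') == 'original-mp4-1080'
assert original_format_id(None) == 'original'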


@@ -80,8 +80,8 @@ class MotherlessIE(InfoExtractor):
         video_url = (self._html_search_regex(
             (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
              r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
-            webpage, 'video URL', default=None, group='url') or
-            'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
+            webpage, 'video URL', default=None, group='url')
+            or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
         age_limit = self._rta_search(webpage)
         view_count = str_to_int(self._html_search_regex(
             r'<strong>Views</strong>\s+([^<]+)<',


@@ -84,8 +84,8 @@ class NDTVIE(InfoExtractor):
         # '__title' does not contain extra words such as sub-site name, "Video" etc.
         title = compat_urllib_parse_unquote_plus(
-            self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None) or
-            self._og_search_title(webpage))
+            self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
+            or self._og_search_title(webpage))

         filename = self._search_regex(
             r"(?:__)?filename\s*[:=]\s*'([^']+)'", webpage, 'video filename')


@@ -180,8 +180,8 @@ class AppleDailyIE(NextMediaIE):
     _URL_PATTERN = r'\{url: \'(.+)\'\}'

     def _fetch_title(self, page):
-        return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None) or
-                self._html_search_meta('description', page, 'news title'))
+        return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None)
+                or self._html_search_meta('description', page, 'news title'))

     def _fetch_thumbnail(self, page):
         return self._html_search_regex(r"setInitialImage\(\'([^']+)'\)", page, 'video thumbnail', fatal=False)


@@ -369,14 +369,14 @@ class NiconicoIE(InfoExtractor):
         video_detail = watch_api_data.get('videoDetail', {})

         thumbnail = (
-            get_video_info(['thumbnail_url', 'thumbnailURL']) or
-            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
-            video_detail.get('thumbnail'))
+            get_video_info(['thumbnail_url', 'thumbnailURL'])
+            or self._html_search_meta('image', webpage, 'thumbnail', default=None)
+            or video_detail.get('thumbnail'))

         description = get_video_info('description')

-        timestamp = (parse_iso8601(get_video_info('first_retrieve')) or
-                     unified_timestamp(get_video_info('postedDateTime')))
+        timestamp = (parse_iso8601(get_video_info('first_retrieve'))
+                     or unified_timestamp(get_video_info('postedDateTime')))
         if not timestamp:
             match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
             if match:
@@ -395,9 +395,9 @@ class NiconicoIE(InfoExtractor):
                 view_count = int_or_none(match.replace(',', ''))
         view_count = view_count or video_detail.get('viewCount')

-        comment_count = (int_or_none(get_video_info('comment_num')) or
-                         video_detail.get('commentCount') or
-                         try_get(api_data, lambda x: x['thread']['commentCount']))
+        comment_count = (int_or_none(get_video_info('comment_num'))
+                         or video_detail.get('commentCount')
+                         or try_get(api_data, lambda x: x['thread']['commentCount']))
         if not comment_count:
             match = self._html_search_regex(
                 r'>Comments: <strong[^>]*>([^<]+)</strong>',
@@ -406,11 +406,11 @@ class NiconicoIE(InfoExtractor):
             comment_count = int_or_none(match.replace(',', ''))

         duration = (parse_duration(
-            get_video_info('length') or
-            self._html_search_meta(
-                'video:duration', webpage, 'video duration', default=None)) or
-            video_detail.get('length') or
-            get_video_info('duration'))
+            get_video_info('length')
+            or self._html_search_meta(
+                'video:duration', webpage, 'video duration', default=None))
+            or video_detail.get('length')
+            or get_video_info('duration'))

         webpage_url = get_video_info('watch_url') or url


@@ -1,212 +0,0 @@
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..compat import compat_urlparse
-from ..utils import (
-    ExtractorError,
-    NO_DEFAULT,
-    sanitized_Request,
-    urlencode_postdata,
-)
-
-
-class NovaMovIE(InfoExtractor):
-    IE_NAME = 'novamov'
-    IE_DESC = 'NovaMov'
-
-    _VALID_URL_TEMPLATE = r'''(?x)
-                            http://
-                                (?:
-                                    (?:www\.)?%(host)s/(?:file|video|mobile/\#/videos)/|
-                                    (?:(?:embed|www)\.)%(host)s/embed(?:\.php|/)?\?(?:.*?&)?\bv=
-                                )
-                                (?P<id>[a-z\d]{13})
-                            '''
-    _VALID_URL = _VALID_URL_TEMPLATE % {'host': r'novamov\.com'}
-
-    _HOST = 'www.novamov.com'
-
-    _FILE_DELETED_REGEX = r'This file no longer exists on our servers!</h2>'
-    _FILEKEY_REGEX = r'flashvars\.filekey=(?P<filekey>"?[^"]+"?);'
-    _TITLE_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>([^<]+)</h3>'
-    _DESCRIPTION_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>[^<]+</h3><p>([^<]+)</p>'
-    _URL_TEMPLATE = 'http://%s/video/%s'
-
-    _TEST = None
-
-    def _check_existence(self, webpage, video_id):
-        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
-            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-
-        url = self._URL_TEMPLATE % (self._HOST, video_id)
-
-        webpage = self._download_webpage(
-            url, video_id, 'Downloading video page')
-
-        self._check_existence(webpage, video_id)
-
-        def extract_filekey(default=NO_DEFAULT):
-            filekey = self._search_regex(
-                self._FILEKEY_REGEX, webpage, 'filekey', default=default)
-            if filekey is not default and (filekey[0] != '"' or filekey[-1] != '"'):
-                return self._search_regex(
-                    r'var\s+%s\s*=\s*"([^"]+)"' % re.escape(filekey), webpage, 'filekey', default=default)
-            else:
-                return filekey
-
-        filekey = extract_filekey(default=None)
-
-        if not filekey:
-            fields = self._hidden_inputs(webpage)
-            post_url = self._search_regex(
-                r'<form[^>]+action=(["\'])(?P<url>.+?)\1', webpage,
-                'post url', default=url, group='url')
-            if not post_url.startswith('http'):
-                post_url = compat_urlparse.urljoin(url, post_url)
-            request = sanitized_Request(
-                post_url, urlencode_postdata(fields))
-            request.add_header('Content-Type', 'application/x-www-form-urlencoded')
-            request.add_header('Referer', post_url)
-            webpage = self._download_webpage(
-                request, video_id, 'Downloading continue to the video page')
-            self._check_existence(webpage, video_id)
-
-        filekey = extract_filekey()
-
-        title = self._html_search_regex(self._TITLE_REGEX, webpage, 'title')
-        description = self._html_search_regex(self._DESCRIPTION_REGEX, webpage, 'description', default='', fatal=False)
-
-        api_response = self._download_webpage(
-            'http://%s/api/player.api.php?key=%s&file=%s' % (self._HOST, filekey, video_id), video_id,
-            'Downloading video api response')
-
-        response = compat_urlparse.parse_qs(api_response)
-
-        if 'error_msg' in response:
-            raise ExtractorError('%s returned error: %s' % (self.IE_NAME, response['error_msg'][0]), expected=True)
-
-        video_url = response['url'][0]
-
-        return {
-            'id': video_id,
-            'url': video_url,
-            'title': title,
-            'description': description
-        }
-
-
-class WholeCloudIE(NovaMovIE):
-    IE_NAME = 'wholecloud'
-    IE_DESC = 'WholeCloud'
-
-    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': r'(?:wholecloud\.net|movshare\.(?:net|sx|ag))'}
-
-    _HOST = 'www.wholecloud.net'
-
-    _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
-    _TITLE_REGEX = r'<strong>Title:</strong> ([^<]+)</p>'
-    _DESCRIPTION_REGEX = r'<strong>Description:</strong> ([^<]+)</p>'
-
-    _TEST = {
-        'url': 'http://www.wholecloud.net/video/559e28be54d96',
-        'md5': 'abd31a2132947262c50429e1d16c1bfd',
-        'info_dict': {
-            'id': '559e28be54d96',
-            'ext': 'flv',
-            'title': 'dissapeared image',
-            'description': 'optical illusion dissapeared image magic illusion',
-        }
-    }
-
-
-class NowVideoIE(NovaMovIE):
-    IE_NAME = 'nowvideo'
-    IE_DESC = 'NowVideo'
-
-    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': r'nowvideo\.(?:to|ch|ec|sx|eu|at|ag|co|li)'}
-
-    _HOST = 'www.nowvideo.to'
-
-    _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
-    _TITLE_REGEX = r'<h4>([^<]+)</h4>'
-    _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>'
-
-    _TEST = {
-        'url': 'http://www.nowvideo.sx/video/f1d6fce9a968b',
-        'md5': '12c82cad4f2084881d8bc60ee29df092',
-        'info_dict': {
-            'id': 'f1d6fce9a968b',
-            'ext': 'flv',
-            'title': 'youtubedl test video BaWjenozKc',
-            'description': 'Description',
-        },
-    }
-
-
-class VideoWeedIE(NovaMovIE):
-    IE_NAME = 'videoweed'
-    IE_DESC = 'VideoWeed'
-
-    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': r'videoweed\.(?:es|com)'}
-
-    _HOST = 'www.videoweed.es'
-
-    _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
-    _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>'
-    _URL_TEMPLATE = 'http://%s/file/%s'
-
-    _TEST = {
-        'url': 'http://www.videoweed.es/file/b42178afbea14',
-        'md5': 'abd31a2132947262c50429e1d16c1bfd',
-        'info_dict': {
-            'id': 'b42178afbea14',
-            'ext': 'flv',
-            'title': 'optical illusion dissapeared image magic illusion',
-            'description': ''
-        },
-    }
-
-
-class CloudTimeIE(NovaMovIE):
-    IE_NAME = 'cloudtime'
-    IE_DESC = 'CloudTime'
-
-    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': r'cloudtime\.to'}
-
-    _HOST = 'www.cloudtime.to'
-
-    _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
-    _TITLE_REGEX = r'<div[^>]+class=["\']video_det["\'][^>]*>\s*<strong>([^<]+)</strong>'
-
-    _TEST = None
-
-
-class AuroraVidIE(NovaMovIE):
-    IE_NAME = 'auroravid'
-    IE_DESC = 'AuroraVid'
-
-    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': r'auroravid\.to'}
-
-    _HOST = 'www.auroravid.to'
-
-    _FILE_DELETED_REGEX = r'This file no longer exists on our servers!<'
-
-    _TESTS = [{
-        'url': 'http://www.auroravid.to/video/4rurhn9x446jj',
-        'md5': '7205f346a52bbeba427603ba10d4b935',
-        'info_dict': {
-            'id': '4rurhn9x446jj',
-            'ext': 'flv',
-            'title': 'search engine optimization',
-            'description': 'search engine optimization is used to rank the web page in the google search engine'
-        },
-        'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)'
-    }, {
-        'url': 'http://www.auroravid.to/embed/?v=4rurhn9x446jj',
-        'only_matching': True,
-    }]


@@ -45,8 +45,8 @@ class NRKBaseIE(InfoExtractor):
         entries = []

         conviva = data.get('convivaStatistics') or {}
-        live = (data.get('mediaElementType') == 'Live' or
-                data.get('isLive') is True or conviva.get('isLive'))
+        live = (data.get('mediaElementType') == 'Live'
+                or data.get('isLive') is True or conviva.get('isLive'))

         def make_title(t):
             return self._live_title(t) if live else t


@@ -31,8 +31,8 @@ class OoyalaBaseIE(InfoExtractor):
         title = metadata['title']

         auth_data = self._download_json(
-            self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
-            compat_urllib_parse_urlencode({
+            self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code)
+            + compat_urllib_parse_urlencode({
                 'domain': domain,
                 'supportedFormats': supportedformats or 'mp4,rtmp,m3u8,hds,dash,smooth',
                 'embedToken': embed_token,


@@ -43,9 +43,9 @@ def cookie_to_dict(cookie):
     if cookie.discard is not None:
         cookie_dict['discard'] = cookie.discard
     try:
-        if (cookie.has_nonstandard_attr('httpOnly') or
-                cookie.has_nonstandard_attr('httponly') or
-                cookie.has_nonstandard_attr('HttpOnly')):
+        if (cookie.has_nonstandard_attr('httpOnly')
+                or cookie.has_nonstandard_attr('httponly')
+                or cookie.has_nonstandard_attr('HttpOnly')):
             cookie_dict['httponly'] = True
     except TypeError:
         pass
@@ -244,7 +244,7 @@ class PhantomJSwrapper(object):

 class OpenloadIE(InfoExtractor):
-    _DOMAINS = r'(?:openload\.(?:co|io|link|pw)|oload\.(?:tv|stream|site|xyz|win|download|cloud|cc|icu|fun|club|info|pw|live|space|services)|oladblock\.(?:services|xyz|me)|openloed\.co)'
+    _DOMAINS = r'(?:openload\.(?:co|io|link|pw)|oload\.(?:tv|stream|site|xyz|win|download|cloud|cc|icu|fun|club|info|press|pw|live|space|services|website)|oladblock\.(?:services|xyz|me)|openloed\.co)'
     _VALID_URL = r'''(?x)
                     https?://
                         (?P<host>
@@ -254,7 +254,10 @@ class OpenloadIE(InfoExtractor):
                         (?:f|embed)/
                         (?P<id>[a-zA-Z0-9-_]+)
                     ''' % _DOMAINS
+    _EMBED_WORD = 'embed'
+    _STREAM_WORD = 'f'
+    _REDIR_WORD = 'stream'
+    _URL_IDS = ('streamurl', 'streamuri', 'streamurj')

     _TESTS = [{
         'url': 'https://openload.co/f/kUEfGclsU9o',
         'md5': 'bf1c059b004ebc7a256f89408e65c36e',
@@ -354,6 +357,12 @@ class OpenloadIE(InfoExtractor):
     }, {
         'url': 'https://oload.services/embed/bs1NWj1dCag/',
         'only_matching': True,
+    }, {
+        'url': 'https://oload.press/embed/drTBl1aOTvk/',
+        'only_matching': True,
+    }, {
+        'url': 'https://oload.website/embed/drTBl1aOTvk/',
+        'only_matching': True,
     }, {
         'url': 'https://oladblock.services/f/b8NWEgkqNLI/',
         'only_matching': True,
@@ -1948,11 +1957,16 @@ class OpenloadIE(InfoExtractor):
         '69.0.3497.28',
     )

-    @staticmethod
-    def _extract_urls(webpage):
+    @classmethod
+    def _extract_urls(cls, webpage):
         return re.findall(
-            r'<iframe[^>]+src=["\']((?:https?://)?%s/embed/[a-zA-Z0-9-_]+)'
-            % OpenloadIE._DOMAINS, webpage)
+            r'<iframe[^>]+src=["\']((?:https?://)?%s/%s/[a-zA-Z0-9-_]+)'
+            % (cls._DOMAINS, cls._EMBED_WORD), webpage)
+
+    def _extract_decrypted_page(self, page_url, webpage, video_id, headers):
+        phantom = PhantomJSwrapper(self, required_version='2.0')
+        webpage, _ = phantom.get(page_url, html=webpage, video_id=video_id, headers=headers)
+        return webpage

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -1964,9 +1978,9 @@ class OpenloadIE(InfoExtractor):
             'User-Agent': self._USER_AGENT_TPL % random.choice(self._CHROME_VERSIONS),
         }

-        for path in ('embed', 'f'):
+        for path in (self._EMBED_WORD, self._STREAM_WORD):
             page_url = url_pattern % path
-            last = path == 'f'
+            last = path == self._STREAM_WORD
             webpage = self._download_webpage(
                 page_url, video_id, 'Downloading %s webpage' % path,
                 headers=headers, fatal=last)
@@ -1978,21 +1992,20 @@ class OpenloadIE(InfoExtractor):
                 raise ExtractorError('File not found', expected=True, video_id=video_id)
             break

-        phantom = PhantomJSwrapper(self, required_version='2.0')
-        webpage, _ = phantom.get(page_url, html=webpage, video_id=video_id, headers=headers)
-
-        decoded_id = (get_element_by_id('streamurl', webpage) or
-                      get_element_by_id('streamuri', webpage) or
-                      get_element_by_id('streamurj', webpage) or
-                      self._search_regex(
-                          (r'>\s*([\w-]+~\d{10,}~\d+\.\d+\.0\.0~[\w-]+)\s*<',
-                           r'>\s*([\w~-]+~\d+\.\d+\.\d+\.\d+~[\w~-]+)',
-                           r'>\s*([\w-]+~\d{10,}~(?:[a-f\d]+:){2}:~[\w-]+)\s*<',
-                           r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)\s*<',
-                           r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)'), webpage,
-                          'stream URL'))
-
-        video_url = 'https://%s/stream/%s?mime=true' % (host, decoded_id)
+        webpage = self._extract_decrypted_page(page_url, webpage, video_id, headers)
+        for element_id in self._URL_IDS:
+            decoded_id = get_element_by_id(element_id, webpage)
+            if decoded_id:
+                break
+        if not decoded_id:
+            decoded_id = self._search_regex(
+                (r'>\s*([\w-]+~\d{10,}~\d+\.\d+\.0\.0~[\w-]+)\s*<',
+                 r'>\s*([\w~-]+~\d+\.\d+\.\d+\.\d+~[\w~-]+)',
+                 r'>\s*([\w-]+~\d{10,}~(?:[a-f\d]+:){2}:~[\w-]+)\s*<',
+                 r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)\s*<',
+                 r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)'), webpage,
+                'stream URL')
+        video_url = 'https://%s/%s/%s?mime=true' % (host, self._REDIR_WORD, decoded_id)

         title = self._og_search_title(webpage, default=None) or self._search_regex(
             r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage,
@@ -2012,3 +2025,38 @@ class OpenloadIE(InfoExtractor):
             'subtitles': subtitles,
             'http_headers': headers,
         }
+
+
+class VerystreamIE(OpenloadIE):
+    IE_NAME = 'verystream'
+
+    _DOMAINS = r'(?:verystream\.com)'
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?P<host>
+                            (?:www\.)?
+                            %s
+                        )/
+                        (?:stream|e)/
+                        (?P<id>[a-zA-Z0-9-_]+)
+                    ''' % _DOMAINS
+
+    _EMBED_WORD = 'e'
+    _STREAM_WORD = 'stream'
+    _REDIR_WORD = 'gettoken'
+    _URL_IDS = ('videolink', )
+
+    _TESTS = [{
+        'url': 'https://verystream.com/stream/c1GWQ9ngBBx/',
+        'md5': 'd3e8c5628ccb9970b65fd65269886795',
+        'info_dict': {
+            'id': 'c1GWQ9ngBBx',
+            'ext': 'mp4',
+            'title': 'Big Buck Bunny.mp4',
+            'thumbnail': r're:^https?://.*\.jpg$',
+        },
+    }, {
+        'url': 'https://verystream.com/e/c1GWQ9ngBBx/',
+        'only_matching': True,
+    }]
+
+    def _extract_decrypted_page(self, page_url, webpage, video_id, headers):
+        return webpage  # for Verystream, the webpage is already decrypted
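VerystreamIE thus reuses the whole Openload pipeline and overrides only data. A minimal standalone illustration of this class-attribute parameterisation (toy classes, not youtube-dl code):

class Base(object):
    _EMBED_WORD = 'embed'
    _STREAM_WORD = 'f'

    def page_urls(self, host, video_id):
        # The base class builds URLs from class attributes, so a subclass
        # only needs to swap the per-site path words, not the logic.
        return ['https://%s/%s/%s' % (host, path, video_id)
                for path in (self._EMBED_WORD, self._STREAM_WORD)]

class Very(Base):
    _EMBED_WORD = 'e'
    _STREAM_WORD = 'stream'

print(Very().page_urls('verystream.com', 'c1GWQ9ngBBx'))
# ['https://verystream.com/e/c1GWQ9ngBBx', 'https://verystream.com/stream/c1GWQ9ngBBx']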


@@ -50,8 +50,8 @@ class PodomaticIE(InfoExtractor):
         video_id = mobj.group('id')
         channel = mobj.group('channel') or mobj.group('channel_2')

-        json_url = (('%s://%s.podomatic.com/entry/embed_params/%s' +
-                     '?permalink=true&rtmp=0') %
+        json_url = (('%s://%s.podomatic.com/entry/embed_params/%s'
+                     + '?permalink=true&rtmp=0') %
                    (mobj.group('proto'), channel, video_id))
         data_json = self._download_webpage(
             json_url, video_id, 'Downloading video info')


@@ -1,101 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_str,
-)
-from ..utils import (
-    int_or_none,
-    try_get,
-    unified_timestamp,
-)
-
-
-class PornFlipIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?pornflip\.com/(?:v|embed)/(?P<id>[^/?#&]+)'
-    _TESTS = [{
-        'url': 'https://www.pornflip.com/v/wz7DfNhMmep',
-        'md5': '98c46639849145ae1fd77af532a9278c',
-        'info_dict': {
-            'id': 'wz7DfNhMmep',
-            'ext': 'mp4',
-            'title': '2 Amateurs swallow make his dream cumshots true',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'duration': 112,
-            'timestamp': 1481655502,
-            'upload_date': '20161213',
-            'uploader_id': '106786',
-            'uploader': 'figifoto',
-            'view_count': int,
-            'age_limit': 18,
-        }
-    }, {
-        'url': 'https://www.pornflip.com/embed/wz7DfNhMmep',
-        'only_matching': True,
-    }, {
-        'url': 'https://www.pornflip.com/v/EkRD6-vS2-s',
-        'only_matching': True,
-    }, {
-        'url': 'https://www.pornflip.com/embed/EkRD6-vS2-s',
-        'only_matching': True,
-    }, {
-        'url': 'https://www.pornflip.com/v/NG9q6Pb_iK8',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-
-        webpage = self._download_webpage(
-            'https://www.pornflip.com/v/%s' % video_id, video_id)
-
-        flashvars = compat_parse_qs(self._search_regex(
-            r'<embed[^>]+flashvars=(["\'])(?P<flashvars>(?:(?!\1).)+)\1',
-            webpage, 'flashvars', group='flashvars'))
-
-        title = flashvars['video_vars[title]'][0]
-
-        def flashvar(kind):
-            return try_get(
-                flashvars, lambda x: x['video_vars[%s]' % kind][0], compat_str)
-
-        formats = []
-        for key, value in flashvars.items():
-            if not (value and isinstance(value, list)):
-                continue
-            format_url = value[0]
-            if key == 'video_vars[hds_manifest]':
-                formats.extend(self._extract_mpd_formats(
-                    format_url, video_id, mpd_id='dash', fatal=False))
-                continue
-            height = self._search_regex(
-                r'video_vars\[video_urls\]\[(\d+)', key, 'height', default=None)
-            if not height:
-                continue
-            formats.append({
-                'url': format_url,
-                'format_id': 'http-%s' % height,
-                'height': int_or_none(height),
-            })
-        self._sort_formats(formats)
-
-        uploader = self._html_search_regex(
-            (r'<span[^>]+class="name"[^>]*>\s*<a[^>]+>\s*<strong>(?P<uploader>[^<]+)',
-             r'<meta[^>]+content=(["\'])[^>]*\buploaded by (?P<uploader>.+?)\1'),
-            webpage, 'uploader', fatal=False, group='uploader')
-
-        return {
-            'id': video_id,
-            'formats': formats,
-            'title': title,
-            'thumbnail': flashvar('big_thumb'),
-            'duration': int_or_none(flashvar('duration')),
-            'timestamp': unified_timestamp(self._html_search_meta(
-                'uploadDate', webpage, 'timestamp')),
-            'uploader_id': flashvar('author_id'),
-            'uploader': uploader,
-            'view_count': int_or_none(flashvar('views')),
-            'age_limit': 18,
-        }


@@ -170,7 +170,7 @@ class PornHubIE(PornHubBaseIE):
         def dl_webpage(platform):
             self._set_cookie(host, 'platform', platform)
             return self._download_webpage(
-                'http://www.%s/view_video.php?viewkey=%s' % (host, video_id),
+                'https://www.%s/view_video.php?viewkey=%s' % (host, video_id),
                 video_id, 'Downloading %s webpage' % platform)

         webpage = dl_webpage('pc')


@@ -16,6 +16,11 @@ from ..utils import (

 class ProSiebenSat1BaseIE(InfoExtractor):
+    _GEO_COUNTRIES = ['DE']
+    _ACCESS_ID = None
+    _SUPPORTED_PROTOCOLS = 'dash:clear,hls:clear,progressive:clear'
+    _V4_BASE_URL = 'https://vas-v4.p7s1video.net/4.0/get'
+
     def _extract_video_info(self, url, clip_id):
         client_location = url
@@ -31,7 +36,43 @@ class ProSiebenSat1BaseIE(InfoExtractor):
         if video.get('is_protected') is True:
             raise ExtractorError('This video is DRM protected.', expected=True)

-        duration = float_or_none(video.get('duration'))
+        formats = []
+        if self._ACCESS_ID:
+            raw_ct = self._ENCRYPTION_KEY + clip_id + self._IV + self._ACCESS_ID
+            server_token = (self._download_json(
+                self._V4_BASE_URL + 'protocols', clip_id,
+                'Downloading protocols JSON',
+                headers=self.geo_verification_headers(), query={
+                    'access_id': self._ACCESS_ID,
+                    'client_token': sha1((raw_ct).encode()).hexdigest(),
+                    'video_id': clip_id,
+                }, fatal=False) or {}).get('server_token')
+            if server_token:
+                urls = (self._download_json(
+                    self._V4_BASE_URL + 'urls', clip_id, 'Downloading urls JSON', query={
+                        'access_id': self._ACCESS_ID,
+                        'client_token': sha1((raw_ct + server_token + self._SUPPORTED_PROTOCOLS).encode()).hexdigest(),
+                        'protocols': self._SUPPORTED_PROTOCOLS,
+                        'server_token': server_token,
+                        'video_id': clip_id,
+                    }, fatal=False) or {}).get('urls') or {}
+                for protocol, variant in urls.items():
+                    source_url = variant.get('clear', {}).get('url')
+                    if not source_url:
+                        continue
+                    if protocol == 'dash':
+                        formats.extend(self._extract_mpd_formats(
+                            source_url, clip_id, mpd_id=protocol, fatal=False))
+                    elif protocol == 'hls':
+                        formats.extend(self._extract_m3u8_formats(
+                            source_url, clip_id, 'mp4', 'm3u8_native',
+                            m3u8_id=protocol, fatal=False))
+                    else:
+                        formats.append({
+                            'url': source_url,
+                            'format_id': protocol,
+                        })
+        if not formats:
             source_ids = [compat_str(source['id']) for source in video['sources']]

             client_id = self._SALT[:2] + sha1(''.join([clip_id, self._SALT, self._TOKEN, client_location, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
@@ -52,7 +93,6 @@ class ProSiebenSat1BaseIE(InfoExtractor):
                     return None
                 return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate

-            formats = []
             for source_id in source_ids:
                 client_id = self._SALT[:2] + sha1(''.join([self._SALT, clip_id, self._TOKEN, server_id, client_location, source_id, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
                 urls = self._download_json(
@@ -117,7 +157,7 @@ class ProSiebenSat1BaseIE(InfoExtractor):
         self._sort_formats(formats)

         return {
-            'duration': duration,
+            'duration': float_or_none(video.get('duration')),
             'formats': formats,
         }
@@ -344,6 +384,11 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
     _TOKEN = 'prosieben'
     _SALT = '01!8d8F_)r9]4s[qeuXfP%'
     _CLIENT_NAME = 'kolibri-2.0.19-splec4'
+
+    _ACCESS_ID = 'x_prosiebenmaxx-de'
+    _ENCRYPTION_KEY = 'Eeyeey9oquahthainoofashoyoikosag'
+    _IV = 'Aeluchoc6aevechuipiexeeboowedaok'
+
     _CLIPID_REGEXES = [
         r'"clip_id"\s*:\s+"(\d+)"',
         r'clipid: "(\d+)"',
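The V4 handshake above derives two SHA-1 client tokens from the same raw concatenation, one for the protocols request and one for the urls request. A hedged standalone sketch of just the token math (placeholder inputs, not real credentials):

from hashlib import sha1

def client_tokens(enc_key, clip_id, iv, access_id, server_token, protocols):
    raw_ct = enc_key + clip_id + iv + access_id
    first = sha1(raw_ct.encode()).hexdigest()  # sent to .../getprotocols
    second = sha1(
        (raw_ct + server_token + protocols).encode()).hexdigest()  # .../geturls
    return first, second

print(client_tokens('key', '123', 'iv', 'x_prosiebenmaxx-de',
                    'token', 'dash:clear,hls:clear,progressive:clear'))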


@@ -104,3 +104,25 @@ class RedBullTVIE(InfoExtractor):
             'formats': formats,
             'subtitles': subtitles,
         }
+
+
+class RedBullTVRrnContentIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?redbull(?:\.tv|\.com(?:/[^/]+)?(?:/tv)?)/(?:video|live)/rrn:content:[^:]+:(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
+    _TESTS = [{
+        'url': 'https://www.redbull.com/int-en/tv/video/rrn:content:live-videos:e3e6feb4-e95f-50b7-962a-c70f8fd13c73/mens-dh-finals-fort-william',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.redbull.com/int-en/tv/video/rrn:content:videos:a36a0f36-ff1b-5db8-a69d-ee11a14bf48b/tn-ts-style?playlist=rrn:content:event-profiles:83f05926-5de8-5389-b5e4-9bb312d715e8:extras',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, display_id)
+
+        video_url = self._og_search_url(webpage)
+
+        return self.url_result(
+            video_url, ie=RedBullTVIE.ie_key(),
+            video_id=RedBullTVIE._match_id(video_url))
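RedBullTVRrnContentIE simply resolves the rrn: page to its canonical video URL through the og:url meta tag and hands off to RedBullTVIE. A simplified standalone version of that lookup (youtube-dl's `_og_search_url` is more robust than this regex):

import re

def og_url(webpage):
    # Pull the content attribute of the og:url meta tag, if present.
    m = re.search(
        r'<meta[^>]+property=["\']og:url["\'][^>]+content=["\']([^"\']+)',
        webpage)
    return m.group(1) if m else None

print(og_url('<meta property="og:url" content="https://www.redbull.com/int-en/videos/x"/>'))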


@@ -1,9 +1,11 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re
-
 from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    js_to_json,
+)


 class RTPIE(InfoExtractor):
@@ -18,10 +20,6 @@ class RTPIE(InfoExtractor):
             'description': 'As paixões musicais de António Cartaxo e António Macedo',
             'thumbnail': r're:^https?://.*\.jpg',
         },
-        'params': {
-            # rtmp download
-            'skip_download': True,
-        },
     }, {
         'url': 'http://www.rtp.pt/play/p831/a-quimica-das-coisas',
         'only_matching': True,
@@ -33,57 +31,36 @@ class RTPIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_meta(
             'twitter:title', webpage, display_name='title', fatal=True)
-        description = self._html_search_meta('description', webpage)
-        thumbnail = self._og_search_thumbnail(webpage)

-        player_config = self._search_regex(
-            r'(?s)RTPPLAY\.player\.newPlayer\(\s*(\{.*?\})\s*\)', webpage, 'player config')
-        config = self._parse_json(player_config, video_id)
-
-        path, ext = config.get('file').rsplit('.', 1)
-        formats = [{
-            'format_id': 'rtmp',
-            'ext': ext,
-            'vcodec': config.get('type') == 'audio' and 'none' or None,
-            'preference': -2,
-            'url': 'rtmp://{streamer:s}/{application:s}'.format(**config),
-            'app': config.get('application'),
-            'play_path': '{ext:s}:{path:s}'.format(ext=ext, path=path),
-            'page_url': url,
-            'rtmp_live': config.get('live', False),
-            'player_url': 'http://programas.rtp.pt/play/player.swf?v3',
-            'rtmp_real_time': True,
-        }]
-
-        # Construct regular HTTP download URLs
-        replacements = {
-            'audio': {
-                'format_id': 'mp3',
-                'pattern': r'^nas2\.share/wavrss/',
-                'repl': 'http://rsspod.rtp.pt/podcasts/',
-                'vcodec': 'none',
-            },
-            'video': {
-                'format_id': 'mp4_h264',
-                'pattern': r'^nas2\.share/h264/',
-                'repl': 'http://rsspod.rtp.pt/videocasts/',
-                'vcodec': 'h264',
-            },
-        }
-        r = replacements[config['type']]
-        if re.match(r['pattern'], config['file']) is not None:
-            formats.append({
-                'format_id': r['format_id'],
-                'url': re.sub(r['pattern'], r['repl'], config['file']),
-                'vcodec': r['vcodec'],
-            })
-
-        self._sort_formats(formats)
+        config = self._parse_json(self._search_regex(
+            r'(?s)RTPPlayer\(({.+?})\);', webpage,
+            'player config'), video_id, js_to_json)
+        file_url = config['file']
+        ext = determine_ext(file_url)
+        if ext == 'm3u8':
+            file_key = config.get('fileKey')
+            formats = self._extract_m3u8_formats(
+                file_url, video_id, 'mp4', 'm3u8_native',
+                m3u8_id='hls', fatal=file_key)
+            if file_key:
+                formats.append({
+                    'url': 'https://cdn-ondemand.rtp.pt' + file_key,
+                    'preference': 1,
+                })
+            self._sort_formats(formats)
+        else:
+            formats = [{
+                'url': file_url,
+                'ext': ext,
+            }]
+        if config.get('mediaType') == 'audio':
+            for f in formats:
+                f['vcodec'] = 'none'

         return {
             'id': video_id,
             'title': title,
             'formats': formats,
-            'description': description,
-            'thumbnail': thumbnail,
+            'description': self._html_search_meta(['description', 'twitter:description'], webpage),
+            'thumbnail': config.get('poster') or self._og_search_thumbnail(webpage),
         }


@@ -91,8 +91,8 @@ class RuutuIE(InfoExtractor):
                     extract_formats(child)
                 elif child.tag.endswith('File'):
                     video_url = child.text
-                    if (not video_url or video_url in processed_urls or
-                            any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
+                    if (not video_url or video_url in processed_urls
+                            or any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
                         continue
                     processed_urls.append(video_url)
                     ext = determine_ext(video_url)


@@ -1,15 +1,18 @@
 # coding: utf-8
 from __future__ import unicode_literals

+import json
 import re

 from .common import InfoExtractor
+from ..compat import (
+    compat_parse_qs,
+    compat_str,
+    compat_urlparse,
+)
 from ..utils import (
     ExtractorError,
-    sanitized_Request,
-    std_headers,
-    urlencode_postdata,
     update_url_query,
 )
@@ -31,44 +34,52 @@ class SafariBaseIE(InfoExtractor):
         if username is None:
             return

-        headers = std_headers.copy()
-        if 'Referer' not in headers:
-            headers['Referer'] = self._LOGIN_URL
-
-        login_page = self._download_webpage(
-            self._LOGIN_URL, None, 'Downloading login form', headers=headers)
-
-        def is_logged(webpage):
-            return any(re.search(p, webpage) for p in (
-                r'href=["\']/accounts/logout/', r'>Sign Out<'))
-
-        if is_logged(login_page):
+        _, urlh = self._download_webpage_handle(
+            'https://learning.oreilly.com/accounts/login-check/', None,
+            'Downloading login page')
+
+        def is_logged(urlh):
+            return 'learning.oreilly.com/home/' in compat_str(urlh.geturl())
+
+        if is_logged(urlh):
             self.LOGGED_IN = True
             return

-        csrf = self._html_search_regex(
-            r"name='csrfmiddlewaretoken'\s+value='([^']+)'",
-            login_page, 'csrf token')
-
-        login_form = {
-            'csrfmiddlewaretoken': csrf,
-            'email': username,
-            'password1': password,
-            'login': 'Sign In',
-            'next': '',
-        }
-
-        request = sanitized_Request(
-            self._LOGIN_URL, urlencode_postdata(login_form), headers=headers)
-        login_page = self._download_webpage(
-            request, None, 'Logging in')
-
-        if not is_logged(login_page):
+        redirect_url = compat_str(urlh.geturl())
+        parsed_url = compat_urlparse.urlparse(redirect_url)
+        qs = compat_parse_qs(parsed_url.query)
+        next_uri = compat_urlparse.urljoin(
+            'https://api.oreilly.com', qs['next'][0])
+
+        auth, urlh = self._download_json_handle(
+            'https://www.oreilly.com/member/auth/login/', None, 'Logging in',
+            data=json.dumps({
+                'email': username,
+                'password': password,
+                'redirect_uri': next_uri,
+            }).encode(), headers={
+                'Content-Type': 'application/json',
+                'Referer': redirect_url,
+            }, expected_status=400)
+
+        credentials = auth.get('credentials')
+        if (not auth.get('logged_in') and not auth.get('redirect_uri')
+                and credentials):
             raise ExtractorError(
-                'Login failed; make sure your credentials are correct and try again.',
-                expected=True)
+                'Unable to login: %s' % credentials, expected=True)
+
+        # oreilly serves two identical groot_sessionid cookies in the
+        # Set-Cookie header and expects the first one to be actually set
+        self._apply_first_set_cookie_header(urlh, 'groot_sessionid')
+
+        _, urlh = self._download_webpage_handle(
+            auth.get('redirect_uri') or next_uri, None, 'Completing login',)
+
+        if is_logged(urlh):
             self.LOGGED_IN = True
+            return
+
+        raise ExtractorError('Unable to log in')


 class SafariIE(SafariBaseIE):
@@ -76,7 +87,7 @@ class SafariIE(SafariBaseIE):
     IE_DESC = 'safaribooksonline.com online video'
     _VALID_URL = r'''(?x)
                         https?://
-                            (?:www\.)?(?:safaribooksonline|learning\.oreilly)\.com/
+                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
                             (?:
                                 library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?\#&]+)\.html|
                                 videos/[^/]+/[^/]+/(?P<reference_id>[^-]+-[^/?\#&]+)
@@ -107,6 +118,9 @@ class SafariIE(SafariBaseIE):
     }, {
         'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838/9780133392838-00_SeriesIntro',
         'only_matching': True,
+    }, {
+        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/00_SeriesIntro.html',
+        'only_matching': True,
     }]

     _PARTNER_ID = '1926081'
@@ -163,7 +177,7 @@ class SafariIE(SafariBaseIE):

 class SafariApiIE(SafariBaseIE):
     IE_NAME = 'safari:api'
-    _VALID_URL = r'https?://(?:www\.)?(?:safaribooksonline|learning\.oreilly)\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'
+    _VALID_URL = r'https?://(?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'

     _TESTS = [{
         'url': 'https://www.safaribooksonline.com/api/v1/book/9780133392838/chapter/part00.html',
@@ -188,7 +202,7 @@ class SafariCourseIE(SafariBaseIE):
     _VALID_URL = r'''(?x)
                     https?://
                         (?:
-                            (?:www\.)?(?:safaribooksonline|learning\.oreilly)\.com/
+                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
                             (?:
                                 library/view/[^/]+|
                                 api/v1/book|
@@ -219,6 +233,9 @@ class SafariCourseIE(SafariBaseIE):
     }, {
         'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838',
         'only_matching': True,
+    }, {
+        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/',
+        'only_matching': True,
     }]

     @classmethod
View File
@@ -55,8 +55,8 @@ class SBSIE(InfoExtractor):
             raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)

         urls = player_params['releaseUrls']
-        theplatform_url = (urls.get('progressive') or urls.get('html') or
-                           urls.get('standard') or player_params['relatedItemsURL'])
+        theplatform_url = (urls.get('progressive') or urls.get('html')
+                           or urls.get('standard') or player_params['relatedItemsURL'])

         return {
             '_type': 'url_transparent',
View File
@@ -3,8 +3,11 @@ from __future__ import unicode_literals

 from .common import InfoExtractor
 from ..compat import compat_b64decode
 from ..utils import (
+    determine_ext,
     ExtractorError,
     int_or_none,
+    KNOWN_EXTENSIONS,
+    parse_filesize,
     url_or_none,
     urlencode_postdata,
 )
@@ -22,10 +25,8 @@ class SharedBaseIE(InfoExtractor):

         video_url = self._extract_video_url(webpage, video_id, url)

-        title = compat_b64decode(self._html_search_meta(
-            'full:title', webpage, 'title')).decode('utf-8')
-        filesize = int_or_none(self._html_search_meta(
-            'full:size', webpage, 'file size', fatal=False))
+        title = self._extract_title(webpage)
+        filesize = int_or_none(self._extract_filesize(webpage))

         return {
             'id': video_id,
@@ -35,6 +36,14 @@ class SharedBaseIE(InfoExtractor):
             'title': title,
         }

+    def _extract_title(self, webpage):
+        return compat_b64decode(self._html_search_meta(
+            'full:title', webpage, 'title')).decode('utf-8')
+
+    def _extract_filesize(self, webpage):
+        return self._html_search_meta(
+            'full:size', webpage, 'file size', fatal=False)
+

 class SharedIE(SharedBaseIE):
     IE_DESC = 'shared.sx'
@@ -82,11 +91,27 @@ class VivoIE(SharedBaseIE):
             'id': 'd7ddda0e78',
             'ext': 'mp4',
             'title': 'Chicken',
-            'filesize': 528031,
+            'filesize': 515659,
         },
     }

-    def _extract_video_url(self, webpage, video_id, *args):
+    def _extract_title(self, webpage):
+        title = self._html_search_regex(
+            r'data-name\s*=\s*(["\'])(?P<title>(?:(?!\1).)+)\1', webpage,
+            'title', default=None, group='title')
+        if title:
+            ext = determine_ext(title)
+            if ext.lower() in KNOWN_EXTENSIONS:
+                title = title.rpartition('.' + ext)[0]
+            return title
+        return self._og_search_title(webpage)
+
+    def _extract_filesize(self, webpage):
+        return parse_filesize(self._search_regex(
+            r'data-type=["\']video["\'][^>]*>Watch.*?<strong>\s*\((.+?)\)',
+            webpage, 'filesize', fatal=False))
+
+    def _extract_video_url(self, webpage, video_id, url):
         def decode_url(encoded_url):
             return compat_b64decode(encoded_url).decode('utf-8')
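The new `_extract_title` override strips a known file extension off the `data-name` value before using it as the title. A small standalone sketch of just that step, using the same helpers the hunk imports ('Chicken.mp4' is an illustrative value):

    from youtube_dl.utils import determine_ext, KNOWN_EXTENSIONS

    title = 'Chicken.mp4'
    ext = determine_ext(title)
    if ext.lower() in KNOWN_EXTENSIONS:
        title = title.rpartition('.' + ext)[0]
    assert title == 'Chicken'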
View File
@@ -106,7 +106,16 @@ class SRGSSRIE(InfoExtractor):

 class SRGSSRPlayIE(InfoExtractor):
     IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites'
-    _VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)'
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?:(?:www|play)\.)?
+                        (?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/
+                        (?:
+                            [^/]+/(?P<type>video|audio)/[^?]+|
+                            popup(?P<type_2>video|audio)player
+                        )
+                        \?id=(?P<id>[0-9a-f\-]{36}|\d+)
+                    '''

     _TESTS = [{
         'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
@@ -163,9 +172,15 @@ class SRGSSRPlayIE(InfoExtractor):
             # m3u8 download
             'skip_download': True,
         }
+    }, {
+        'url': 'https://www.srf.ch/play/tv/popupvideoplayer?id=c4dba0ca-e75b-43b2-a34f-f708a4932e01',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
-        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
+        mobj = re.match(self._VALID_URL, url)
+        bu = mobj.group('bu')
+        media_type = mobj.group('type') or mobj.group('type_2')
+        media_id = mobj.group('id')
         # other info can be extracted from url + '&layout=json'
         return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')
View File
@@ -45,7 +45,7 @@ class StreamcloudIE(InfoExtractor):
                 value="([^"]*)"
             ''', orig_webpage)

-        self._sleep(12, video_id)
+        self._sleep(6, video_id)

         webpage = self._download_webpage(
             url, video_id, data=urlencode_postdata(fields), headers={
View File
@@ -185,7 +185,7 @@ class SVTPlayIE(SVTPlayBaseIE):

     def _extract_by_video_id(self, video_id, webpage=None):
         data = self._download_json(
-            'https://api.svt.se/video/%s' % video_id,
+            'https://api.svt.se/videoplayer-api/video/%s' % video_id,
            video_id, headers=self.geo_verification_headers())
         info_dict = self._extract_video(data, video_id)
         if not info_dict.get('title'):
View File
@@ -5,8 +5,12 @@ import re

 from .common import InfoExtractor

-from ..compat import compat_str
+from ..compat import (
+    compat_str,
+    compat_urlparse
+)
 from ..utils import (
+    extract_attributes,
     float_or_none,
     int_or_none,
     try_get,
@@ -20,7 +24,7 @@ class TEDIE(InfoExtractor):
         (?P<proto>https?://)
         (?P<type>www|embed(?:-ssl)?)(?P<urlmain>\.ted\.com/
         (
-            (?P<type_playlist>playlists(?:/\d+)?) # We have a playlist
+            (?P<type_playlist>playlists(?:/(?P<playlist_id>\d+))?) # We have a playlist
             |
             ((?P<type_talk>talks)) # We have a simple talk
             |
@@ -84,6 +88,7 @@ class TEDIE(InfoExtractor):
         'info_dict': {
             'id': '10',
             'title': 'Who are the hackers?',
+            'description': 'md5:49a0dbe8fb76d81a0e64b4a80af7f15a'
         },
         'playlist_mincount': 6,
     }, {
@@ -150,22 +155,22 @@ class TEDIE(InfoExtractor):
         webpage = self._download_webpage(url, name,
                                          'Downloading playlist webpage')
-        info = self._extract_info(webpage)

-        playlist_info = try_get(
-            info, lambda x: x['__INITIAL_DATA__']['playlist'],
-            dict) or info['playlist']
+        playlist_entries = []
+        for entry in re.findall(r'(?s)<[^>]+data-ga-context=["\']playlist["\'][^>]*>', webpage):
+            attrs = extract_attributes(entry)
+            entry_url = compat_urlparse.urljoin(url, attrs['href'])
+            playlist_entries.append(self.url_result(entry_url, self.ie_key()))
+
+        final_url = self._og_search_url(webpage, fatal=False)
+        playlist_id = (
+            re.match(self._VALID_URL, final_url).group('playlist_id')
+            if final_url else None)

-        playlist_entries = [
-            self.url_result('http://www.ted.com/talks/' + talk['slug'], self.ie_key())
-            for talk in try_get(
-                info, lambda x: x['__INITIAL_DATA__']['talks'],
-                dict) or info['talks']
-        ]
         return self.playlist_result(
-            playlist_entries,
-            playlist_id=compat_str(playlist_info['id']),
-            playlist_title=playlist_info['title'])
+            playlist_entries, playlist_id=playlist_id,
+            playlist_title=self._og_search_title(webpage, fatal=False),
+            playlist_description=self._og_search_description(webpage))

     def _talk_info(self, url, video_name):
         webpage = self._download_webpage(url, video_name)
View File
@@ -7,7 +7,7 @@ from ..compat import compat_urlparse


 class Tele5IE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?tele5\.de/(?:mediathek|tv)/(?P<id>[^?#&]+)'
+    _VALID_URL = r'https?://(?:www\.)?tele5\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)'

     _TESTS = [{
         'url': 'https://www.tele5.de/mediathek/filme-online/videos?vid=1549416',
         'info_dict': {
@@ -21,10 +21,22 @@ class Tele5IE(InfoExtractor):
             'skip_download': True,
         },
     }, {
-        'url': 'https://www.tele5.de/tv/kalkofes-mattscheibe/video-clips/politik-und-gesellschaft?ve_id=1551191',
+        'url': 'https://www.tele5.de/kalkofes-mattscheibe/video-clips/politik-und-gesellschaft?ve_id=1551191',
         'only_matching': True,
     }, {
-        'url': 'https://www.tele5.de/tv/dark-matter/videos',
+        'url': 'https://www.tele5.de/video-clip/?ve_id=1609440',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.tele5.de/filme/schlefaz-dragon-crusaders/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.tele5.de/filme/making-of/avengers-endgame/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.tele5.de/star-trek/raumschiff-voyager/ganze-folge/das-vinculum/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.tele5.de/anders-ist-sevda/',
         'only_matching': True,
     }]

@@ -36,8 +48,9 @@ class Tele5IE(InfoExtractor):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)

         video_id = self._html_search_regex(
-            r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](\d+)',
-            webpage, 'video id')
+            (r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](\d+)',
+             r'\s+id\s*=\s*["\']player_(\d{6,})',
+             r'\bdata-id\s*=\s*["\'](\d{6,})'), webpage, 'video id')

         return self.url_result(
             'https://api.nexx.cloud/v3/759/videos/byid/%s' % video_id,
View File
@@ -47,15 +47,23 @@ class TVNowBaseIE(InfoExtractor):
                         r'\.ism/(?:[^.]*\.(?:m3u8|mpd)|[Mm]anifest)',
                         '.ism/' + suffix, manifest_url))

-            formats = self._extract_mpd_formats(
-                url_repl('dash', '.mpd'), video_id,
-                mpd_id='dash', fatal=False)
-            formats.extend(self._extract_ism_formats(
-                url_repl('hss', 'Manifest'),
-                video_id, ism_id='mss', fatal=False))
-            formats.extend(self._extract_m3u8_formats(
-                url_repl('hls', '.m3u8'), video_id, 'mp4',
-                'm3u8_native', m3u8_id='hls', fatal=False))
+            def make_urls(proto, suffix):
+                urls = [url_repl(proto, suffix)]
+                hd_url = urls[0].replace('/manifest/', '/ngvod/')
+                if hd_url != urls[0]:
+                    urls.append(hd_url)
+                return urls
+
+            for man_url in make_urls('dash', '.mpd'):
+                formats = self._extract_mpd_formats(
+                    man_url, video_id, mpd_id='dash', fatal=False)
+            for man_url in make_urls('hss', 'Manifest'):
+                formats.extend(self._extract_ism_formats(
+                    man_url, video_id, ism_id='mss', fatal=False))
+            for man_url in make_urls('hls', '.m3u8'):
+                formats.extend(self._extract_m3u8_formats(
+                    man_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls',
+                    fatal=False))
             if formats:
                 break
         else:
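The `make_urls` helper introduced above pairs each manifest URL with an `/ngvod/` variant so the HD renditions are probed as well. A sketch of what it yields for one made-up URL (host and path are purely illustrative):

    def url_repl(proto, suffix):
        return 'https://example.com/%s/manifest/video.ism/%s' % (proto, suffix)

    def make_urls(proto, suffix):
        urls = [url_repl(proto, suffix)]
        hd_url = urls[0].replace('/manifest/', '/ngvod/')
        if hd_url != urls[0]:
            urls.append(hd_url)
        return urls

    print(make_urls('dash', '.mpd'))
    # ['https://example.com/dash/manifest/video.ism/.mpd',
    #  'https://example.com/dash/ngvod/video.ism/.mpd']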
@@ -207,7 +215,7 @@ class TVNowNewBaseIE(InfoExtractor):
         return result


-"""
+r"""
 TODO: new apigw.tvnow.de based version of TVNowIE. Replace old TVNowIE with it
 when api.tvnow.de is shut down. This version can't bypass premium checks though.
 class TVNowIE(TVNowNewBaseIE):
View File
@@ -14,7 +14,18 @@ from ..utils import (

 class TwentyFourVideoIE(InfoExtractor):
     IE_NAME = '24video'
-    _VALID_URL = r'https?://(?P<host>(?:www\.)?24video\.(?:net|me|xxx|sexy?|tube|adult))/(?:video/(?:view|xml)/|player/new24_play\.swf\?id=)(?P<id>\d+)'
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?P<host>
+                            (?:(?:www|porno)\.)?24video\.
+                            (?:net|me|xxx|sexy?|tube|adult|site)
+                        )/
+                        (?:
+                            video/(?:(?:view|xml)/)?|
+                            player/new24_play\.swf\?id=
+                        )
+                        (?P<id>\d+)
+                    '''

     _TESTS = [{
         'url': 'http://www.24video.net/video/view/1044982',
@@ -42,6 +53,12 @@ class TwentyFourVideoIE(InfoExtractor):
     }, {
         'url': 'http://www.24video.tube/video/view/2363750',
         'only_matching': True,
+    }, {
+        'url': 'https://www.24video.site/video/view/2640421',
+        'only_matching': True,
+    }, {
+        'url': 'https://porno.24video.net/video/2640421-vsya-takaya-gibkaya-i-v-masle',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
View File
@@ -275,8 +275,8 @@ class VevoIE(VevoBaseIE):

         genres = video_info.get('genres')
         genre = (
-            genres[0] if genres and isinstance(genres, list) and
-            isinstance(genres[0], compat_str) else None)
+            genres[0] if genres and isinstance(genres, list)
+            and isinstance(genres[0], compat_str) else None)

         is_explicit = video_info.get('isExplicit')
         if is_explicit is True:
View File
@@ -21,7 +21,7 @@ from ..utils import (
 class VikiBaseIE(InfoExtractor):
     _VALID_URL_BASE = r'https?://(?:www\.)?viki\.(?:com|net|mx|jp|fr)/'
     _API_QUERY_TEMPLATE = '/v4/%sapp=%s&t=%s&site=www.viki.com'
-    _API_URL_TEMPLATE = 'http://api.viki.io%s&sig=%s'
+    _API_URL_TEMPLATE = 'https://api.viki.io%s&sig=%s'

     _APP = '100005a'
     _APP_VERSION = '2.2.5.1428709186'
@@ -377,7 +377,7 @@ class VikiChannelIE(VikiBaseIE):
             for video in page['response']:
                 video_id = video['id']
                 entries.append(self.url_result(
-                    'http://www.viki.com/videos/%s' % video_id, 'Viki'))
+                    'https://www.viki.com/videos/%s' % video_id, 'Viki'))
             if not page['pagination']['next']:
                 break
View File
@@ -3,7 +3,6 @@ from __future__ import unicode_literals

 import collections
 import re
-import sys

 from .common import InfoExtractor
 from ..compat import compat_urlparse
@@ -45,24 +44,9 @@ class VKBaseIE(InfoExtractor):
             'pass': password.encode('cp1251'),
         })

-        # https://new.vk.com/ serves two same remixlhk cookies in Set-Cookie header
-        # and expects the first one to be set rather than second (see
-        # https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201).
-        # As of RFC6265 the newer one cookie should be set into cookie store
-        # what actually happens.
-        # We will workaround this VK issue by resetting the remixlhk cookie to
-        # the first one manually.
-        for header, cookies in url_handle.headers.items():
-            if header.lower() != 'set-cookie':
-                continue
-            if sys.version_info[0] >= 3:
-                cookies = cookies.encode('iso-8859-1')
-            cookies = cookies.decode('utf-8')
-            remixlhk = re.search(r'remixlhk=(.+?);.*?\bdomain=(.+?)(?:[,;]|$)', cookies)
-            if remixlhk:
-                value, domain = remixlhk.groups()
-                self._set_cookie(domain, 'remixlhk', value)
-                break
+        # vk serves two same remixlhk cookies in Set-Cookie header and expects
+        # first one to be actually set
+        self._apply_first_set_cookie_header(url_handle, 'remixlhk')

         login_page = self._download_webpage(
             'https://login.vk.com/?act=login', None,
@@ -443,8 +427,8 @@ class VKIE(VKBaseIE):
             format_url = url_or_none(format_url)
             if not format_url or not format_url.startswith(('http', '//', 'rtmp')):
                 continue
-            if (format_id.startswith(('url', 'cache')) or
-                    format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
+            if (format_id.startswith(('url', 'cache'))
+                    or format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
                 height = int_or_none(self._search_regex(
                     r'^(?:url|cache)(\d+)', format_id, 'height', default=None))
                 formats.append({
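The inline workaround deleted above now lives in the shared `_apply_first_set_cookie_header` helper (also used by the safari extractor earlier in this diff). A hypothetical standalone sketch of the idea — take the first occurrence of a cookie out of raw Set-Cookie headers; the real helper wires this into youtube-dl's cookie jar rather than returning a value:

    import re

    def first_set_cookie_value(headers, cookie_name):
        # headers: (name, value) pairs as received on the wire
        for name, value in headers:
            if name.lower() != 'set-cookie':
                continue
            mobj = re.search(r'%s=(.+?);' % re.escape(cookie_name), value)
            if mobj:
                return mobj.group(1)  # first occurrence wins
        return None

    assert first_set_cookie_value(
        [('Set-Cookie', 'remixlhk=first; domain=.vk.com'),
         ('Set-Cookie', 'remixlhk=second; domain=.vk.com')],
        'remixlhk') == 'first'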
View File
@@ -24,6 +24,7 @@ from ..utils import (
 class VLiveIE(InfoExtractor):
     IE_NAME = 'vlive'
     _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
+    _NETRC_MACHINE = 'vlive'

     _TESTS = [{
         'url': 'http://www.vlive.tv/video/1326',
         'md5': 'cc7314812855ce56de70a06a27314983',
@@ -47,12 +48,55 @@ class VLiveIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
+    }, {
+        'url': 'https://www.vlive.tv/video/129100',
+        'md5': 'ca2569453b79d66e5b919e5d308bff6b',
+        'info_dict': {
+            'id': '129100',
+            'ext': 'mp4',
+            'title': '[V LIVE] [BTS+] Run BTS! 2019 - EP.71 :: Behind the scene',
+            'creator': 'BTS+',
+            'view_count': int,
+            'subtitles': 'mincount:10',
+        },
+        'skip': 'This video is only available for CH+ subscribers',
     }]

     @classmethod
     def suitable(cls, url):
         return False if VLivePlaylistIE.suitable(url) else super(VLiveIE, cls).suitable(url)

+    def _real_initialize(self):
+        self._login()
+
+    def _login(self):
+        email, password = self._get_login_info()
+        if None in (email, password):
+            return
+
+        def is_logged_in():
+            login_info = self._download_json(
+                'https://www.vlive.tv/auth/loginInfo', None,
+                note='Downloading login info',
+                headers={'Referer': 'https://www.vlive.tv/home'})
+            return try_get(
+                login_info, lambda x: x['message']['login'], bool) or False
+
+        LOGIN_URL = 'https://www.vlive.tv/auth/email/login'
+        self._request_webpage(
+            LOGIN_URL, None, note='Downloading login cookies')
+
+        self._download_webpage(
+            LOGIN_URL, None, note='Logging in',
+            data=urlencode_postdata({'email': email, 'pwd': password}),
+            headers={
+                'Referer': LOGIN_URL,
+                'Content-Type': 'application/x-www-form-urlencoded'
+            })
+
+        if not is_logged_in():
+            raise ExtractorError('Unable to log in', expected=True)
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
@@ -77,10 +121,7 @@ class VLiveIE(InfoExtractor):
         if status in ('LIVE_ON_AIR', 'BIG_EVENT_ON_AIR'):
             return self._live(video_id, webpage)
         elif status in ('VOD_ON_AIR', 'BIG_EVENT_INTRO'):
-            if long_video_id and key:
-                return self._replay(video_id, webpage, long_video_id, key)
-            else:
-                status = 'COMING_SOON'
+            return self._replay(video_id, webpage, long_video_id, key)

         if status == 'LIVE_END':
             raise ExtractorError('Uploading for replay. Please wait...',
@@ -91,13 +132,15 @@ class VLiveIE(InfoExtractor):
             raise ExtractorError('We are sorry, '
                                  'but the live broadcast has been canceled.',
                                  expected=True)
+        elif status == 'ONLY_APP':
+            raise ExtractorError('Unsupported video type', expected=True)
         else:
             raise ExtractorError('Unknown status %s' % status)

     def _get_common_fields(self, webpage):
         title = self._og_search_title(webpage)
         creator = self._html_search_regex(
-            r'<div[^>]+class="info_area"[^>]*>\s*<a\s+[^>]*>([^<]+)',
+            r'<div[^>]+class="info_area"[^>]*>\s*(?:<em[^>]*>.*?</em\s*>\s*)?<a\s+[^>]*>([^<]+)',
             webpage, 'creator', fatal=False)
         thumbnail = self._og_search_thumbnail(webpage)
         return {
@@ -107,14 +150,7 @@ class VLiveIE(InfoExtractor):
         }

     def _live(self, video_id, webpage):
-        init_page = self._download_webpage(
-            'https://www.vlive.tv/video/init/view',
-            video_id, note='Downloading live webpage',
-            data=urlencode_postdata({'videoSeq': video_id}),
-            headers={
-                'Referer': 'https://www.vlive.tv/video/%s' % video_id,
-                'Content-Type': 'application/x-www-form-urlencoded'
-            })
+        init_page = self._download_init_page(video_id)

         live_params = self._search_regex(
             r'"liveStreamInfo"\s*:\s*(".*"),',
@@ -140,6 +176,17 @@ class VLiveIE(InfoExtractor):
         return info

     def _replay(self, video_id, webpage, long_video_id, key):
+        if '' in (long_video_id, key):
+            init_page = self._download_init_page(video_id)
+            video_info = self._parse_json(self._search_regex(
+                (r'(?s)oVideoStatus\s*=\s*({.+?})\s*</script',
+                 r'(?s)oVideoStatus\s*=\s*({.+})'), init_page, 'video info'),
+                video_id)
+            if video_info.get('status') == 'NEED_CHANNEL_PLUS':
+                self.raise_login_required(
+                    'This video is only available for CH+ subscribers')
+            long_video_id, key = video_info['vid'], video_info['inkey']
+
         playinfo = self._download_json(
             'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?%s'
             % compat_urllib_parse_urlencode({
@@ -180,6 +227,16 @@ class VLiveIE(InfoExtractor):
         })
         return info

+    def _download_init_page(self, video_id):
+        return self._download_webpage(
+            'https://www.vlive.tv/video/init/view',
+            video_id, note='Downloading live webpage',
+            data=urlencode_postdata({'videoSeq': video_id}),
+            headers={
+                'Referer': 'https://www.vlive.tv/video/%s' % video_id,
+                'Content-Type': 'application/x-www-form-urlencoded'
+            })
+

 class VLiveChannelIE(InfoExtractor):
     IE_NAME = 'vlive:channel'
@@ -275,26 +332,45 @@ class VLiveChannelIE(InfoExtractor):
 class VLivePlaylistIE(InfoExtractor):
     IE_NAME = 'vlive:playlist'
     _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<video_id>[0-9]+)/playlist/(?P<id>[0-9]+)'
-    _TEST = {
+    _VIDEO_URL_TEMPLATE = 'http://www.vlive.tv/video/%s'
+    _TESTS = [{
+        # regular working playlist
+        'url': 'https://www.vlive.tv/video/117956/playlist/117963',
+        'info_dict': {
+            'id': '117963',
+            'title': '아이돌룸(IDOL ROOM) 41회 - (여자)아이들'
+        },
+        'playlist_mincount': 10
+    }, {
+        # playlist with no playlistVideoSeqs
         'url': 'http://www.vlive.tv/video/22867/playlist/22912',
         'info_dict': {
-            'id': '22912',
-            'title': 'Valentine Day Message from TWICE'
+            'id': '22867',
+            'ext': 'mp4',
+            'title': '[V LIVE] Valentine Day Message from MINA',
+            'creator': 'TWICE',
+            'view_count': int
         },
-        'playlist_mincount': 9
-    }
+        'params': {
+            'skip_download': True,
+        }
+    }]
+
+    def _build_video_result(self, video_id, message):
+        self.to_screen(message)
+        return self.url_result(
+            self._VIDEO_URL_TEMPLATE % video_id,
+            ie=VLiveIE.ie_key(), video_id=video_id)

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id, playlist_id = mobj.group('video_id', 'id')

-        VIDEO_URL_TEMPLATE = 'http://www.vlive.tv/video/%s'
         if self._downloader.params.get('noplaylist'):
-            self.to_screen(
-                'Downloading just video %s because of --no-playlist' % video_id)
-            return self.url_result(
-                VIDEO_URL_TEMPLATE % video_id,
-                ie=VLiveIE.ie_key(), video_id=video_id)
+            return self._build_video_result(
+                video_id,
+                'Downloading just video %s because of --no-playlist'
+                % video_id)

         self.to_screen(
             'Downloading playlist %s - add --no-playlist to just download video'
@@ -304,15 +380,21 @@ class VLivePlaylistIE(InfoExtractor):
             'http://www.vlive.tv/video/%s/playlist/%s'
             % (video_id, playlist_id), playlist_id)

-        item_ids = self._parse_json(
-            self._search_regex(
-                r'playlistVideoSeqs\s*=\s*(\[[^]]+\])', webpage,
-                'playlist video seqs'),
-            playlist_id)
+        raw_item_ids = self._search_regex(
+            r'playlistVideoSeqs\s*=\s*(\[[^]]+\])', webpage,
+            'playlist video seqs', default=None, fatal=False)
+        if not raw_item_ids:
+            return self._build_video_result(
+                video_id,
+                'Downloading just video %s because no playlist was found'
+                % video_id)
+
+        item_ids = self._parse_json(raw_item_ids, playlist_id)

         entries = [
             self.url_result(
-                VIDEO_URL_TEMPLATE % item_id, ie=VLiveIE.ie_key(),
+                self._VIDEO_URL_TEMPLATE % item_id, ie=VLiveIE.ie_key(),
                 video_id=compat_str(item_id))
             for item_id in item_ids]
View File
@@ -5,150 +5,83 @@ import re

 from .common import InfoExtractor
 from ..utils import (
+    extract_attributes,
     float_or_none,
+    get_element_by_class,
+    strip_or_none,
+    unified_timestamp,
 )


 class VRTIE(InfoExtractor):
-    IE_DESC = 'deredactie.be, sporza.be, cobra.be and cobra.canvas.be'
-    _VALID_URL = r'https?://(?:deredactie|sporza|cobra(?:\.canvas)?)\.be/cm/(?:[^/]+/)+(?P<id>[^/]+)/*'
-    _TESTS = [
-        # deredactie.be
-        {
-            'url': 'http://deredactie.be/cm/vrtnieuws/videozone/programmas/journaal/EP_141025_JOL',
-            'md5': '4cebde1eb60a53782d4f3992cbd46ec8',
-            'info_dict': {
-                'id': '2129880',
-                'ext': 'flv',
-                'title': 'Het journaal L - 25/10/14',
-                'description': None,
-                'timestamp': 1414271750.949,
-                'upload_date': '20141025',
-                'duration': 929,
-            },
-            'skip': 'HTTP Error 404: Not Found',
-        },
-        # sporza.be
-        {
-            'url': 'http://sporza.be/cm/sporza/videozone/programmas/extratime/EP_141020_Extra_time',
-            'md5': '11f53088da9bf8e7cfc42456697953ff',
-            'info_dict': {
-                'id': '2124639',
-                'ext': 'flv',
-                'title': 'Bekijk Extra Time van 20 oktober',
-                'description': 'md5:83ac5415a4f1816c6a93f8138aef2426',
-                'timestamp': 1413835980.560,
-                'upload_date': '20141020',
-                'duration': 3238,
-            },
-            'skip': 'HTTP Error 404: Not Found',
-        },
-        # cobra.be
-        {
-            'url': 'http://cobra.be/cm/cobra/videozone/rubriek/film-videozone/141022-mv-ellis-cafecorsari',
-            'md5': '78a2b060a5083c4f055449a72477409d',
-            'info_dict': {
-                'id': '2126050',
-                'ext': 'flv',
-                'title': 'Bret Easton Ellis in Café Corsari',
-                'description': 'md5:f699986e823f32fd6036c1855a724ee9',
-                'timestamp': 1413967500.494,
-                'upload_date': '20141022',
-                'duration': 661,
-            },
-            'skip': 'HTTP Error 404: Not Found',
-        },
-        {
-            # YouTube video
-            'url': 'http://deredactie.be/cm/vrtnieuws/videozone/nieuws/cultuurenmedia/1.2622957',
-            'md5': 'b8b93da1df1cea6c8556255a796b7d61',
-            'info_dict': {
-                'id': 'Wji-BZ0oCwg',
-                'ext': 'mp4',
-                'title': 'ROGUE ONE: A STAR WARS STORY Official Teaser Trailer',
-                'description': 'md5:8e468944dce15567a786a67f74262583',
-                'uploader': 'Star Wars',
-                'uploader_id': 'starwars',
-                'upload_date': '20160407',
-            },
-            'add_ie': ['Youtube'],
-        },
-        {
-            'url': 'http://cobra.canvas.be/cm/cobra/videozone/rubriek/film-videozone/1.2377055',
-            'info_dict': {
-                'id': '2377055',
-                'ext': 'mp4',
-                'title': 'Cafe Derby',
-                'description': 'Lenny Van Wesemael debuteert met de langspeelfilm Café Derby. Een waar gebeurd maar ook verzonnen verhaal.',
-                'upload_date': '20150626',
-                'timestamp': 1435305240.769,
-            },
-            'params': {
-                # m3u8 download
-                'skip_download': True,
-            }
-        }
-    ]
+    IE_DESC = 'VRT NWS, Flanders News, Flandern Info and Sporza'
+    _VALID_URL = r'https?://(?:www\.)?(?P<site>vrt\.be/vrtnws|sporza\.be)/[a-z]{2}/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
+    _TESTS = [{
+        'url': 'https://www.vrt.be/vrtnws/nl/2019/05/15/beelden-van-binnenkant-notre-dame-een-maand-na-de-brand/',
+        'md5': 'e1663accf5cf13f375f3cd0d10476669',
+        'info_dict': {
+            'id': 'pbs-pub-7855fc7b-1448-49bc-b073-316cb60caa71$vid-2ca50305-c38a-4762-9890-65cbd098b7bd',
+            'ext': 'mp4',
+            'title': 'Beelden van binnenkant Notre-Dame, één maand na de brand',
+            'description': 'Op maandagavond 15 april ging een deel van het dakgebinte van de Parijse kathedraal in vlammen op.',
+            'timestamp': 1557924660,
+            'upload_date': '20190515',
+            'duration': 31.2,
+        },
+    }, {
+        'url': 'https://sporza.be/nl/2019/05/15/de-belgian-cats-zijn-klaar-voor-het-ek/',
+        'md5': '910bba927566e9ab992278f647eb4b75',
+        'info_dict': {
+            'id': 'pbs-pub-f2c86a46-8138-413a-a4b9-a0015a16ce2c$vid-1f112b31-e58e-4379-908d-aca6d80f8818',
+            'ext': 'mp4',
+            'title': 'De Belgian Cats zijn klaar voor het EK mét Ann Wauters',
+            'timestamp': 1557923760,
+            'upload_date': '20190515',
+            'duration': 115.17,
+        },
+    }, {
+        'url': 'https://www.vrt.be/vrtnws/en/2019/05/15/belgium_s-eurovision-entry-falls-at-the-first-hurdle/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.vrt.be/vrtnws/de/2019/05/15/aus-fuer-eliott-im-halbfinale-des-eurosongfestivals/',
+        'only_matching': True,
+    }]
+    _CLIENT_MAP = {
+        'vrt.be/vrtnws': 'vrtnieuws',
+        'sporza.be': 'sporza',
+    }

     def _real_extract(self, url):
-        video_id = self._match_id(url)
-
-        webpage = self._download_webpage(url, video_id)
-
-        video_id = self._search_regex(
-            r'data-video-id="([^"]+)_[^"]+"', webpage, 'video id', fatal=False)
-
-        src = self._search_regex(
-            r'data-video-src="([^"]+)"', webpage, 'video src', default=None)
-
-        video_type = self._search_regex(
-            r'data-video-type="([^"]+)"', webpage, 'video type', default=None)
-
-        if video_type == 'YouTubeVideo':
-            return self.url_result(src, 'Youtube')
-
-        formats = []
-
-        mobj = re.search(
-            r'data-video-iphone-server="(?P<server>[^"]+)"\s+data-video-iphone-path="(?P<path>[^"]+)"',
-            webpage)
-        if mobj:
-            formats.extend(self._extract_m3u8_formats(
-                '%s/%s' % (mobj.group('server'), mobj.group('path')),
-                video_id, 'mp4', m3u8_id='hls', fatal=False))
-
-        if src:
-            formats = self._extract_wowza_formats(src, video_id)
-            if 'data-video-geoblocking="true"' not in webpage:
-                for f in formats:
-                    if f['url'].startswith('rtsp://'):
-                        http_format = f.copy()
-                        http_format.update({
-                            'url': f['url'].replace('rtsp://', 'http://').replace('vod.', 'download.').replace('/_definst_/', '/').replace('mp4:', ''),
-                            'format_id': f['format_id'].replace('rtsp', 'http'),
-                            'protocol': 'http',
-                        })
-                        formats.append(http_format)
-
-        if not formats and 'data-video-geoblocking="true"' in webpage:
-            self.raise_geo_restricted('This video is only available in Belgium')
-
-        self._sort_formats(formats)
-
-        title = self._og_search_title(webpage)
-        description = self._og_search_description(webpage, default=None)
-        thumbnail = self._og_search_thumbnail(webpage)
-        timestamp = float_or_none(self._search_regex(
-            r'data-video-sitestat-pubdate="(\d+)"', webpage, 'timestamp', fatal=False), 1000)
-        duration = float_or_none(self._search_regex(
-            r'data-video-duration="(\d+)"', webpage, 'duration', fatal=False), 1000)
+        site, display_id = re.match(self._VALID_URL, url).groups()
+        webpage = self._download_webpage(url, display_id)
+        attrs = extract_attributes(self._search_regex(
+            r'(<[^>]+class="vrtvideo"[^>]*>)', webpage, 'vrt video'))
+
+        asset_id = attrs['data-videoid']
+        publication_id = attrs.get('data-publicationid')
+        if publication_id:
+            asset_id = publication_id + '$' + asset_id
+        client = attrs.get('data-client') or self._CLIENT_MAP[site]
+
+        title = strip_or_none(get_element_by_class(
+            'vrt-title', webpage) or self._html_search_meta(
+            ['og:title', 'twitter:title', 'name'], webpage))
+        description = self._html_search_meta(
+            ['og:description', 'twitter:description', 'description'], webpage)
+        if description == '…':
+            description = None
+        timestamp = unified_timestamp(self._html_search_meta(
+            'article:published_time', webpage))

         return {
-            'id': video_id,
+            '_type': 'url_transparent',
+            'id': asset_id,
+            'display_id': display_id,
             'title': title,
             'description': description,
-            'thumbnail': thumbnail,
+            'thumbnail': attrs.get('data-posterimage'),
             'timestamp': timestamp,
-            'duration': duration,
-            'formats': formats,
+            'duration': float_or_none(attrs.get('data-duration'), 1000),
+            'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (client, asset_id),
+            'ie_key': 'Canvas',
         }
View File
@@ -130,7 +130,7 @@ class VRVIE(VRVBaseIE):
         self._TOKEN_SECRET = token_credentials['oauth_token_secret']

     def _extract_vrv_formats(self, url, video_id, stream_format, audio_lang, hardsub_lang):
-        if not url or stream_format not in ('hls', 'dash'):
+        if not url or stream_format not in ('hls', 'dash', 'adaptive_hls'):
             return []
         stream_id_list = []
         if audio_lang:
@@ -140,7 +140,7 @@ class VRVIE(VRVBaseIE):
         format_id = stream_format
         if stream_id_list:
             format_id += '-' + '-'.join(stream_id_list)
-        if stream_format == 'hls':
+        if 'hls' in stream_format:
             adaptive_formats = self._extract_m3u8_formats(
                 url, video_id, 'mp4', m3u8_id=format_id,
                 note='Downloading %s information' % format_id,
@@ -198,7 +198,8 @@ class VRVIE(VRVBaseIE):
         self._sort_formats(formats)

         subtitles = {}
-        for subtitle in streams_json.get('subtitles', {}).values():
-            subtitle_url = subtitle.get('url')
-            if not subtitle_url:
-                continue
+        for k in ('captions', 'subtitles'):
+            for subtitle in streams_json.get(k, {}).values():
+                subtitle_url = subtitle.get('url')
+                if not subtitle_url:
+                    continue
View File
@@ -12,7 +12,7 @@ from ..utils import (


 class VVVVIDIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?vvvvid\.it/#!(?:show|anime|film|series)/(?P<show_id>\d+)/[^/]+/(?P<season_id>\d+)/(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://(?:www\.)?vvvvid\.it/(?:#!)?(?:show|anime|film|series)/(?P<show_id>\d+)/[^/]+/(?P<season_id>\d+)/(?P<id>[0-9]+)'
     _TESTS = [{
         # video_type == 'video/vvvvid'
         'url': 'https://www.vvvvid.it/#!show/434/perche-dovrei-guardarlo-di-dario-moccia/437/489048/ping-pong',
View File
@@ -511,6 +511,8 @@ class YahooGyaOPlayerIE(InfoExtractor):
             'https://gyao.yahoo.co.jp/dam/v1/videos/' + video_id,
             video_id, query={
                 'fields': 'longDescription,title,videoId',
+            }, headers={
+                'X-User-Agent': 'Unknown Pc GYAO!/2.0.0 Web',
             })
         return {
             '_type': 'url_transparent',
@@ -526,7 +528,7 @@ class YahooGyaOPlayerIE(InfoExtractor):

 class YahooGyaOIE(InfoExtractor):
     IE_NAME = 'yahoo:gyao'
-    _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/p|streaming\.yahoo\.co\.jp/p/y)/(?P<id>\d+/v\d+)'
+    _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/(?:p|title/[^/]+)|streaming\.yahoo\.co\.jp/p/y)/(?P<id>\d+/v\d+|[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'

     _TESTS = [{
         'url': 'https://gyao.yahoo.co.jp/p/00449/v03102/',
         'info_dict': {
@@ -536,6 +538,9 @@ class YahooGyaOIE(InfoExtractor):
     }, {
         'url': 'https://streaming.yahoo.co.jp/p/y/01034/v00133/',
         'only_matching': True,
+    }, {
+        'url': 'https://gyao.yahoo.co.jp/title/%E3%81%97%E3%82%83%E3%81%B9%E3%81%8F%E3%82%8A007/5b025a49-b2e5-4dc7-945c-09c6634afacf',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
View File
@@ -70,9 +70,9 @@ class YandexVideoIE(InfoExtractor):
         description = content.get('description')
         thumbnail = content.get('thumbnail')

-        timestamp = (int_or_none(content.get('release_date')) or
-                     int_or_none(content.get('release_date_ut')) or
-                     int_or_none(content.get('start_time')))
+        timestamp = (int_or_none(content.get('release_date'))
+                     or int_or_none(content.get('release_date_ut'))
+                     or int_or_none(content.get('start_time')))
         duration = int_or_none(content.get('duration'))
         series = content.get('program_title')
         age_limit = int_or_none(content.get('restriction_age'))
View File
@@ -258,8 +258,8 @@ class YoukuShowIE(InfoExtractor):
             transform_source=lambda s: js_to_json(strip_jsonp(s))).get('html')
         if playlist_data is None:
             return [None, None]
-        drama_list = (get_element_by_class('p-drama-grid', playlist_data) or
-                      get_element_by_class('p-drama-half-row', playlist_data))
+        drama_list = (get_element_by_class('p-drama-grid', playlist_data)
+                      or get_element_by_class('p-drama-half-row', playlist_data))
         if drama_list is None:
             raise ExtractorError('No episodes found')
         video_urls = re.findall(r'<a[^>]+href="([^"]+)"', drama_list)
View File
@@ -1789,9 +1789,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 raise ExtractorError(
                     'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)

-        if video_info.get('license_info'):
-            raise ExtractorError('This video is DRM protected.', expected=True)
-
         video_details = try_get(
             player_response, lambda x: x['videoDetails'], dict) or {}
@@ -1927,7 +1924,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             formats = []
             for url_data_str in encoded_url_map.split(','):
                 url_data = compat_parse_qs(url_data_str)
-                if 'itag' not in url_data or 'url' not in url_data:
+                if 'itag' not in url_data or 'url' not in url_data or url_data.get('drm_families'):
                     continue
                 stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
                 # Unsupported FORMAT_STREAM_TYPE_OTF
@@ -1987,7 +1984,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                         signature = self._decrypt_signature(
                             encrypted_sig, video_id, player_url, age_gate)
-                        url += '&signature=' + signature
+                        sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
+                        url += '&%s=%s' % (sp, signature)
                     if 'ratebypass' not in url:
                         url += '&ratebypass=yes'
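The `sp` change above tracks YouTube responses that now name the query parameter the decrypted signature must be sent back under (`sig` instead of the historical `signature`). A sketch with dummy values; `'DECRYPTED'` stands in for the real `self._decrypt_signature(...)` call:

    from youtube_dl.compat import compat_parse_qs

    url_data = compat_parse_qs('itag=22&sp=sig&s=ENCRYPTED')
    sp = (url_data.get('sp') or ['signature'])[0]
    url = 'https://example.com/videoplayback?itag=22'
    url += '&%s=%s' % (sp, 'DECRYPTED')
    assert url.endswith('&sig=DECRYPTED')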
@@ -2051,8 +2049,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 url_or_none(try_get(
                     player_response,
                     lambda x: x['streamingData']['hlsManifestUrl'],
-                    compat_str)) or
-                url_or_none(try_get(
+                    compat_str))
+                or url_or_none(try_get(
                     video_info, lambda x: x['hlsvp'][0], compat_str)))
             if manifest_url:
                 formats = []
@@ -2101,10 +2099,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 self._downloader.report_warning('unable to extract uploader nickname')

         channel_id = (
-            str_or_none(video_details.get('channelId')) or
-            self._html_search_meta(
-                'channelId', video_webpage, 'channel id', default=None) or
-            self._search_regex(
+            str_or_none(video_details.get('channelId'))
+            or self._html_search_meta(
+                'channelId', video_webpage, 'channel id', default=None)
+            or self._search_regex(
                 r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                 video_webpage, 'channel id', default=None, group='id'))
         channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
@@ -2226,6 +2224,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
                 'view count', default=None))

+        average_rating = (
+            float_or_none(video_details.get('averageRating'))
+            or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
+
         # subtitles
         video_subtitles = self.extract_subtitles(video_id, video_webpage)
         automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
@@ -2322,6 +2324,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     '"token" parameter not in video info for unknown reason',
                     video_id=video_id)

+        if not formats and (video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos'])):
+            raise ExtractorError('This video is DRM protected.', expected=True)
+
         self._sort_formats(formats)

         self.mark_watched(video_id, video_info, player_response)
@@ -2352,7 +2357,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'view_count': view_count,
             'like_count': like_count,
             'dislike_count': dislike_count,
-            'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
+            'average_rating': average_rating,
             'formats': formats,
             'is_live': is_live,
             'start_time': start_time,
@@ -2563,9 +2568,9 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
         search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
         title_span = (
-            search_title('playlist-title') or
-            search_title('title long-title') or
-            search_title('title'))
+            search_title('playlist-title')
+            or search_title('title long-title')
+            or search_title('title'))
         title = clean_html(title_span)

         return self.playlist_result(url_results, playlist_id, title)
View File
@@ -86,8 +86,8 @@ class ZattooPlatformBaseIE(InfoExtractor):
             return next(
                 chan['cid'] for chan in channel_list
                 if chan.get('cid') and (
-                    chan.get('display_alias') == channel_name or
-                    chan.get('cid') == channel_name))
+                    chan.get('display_alias') == channel_name
+                    or chan.get('cid') == channel_name))
         except StopIteration:
             raise ExtractorError('Could not extract channel id')
View File
@@ -221,9 +221,9 @@ class FFmpegPostProcessor(PostProcessor):
         # avconv does not have repeat option
         if self.basename == 'ffmpeg':
             cmd += [encodeArgument('-loglevel'), encodeArgument('repeat+info')]
-        cmd += (files_cmd +
-                [encodeArgument(o) for o in opts] +
-                [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
+        cmd += (files_cmd
+                + [encodeArgument(o) for o in opts]
+                + [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])

         if self._downloader.params.get('verbose', False):
             self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
@@ -326,8 +326,8 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
             information['ext'] = extension

         # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
-        if (new_path == path or
-                (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
+        if (new_path == path
+                or (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
             self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
             return [], information
View File
@@ -64,8 +64,8 @@ class XAttrMetadataPP(PostProcessor):
         except XAttrMetadataError as e:
             if e.reason == 'NO_SPACE':
                 self._downloader.report_warning(
-                    'There\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. ' +
-                    (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())
+                    'There\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. '
+                    + (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())
             elif e.reason == 'VALUE_TOO_LONG':
                 self._downloader.report_warning(
                     'Unable to write extended attributes due to too long values.')
View File
@@ -125,8 +125,8 @@ KNOWN_EXTENSIONS = (

 # needed for sanitizing filenames in restricted mode
 ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
-                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
-                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))
+                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
+                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

 DATE_FORMATS = (
     '%d %B %Y',
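The mapping fix is visible through restricted-mode filename sanitization: 'Þ' and 'þ' now transliterate to 'TH' and 'th' instead of 'P' and 'p' (the old table was shifted by one entry). A quick check, assuming this release:

    from youtube_dl.utils import sanitize_filename

    # 'Þ' -> 'TH' and 'ó' -> 'o' under the new table (was 'P' + 'o' + 'r').
    assert sanitize_filename('Þór', restricted=True) == 'THor'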
@@ -861,8 +861,8 @@ class XAttrMetadataError(YoutubeDLError):
         self.msg = msg

         # Parsing code and msg
-        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
-                'No space left' in self.msg or 'Disk quota excedded' in self.msg):
+        if (self.code in (errno.ENOSPC, errno.EDQUOT)
+                or 'No space left' in self.msg or 'Disk quota excedded' in self.msg):
             self.reason = 'NO_SPACE'
         elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
             self.reason = 'VALUE_TOO_LONG'
@@ -1453,8 +1453,8 @@ def _windows_write_string(s, out):
     def not_a_console(handle):
         if handle == INVALID_HANDLE_VALUE or handle is None:
             return True
-        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
-                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
+        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
+                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

     if not_a_console(h):
         return False
@@ -1490,8 +1490,8 @@ def write_string(s, out=None, encoding=None):
         if _windows_write_string(s, out):
             return

-    if ('b' in getattr(out, 'mode', '') or
-            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
+    if ('b' in getattr(out, 'mode', '')
+            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
         byt = s.encode(encoding or preferredencoding(), 'ignore')
         out.write(byt)
     elif hasattr(out, 'buffer'):
@@ -1951,8 +1951,8 @@ def bool_or_none(v, default=None):
     return v if isinstance(v, bool) else default


-def strip_or_none(v):
-    return None if v is None else v.strip()
+def strip_or_none(v, default=None):
+    return v.strip() if isinstance(v, compat_str) else default


 def url_or_none(url):
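The reworked `strip_or_none` now returns `default` for any non-string input instead of raising `AttributeError` on, say, an integer. Behavior in brief, assuming this release:

    from youtube_dl.utils import strip_or_none

    assert strip_or_none('  abc ') == 'abc'
    assert strip_or_none(None) is None
    assert strip_or_none(42) is None            # used to raise AttributeError
    assert strip_or_none(42, default=42) == 42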
@@ -2328,10 +2328,10 @@ def merge_dicts(*dicts):
         for k, v in a_dict.items():
             if v is None:
                 continue
-            if (k not in merged or
-                    (isinstance(v, compat_str) and v and
-                     isinstance(merged[k], compat_str) and
-                     not merged[k])):
+            if (k not in merged
+                    or (isinstance(v, compat_str) and v
+                        and isinstance(merged[k], compat_str)
+                        and not merged[k])):
                 merged[k] = v
     return merged
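For reference, the `merge_dicts` rule being restyled above: a later value only lands if the key is missing, or if a non-empty string replaces an empty one; `None` values never overwrite. For example:

    from youtube_dl.utils import merge_dicts

    assert merge_dicts({'a': ''}, {'a': 'x'}) == {'a': 'x'}      # fills empty string
    assert merge_dicts({'a': 'keep'}, {'a': 'x'}) == {'a': 'keep'}
    assert merge_dicts({'a': None}, {'a': 'x'}) == {'a': 'x'}    # None is skipped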
@@ -2657,14 +2657,14 @@ def _match_one(filter_part, dct):
     if m:
         op = COMPARISON_OPERATORS[m.group('op')]
         actual_value = dct.get(m.group('key'))
-        if (m.group('quotedstrval') is not None or
-            m.group('strval') is not None or
+        if (m.group('quotedstrval') is not None
+                or m.group('strval') is not None
                 # If the original field is a string and matching comparisonvalue is
                 # a number we should respect the origin of the original field
                 # and process comparison value as a string (see
                 # https://github.com/ytdl-org/youtube-dl/issues/11082).
-            actual_value is not None and m.group('intval') is not None and
-                isinstance(actual_value, compat_str)):
+                or actual_value is not None and m.group('intval') is not None
+                and isinstance(actual_value, compat_str)):
             if m.group('op') not in ('=', '!='):
                 raise ValueError(
                     'Operator %s does not support string values!' % m.group('op'))
@@ -3973,9 +3973,9 @@ def write_xattr(path, key, value):
                 executable = 'xattr'
                 opts = ['-w', key, value]

-            cmd = ([encodeFilename(executable, True)] +
-                   [encodeArgument(o) for o in opts] +
-                   [encodeFilename(path, True)])
+            cmd = ([encodeFilename(executable, True)]
+                   + [encodeArgument(o) for o in opts]
+                   + [encodeFilename(path, True)])

             try:
                 p = subprocess.Popen(
View File
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals

-__version__ = '2019.04.30'
+__version__ = '2019.06.08'