mirror of
https://codeberg.org/polarisfm/youtube-dl
synced 2024-11-29 19:47:54 +01:00
Merge remote-tracking branch 'upstream/master' into spreaker
This commit is contained in:
commit
901735194b
7
.github/ISSUE_TEMPLATE.md
vendored
7
.github/ISSUE_TEMPLATE.md
vendored
@ -6,12 +6,13 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.08.09*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
|
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.01*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
|
||||||
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.08.09**
|
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.01**
|
||||||
|
|
||||||
### Before submitting an *issue* make sure you have:
|
### Before submitting an *issue* make sure you have:
|
||||||
- [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
- [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
||||||
- [ ] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
|
- [ ] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
|
||||||
|
- [ ] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser
|
||||||
|
|
||||||
### What is the purpose of your *issue*?
|
### What is the purpose of your *issue*?
|
||||||
- [ ] Bug report (encountered problems with youtube-dl)
|
- [ ] Bug report (encountered problems with youtube-dl)
|
||||||
@ -35,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
|
|||||||
[debug] User config: []
|
[debug] User config: []
|
||||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||||
[debug] youtube-dl version 2017.08.09
|
[debug] youtube-dl version 2018.05.01
|
||||||
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
||||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
||||||
[debug] Proxy map: {}
|
[debug] Proxy map: {}
|
||||||
|
1
.github/ISSUE_TEMPLATE_tmpl.md
vendored
1
.github/ISSUE_TEMPLATE_tmpl.md
vendored
@ -12,6 +12,7 @@
|
|||||||
### Before submitting an *issue* make sure you have:
|
### Before submitting an *issue* make sure you have:
|
||||||
- [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
- [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
||||||
- [ ] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
|
- [ ] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
|
||||||
|
- [ ] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser
|
||||||
|
|
||||||
### What is the purpose of your *issue*?
|
### What is the purpose of your *issue*?
|
||||||
- [ ] Bug report (encountered problems with youtube-dl)
|
- [ ] Bug report (encountered problems with youtube-dl)
|
||||||
|
1
.github/PULL_REQUEST_TEMPLATE.md
vendored
1
.github/PULL_REQUEST_TEMPLATE.md
vendored
@ -9,6 +9,7 @@
|
|||||||
### Before submitting a *pull request* make sure you have:
|
### Before submitting a *pull request* make sure you have:
|
||||||
- [ ] At least skimmed through [adding new extractor tutorial](https://github.com/rg3/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/rg3/youtube-dl#youtube-dl-coding-conventions) sections
|
- [ ] At least skimmed through [adding new extractor tutorial](https://github.com/rg3/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/rg3/youtube-dl#youtube-dl-coding-conventions) sections
|
||||||
- [ ] [Searched](https://github.com/rg3/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
|
- [ ] [Searched](https://github.com/rg3/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
|
||||||
|
- [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
|
||||||
|
|
||||||
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
|
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
|
||||||
- [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
|
- [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
|
||||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -22,6 +22,7 @@ cover/
|
|||||||
updates_key.pem
|
updates_key.pem
|
||||||
*.egg-info
|
*.egg-info
|
||||||
*.srt
|
*.srt
|
||||||
|
*.ttml
|
||||||
*.sbv
|
*.sbv
|
||||||
*.vtt
|
*.vtt
|
||||||
*.flv
|
*.flv
|
||||||
|
21
.travis.yml
21
.travis.yml
@ -7,16 +7,21 @@ python:
|
|||||||
- "3.4"
|
- "3.4"
|
||||||
- "3.5"
|
- "3.5"
|
||||||
- "3.6"
|
- "3.6"
|
||||||
|
- "pypy"
|
||||||
|
- "pypy3"
|
||||||
sudo: false
|
sudo: false
|
||||||
env:
|
env:
|
||||||
- YTDL_TEST_SET=core
|
- YTDL_TEST_SET=core
|
||||||
- YTDL_TEST_SET=download
|
- YTDL_TEST_SET=download
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- env: JYTHON=true; YTDL_TEST_SET=core
|
||||||
|
- env: JYTHON=true; YTDL_TEST_SET=download
|
||||||
|
fast_finish: true
|
||||||
|
allow_failures:
|
||||||
|
- env: YTDL_TEST_SET=download
|
||||||
|
- env: JYTHON=true; YTDL_TEST_SET=core
|
||||||
|
- env: JYTHON=true; YTDL_TEST_SET=download
|
||||||
|
before_install:
|
||||||
|
- if [ "$JYTHON" == "true" ]; then ./devscripts/install_jython.sh; export PATH="$HOME/jython/bin:$PATH"; fi
|
||||||
script: ./devscripts/run_tests.sh
|
script: ./devscripts/run_tests.sh
|
||||||
notifications:
|
|
||||||
email:
|
|
||||||
- filippo.valsorda@gmail.com
|
|
||||||
- yasoob.khld@gmail.com
|
|
||||||
# irc:
|
|
||||||
# channels:
|
|
||||||
# - "irc.freenode.org#youtube-dl"
|
|
||||||
# skip_join: true
|
|
||||||
|
16
AUTHORS
16
AUTHORS
@ -223,3 +223,19 @@ Jan Kundrát
|
|||||||
Giuseppe Fabiano
|
Giuseppe Fabiano
|
||||||
Örn Guðjónsson
|
Örn Guðjónsson
|
||||||
Parmjit Virk
|
Parmjit Virk
|
||||||
|
Genki Sky
|
||||||
|
Ľuboš Katrinec
|
||||||
|
Corey Nicholson
|
||||||
|
Ashutosh Chaudhary
|
||||||
|
John Dong
|
||||||
|
Tatsuyuki Ishi
|
||||||
|
Daniel Weber
|
||||||
|
Kay Bouché
|
||||||
|
Yang Hongbo
|
||||||
|
Lei Wang
|
||||||
|
Petr Novák
|
||||||
|
Leonardo Taccari
|
||||||
|
Martin Weinelt
|
||||||
|
Surya Oktafendri
|
||||||
|
TingPing
|
||||||
|
Alexandre Macabies
|
||||||
|
@ -3,7 +3,7 @@
|
|||||||
$ youtube-dl -v <your command line>
|
$ youtube-dl -v <your command line>
|
||||||
[debug] System config: []
|
[debug] System config: []
|
||||||
[debug] User config: []
|
[debug] User config: []
|
||||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
[debug] Command-line args: [u'-v', u'https://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||||
[debug] youtube-dl version 2015.12.06
|
[debug] youtube-dl version 2015.12.06
|
||||||
[debug] Git HEAD: 135392e
|
[debug] Git HEAD: 135392e
|
||||||
@ -34,7 +34,7 @@ For bug reports, this means that your report should contain the *complete* outpu
|
|||||||
|
|
||||||
If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
|
If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
|
||||||
|
|
||||||
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `http://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `http://www.youtube.com/`) is *not* an example URL.
|
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `https://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `https://www.youtube.com/`) is *not* an example URL.
|
||||||
|
|
||||||
### Are you using the latest version?
|
### Are you using the latest version?
|
||||||
|
|
||||||
@ -70,7 +70,7 @@ It may sound strange, but some bug reports we receive are completely unrelated t
|
|||||||
|
|
||||||
# DEVELOPER INSTRUCTIONS
|
# DEVELOPER INSTRUCTIONS
|
||||||
|
|
||||||
Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution.
|
Most users do not need to build youtube-dl and can [download the builds](https://rg3.github.io/youtube-dl/download.html) or get them from their distribution.
|
||||||
|
|
||||||
To run youtube-dl as a developer, you don't need to build anything either. Simply execute
|
To run youtube-dl as a developer, you don't need to build anything either. Simply execute
|
||||||
|
|
||||||
@ -82,6 +82,8 @@ To run the test, simply invoke your favorite test runner, or execute a test file
|
|||||||
python test/test_download.py
|
python test/test_download.py
|
||||||
nosetests
|
nosetests
|
||||||
|
|
||||||
|
See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
|
||||||
|
|
||||||
If you want to create a build of youtube-dl yourself, you'll need
|
If you want to create a build of youtube-dl yourself, you'll need
|
||||||
|
|
||||||
* python
|
* python
|
||||||
@ -118,7 +120,7 @@ After you have ensured this site is distributing its content legally, you can fo
|
|||||||
class YourExtractorIE(InfoExtractor):
|
class YourExtractorIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
|
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://yourextractor.com/watch/42',
|
'url': 'https://yourextractor.com/watch/42',
|
||||||
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
|
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '42',
|
'id': '42',
|
||||||
@ -149,10 +151,10 @@ After you have ensured this site is distributing its content legally, you can fo
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
||||||
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
|
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
|
||||||
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L74-L252). Add tests and code for as many as you want.
|
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L74-L252). Add tests and code for as many as you want.
|
||||||
8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://pypi.python.org/pypi/flake8). Also make sure your code works under all [Python](http://www.python.org/) versions claimed supported by youtube-dl, namely 2.6, 2.7, and 3.2+.
|
8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://pypi.python.org/pypi/flake8). Also make sure your code works under all [Python](https://www.python.org/) versions claimed supported by youtube-dl, namely 2.6, 2.7, and 3.2+.
|
||||||
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
9. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files and [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
|
||||||
|
|
||||||
$ git add youtube_dl/extractor/extractors.py
|
$ git add youtube_dl/extractor/extractors.py
|
||||||
$ git add youtube_dl/extractor/yourextractor.py
|
$ git add youtube_dl/extractor/yourextractor.py
|
||||||
|
944
ChangeLog
944
ChangeLog
@ -1,3 +1,943 @@
|
|||||||
|
version 2018.05.01
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [downloader/fragment] Restart download if .ytdl file is corrupt (#16312)
|
||||||
|
+ [extractor/common] Extract interaction statistic
|
||||||
|
+ [utils] Add merge_dicts
|
||||||
|
+ [extractor/common] Add _download_json_handle
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [kaltura] Improve iframe embeds detection (#16337)
|
||||||
|
+ [udemy] Extract outputs renditions (#16289, #16291, #16320, #16321, #16334,
|
||||||
|
#16335)
|
||||||
|
+ [zattoo] Add support for zattoo.com and mobiltv.quickline.com (#14668, #14676)
|
||||||
|
* [yandexmusic] Convert release_year to int
|
||||||
|
* [udemy] Override _download_webpage_handle instead of _download_webpage
|
||||||
|
* [xiami] Override _download_webpage_handle instead of _download_webpage
|
||||||
|
* [yandexmusic] Override _download_webpage_handle instead of _download_webpage
|
||||||
|
* [youtube] Correctly disable polymer on all requests (#16323, #16326)
|
||||||
|
* [generic] Prefer enclosures over links in RSS feeds (#16189)
|
||||||
|
+ [redditr] Add support for old.reddit.com URLs (#16274)
|
||||||
|
* [nrktv] Update API host (#16324)
|
||||||
|
+ [imdb] Extract all formats (#16249)
|
||||||
|
+ [vimeo] Extract JSON-LD (#16295)
|
||||||
|
* [funk:channel] Improve extraction (#16285)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.04.25
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [utils] Fix match_str for boolean meta fields
|
||||||
|
+ [Makefile] Add support for pandoc 2 and disable smart extension (#16251)
|
||||||
|
* [YoutubeDL] Fix typo in media extension compatibility checker (#16215)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [openload] Recognize IPv6 stream URLs (#16136, #16137, #16205, #16246,
|
||||||
|
#16250)
|
||||||
|
+ [twitch] Extract is_live according to status (#16259)
|
||||||
|
* [pornflip] Relax URL regular expression (#16258)
|
||||||
|
- [etonline] Remove extractor (#16256)
|
||||||
|
* [breakcom] Fix extraction (#16254)
|
||||||
|
+ [youtube] Add ability to authenticate with cookies
|
||||||
|
* [youtube:feed] Implement lazy playlist extraction (#10184)
|
||||||
|
+ [svt] Add support for TV channel live streams (#15279, #15809)
|
||||||
|
* [ccma] Fix video extraction (#15931)
|
||||||
|
* [rentv] Fix extraction (#15227)
|
||||||
|
+ [nick] Add support for nickjr.nl (#16230)
|
||||||
|
* [extremetube] Fix metadata extraction
|
||||||
|
+ [keezmovies] Add support for generic embeds (#16134, #16154)
|
||||||
|
* [nexx] Extract new azure URLs (#16223)
|
||||||
|
* [cbssports] Fix extraction (#16217)
|
||||||
|
* [kaltura] Improve embeds detection (#16201)
|
||||||
|
* [instagram:user] Fix extraction (#16119)
|
||||||
|
* [cbs] Skip DRM asset types (#16104)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.04.16
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [smotri:broadcast] Fix extraction (#16180)
|
||||||
|
+ [picarto] Add support for picarto.tv (#6205, #12514, #15276, #15551)
|
||||||
|
* [vine:user] Fix extraction (#15514, #16190)
|
||||||
|
* [pornhub] Relax URL regular expression (#16165)
|
||||||
|
* [cbc:watch] Re-acquire device token when expired (#16160)
|
||||||
|
+ [fxnetworks] Add support for https theplatform URLs (#16125, #16157)
|
||||||
|
+ [instagram:user] Add request signing (#16119)
|
||||||
|
+ [twitch] Add support for mobile URLs (#16146)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.04.09
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [YoutubeDL] Do not save/restore console title while simulate (#16103)
|
||||||
|
* [extractor/common] Relax JSON-LD context check (#16006)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [generic] Add support for tube8 embeds
|
||||||
|
+ [generic] Add support for share-videos.se embeds (#16089, #16115)
|
||||||
|
* [odnoklassniki] Extend URL regular expression (#16081)
|
||||||
|
* [steam] Bypass mature content check (#16113)
|
||||||
|
+ [acast] Extract more metadata
|
||||||
|
* [acast] Fix extraction (#16118)
|
||||||
|
* [instagram:user] Fix extraction (#16119)
|
||||||
|
* [drtuber] Fix title extraction (#16107, #16108)
|
||||||
|
* [liveleak] Extend URL regular expression (#16117)
|
||||||
|
+ [openload] Add support for oload.xyz
|
||||||
|
* [openload] Relax stream URL regular expression
|
||||||
|
* [openload] Fix extraction (#16099)
|
||||||
|
+ [svtplay:series] Add support for season URLs
|
||||||
|
+ [svtplay:series] Add support for series (#11130, #16059)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.04.03
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [tvnow] Add support for shows (#15837)
|
||||||
|
* [dramafever] Fix authentication (#16067)
|
||||||
|
* [afreecatv] Use partial view only when necessary (#14450)
|
||||||
|
+ [afreecatv] Add support for authentication (#14450)
|
||||||
|
+ [nationalgeographic] Add support for new URL schema (#16001, #16054)
|
||||||
|
* [xvideos] Fix thumbnail extraction (#15978, #15979)
|
||||||
|
* [medialaan] Fix vod id (#16038)
|
||||||
|
+ [openload] Add support for oload.site (#16039)
|
||||||
|
* [naver] Fix extraction (#16029)
|
||||||
|
* [dramafever] Partially switch to API v5 (#16026)
|
||||||
|
* [abc:iview] Unescape title and series meta fields (#15994)
|
||||||
|
* [videa] Extend URL regular expression (#16003)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.03.26.1
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [downloader/external] Add elapsed time to progress hook (#10876)
|
||||||
|
* [downloader/external,fragment] Fix download finalization when writing file
|
||||||
|
to stdout (#10809, #10876, #15799)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [vrv] Fix extraction on python2 (#15928)
|
||||||
|
* [afreecatv] Update referrer (#15947)
|
||||||
|
+ [24video] Add support for 24video.sexy (#15973)
|
||||||
|
* [crackle] Bypass geo restriction
|
||||||
|
* [crackle] Fix extraction (#15969)
|
||||||
|
+ [lenta] Add support for lenta.ru (#15953)
|
||||||
|
+ [instagram:user] Add pagination (#15934)
|
||||||
|
* [youku] Update ccode (#15939)
|
||||||
|
* [libsyn] Adapt to new page structure
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.03.20
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [extractor/common] Improve thumbnail extraction for HTML5 entries
|
||||||
|
* Generalize XML manifest processing code and improve XSPF parsing
|
||||||
|
+ [extractor/common] Add _download_xml_handle
|
||||||
|
+ [extractor/common] Add support for relative URIs in _parse_xspf (#15794)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [7plus] Extract series metadata (#15862, #15906)
|
||||||
|
* [9now] Bypass geo restriction (#15920)
|
||||||
|
* [cbs] Skip unavailable assets (#13490, #13506, #15776)
|
||||||
|
+ [canalc2] Add support for HTML5 videos (#15916, #15919)
|
||||||
|
+ [ceskatelevize] Add support for iframe embeds (#15918)
|
||||||
|
+ [prosiebensat1] Add support for galileo.tv (#15894)
|
||||||
|
+ [generic] Add support for xfileshare embeds (#15879)
|
||||||
|
* [bilibili] Switch to v2 playurl API
|
||||||
|
* [bilibili] Fix and improve extraction (#15048, #15430, #15622, #15863)
|
||||||
|
* [heise] Improve extraction (#15496, #15784, #15026)
|
||||||
|
* [instagram] Fix user videos extraction (#15858)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.03.14
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [soundcloud] Update client id (#15866)
|
||||||
|
+ [tennistv] Add support for tennistv.com
|
||||||
|
+ [line] Add support for tv.line.me (#9427)
|
||||||
|
* [xnxx] Fix extraction (#15817)
|
||||||
|
* [njpwworld] Fix authentication (#15815)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.03.10
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [downloader/hls] Skip uplynk ad fragments (#15748)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [pornhub] Don't override session cookies (#15697)
|
||||||
|
+ [raywenderlich] Add support for videos.raywenderlich.com (#15251)
|
||||||
|
* [funk] Fix extraction and rework extractors (#15792)
|
||||||
|
* [nexx] Restore reverse engineered approach
|
||||||
|
+ [heise] Add support for kaltura embeds (#14961, #15728)
|
||||||
|
+ [tvnow] Extract series metadata (#15774)
|
||||||
|
* [ruutu] Continue formats extraction on NOT-USED URLs (#15775)
|
||||||
|
* [vrtnu] Use redirect URL for building video JSON URL (#15767, #15769)
|
||||||
|
* [vimeo] Modernize login code and improve error messaging
|
||||||
|
* [archiveorg] Fix extraction (#15770, #15772)
|
||||||
|
+ [hidive] Add support for hidive.com (#15494)
|
||||||
|
* [afreecatv] Detect deleted videos
|
||||||
|
* [afreecatv] Fix extraction (#15755)
|
||||||
|
* [vice] Fix extraction and rework extractors (#11101, #13019, #13622, #13778)
|
||||||
|
+ [vidzi] Add support for vidzi.si (#15751)
|
||||||
|
* [npo] Fix typo
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.03.03
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [utils] Add parse_resolution
|
||||||
|
Revert respect --prefer-insecure while updating
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [yapfiles] Add support for yapfiles.ru (#15726, #11085)
|
||||||
|
* [spankbang] Fix formats extraction (#15727)
|
||||||
|
* [adn] Fix extraction (#15716)
|
||||||
|
+ [toggle] Extract DASH and ISM formats (#15721)
|
||||||
|
+ [nickelodeon] Add support for nickelodeon.com.tr (#15706)
|
||||||
|
* [npo] Validate and filter format URLs (#15709)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.02.26
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [udemy] Use custom User-Agent (#15571)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.02.25
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [postprocessor/embedthumbnail] Skip embedding when there aren't any
|
||||||
|
thumbnails (#12573)
|
||||||
|
* [extractor/common] Improve jwplayer subtitles extraction (#15695)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [vidlii] Add support for vidlii.com (#14472, #14512, #14779)
|
||||||
|
+ [streamango] Capture and output error messages
|
||||||
|
* [streamango] Fix extraction (#14160, #14256)
|
||||||
|
+ [telequebec] Add support for emissions (#14649, #14655)
|
||||||
|
+ [telequebec:live] Add support for live streams (#15688)
|
||||||
|
+ [mailru:music] Add support for mail.ru/music (#15618)
|
||||||
|
* [aenetworks] Switch to akamai HLS formats (#15612)
|
||||||
|
* [ytsearch] Fix flat title extraction (#11260, #15681)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.02.22
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [utils] Fixup some common URL typos in sanitize_url (#15649)
|
||||||
|
* Respect --prefer-insecure while updating (#15497)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [vidio] Fix HLS URL extraction (#15675)
|
||||||
|
+ [nexx] Add support for arc.nexx.cloud URLs
|
||||||
|
* [nexx] Switch to arc API (#15652)
|
||||||
|
* [redtube] Fix duration extraction (#15659)
|
||||||
|
+ [sonyliv] Respect referrer (#15648)
|
||||||
|
+ [brightcove:new] Use referrer for formats' HTTP headers
|
||||||
|
+ [cbc] Add support for olympics.cbc.ca (#15535)
|
||||||
|
+ [fusion] Add support for fusion.tv (#15628)
|
||||||
|
* [npo] Improve quality metadata extraction
|
||||||
|
* [npo] Relax URL regular expression (#14987, #14994)
|
||||||
|
+ [npo] Capture and output error message
|
||||||
|
+ [pornhub] Add support for channels (#15613)
|
||||||
|
* [youtube] Handle shared URLs with generic extractor (#14303)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.02.11
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [YoutubeDL] Add support for filesize_approx in format selector (#15550)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [francetv] Add support for live streams (#13689)
|
||||||
|
+ [francetv] Add support for zouzous.fr and ludo.fr (#10454, #13087, #13103,
|
||||||
|
#15012)
|
||||||
|
* [francetv] Separate main extractor and rework others to delegate to it
|
||||||
|
* [francetv] Improve manifest URL signing (#15536)
|
||||||
|
+ [francetv] Sign m3u8 manifest URLs (#15565)
|
||||||
|
+ [veoh] Add support for embed URLs (#15561)
|
||||||
|
* [afreecatv] Fix extraction (#15556)
|
||||||
|
* [periscope] Use accessVideoPublic endpoint (#15554)
|
||||||
|
* [discovery] Fix auth request (#15542)
|
||||||
|
+ [6play] Extract subtitles (#15541)
|
||||||
|
* [newgrounds] Fix metadata extraction (#15531)
|
||||||
|
+ [nbc] Add support for stream.nbcolympics.com (#10295)
|
||||||
|
* [dvtv] Fix live streams extraction (#15442)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.02.08
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [myvi] Extend URL regular expression
|
||||||
|
+ [myvi:embed] Add support for myvi.tv embeds (#15521)
|
||||||
|
+ [prosiebensat1] Extend URL regular expression (#15520)
|
||||||
|
* [pokemon] Relax URL regular expression and extend title extraction (#15518)
|
||||||
|
+ [gameinformer] Use geo verification headers
|
||||||
|
* [la7] Fix extraction (#15501, #15502)
|
||||||
|
* [gameinformer] Fix brightcove id extraction (#15416)
|
||||||
|
+ [afreecatv] Pass referrer to video info request (#15507)
|
||||||
|
+ [telebruxelles] Add support for live streams
|
||||||
|
* [telebruxelles] Relax URL regular expression
|
||||||
|
* [telebruxelles] Fix extraction (#15504)
|
||||||
|
* [extractor/common] Respect secure schemes in _extract_wowza_formats
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.02.04
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [downloader/http] Randomize HTTP chunk size
|
||||||
|
+ [downloader/http] Add ability to pass downloader options via info dict
|
||||||
|
* [downloader/http] Fix 302 infinite loops by not reusing requests
|
||||||
|
+ Document http_chunk_size
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [brightcove] Pass embed page URL as referrer (#15486)
|
||||||
|
+ [youtube] Enforce using chunked HTTP downloading for DASH formats
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.02.03
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ Introduce --http-chunk-size for chunk-based HTTP downloading
|
||||||
|
+ Add support for IronPython
|
||||||
|
* [downloader/ism] Fix Python 3.2 support
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [redbulltv] Fix extraction (#15481)
|
||||||
|
* [redtube] Fix metadata extraction (#15472)
|
||||||
|
* [pladform] Respect platform id and extract HLS formats (#15468)
|
||||||
|
- [rtlnl] Remove progressive formats (#15459)
|
||||||
|
* [6play] Do no modify asset URLs with a token (#15248)
|
||||||
|
* [nationalgeographic] Relax URL regular expression
|
||||||
|
* [dplay] Relax URL regular expression (#15458)
|
||||||
|
* [cbsinteractive] Fix data extraction (#15451)
|
||||||
|
+ [amcnetworks] Add support for sundancetv.com (#9260)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.01.27
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [extractor/common] Improve _json_ld for articles
|
||||||
|
* Switch codebase to use compat_b64decode
|
||||||
|
+ [compat] Add compat_b64decode
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [seznamzpravy] Add support for seznam.cz and seznamzpravy.cz (#14102, #14616)
|
||||||
|
* [dplay] Bypass geo restriction
|
||||||
|
+ [dplay] Add support for disco-api videos (#15396)
|
||||||
|
* [youtube] Extract precise error messages (#15284)
|
||||||
|
* [teachertube] Capture and output error message
|
||||||
|
* [teachertube] Fix and relax thumbnail extraction (#15403)
|
||||||
|
+ [prosiebensat1] Add another clip id regular expression (#15378)
|
||||||
|
* [tbs] Update tokenizer url (#15395)
|
||||||
|
* [mixcloud] Use compat_b64decode (#15394)
|
||||||
|
- [thesixtyone] Remove extractor (#15341)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.01.21
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [extractor/common] Improve jwplayer DASH formats extraction (#9242, #15187)
|
||||||
|
* [utils] Improve scientific notation handling in js_to_json (#14789)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [southparkdk] Add support for southparkstudios.nu
|
||||||
|
+ [southpark] Add support for collections (#14803)
|
||||||
|
* [franceinter] Fix upload date extraction (#14996)
|
||||||
|
+ [rtvs] Add support for rtvs.sk (#9242, #15187)
|
||||||
|
* [restudy] Fix extraction and extend URL regular expression (#15347)
|
||||||
|
* [youtube:live] Improve live detection (#15365)
|
||||||
|
+ [springboardplatform] Add support for springboardplatform.com
|
||||||
|
* [prosiebensat1] Add another clip id regular expression (#15290)
|
||||||
|
- [ringtv] Remove extractor (#15345)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.01.18
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [soundcloud] Update client id (#15306)
|
||||||
|
- [kamcord] Remove extractor (#15322)
|
||||||
|
+ [spiegel] Add support for nexx videos (#15285)
|
||||||
|
* [twitch] Fix authentication and error capture (#14090, #15264)
|
||||||
|
* [vk] Detect more errors due to copyright complaints (#15259)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.01.14
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [youtube] Fix live streams extraction (#15202)
|
||||||
|
* [wdr] Bypass geo restriction
|
||||||
|
* [wdr] Rework extractors (#14598)
|
||||||
|
+ [wdr] Add support for wdrmaus.de/elefantenseite (#14598)
|
||||||
|
+ [gamestar] Add support for gamepro.de (#3384)
|
||||||
|
* [viafree] Skip rtmp formats (#15232)
|
||||||
|
+ [pandoratv] Add support for mobile URLs (#12441)
|
||||||
|
+ [pandoratv] Add support for new URL format (#15131)
|
||||||
|
+ [ximalaya] Add support for ximalaya.com (#14687)
|
||||||
|
+ [digg] Add support for digg.com (#15214)
|
||||||
|
* [limelight] Tolerate empty pc formats (#15150, #15151, #15207)
|
||||||
|
* [ndr:embed:base] Make separate formats extraction non fatal (#15203)
|
||||||
|
+ [weibo] Add extractor (#15079)
|
||||||
|
+ [ok] Add support for live streams
|
||||||
|
* [canalplus] Fix extraction (#15072)
|
||||||
|
* [bilibili] Fix extraction (#15188)
|
||||||
|
|
||||||
|
|
||||||
|
version 2018.01.07
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [utils] Fix youtube-dl under PyPy3 on Windows
|
||||||
|
* [YoutubeDL] Output python implementation in debug header
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [jwplatform] Add support for multiple embeds (#15192)
|
||||||
|
* [mitele] Fix extraction (#15186)
|
||||||
|
+ [motherless] Add support for groups (#15124)
|
||||||
|
* [lynda] Relax URL regular expression (#15185)
|
||||||
|
* [soundcloud] Fallback to avatar picture for thumbnail (#12878)
|
||||||
|
* [youku] Fix list extraction (#15135)
|
||||||
|
* [openload] Fix extraction (#15166)
|
||||||
|
* [lynda] Skip invalid subtitles (#15159)
|
||||||
|
* [twitch] Pass video id to url_result when extracting playlist (#15139)
|
||||||
|
* [rtve.es:alacarta] Fix extraction of some new URLs
|
||||||
|
* [acast] Fix extraction (#15147)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.12.31
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [extractor/common] Add container meta field for formats extracted
|
||||||
|
in _parse_mpd_formats (#13616)
|
||||||
|
+ [downloader/hls] Use HTTP headers for key request
|
||||||
|
* [common] Use AACL as the default fourcc when AudioTag is 255
|
||||||
|
* [extractor/common] Fix extraction of DASH formats with the same
|
||||||
|
representation id (#15111)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [slutload] Add support for mobile URLs (#14806)
|
||||||
|
* [abc:iview] Bypass geo restriction
|
||||||
|
* [abc:iview] Fix extraction (#14711, #14782, #14838, #14917, #14963, #14985,
|
||||||
|
#15035, #15057, #15061, #15071, #15095, #15106)
|
||||||
|
* [openload] Fix extraction (#15118)
|
||||||
|
- [sandia] Remove extractor
|
||||||
|
- [collegerama] Remove extractor
|
||||||
|
+ [mediasite] Add support for sites based on Mediasite Video Platform (#5428,
|
||||||
|
#11185, #14343)
|
||||||
|
+ [ufctv] Add support for ufc.tv (#14520)
|
||||||
|
* [pluralsight] Fix missing first line of subtitles (#11118)
|
||||||
|
* [openload] Fallback on f-page extraction (#14665, #14879)
|
||||||
|
* [vimeo] Improve password protected videos extraction (#15114)
|
||||||
|
* [aws] Fix canonical/signed headers generation on python 2 (#15102)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.12.28
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [internazionale] Add support for internazionale.it (#14973)
|
||||||
|
* [playtvak] Relax video regular expression and make description optional
|
||||||
|
(#15037)
|
||||||
|
+ [filmweb] Add support for filmweb.no (#8773, #10368)
|
||||||
|
+ [23video] Add support for 23video.com
|
||||||
|
+ [espn] Add support for fivethirtyeight.com (#6864)
|
||||||
|
+ [umg:de] Add support for universal-music.de (#11582, #11584)
|
||||||
|
+ [espn] Add support for espnfc and extract more formats (#8053)
|
||||||
|
* [youku] Update ccode (#14880)
|
||||||
|
+ [openload] Add support for oload.stream (#15070)
|
||||||
|
* [youku] Fix list extraction (#15065)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.12.23
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [extractor/common] Move X-Forwarded-For setup code into _request_webpage
|
||||||
|
+ [YoutubeDL] Add support for playlist_uploader and playlist_uploader_id in
|
||||||
|
output template (#11427, #15018)
|
||||||
|
+ [extractor/common] Introduce uploader, uploader_id and uploader_url
|
||||||
|
meta fields for playlists (#11427, #15018)
|
||||||
|
* [downloader/fragment] Encode filename of fragment being removed (#15020)
|
||||||
|
+ [utils] Add another date format pattern (#14999)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [kaltura] Add another embed pattern for entry_id
|
||||||
|
+ [7plus] Add support for 7plus.com.au (#15043)
|
||||||
|
* [animeondemand] Relax login error regular expression
|
||||||
|
+ [shahid] Add support for show pages (#7401)
|
||||||
|
+ [youtube] Extract uploader, uploader_id and uploader_url for playlists
|
||||||
|
(#11427, #15018)
|
||||||
|
* [afreecatv] Improve format extraction (#15019)
|
||||||
|
+ [cspan] Add support for audio only pages and catch page errors (#14995)
|
||||||
|
+ [mailru] Add support for embed URLs (#14904)
|
||||||
|
* [crunchyroll] Future-proof XML element checks (#15013)
|
||||||
|
* [cbslocal] Fix timestamp extraction (#14999, #15000)
|
||||||
|
* [discoverygo] Correct TTML subtitle extension
|
||||||
|
* [vk] Make view count optional (#14979)
|
||||||
|
* [disney] Skip Apple FairPlay formats (#14982)
|
||||||
|
* [voot] Fix format extraction (#14758)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.12.14
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [postprocessor/xattr] Clarify NO_SPACE message (#14970)
|
||||||
|
* [downloader/http] Return actual download result from real_download (#14971)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [itv] Extract more subtitles and duration
|
||||||
|
* [itv] Improve extraction (#14944)
|
||||||
|
+ [byutv] Add support for geo restricted videos
|
||||||
|
* [byutv] Fix extraction (#14966, #14967)
|
||||||
|
+ [bbccouk] Fix extraction for 320k HLS streams
|
||||||
|
+ [toutv] Add support for special video URLs (#14179)
|
||||||
|
* [discovery] Fix free videos extraction (#14157, #14954)
|
||||||
|
* [tvnow] Fix extraction (#7831)
|
||||||
|
+ [nickelodeon:br] Add support for nickelodeon brazil websites (#14893)
|
||||||
|
* [nick] Improve extraction (#14876)
|
||||||
|
* [tbs] Fix extraction (#13658)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.12.10
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [utils] Add sami mimetype to mimetype2ext
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [culturebox] Improve video id extraction (#14947)
|
||||||
|
* [twitter] Improve extraction (#14197)
|
||||||
|
+ [udemy] Extract more HLS formats
|
||||||
|
* [udemy] Improve course id extraction (#14938)
|
||||||
|
+ [stretchinternet] Add support for portal.stretchinternet.com (#14576)
|
||||||
|
* [ellentube] Fix extraction (#14407, #14570)
|
||||||
|
+ [raiplay:playlist] Add support for playlists (#14563)
|
||||||
|
* [sonyliv] Bypass geo restriction
|
||||||
|
* [sonyliv] Extract higher quality formats (#14922)
|
||||||
|
* [fox] Extract subtitles
|
||||||
|
+ [fox] Add support for Adobe Pass authentication (#14205, #14489)
|
||||||
|
- [dailymotion:cloud] Remove extractor (#6794)
|
||||||
|
* [xhamster] Fix thumbnail extraction (#14780)
|
||||||
|
+ [xhamster] Add support for mobile URLs (#14780)
|
||||||
|
* [generic] Don't pass video id as mpd id while extracting DASH (#14902)
|
||||||
|
* [ard] Skip invalid stream URLs (#14906)
|
||||||
|
* [porncom] Fix metadata extraction (#14911)
|
||||||
|
* [pluralsight] Detect agreement request (#14913)
|
||||||
|
* [toutv] Fix login (#14614)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.12.02
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [downloader/fragment] Commit part file after each fragment
|
||||||
|
+ [extractor/common] Add durations for DASH fragments with bare SegmentURLs
|
||||||
|
+ [extractor/common] Add support for DASH manifests with SegmentLists with
|
||||||
|
bare SegmentURLs (#14844)
|
||||||
|
+ [utils] Add hvc1 codec code to parse_codecs
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [xhamster] Fix extraction (#14884)
|
||||||
|
* [youku] Update ccode (#14872)
|
||||||
|
* [mnet] Fix format extraction (#14883)
|
||||||
|
+ [xiami] Add Referer header to API request
|
||||||
|
* [mtv] Correct scc extension in extracted subtitles (#13730)
|
||||||
|
* [vvvvid] Fix extraction for kenc videos (#13406)
|
||||||
|
+ [br] Add support for BR Mediathek videos (#14560, #14788)
|
||||||
|
+ [daisuki] Add support for motto.daisuki.com (#14681)
|
||||||
|
* [odnoklassniki] Fix API metadata request (#14862)
|
||||||
|
* [itv] Fix HLS formats extraction
|
||||||
|
+ [pbs] Add another media id regular expression
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.11.26
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [extractor/common] Use final URL when dumping request (#14769)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [fczenit] Fix extraction
|
||||||
|
- [firstpost] Remove extractor
|
||||||
|
* [freespeech] Fix extraction
|
||||||
|
* [nexx] Extract more formats
|
||||||
|
+ [openload] Add support for openload.link (#14763)
|
||||||
|
* [empflix] Relax URL regular expression
|
||||||
|
* [empflix] Fix extraction
|
||||||
|
* [tnaflix] Don't modify download URLs (#14811)
|
||||||
|
- [gamersyde] Remove extractor
|
||||||
|
* [francetv:generationwhat] Fix extraction
|
||||||
|
+ [massengeschmacktv] Add support for Massengeschmack TV
|
||||||
|
* [fox9] Fix extraction
|
||||||
|
* [faz] Fix extraction and add support for Perform Group embeds (#14714)
|
||||||
|
+ [performgroup] Add support for performgroup.com
|
||||||
|
+ [jwplatform] Add support for iframes (#14828)
|
||||||
|
* [culturebox] Fix extraction (#14827)
|
||||||
|
* [youku] Fix extraction; update ccode (#14815)
|
||||||
|
* [livestream] Make SMIL extraction non fatal (#14792)
|
||||||
|
+ [drtuber] Add support for mobile URLs (#14772)
|
||||||
|
+ [spankbang] Add support for mobile URLs (#14771)
|
||||||
|
* [instagram] Fix description, timestamp and counters extraction (#14755)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.11.15
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [common] Skip Apple FairPlay m3u8 manifests (#14741)
|
||||||
|
* [YoutubeDL] Fix playlist range optimization for --playlist-items (#14740)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [vshare] Capture and output error message
|
||||||
|
* [vshare] Fix extraction (#14473)
|
||||||
|
* [crunchyroll] Extract old RTMP formats
|
||||||
|
* [tva] Fix extraction (#14736)
|
||||||
|
* [gamespot] Lower preference of HTTP formats (#14652)
|
||||||
|
* [instagram:user] Fix extraction (#14699)
|
||||||
|
* [ccma] Fix typo (#14730)
|
||||||
|
- Remove sensitive data from logging in messages
|
||||||
|
* [instagram:user] Fix extraction (#14699)
|
||||||
|
+ [gamespot] Add support for article URLs (#14652)
|
||||||
|
* [gamespot] Skip Brightcove Once HTTP formats (#14652)
|
||||||
|
* [cartoonnetwork] Update tokenizer_src (#14666)
|
||||||
|
+ [wsj] Recognize another URL pattern (#14704)
|
||||||
|
* [pandatv] Update API URL and sign format URLs (#14693)
|
||||||
|
* [crunchyroll] Use old login method (#11572)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.11.06
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [extractor/common] Add protocol for f4m formats
|
||||||
|
* [f4m] Prefer baseURL for relative URLs (#14660)
|
||||||
|
* [extractor/common] Respect URL query in _extract_wowza_formats (#14645)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [hotstar:playlist] Add support for playlists (#12465)
|
||||||
|
* [hotstar] Bypass geo restriction (#14672)
|
||||||
|
- [22tracks] Remove extractor (#11024, #14628)
|
||||||
|
+ [skysport] Add support for ooyala videos protected with embed_token (#14641)
|
||||||
|
* [gamespot] Extract formats referenced with new data fields (#14652)
|
||||||
|
* [spankbang] Detect unavailable videos (#14644)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.10.29
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [extractor/common] Prefix format id for audio only HLS formats
|
||||||
|
+ [utils] Add support for zero years and months in parse_duration
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [egghead] Fix extraction (#14388)
|
||||||
|
+ [fxnetworks] Extract series metadata (#14603)
|
||||||
|
+ [younow] Add support for younow.com (#9255, #9432, #12436)
|
||||||
|
* [dctptv] Fix extraction (#14599)
|
||||||
|
* [youtube] Restrict embed regular expression (#14600)
|
||||||
|
* [vimeo] Restrict iframe embed regular expression (#14600)
|
||||||
|
* [soundgasm] Improve extraction (#14588)
|
||||||
|
- [myvideo] Remove extractor (#8557)
|
||||||
|
+ [nbc] Add support for classic-tv videos (#14575)
|
||||||
|
+ [vrtnu] Add support for cookies authentication and simplify (#11873)
|
||||||
|
+ [canvas] Add support for vrt.be/vrtnu (#11873)
|
||||||
|
* [twitch:clips] Fix title extraction (#14566)
|
||||||
|
+ [ndtv] Add support for sub-sites (#14534)
|
||||||
|
* [dramafever] Fix login error message extraction
|
||||||
|
+ [nick] Add support for more nickelodeon sites (no, dk, se, ch, fr, es, pt,
|
||||||
|
ro, hu) (#14553)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.10.20
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [downloader/fragment] Report warning instead of error on inconsistent
|
||||||
|
download state
|
||||||
|
* [downloader/hls] Fix total fragments count when ad fragments exist
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [parliamentliveuk] Fix extraction (#14524)
|
||||||
|
* [soundcloud] Update client id (#14546)
|
||||||
|
+ [servus] Add support for servus.com (#14362)
|
||||||
|
+ [unity] Add support for unity3d.com (#14528)
|
||||||
|
* [youtube] Replace youtube redirect URLs in description (#14517)
|
||||||
|
* [pbs] Restrict direct video URL regular expression (#14519)
|
||||||
|
* [drtv] Respect preference for direct HTTP formats (#14509)
|
||||||
|
+ [eporner] Add support for embed URLs (#14507)
|
||||||
|
* [arte] Capture and output error message
|
||||||
|
* [niconico] Improve uploader metadata extraction robustness (#14135)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.10.15.1
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [downloader/hls] Ignore anvato ad fragments (#14496)
|
||||||
|
* [downloader/fragment] Output ad fragment count
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [scrippsnetworks:watch] Bypass geo restriction
|
||||||
|
+ [anvato] Add ability to bypass geo restriction
|
||||||
|
* [redditr] Fix extraction for URLs with query (#14495)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.10.15
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [common] Add support for jwplayer youtube embeds
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [scrippsnetworks:watch] Fix extraction (#14389)
|
||||||
|
* [anvato] Process master m3u8 manifests
|
||||||
|
* [youtube] Fix relative URLs in description
|
||||||
|
* [spike] Bypass geo restriction
|
||||||
|
+ [howstuffworks] Add support for more domains
|
||||||
|
* [infoq] Fix http format downloading
|
||||||
|
+ [rtlnl] Add support for another type of embeds
|
||||||
|
+ [onionstudios] Add support for bulbs-video embeds
|
||||||
|
* [udn] Fix extraction
|
||||||
|
* [shahid] Fix extraction (#14448)
|
||||||
|
* [kaltura] Ignore Widevine encrypted video (.wvm) (#14471)
|
||||||
|
* [vh1] Fix extraction (#9613)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.10.12
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [YoutubeDL] Improve _default_format_spec (#14461)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [steam] Fix extraction (#14067)
|
||||||
|
+ [funk] Add support for funk.net (#14464)
|
||||||
|
+ [nexx] Add support for shortcuts and relax domain id extraction
|
||||||
|
+ [voxmedia] Add support for recode.net (#14173)
|
||||||
|
+ [once] Add support for vmap URLs
|
||||||
|
+ [generic] Add support for channel9 embeds (#14469)
|
||||||
|
* [tva] Fix extraction (#14328)
|
||||||
|
+ [tubitv] Add support for new URL format (#14460)
|
||||||
|
- [afreecatv:global] Remove extractor
|
||||||
|
- [youtube:shared] Remove extractor (#14420)
|
||||||
|
+ [slideslive] Add support for slideslive.com (#2680)
|
||||||
|
+ [facebook] Support thumbnails (#14416)
|
||||||
|
* [vvvvid] Fix episode number extraction (#14456)
|
||||||
|
* [hrti:playlist] Relax URL regular expression
|
||||||
|
* [wdr] Relax media link regular expression (#14447)
|
||||||
|
* [hrti] Relax URL regular expression (#14443)
|
||||||
|
* [fox] Delegate extraction to uplynk:preplay (#14147)
|
||||||
|
+ [youtube] Add support for hooktube.com (#14437)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.10.07
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [YoutubeDL] Ignore duplicates in --playlist-items
|
||||||
|
* [YoutubeDL] Fix out of range --playlist-items for iterable playlists and
|
||||||
|
reduce code duplication (#14425)
|
||||||
|
+ [utils] Use cache in OnDemandPagedList by default
|
||||||
|
* [postprocessor/ffmpeg] Convert to opus using libopus (#14381)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [reddit] Sort formats (#14430)
|
||||||
|
* [lnkgo] Relax URL regular expression (#14423)
|
||||||
|
* [pornflip] Extend URL regular expression (#14405, #14406)
|
||||||
|
+ [xtube] Add support for embed URLs (#14417)
|
||||||
|
+ [xvideos] Add support for embed URLs and improve extraction (#14409)
|
||||||
|
* [beeg] Fix extraction (#14403)
|
||||||
|
* [tvn24] Relax URL regular expression (#14395)
|
||||||
|
* [nbc] Fix extraction (#13651, #13715, #14137, #14198, #14312, #14314, #14378,
|
||||||
|
#14392, #14414, #14419, #14431)
|
||||||
|
+ [ketnet] Add support for videos without direct sources (#14377)
|
||||||
|
* [canvas] Generalize mediazone.vrt.be extractor and rework canvas and een
|
||||||
|
+ [afreecatv] Add support for adult videos (#14376)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.10.01
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [YoutubeDL] Document youtube_include_dash_manifest
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [tvp] Add support for new URL schema (#14368)
|
||||||
|
+ [generic] Add support for single format Video.js embeds (#14371)
|
||||||
|
* [yahoo] Bypass geo restriction for brightcove (#14210)
|
||||||
|
* [yahoo] Use extracted brightcove account id (#14210)
|
||||||
|
* [rtve:alacarta] Fix extraction (#14290)
|
||||||
|
+ [yahoo] Add support for custom brigthcove embeds (#14210)
|
||||||
|
+ [generic] Add support for Video.js embeds
|
||||||
|
+ [gfycat] Add support for /gifs/detail URLs (#14322)
|
||||||
|
* [generic] Fix infinite recursion for twitter:player URLs (#14339)
|
||||||
|
* [xhamsterembed] Fix extraction (#14308)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.09.24
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [options] Accept lrc as a subtitle conversion target format (#14292)
|
||||||
|
* [utils] Fix handling raw TTML subtitles (#14191)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [24video] Fix timestamp extraction and make non fatal (#14295)
|
||||||
|
+ [24video] Add support for 24video.adult (#14295)
|
||||||
|
+ [kakao] Add support for tv.kakao.com (#12298, #14007)
|
||||||
|
+ [twitter] Add support for URLs without user id (#14270)
|
||||||
|
+ [americastestkitchen] Add support for americastestkitchen.com (#10764,
|
||||||
|
#13996)
|
||||||
|
* [generic] Fix support for multiple HTML5 videos on one page (#14080)
|
||||||
|
* [mixcloud] Fix extraction (#14088, #14132)
|
||||||
|
+ [lynda] Add support for educourse.ga (#14286)
|
||||||
|
* [beeg] Fix extraction (#14275)
|
||||||
|
* [nbcsports:vplayer] Correct theplatform URL (#13873)
|
||||||
|
* [twitter] Fix duration extraction (#14141)
|
||||||
|
* [tvplay] Bypass geo restriction
|
||||||
|
+ [heise] Add support for YouTube embeds (#14109)
|
||||||
|
+ [popcorntv] Add support for popcorntv.it (#5914, #14211)
|
||||||
|
* [viki] Update app data (#14181)
|
||||||
|
* [morningstar] Relax URL regular expression (#14222)
|
||||||
|
* [openload] Fix extraction (#14225, #14257)
|
||||||
|
* [noovo] Fix extraction (#14214)
|
||||||
|
* [dailymotion:playlist] Relax URL regular expression (#14219)
|
||||||
|
+ [twitch] Add support for go.twitch.tv URLs (#14215)
|
||||||
|
* [vgtv] Relax URL regular expression (#14223)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.09.15
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [downloader/fragment] Restart inconsistent incomplete fragment downloads
|
||||||
|
(#13731)
|
||||||
|
* [YoutubeDL] Download raw subtitles files (#12909, #14191)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [condenast] Fix extraction (#14196, #14207)
|
||||||
|
+ [orf] Add support for f4m stories
|
||||||
|
* [tv4] Relax URL regular expression (#14206)
|
||||||
|
* [animeondemand] Bypass geo restriction
|
||||||
|
+ [animeondemand] Add support for flash videos (#9944)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.09.11
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [rutube:playlist] Fix suitable (#14166)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.09.10
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [utils] Introduce bool_or_none
|
||||||
|
* [YoutubeDL] Ensure dir existence for each requested format (#14116)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [fox] Fix extraction (#14147)
|
||||||
|
* [rutube] Use bool_or_none
|
||||||
|
* [rutube] Rework and generalize playlist extractors (#13565)
|
||||||
|
+ [rutube:playlist] Add support for playlists (#13534, #13565)
|
||||||
|
+ [radiocanada] Add fallback for title extraction (#14145)
|
||||||
|
* [vk] Use dedicated YouTube embeds extraction routine
|
||||||
|
* [vice] Use dedicated YouTube embeds extraction routine
|
||||||
|
* [cracked] Use dedicated YouTube embeds extraction routine
|
||||||
|
* [chilloutzone] Use dedicated YouTube embeds extraction routine
|
||||||
|
* [abcnews] Use dedicated YouTube embeds extraction routine
|
||||||
|
* [youtube] Separate methods for embeds extraction
|
||||||
|
* [redtube] Fix formats extraction (#14122)
|
||||||
|
* [arte] Relax unavailability check (#14112)
|
||||||
|
+ [manyvids] Add support for preview videos from manyvids.com (#14053, #14059)
|
||||||
|
* [vidme:user] Relax URL regular expression (#14054)
|
||||||
|
* [bpb] Fix extraction (#14043, #14086)
|
||||||
|
* [soundcloud] Fix download URL with private tracks (#14093)
|
||||||
|
* [aliexpress:live] Add support for live.aliexpress.com (#13698, #13707)
|
||||||
|
* [viidea] Capture and output lecture error message (#14099)
|
||||||
|
* [radiocanada] Skip unsupported platforms (#14100)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.09.02
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [youtube] Force old layout for each webpage (#14068, #14072, #14074, #14076,
|
||||||
|
#14077, #14079, #14082, #14083, #14094, #14095, #14096)
|
||||||
|
* [youtube] Fix upload date extraction (#14065)
|
||||||
|
+ [charlierose] Add support for episodes (#14062)
|
||||||
|
+ [bbccouk] Add support for w-prefixed ids (#14056)
|
||||||
|
* [googledrive] Extend URL regular expression (#9785)
|
||||||
|
+ [googledrive] Add support for source format (#14046)
|
||||||
|
* [pornhd] Fix extraction (#14005)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.08.27.1
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
|
||||||
|
* [youtube] Fix extraction with --youtube-skip-dash-manifest enabled (#14037)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.08.27
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [extractor/common] Extract height and format id for HTML5 videos (#14034)
|
||||||
|
* [downloader/http] Rework HTTP downloader (#506, #809, #2849, #4240, #6023,
|
||||||
|
#8625, #9483)
|
||||||
|
* Simplify code and split into separate routines to facilitate maintaining
|
||||||
|
* Make retry mechanism work on errors during actual download not only
|
||||||
|
during connection establishment phase
|
||||||
|
* Retry on ECONNRESET and ETIMEDOUT during reading data from network
|
||||||
|
* Retry on content too short
|
||||||
|
* Show error description on retry
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [generic] Lower preference for extraction from LD-JSON
|
||||||
|
* [rai] Fix audio formats extraction (#14024)
|
||||||
|
* [youtube] Fix controversy videos extraction (#14027, #14029)
|
||||||
|
* [mixcloud] Fix extraction (#14015, #14020)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.08.23
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [extractor/common] Introduce _parse_xml
|
||||||
|
* [extractor/common] Make HLS and DASH extraction in _parse_html5_media_entries
|
||||||
|
non fatal (#13970)
|
||||||
|
* [utils] Fix unescapeHTML for misformed string like "&a"" (#13935)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [cbc:watch] Bypass geo restriction (#13993)
|
||||||
|
* [toutv] Relax DRM check (#13994)
|
||||||
|
+ [googledrive] Add support for subtitles (#13619, #13638)
|
||||||
|
* [pornhub] Relax uploader regular expression (#13906, #13975)
|
||||||
|
* [bandcamp:album] Extract track titles (#13962)
|
||||||
|
+ [bbccouk] Add support for events URLs (#13893)
|
||||||
|
+ [liveleak] Support multi-video pages (#6542)
|
||||||
|
+ [liveleak] Support another liveleak embedding pattern (#13336)
|
||||||
|
* [cda] Fix extraction (#13935)
|
||||||
|
+ [laola1tv] Add support for tv.ittf.com (#13965)
|
||||||
|
* [mixcloud] Fix extraction (#13958, #13974, #13980, #14003)
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.08.18
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [YoutubeDL] Sanitize byte string format URLs (#13951)
|
||||||
|
+ [extractor/common] Add support for float durations in _parse_mpd_formats
|
||||||
|
(#13919)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [arte] Detect unavailable videos (#13945)
|
||||||
|
* [generic] Convert redirect URLs to unicode strings (#13951)
|
||||||
|
* [udemy] Fix paid course detection (#13943)
|
||||||
|
* [pluralsight] Use RPC API for course extraction (#13937)
|
||||||
|
+ [clippit] Add support for clippituser.tv
|
||||||
|
+ [qqmusic] Support new URL schemes (#13805)
|
||||||
|
* [periscope] Renew HLS extraction (#13917)
|
||||||
|
* [mixcloud] Extract decrypt key
|
||||||
|
|
||||||
|
|
||||||
|
version 2017.08.13
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [YoutubeDL] Make sure format id is not empty
|
||||||
|
* [extractor/common] Make _family_friendly_search optional
|
||||||
|
* [extractor/common] Respect source's type attribute for HTML5 media (#13892)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [pornhub:playlistbase] Skip videos from drop-down menu (#12819, #13902)
|
||||||
|
+ [fourtube] Add support pornerbros.com (#6022)
|
||||||
|
+ [fourtube] Add support porntube.com (#7859, #13901)
|
||||||
|
+ [fourtube] Add support fux.com
|
||||||
|
* [limelight] Improve embeds detection (#13895)
|
||||||
|
+ [reddit] Add support for v.redd.it and reddit.com (#13847)
|
||||||
|
* [aparat] Extract all formats (#13887)
|
||||||
|
* [mixcloud] Fix play info decryption (#13885)
|
||||||
|
+ [generic] Add support for vzaar embeds (#13876)
|
||||||
|
|
||||||
|
|
||||||
version 2017.08.09
|
version 2017.08.09
|
||||||
|
|
||||||
Core
|
Core
|
||||||
@ -81,7 +1021,7 @@ Extractors
|
|||||||
* [youku:show] Fix playlist extraction (#13248)
|
* [youku:show] Fix playlist extraction (#13248)
|
||||||
+ [dispeak] Recognize sevt subdomain (#13276)
|
+ [dispeak] Recognize sevt subdomain (#13276)
|
||||||
* [adn] Improve error reporting (#13663)
|
* [adn] Improve error reporting (#13663)
|
||||||
* [crunchyroll] Relax series and season regex (#13659)
|
* [crunchyroll] Relax series and season regular expression (#13659)
|
||||||
+ [spiegel:article] Add support for nexx iframe embeds (#13029)
|
+ [spiegel:article] Add support for nexx iframe embeds (#13029)
|
||||||
+ [nexx:embed] Add support for iframe embeds
|
+ [nexx:embed] Add support for iframe embeds
|
||||||
* [nexx] Improve JS embed extraction
|
* [nexx] Improve JS embed extraction
|
||||||
@ -554,7 +1494,7 @@ version 2017.04.14
|
|||||||
|
|
||||||
Core
|
Core
|
||||||
+ [downloader/hls] Add basic support for EXT-X-BYTERANGE tag (#10955)
|
+ [downloader/hls] Add basic support for EXT-X-BYTERANGE tag (#10955)
|
||||||
+ [adobepass] Improve Comcast and Verison login code (#10803)
|
+ [adobepass] Improve Comcast and Verizon login code (#10803)
|
||||||
+ [adobepass] Add support for Verizon (#10803)
|
+ [adobepass] Add support for Verizon (#10803)
|
||||||
|
|
||||||
Extractors
|
Extractors
|
||||||
|
@ -1,7 +1,9 @@
|
|||||||
include README.md
|
include README.md
|
||||||
include test/*.py
|
include LICENSE
|
||||||
include test/*.json
|
include AUTHORS
|
||||||
|
include ChangeLog
|
||||||
include youtube-dl.bash-completion
|
include youtube-dl.bash-completion
|
||||||
include youtube-dl.fish
|
include youtube-dl.fish
|
||||||
include youtube-dl.1
|
include youtube-dl.1
|
||||||
recursive-include docs Makefile conf.py *.rst
|
recursive-include docs Makefile conf.py *.rst
|
||||||
|
recursive-include test *
|
||||||
|
36
Makefile
36
Makefile
@ -14,6 +14,9 @@ PYTHON ?= /usr/bin/env python
|
|||||||
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
|
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
|
||||||
SYSCONFDIR = $(shell if [ $(PREFIX) = /usr -o $(PREFIX) = /usr/local ]; then echo /etc; else echo $(PREFIX)/etc; fi)
|
SYSCONFDIR = $(shell if [ $(PREFIX) = /usr -o $(PREFIX) = /usr/local ]; then echo /etc; else echo $(PREFIX)/etc; fi)
|
||||||
|
|
||||||
|
# set markdown input format to "markdown-smart" for pandoc version 2 and to "markdown" for pandoc prior to version 2
|
||||||
|
MARKDOWN = $(shell if [ `pandoc -v | head -n1 | cut -d" " -f2 | head -c1` = "2" ]; then echo markdown-smart; else echo markdown; fi)
|
||||||
|
|
||||||
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
||||||
install -d $(DESTDIR)$(BINDIR)
|
install -d $(DESTDIR)$(BINDIR)
|
||||||
install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
|
install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
|
||||||
@ -36,8 +39,17 @@ test:
|
|||||||
|
|
||||||
ot: offlinetest
|
ot: offlinetest
|
||||||
|
|
||||||
|
# Keep this list in sync with devscripts/run_tests.sh
|
||||||
offlinetest: codetest
|
offlinetest: codetest
|
||||||
$(PYTHON) -m nose --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py --exclude test_iqiyi_sdk_interpreter.py --exclude test_socks.py
|
$(PYTHON) -m nose --verbose test \
|
||||||
|
--exclude test_age_restriction.py \
|
||||||
|
--exclude test_download.py \
|
||||||
|
--exclude test_iqiyi_sdk_interpreter.py \
|
||||||
|
--exclude test_socks.py \
|
||||||
|
--exclude test_subtitles.py \
|
||||||
|
--exclude test_write_annotations.py \
|
||||||
|
--exclude test_youtube_lists.py \
|
||||||
|
--exclude test_youtube_signature.py
|
||||||
|
|
||||||
tar: youtube-dl.tar.gz
|
tar: youtube-dl.tar.gz
|
||||||
|
|
||||||
@ -46,8 +58,15 @@ tar: youtube-dl.tar.gz
|
|||||||
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
|
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
|
||||||
|
|
||||||
youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
|
youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
|
||||||
zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
|
mkdir -p zip
|
||||||
zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py
|
for d in youtube_dl youtube_dl/downloader youtube_dl/extractor youtube_dl/postprocessor ; do \
|
||||||
|
mkdir -p zip/$$d ;\
|
||||||
|
cp -pPR $$d/*.py zip/$$d/ ;\
|
||||||
|
done
|
||||||
|
touch -t 200001010101 zip/youtube_dl/*.py zip/youtube_dl/*/*.py
|
||||||
|
mv zip/youtube_dl/__main__.py zip/
|
||||||
|
cd zip ; zip -q ../youtube-dl youtube_dl/*.py youtube_dl/*/*.py __main__.py
|
||||||
|
rm -rf zip
|
||||||
echo '#!$(PYTHON)' > youtube-dl
|
echo '#!$(PYTHON)' > youtube-dl
|
||||||
cat youtube-dl.zip >> youtube-dl
|
cat youtube-dl.zip >> youtube-dl
|
||||||
rm youtube-dl.zip
|
rm youtube-dl.zip
|
||||||
@ -66,11 +85,11 @@ supportedsites:
|
|||||||
$(PYTHON) devscripts/make_supportedsites.py docs/supportedsites.md
|
$(PYTHON) devscripts/make_supportedsites.py docs/supportedsites.md
|
||||||
|
|
||||||
README.txt: README.md
|
README.txt: README.md
|
||||||
pandoc -f markdown -t plain README.md -o README.txt
|
pandoc -f $(MARKDOWN) -t plain README.md -o README.txt
|
||||||
|
|
||||||
youtube-dl.1: README.md
|
youtube-dl.1: README.md
|
||||||
$(PYTHON) devscripts/prepare_manpage.py youtube-dl.1.temp.md
|
$(PYTHON) devscripts/prepare_manpage.py youtube-dl.1.temp.md
|
||||||
pandoc -s -f markdown -t man youtube-dl.1.temp.md -o youtube-dl.1
|
pandoc -s -f $(MARKDOWN) -t man youtube-dl.1.temp.md -o youtube-dl.1
|
||||||
rm -f youtube-dl.1.temp.md
|
rm -f youtube-dl.1.temp.md
|
||||||
|
|
||||||
youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
|
youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
|
||||||
@ -94,7 +113,7 @@ _EXTRACTOR_FILES = $(shell find youtube_dl/extractor -iname '*.py' -and -not -in
|
|||||||
youtube_dl/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
|
youtube_dl/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
|
||||||
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
||||||
|
|
||||||
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish ChangeLog
|
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish ChangeLog AUTHORS
|
||||||
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
|
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
|
||||||
--exclude '*.DS_Store' \
|
--exclude '*.DS_Store' \
|
||||||
--exclude '*.kate-swp' \
|
--exclude '*.kate-swp' \
|
||||||
@ -103,11 +122,10 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
|
|||||||
--exclude '*~' \
|
--exclude '*~' \
|
||||||
--exclude '__pycache__' \
|
--exclude '__pycache__' \
|
||||||
--exclude '.git' \
|
--exclude '.git' \
|
||||||
--exclude 'testdata' \
|
|
||||||
--exclude 'docs/_build' \
|
--exclude 'docs/_build' \
|
||||||
-- \
|
-- \
|
||||||
bin devscripts test youtube_dl docs \
|
bin devscripts test youtube_dl docs \
|
||||||
ChangeLog LICENSE README.md README.txt \
|
ChangeLog AUTHORS LICENSE README.md README.txt \
|
||||||
Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
|
Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
|
||||||
youtube-dl.zsh youtube-dl.fish setup.py \
|
youtube-dl.zsh youtube-dl.fish setup.py setup.cfg \
|
||||||
youtube-dl
|
youtube-dl
|
||||||
|
78
README.md
78
README.md
@ -1,3 +1,5 @@
|
|||||||
|
[![Build Status](https://travis-ci.org/rg3/youtube-dl.svg?branch=master)](https://travis-ci.org/rg3/youtube-dl)
|
||||||
|
|
||||||
youtube-dl - download videos from youtube.com or other video platforms
|
youtube-dl - download videos from youtube.com or other video platforms
|
||||||
|
|
||||||
- [INSTALLATION](#installation)
|
- [INSTALLATION](#installation)
|
||||||
@ -25,7 +27,7 @@ If you do not have curl, you can alternatively use a recent wget:
|
|||||||
sudo wget https://yt-dl.org/downloads/latest/youtube-dl -O /usr/local/bin/youtube-dl
|
sudo wget https://yt-dl.org/downloads/latest/youtube-dl -O /usr/local/bin/youtube-dl
|
||||||
sudo chmod a+rx /usr/local/bin/youtube-dl
|
sudo chmod a+rx /usr/local/bin/youtube-dl
|
||||||
|
|
||||||
Windows users can [download an .exe file](https://yt-dl.org/latest/youtube-dl.exe) and place it in any location on their [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29) except for `%SYSTEMROOT%\System32` (e.g. **do not** put in `C:\Windows\System32`).
|
Windows users can [download an .exe file](https://yt-dl.org/latest/youtube-dl.exe) and place it in any location on their [PATH](https://en.wikipedia.org/wiki/PATH_%28variable%29) except for `%SYSTEMROOT%\System32` (e.g. **do not** put in `C:\Windows\System32`).
|
||||||
|
|
||||||
You can also use pip:
|
You can also use pip:
|
||||||
|
|
||||||
@ -33,7 +35,7 @@ You can also use pip:
|
|||||||
|
|
||||||
This command will update youtube-dl if you have already installed it. See the [pypi page](https://pypi.python.org/pypi/youtube_dl) for more information.
|
This command will update youtube-dl if you have already installed it. See the [pypi page](https://pypi.python.org/pypi/youtube_dl) for more information.
|
||||||
|
|
||||||
OS X users can install youtube-dl with [Homebrew](http://brew.sh/):
|
OS X users can install youtube-dl with [Homebrew](https://brew.sh/):
|
||||||
|
|
||||||
brew install youtube-dl
|
brew install youtube-dl
|
||||||
|
|
||||||
@ -44,7 +46,7 @@ Or with [MacPorts](https://www.macports.org/):
|
|||||||
Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see the [youtube-dl Download Page](https://rg3.github.io/youtube-dl/download.html).
|
Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see the [youtube-dl Download Page](https://rg3.github.io/youtube-dl/download.html).
|
||||||
|
|
||||||
# DESCRIPTION
|
# DESCRIPTION
|
||||||
**youtube-dl** is a command-line program to download videos from YouTube.com and a few more sites. It requires the Python interpreter, version 2.6, 2.7, or 3.2+, and it is not platform specific. It should work on your Unix box, on Windows or on Mac OS X. It is released to the public domain, which means you can modify it, redistribute it or use it however you like.
|
**youtube-dl** is a command-line program to download videos from YouTube.com and a few more sites. It requires the Python interpreter, version 2.6, 2.7, or 3.2+, and it is not platform specific. It should work on your Unix box, on Windows or on macOS. It is released to the public domain, which means you can modify it, redistribute it or use it however you like.
|
||||||
|
|
||||||
youtube-dl [OPTIONS] URL [URL...]
|
youtube-dl [OPTIONS] URL [URL...]
|
||||||
|
|
||||||
@ -196,6 +198,11 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo
|
|||||||
size. By default, the buffer size is
|
size. By default, the buffer size is
|
||||||
automatically resized from an initial value
|
automatically resized from an initial value
|
||||||
of SIZE.
|
of SIZE.
|
||||||
|
--http-chunk-size SIZE Size of a chunk for chunk-based HTTP
|
||||||
|
downloading (e.g. 10485760 or 10M) (default
|
||||||
|
is disabled). May be useful for bypassing
|
||||||
|
bandwidth throttling imposed by a webserver
|
||||||
|
(experimental)
|
||||||
--playlist-reverse Download playlist videos in reverse order
|
--playlist-reverse Download playlist videos in reverse order
|
||||||
--playlist-random Download playlist videos in random order
|
--playlist-random Download playlist videos in random order
|
||||||
--xattr-set-filesize Set file xattribute ytdl.filesize with
|
--xattr-set-filesize Set file xattribute ytdl.filesize with
|
||||||
@ -216,7 +223,9 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo
|
|||||||
|
|
||||||
## Filesystem Options:
|
## Filesystem Options:
|
||||||
-a, --batch-file FILE File containing URLs to download ('-' for
|
-a, --batch-file FILE File containing URLs to download ('-' for
|
||||||
stdin)
|
stdin), one URL per line. Lines starting
|
||||||
|
with '#', ';' or ']' are considered as
|
||||||
|
comments and ignored.
|
||||||
--id Use only video ID in file name
|
--id Use only video ID in file name
|
||||||
-o, --output TEMPLATE Output filename template, see the "OUTPUT
|
-o, --output TEMPLATE Output filename template, see the "OUTPUT
|
||||||
TEMPLATE" for all the info
|
TEMPLATE" for all the info
|
||||||
@ -427,7 +436,7 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo
|
|||||||
syntax. Example: --exec 'adb push {}
|
syntax. Example: --exec 'adb push {}
|
||||||
/sdcard/Music/ && rm {}'
|
/sdcard/Music/ && rm {}'
|
||||||
--convert-subs FORMAT Convert the subtitles to other format
|
--convert-subs FORMAT Convert the subtitles to other format
|
||||||
(currently supported: srt|ass|vtt)
|
(currently supported: srt|ass|vtt|lrc)
|
||||||
|
|
||||||
# CONFIGURATION
|
# CONFIGURATION
|
||||||
|
|
||||||
@ -458,7 +467,7 @@ You can also use `--config-location` if you want to use custom configuration fil
|
|||||||
|
|
||||||
### Authentication with `.netrc` file
|
### Authentication with `.netrc` file
|
||||||
|
|
||||||
You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in the shell command history. You can achieve this using a [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on a per extractor basis. For that you will need to create a `.netrc` file in your `$HOME` and restrict permissions to read/write by only you:
|
You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in the shell command history. You can achieve this using a [`.netrc` file](https://stackoverflow.com/tags/.netrc/info) on a per extractor basis. For that you will need to create a `.netrc` file in your `$HOME` and restrict permissions to read/write by only you:
|
||||||
```
|
```
|
||||||
touch $HOME/.netrc
|
touch $HOME/.netrc
|
||||||
chmod a-rwx,u+rw $HOME/.netrc
|
chmod a-rwx,u+rw $HOME/.netrc
|
||||||
@ -485,7 +494,7 @@ The `-o` option allows users to indicate a template for the output file names.
|
|||||||
|
|
||||||
**tl;dr:** [navigate me to examples](#output-template-examples).
|
**tl;dr:** [navigate me to examples](#output-template-examples).
|
||||||
|
|
||||||
The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences may be formatted according to [python string formatting operations](https://docs.python.org/2/library/stdtypes.html#string-formatting). For example, `%(NAME)s` or `%(NAME)05d`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a formatting operations. Allowed names along with sequence type are:
|
The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "https://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences may be formatted according to [python string formatting operations](https://docs.python.org/2/library/stdtypes.html#string-formatting). For example, `%(NAME)s` or `%(NAME)05d`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a formatting operations. Allowed names along with sequence type are:
|
||||||
|
|
||||||
- `id` (string): Video identifier
|
- `id` (string): Video identifier
|
||||||
- `title` (string): Video title
|
- `title` (string): Video title
|
||||||
@ -509,6 +518,9 @@ The basic usage is not to set any template arguments when downloading a single f
|
|||||||
- `average_rating` (numeric): Average rating give by users, the scale used depends on the webpage
|
- `average_rating` (numeric): Average rating give by users, the scale used depends on the webpage
|
||||||
- `comment_count` (numeric): Number of comments on the video
|
- `comment_count` (numeric): Number of comments on the video
|
||||||
- `age_limit` (numeric): Age restriction for the video (years)
|
- `age_limit` (numeric): Age restriction for the video (years)
|
||||||
|
- `is_live` (boolean): Whether this video is a live stream or a fixed-length video
|
||||||
|
- `start_time` (numeric): Time in seconds where the reproduction should start, as specified in the URL
|
||||||
|
- `end_time` (numeric): Time in seconds where the reproduction should end, as specified in the URL
|
||||||
- `format` (string): A human-readable description of the format
|
- `format` (string): A human-readable description of the format
|
||||||
- `format_id` (string): Format code specified by `--format`
|
- `format_id` (string): Format code specified by `--format`
|
||||||
- `format_note` (string): Additional info about the format
|
- `format_note` (string): Additional info about the format
|
||||||
@ -534,6 +546,8 @@ The basic usage is not to set any template arguments when downloading a single f
|
|||||||
- `playlist_index` (numeric): Index of the video in the playlist padded with leading zeros according to the total length of the playlist
|
- `playlist_index` (numeric): Index of the video in the playlist padded with leading zeros according to the total length of the playlist
|
||||||
- `playlist_id` (string): Playlist identifier
|
- `playlist_id` (string): Playlist identifier
|
||||||
- `playlist_title` (string): Playlist title
|
- `playlist_title` (string): Playlist title
|
||||||
|
- `playlist_uploader` (string): Full name of the playlist uploader
|
||||||
|
- `playlist_uploader_id` (string): Nickname or id of the playlist uploader
|
||||||
|
|
||||||
Available for the video that belongs to some logical chapter or section:
|
Available for the video that belongs to some logical chapter or section:
|
||||||
|
|
||||||
@ -603,7 +617,7 @@ $ youtube-dl -o '%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)
|
|||||||
$ youtube-dl -u user -p password -o '~/MyVideos/%(playlist)s/%(chapter_number)s - %(chapter)s/%(title)s.%(ext)s' https://www.udemy.com/java-tutorial/
|
$ youtube-dl -u user -p password -o '~/MyVideos/%(playlist)s/%(chapter_number)s - %(chapter)s/%(title)s.%(ext)s' https://www.udemy.com/java-tutorial/
|
||||||
|
|
||||||
# Download entire series season keeping each series and each season in separate directory under C:/MyVideos
|
# Download entire series season keeping each series and each season in separate directory under C:/MyVideos
|
||||||
$ youtube-dl -o "C:/MyVideos/%(series)s/%(season_number)s - %(season)s/%(episode_number)s - %(episode)s.%(ext)s" http://videomore.ru/kino_v_detalayah/5_sezon/367617
|
$ youtube-dl -o "C:/MyVideos/%(series)s/%(season_number)s - %(season)s/%(episode_number)s - %(episode)s.%(ext)s" https://videomore.ru/kino_v_detalayah/5_sezon/367617
|
||||||
|
|
||||||
# Stream the video being downloaded to stdout
|
# Stream the video being downloaded to stdout
|
||||||
$ youtube-dl -o - BaW_jenozKc
|
$ youtube-dl -o - BaW_jenozKc
|
||||||
@ -716,17 +730,17 @@ $ youtube-dl --dateafter 20000101 --datebefore 20091231
|
|||||||
|
|
||||||
### How do I update youtube-dl?
|
### How do I update youtube-dl?
|
||||||
|
|
||||||
If you've followed [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).
|
If you've followed [our manual installation instructions](https://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).
|
||||||
|
|
||||||
If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
|
If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
|
||||||
|
|
||||||
If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
|
If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to https://yt-dl.org to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
|
||||||
|
|
||||||
As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
|
As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
|
||||||
|
|
||||||
sudo apt-get remove -y youtube-dl
|
sudo apt-get remove -y youtube-dl
|
||||||
|
|
||||||
Afterwards, simply follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html):
|
Afterwards, simply follow [our manual installation instructions](https://rg3.github.io/youtube-dl/download.html):
|
||||||
|
|
||||||
```
|
```
|
||||||
sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
|
sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
|
||||||
@ -766,11 +780,11 @@ Apparently YouTube requires you to pass a CAPTCHA test if you download too much.
|
|||||||
|
|
||||||
youtube-dl works fine on its own on most sites. However, if you want to convert video/audio, you'll need [avconv](https://libav.org/) or [ffmpeg](https://www.ffmpeg.org/). On some sites - most notably YouTube - videos can be retrieved in a higher quality format without sound. youtube-dl will detect whether avconv/ffmpeg is present and automatically pick the best option.
|
youtube-dl works fine on its own on most sites. However, if you want to convert video/audio, you'll need [avconv](https://libav.org/) or [ffmpeg](https://www.ffmpeg.org/). On some sites - most notably YouTube - videos can be retrieved in a higher quality format without sound. youtube-dl will detect whether avconv/ffmpeg is present and automatically pick the best option.
|
||||||
|
|
||||||
Videos or video formats streamed via RTMP protocol can only be downloaded when [rtmpdump](https://rtmpdump.mplayerhq.hu/) is installed. Downloading MMS and RTSP videos requires either [mplayer](http://mplayerhq.hu/) or [mpv](https://mpv.io/) to be installed.
|
Videos or video formats streamed via RTMP protocol can only be downloaded when [rtmpdump](https://rtmpdump.mplayerhq.hu/) is installed. Downloading MMS and RTSP videos requires either [mplayer](https://mplayerhq.hu/) or [mpv](https://mpv.io/) to be installed.
|
||||||
|
|
||||||
### I have downloaded a video but how can I play it?
|
### I have downloaded a video but how can I play it?
|
||||||
|
|
||||||
Once the video is fully downloaded, use any video player, such as [mpv](https://mpv.io/), [vlc](http://www.videolan.org/) or [mplayer](http://www.mplayerhq.hu/).
|
Once the video is fully downloaded, use any video player, such as [mpv](https://mpv.io/), [vlc](https://www.videolan.org/) or [mplayer](https://www.mplayerhq.hu/).
|
||||||
|
|
||||||
### I extracted a video URL with `-g`, but it does not play on another machine / in my web browser.
|
### I extracted a video URL with `-g`, but it does not play on another machine / in my web browser.
|
||||||
|
|
||||||
@ -845,10 +859,10 @@ Use the `-o` to specify an [output template](#output-template), for example `-o
|
|||||||
|
|
||||||
### How do I download a video starting with a `-`?
|
### How do I download a video starting with a `-`?
|
||||||
|
|
||||||
Either prepend `http://www.youtube.com/watch?v=` or separate the ID from the options with `--`:
|
Either prepend `https://www.youtube.com/watch?v=` or separate the ID from the options with `--`:
|
||||||
|
|
||||||
youtube-dl -- -wNyEUrxzFU
|
youtube-dl -- -wNyEUrxzFU
|
||||||
youtube-dl "http://www.youtube.com/watch?v=-wNyEUrxzFU"
|
youtube-dl "https://www.youtube.com/watch?v=-wNyEUrxzFU"
|
||||||
|
|
||||||
### How do I pass cookies to youtube-dl?
|
### How do I pass cookies to youtube-dl?
|
||||||
|
|
||||||
@ -856,15 +870,15 @@ Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`.
|
|||||||
|
|
||||||
In order to extract cookies from browser use any conforming browser extension for exporting cookies. For example, [cookies.txt](https://chrome.google.com/webstore/detail/cookiestxt/njabckikapfpffapmjgojcnbfjonfjfg) (for Chrome) or [Export Cookies](https://addons.mozilla.org/en-US/firefox/addon/export-cookies/) (for Firefox).
|
In order to extract cookies from browser use any conforming browser extension for exporting cookies. For example, [cookies.txt](https://chrome.google.com/webstore/detail/cookiestxt/njabckikapfpffapmjgojcnbfjonfjfg) (for Chrome) or [Export Cookies](https://addons.mozilla.org/en-US/firefox/addon/export-cookies/) (for Firefox).
|
||||||
|
|
||||||
Note that the cookies file must be in Mozilla/Netscape format and the first line of the cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have correct [newline format](https://en.wikipedia.org/wiki/Newline) in the cookies file and convert newlines if necessary to correspond with your OS, namely `CRLF` (`\r\n`) for Windows and `LF` (`\n`) for Unix and Unix-like systems (Linux, Mac OS, etc.). `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of invalid newline format.
|
Note that the cookies file must be in Mozilla/Netscape format and the first line of the cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have correct [newline format](https://en.wikipedia.org/wiki/Newline) in the cookies file and convert newlines if necessary to correspond with your OS, namely `CRLF` (`\r\n`) for Windows and `LF` (`\n`) for Unix and Unix-like systems (Linux, macOS, etc.). `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of invalid newline format.
|
||||||
|
|
||||||
Passing cookies to youtube-dl is a good way to workaround login when a particular extractor does not implement it explicitly. Another use case is working around [CAPTCHA](https://en.wikipedia.org/wiki/CAPTCHA) some websites require you to solve in particular cases in order to get access (e.g. YouTube, CloudFlare).
|
Passing cookies to youtube-dl is a good way to workaround login when a particular extractor does not implement it explicitly. Another use case is working around [CAPTCHA](https://en.wikipedia.org/wiki/CAPTCHA) some websites require you to solve in particular cases in order to get access (e.g. YouTube, CloudFlare).
|
||||||
|
|
||||||
### How do I stream directly to media player?
|
### How do I stream directly to media player?
|
||||||
|
|
||||||
You will first need to tell youtube-dl to stream media to stdout with `-o -`, and also tell your media player to read from stdin (it must be capable of this for streaming) and then pipe former to latter. For example, streaming to [vlc](http://www.videolan.org/) can be achieved with:
|
You will first need to tell youtube-dl to stream media to stdout with `-o -`, and also tell your media player to read from stdin (it must be capable of this for streaming) and then pipe former to latter. For example, streaming to [vlc](https://www.videolan.org/) can be achieved with:
|
||||||
|
|
||||||
youtube-dl -o - "http://www.youtube.com/watch?v=BaW_jenozKcj" | vlc -
|
youtube-dl -o - "https://www.youtube.com/watch?v=BaW_jenozKcj" | vlc -
|
||||||
|
|
||||||
### How do I download only new videos from a playlist?
|
### How do I download only new videos from a playlist?
|
||||||
|
|
||||||
@ -884,7 +898,7 @@ When youtube-dl detects an HLS video, it can download it either with the built-i
|
|||||||
|
|
||||||
When youtube-dl knows that one particular downloader works better for a given website, that downloader will be picked. Otherwise, youtube-dl will pick the best downloader for general compatibility, which at the moment happens to be ffmpeg. This choice may change in future versions of youtube-dl, with improvements of the built-in downloader and/or ffmpeg.
|
When youtube-dl knows that one particular downloader works better for a given website, that downloader will be picked. Otherwise, youtube-dl will pick the best downloader for general compatibility, which at the moment happens to be ffmpeg. This choice may change in future versions of youtube-dl, with improvements of the built-in downloader and/or ffmpeg.
|
||||||
|
|
||||||
In particular, the generic extractor (used when your website is not in the [list of supported sites by youtube-dl](http://rg3.github.io/youtube-dl/supportedsites.html) cannot mandate one specific downloader.
|
In particular, the generic extractor (used when your website is not in the [list of supported sites by youtube-dl](https://rg3.github.io/youtube-dl/supportedsites.html) cannot mandate one specific downloader.
|
||||||
|
|
||||||
If you put either `--hls-prefer-native` or `--hls-prefer-ffmpeg` into your configuration, a different subset of videos will fail to download correctly. Instead, it is much better to [file an issue](https://yt-dl.org/bug) or a pull request which details why the native or the ffmpeg HLS downloader is a better choice for your use case.
|
If you put either `--hls-prefer-native` or `--hls-prefer-ffmpeg` into your configuration, a different subset of videos will fail to download correctly. Instead, it is much better to [file an issue](https://yt-dl.org/bug) or a pull request which details why the native or the ffmpeg HLS downloader is a better choice for your use case.
|
||||||
|
|
||||||
@ -910,7 +924,7 @@ Feel free to bump the issue from time to time by writing a small comment ("Issue
|
|||||||
|
|
||||||
### How can I detect whether a given URL is supported by youtube-dl?
|
### How can I detect whether a given URL is supported by youtube-dl?
|
||||||
|
|
||||||
For one, have a look at the [list of supported sites](docs/supportedsites.md). Note that it can sometimes happen that the site changes its URL scheme (say, from http://example.com/video/1234567 to http://example.com/v/1234567 ) and youtube-dl reports an URL of a service in that list as unsupported. In that case, simply report a bug.
|
For one, have a look at the [list of supported sites](docs/supportedsites.md). Note that it can sometimes happen that the site changes its URL scheme (say, from https://example.com/video/1234567 to https://example.com/v/1234567 ) and youtube-dl reports an URL of a service in that list as unsupported. In that case, simply report a bug.
|
||||||
|
|
||||||
It is *not* possible to detect whether a URL is supported or not. That's because youtube-dl contains a generic extractor which matches **all** URLs. You may be tempted to disable, exclude, or remove the generic extractor, but the generic extractor not only allows users to extract videos from lots of websites that embed a video from another service, but may also be used to extract video from a service that it's hosting itself. Therefore, we neither recommend nor support disabling, excluding, or removing the generic extractor.
|
It is *not* possible to detect whether a URL is supported or not. That's because youtube-dl contains a generic extractor which matches **all** URLs. You may be tempted to disable, exclude, or remove the generic extractor, but the generic extractor not only allows users to extract videos from lots of websites that embed a video from another service, but may also be used to extract video from a service that it's hosting itself. Therefore, we neither recommend nor support disabling, excluding, or removing the generic extractor.
|
||||||
|
|
||||||
@ -924,7 +938,7 @@ youtube-dl is an open-source project manned by too few volunteers, so we'd rathe
|
|||||||
|
|
||||||
# DEVELOPER INSTRUCTIONS
|
# DEVELOPER INSTRUCTIONS
|
||||||
|
|
||||||
Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution.
|
Most users do not need to build youtube-dl and can [download the builds](https://rg3.github.io/youtube-dl/download.html) or get them from their distribution.
|
||||||
|
|
||||||
To run youtube-dl as a developer, you don't need to build anything either. Simply execute
|
To run youtube-dl as a developer, you don't need to build anything either. Simply execute
|
||||||
|
|
||||||
@ -936,6 +950,8 @@ To run the test, simply invoke your favorite test runner, or execute a test file
|
|||||||
python test/test_download.py
|
python test/test_download.py
|
||||||
nosetests
|
nosetests
|
||||||
|
|
||||||
|
See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
|
||||||
|
|
||||||
If you want to create a build of youtube-dl yourself, you'll need
|
If you want to create a build of youtube-dl yourself, you'll need
|
||||||
|
|
||||||
* python
|
* python
|
||||||
@ -972,7 +988,7 @@ After you have ensured this site is distributing its content legally, you can fo
|
|||||||
class YourExtractorIE(InfoExtractor):
|
class YourExtractorIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
|
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://yourextractor.com/watch/42',
|
'url': 'https://yourextractor.com/watch/42',
|
||||||
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
|
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '42',
|
'id': '42',
|
||||||
@ -1003,10 +1019,10 @@ After you have ensured this site is distributing its content legally, you can fo
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
||||||
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
|
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
|
||||||
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L74-L252). Add tests and code for as many as you want.
|
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L74-L252). Add tests and code for as many as you want.
|
||||||
8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://pypi.python.org/pypi/flake8). Also make sure your code works under all [Python](http://www.python.org/) versions claimed supported by youtube-dl, namely 2.6, 2.7, and 3.2+.
|
8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://pypi.python.org/pypi/flake8). Also make sure your code works under all [Python](https://www.python.org/) versions claimed supported by youtube-dl, namely 2.6, 2.7, and 3.2+.
|
||||||
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
9. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files and [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
|
||||||
|
|
||||||
$ git add youtube_dl/extractor/extractors.py
|
$ git add youtube_dl/extractor/extractors.py
|
||||||
$ git add youtube_dl/extractor/yourextractor.py
|
$ git add youtube_dl/extractor/yourextractor.py
|
||||||
@ -1162,10 +1178,10 @@ import youtube_dl
|
|||||||
|
|
||||||
ydl_opts = {}
|
ydl_opts = {}
|
||||||
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
|
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
|
||||||
ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
|
ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
|
||||||
```
|
```
|
||||||
|
|
||||||
Most likely, you'll want to use various options. For a list of options available, have a look at [`youtube_dl/YoutubeDL.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L129-L279). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
|
Most likely, you'll want to use various options. For a list of options available, have a look at [`youtube_dl/YoutubeDL.py`](https://github.com/rg3/youtube-dl/blob/3e4cedf9e8cd3157df2457df7274d0c842421945/youtube_dl/YoutubeDL.py#L137-L312). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
|
||||||
|
|
||||||
Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:
|
Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:
|
||||||
|
|
||||||
@ -1201,19 +1217,19 @@ ydl_opts = {
|
|||||||
'progress_hooks': [my_hook],
|
'progress_hooks': [my_hook],
|
||||||
}
|
}
|
||||||
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
|
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
|
||||||
ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
|
ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
|
||||||
```
|
```
|
||||||
|
|
||||||
# BUGS
|
# BUGS
|
||||||
|
|
||||||
Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted to or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](http://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
|
Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted to or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](https://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
|
||||||
|
|
||||||
**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
||||||
```
|
```
|
||||||
$ youtube-dl -v <your command line>
|
$ youtube-dl -v <your command line>
|
||||||
[debug] System config: []
|
[debug] System config: []
|
||||||
[debug] User config: []
|
[debug] User config: []
|
||||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
[debug] Command-line args: [u'-v', u'https://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||||
[debug] youtube-dl version 2015.12.06
|
[debug] youtube-dl version 2015.12.06
|
||||||
[debug] Git HEAD: 135392e
|
[debug] Git HEAD: 135392e
|
||||||
@ -1244,7 +1260,7 @@ For bug reports, this means that your report should contain the *complete* outpu
|
|||||||
|
|
||||||
If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
|
If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
|
||||||
|
|
||||||
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `http://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `http://www.youtube.com/`) is *not* an example URL.
|
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `https://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `https://www.youtube.com/`) is *not* an example URL.
|
||||||
|
|
||||||
### Are you using the latest version?
|
### Are you using the latest version?
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@ import os
|
|||||||
import sys
|
import sys
|
||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
from test.helper import get_testcases
|
from test.helper import gettestcases
|
||||||
from youtube_dl.utils import compat_urllib_parse_urlparse
|
from youtube_dl.utils import compat_urllib_parse_urlparse
|
||||||
from youtube_dl.utils import compat_urllib_request
|
from youtube_dl.utils import compat_urllib_request
|
||||||
|
|
||||||
@ -24,7 +24,7 @@ if len(sys.argv) > 1:
|
|||||||
else:
|
else:
|
||||||
METHOD = 'EURISTIC'
|
METHOD = 'EURISTIC'
|
||||||
|
|
||||||
for test in get_testcases():
|
for test in gettestcases():
|
||||||
if METHOD == 'EURISTIC':
|
if METHOD == 'EURISTIC':
|
||||||
try:
|
try:
|
||||||
webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
|
webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
|
||||||
|
@ -1,27 +1,22 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import hashlib
|
|
||||||
import urllib.request
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
versions_info = json.load(open('update/versions.json'))
|
versions_info = json.load(open('update/versions.json'))
|
||||||
version = versions_info['latest']
|
version = versions_info['latest']
|
||||||
URL = versions_info['versions'][version]['bin'][0]
|
version_dict = versions_info['versions'][version]
|
||||||
|
|
||||||
data = urllib.request.urlopen(URL).read()
|
|
||||||
|
|
||||||
# Read template page
|
# Read template page
|
||||||
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
|
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
|
||||||
template = tmplf.read()
|
template = tmplf.read()
|
||||||
|
|
||||||
sha256sum = hashlib.sha256(data).hexdigest()
|
|
||||||
template = template.replace('@PROGRAM_VERSION@', version)
|
template = template.replace('@PROGRAM_VERSION@', version)
|
||||||
template = template.replace('@PROGRAM_URL@', URL)
|
template = template.replace('@PROGRAM_URL@', version_dict['bin'][0])
|
||||||
template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
|
template = template.replace('@PROGRAM_SHA256SUM@', version_dict['bin'][1])
|
||||||
template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
|
template = template.replace('@EXE_URL@', version_dict['exe'][0])
|
||||||
template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
|
template = template.replace('@EXE_SHA256SUM@', version_dict['exe'][1])
|
||||||
template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
|
template = template.replace('@TAR_URL@', version_dict['tar'][0])
|
||||||
template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
|
template = template.replace('@TAR_SHA256SUM@', version_dict['tar'][1])
|
||||||
with open('download.html', 'w', encoding='utf-8') as dlf:
|
with open('download.html', 'w', encoding='utf-8') as dlf:
|
||||||
dlf.write(template)
|
dlf.write(template)
|
||||||
|
5
devscripts/install_jython.sh
Executable file
5
devscripts/install_jython.sh
Executable file
@ -0,0 +1,5 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
wget http://central.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar
|
||||||
|
java -jar jython-installer-2.7.1.jar -s -d "$HOME/jython"
|
||||||
|
$HOME/jython/bin/jython -m pip install nose
|
@ -1,6 +1,7 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
DOWNLOAD_TESTS="age_restriction|download|subtitles|write_annotations|iqiyi_sdk_interpreter|youtube_lists"
|
# Keep this list in sync with the `offlinetest` target in Makefile
|
||||||
|
DOWNLOAD_TESTS="age_restriction|download|iqiyi_sdk_interpreter|socks|subtitles|write_annotations|youtube_lists|youtube_signature"
|
||||||
|
|
||||||
test_set=""
|
test_set=""
|
||||||
multiprocess_args=""
|
multiprocess_args=""
|
||||||
|
@ -3,8 +3,7 @@
|
|||||||
- **1up.com**
|
- **1up.com**
|
||||||
- **20min**
|
- **20min**
|
||||||
- **220.ro**
|
- **220.ro**
|
||||||
- **22tracks:genre**
|
- **23video**
|
||||||
- **22tracks:track**
|
|
||||||
- **24video**
|
- **24video**
|
||||||
- **3qsdn**: 3Q SDN
|
- **3qsdn**: 3Q SDN
|
||||||
- **3sat**
|
- **3sat**
|
||||||
@ -12,6 +11,7 @@
|
|||||||
- **56.com**
|
- **56.com**
|
||||||
- **5min**
|
- **5min**
|
||||||
- **6play**
|
- **6play**
|
||||||
|
- **7plus**
|
||||||
- **8tracks**
|
- **8tracks**
|
||||||
- **91porn**
|
- **91porn**
|
||||||
- **9c9media**
|
- **9c9media**
|
||||||
@ -36,12 +36,13 @@
|
|||||||
- **AdultSwim**
|
- **AdultSwim**
|
||||||
- **aenetworks**: A+E Networks: A&E, Lifetime, History.com, FYI Network
|
- **aenetworks**: A+E Networks: A&E, Lifetime, History.com, FYI Network
|
||||||
- **afreecatv**: afreecatv.com
|
- **afreecatv**: afreecatv.com
|
||||||
- **afreecatv:global**: afreecatv.com
|
|
||||||
- **AirMozilla**
|
- **AirMozilla**
|
||||||
|
- **AliExpressLive**
|
||||||
- **AlJazeera**
|
- **AlJazeera**
|
||||||
- **Allocine**
|
- **Allocine**
|
||||||
- **AlphaPorno**
|
- **AlphaPorno**
|
||||||
- **AMCNetworks**
|
- **AMCNetworks**
|
||||||
|
- **AmericasTestKitchen**
|
||||||
- **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
- **AnimeOnDemand**
|
- **AnimeOnDemand**
|
||||||
- **anitube.se**
|
- **anitube.se**
|
||||||
@ -113,26 +114,28 @@
|
|||||||
- **BokeCC**
|
- **BokeCC**
|
||||||
- **BostonGlobe**
|
- **BostonGlobe**
|
||||||
- **Bpb**: Bundeszentrale für politische Bildung
|
- **Bpb**: Bundeszentrale für politische Bildung
|
||||||
- **BR**: Bayerischer Rundfunk Mediathek
|
- **BR**: Bayerischer Rundfunk
|
||||||
- **BravoTV**
|
- **BravoTV**
|
||||||
- **Break**
|
- **Break**
|
||||||
- **brightcove:legacy**
|
- **brightcove:legacy**
|
||||||
- **brightcove:new**
|
- **brightcove:new**
|
||||||
|
- **BRMediathek**: Bayerischer Rundfunk Mediathek
|
||||||
- **bt:article**: Bergens Tidende Articles
|
- **bt:article**: Bergens Tidende Articles
|
||||||
- **bt:vestlendingen**: Bergens Tidende - Vestlendingen
|
- **bt:vestlendingen**: Bergens Tidende - Vestlendingen
|
||||||
- **BuzzFeed**
|
- **BuzzFeed**
|
||||||
- **BYUtv**
|
- **BYUtv**
|
||||||
- **BYUtvEvent**
|
|
||||||
- **Camdemy**
|
- **Camdemy**
|
||||||
- **CamdemyFolder**
|
- **CamdemyFolder**
|
||||||
- **CamWithHer**
|
- **CamWithHer**
|
||||||
- **canalc2.tv**
|
- **canalc2.tv**
|
||||||
- **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv
|
- **Canalplus**: mycanal.fr and piwiplus.fr
|
||||||
- **Canvas**: canvas.be and een.be
|
- **Canvas**
|
||||||
|
- **CanvasEen**: canvas.be and een.be
|
||||||
- **CarambaTV**
|
- **CarambaTV**
|
||||||
- **CarambaTVPage**
|
- **CarambaTVPage**
|
||||||
- **CartoonNetwork**
|
- **CartoonNetwork**
|
||||||
- **cbc.ca**
|
- **cbc.ca**
|
||||||
|
- **cbc.ca:olympics**
|
||||||
- **cbc.ca:player**
|
- **cbc.ca:player**
|
||||||
- **cbc.ca:watch**
|
- **cbc.ca:watch**
|
||||||
- **cbc.ca:watch:video**
|
- **cbc.ca:watch:video**
|
||||||
@ -156,6 +159,7 @@
|
|||||||
- **Cinchcast**
|
- **Cinchcast**
|
||||||
- **CJSW**
|
- **CJSW**
|
||||||
- **cliphunter**
|
- **cliphunter**
|
||||||
|
- **Clippit**
|
||||||
- **ClipRs**
|
- **ClipRs**
|
||||||
- **Clipsyndicate**
|
- **Clipsyndicate**
|
||||||
- **CloserToTruth**
|
- **CloserToTruth**
|
||||||
@ -168,7 +172,6 @@
|
|||||||
- **CNN**
|
- **CNN**
|
||||||
- **CNNArticle**
|
- **CNNArticle**
|
||||||
- **CNNBlogs**
|
- **CNNBlogs**
|
||||||
- **CollegeRama**
|
|
||||||
- **ComCarCoff**
|
- **ComCarCoff**
|
||||||
- **ComedyCentral**
|
- **ComedyCentral**
|
||||||
- **ComedyCentralFullEpisodes**
|
- **ComedyCentralFullEpisodes**
|
||||||
@ -187,7 +190,7 @@
|
|||||||
- **CSpan**: C-SPAN
|
- **CSpan**: C-SPAN
|
||||||
- **CtsNews**: 華視新聞
|
- **CtsNews**: 華視新聞
|
||||||
- **CTVNews**
|
- **CTVNews**
|
||||||
- **culturebox.francetvinfo.fr**
|
- **Culturebox**
|
||||||
- **CultureUnplugged**
|
- **CultureUnplugged**
|
||||||
- **curiositystream**
|
- **curiositystream**
|
||||||
- **curiositystream:collection**
|
- **curiositystream:collection**
|
||||||
@ -196,9 +199,8 @@
|
|||||||
- **dailymotion**
|
- **dailymotion**
|
||||||
- **dailymotion:playlist**
|
- **dailymotion:playlist**
|
||||||
- **dailymotion:user**
|
- **dailymotion:user**
|
||||||
- **DailymotionCloud**
|
- **DaisukiMotto**
|
||||||
- **Daisuki**
|
- **DaisukiMottoPlaylist**
|
||||||
- **DaisukiPlaylist**
|
|
||||||
- **daum.net**
|
- **daum.net**
|
||||||
- **daum.net:clip**
|
- **daum.net:clip**
|
||||||
- **daum.net:playlist**
|
- **daum.net:playlist**
|
||||||
@ -209,6 +211,7 @@
|
|||||||
- **defense.gouv.fr**
|
- **defense.gouv.fr**
|
||||||
- **democracynow**
|
- **democracynow**
|
||||||
- **DHM**: Filmarchiv - Deutsches Historisches Museum
|
- **DHM**: Filmarchiv - Deutsches Historisches Museum
|
||||||
|
- **Digg**
|
||||||
- **DigitallySpeaking**
|
- **DigitallySpeaking**
|
||||||
- **Digiteka**
|
- **Digiteka**
|
||||||
- **Discovery**
|
- **Discovery**
|
||||||
@ -241,8 +244,9 @@
|
|||||||
- **eHow**
|
- **eHow**
|
||||||
- **Einthusan**
|
- **Einthusan**
|
||||||
- **eitb.tv**
|
- **eitb.tv**
|
||||||
- **EllenTV**
|
- **EllenTube**
|
||||||
- **EllenTV:clips**
|
- **EllenTubePlaylist**
|
||||||
|
- **EllenTubeVideo**
|
||||||
- **ElPais**: El País
|
- **ElPais**: El País
|
||||||
- **Embedly**
|
- **Embedly**
|
||||||
- **EMPFlix**
|
- **EMPFlix**
|
||||||
@ -253,7 +257,6 @@
|
|||||||
- **ESPN**
|
- **ESPN**
|
||||||
- **ESPNArticle**
|
- **ESPNArticle**
|
||||||
- **EsriVideo**
|
- **EsriVideo**
|
||||||
- **ETOnline**
|
|
||||||
- **Europa**
|
- **Europa**
|
||||||
- **EveryonesMixtape**
|
- **EveryonesMixtape**
|
||||||
- **ExpoTV**
|
- **ExpoTV**
|
||||||
@ -265,10 +268,10 @@
|
|||||||
- **fc2**
|
- **fc2**
|
||||||
- **fc2:embed**
|
- **fc2:embed**
|
||||||
- **Fczenit**
|
- **Fczenit**
|
||||||
- **fernsehkritik.tv**
|
|
||||||
- **filmon**
|
- **filmon**
|
||||||
- **filmon:channel**
|
- **filmon:channel**
|
||||||
- **Firstpost**
|
- **Filmweb**
|
||||||
|
- **FiveThirtyEight**
|
||||||
- **FiveTV**
|
- **FiveTV**
|
||||||
- **Flickr**
|
- **Flickr**
|
||||||
- **Flipagram**
|
- **Flipagram**
|
||||||
@ -282,23 +285,27 @@
|
|||||||
- **foxnews:article**
|
- **foxnews:article**
|
||||||
- **foxnews:insider**
|
- **foxnews:insider**
|
||||||
- **FoxSports**
|
- **FoxSports**
|
||||||
- **france2.fr:generation-quoi**
|
- **france2.fr:generation-what**
|
||||||
- **FranceCulture**
|
- **FranceCulture**
|
||||||
- **FranceInter**
|
- **FranceInter**
|
||||||
- **FranceTV**
|
- **FranceTV**
|
||||||
- **FranceTVEmbed**
|
- **FranceTVEmbed**
|
||||||
- **francetvinfo.fr**
|
- **francetvinfo.fr**
|
||||||
|
- **FranceTVJeunesse**
|
||||||
|
- **FranceTVSite**
|
||||||
- **Freesound**
|
- **Freesound**
|
||||||
- **freespeech.org**
|
- **freespeech.org**
|
||||||
- **FreshLive**
|
- **FreshLive**
|
||||||
- **Funimation**
|
- **Funimation**
|
||||||
|
- **FunkChannel**
|
||||||
|
- **FunkMix**
|
||||||
- **FunnyOrDie**
|
- **FunnyOrDie**
|
||||||
- **Fusion**
|
- **Fusion**
|
||||||
|
- **Fux**
|
||||||
- **FXNetworks**
|
- **FXNetworks**
|
||||||
- **GameInformer**
|
- **GameInformer**
|
||||||
- **GameOne**
|
- **GameOne**
|
||||||
- **gameone:playlist**
|
- **gameone:playlist**
|
||||||
- **Gamersyde**
|
|
||||||
- **GameSpot**
|
- **GameSpot**
|
||||||
- **GameStar**
|
- **GameStar**
|
||||||
- **Gaskrank**
|
- **Gaskrank**
|
||||||
@ -329,6 +336,7 @@
|
|||||||
- **HentaiStigma**
|
- **HentaiStigma**
|
||||||
- **hetklokhuis**
|
- **hetklokhuis**
|
||||||
- **hgtv.com:show**
|
- **hgtv.com:show**
|
||||||
|
- **HiDive**
|
||||||
- **HistoricFilms**
|
- **HistoricFilms**
|
||||||
- **history:topic**: History.com Topic
|
- **history:topic**: History.com Topic
|
||||||
- **hitbox**
|
- **hitbox**
|
||||||
@ -337,6 +345,7 @@
|
|||||||
- **HornBunny**
|
- **HornBunny**
|
||||||
- **HotNewHipHop**
|
- **HotNewHipHop**
|
||||||
- **HotStar**
|
- **HotStar**
|
||||||
|
- **hotstar:playlist**
|
||||||
- **Howcast**
|
- **Howcast**
|
||||||
- **HowStuffWorks**
|
- **HowStuffWorks**
|
||||||
- **HRTi**
|
- **HRTi**
|
||||||
@ -357,10 +366,12 @@
|
|||||||
- **InfoQ**
|
- **InfoQ**
|
||||||
- **Instagram**
|
- **Instagram**
|
||||||
- **instagram:user**: Instagram user profile
|
- **instagram:user**: Instagram user profile
|
||||||
|
- **Internazionale**
|
||||||
- **InternetVideoArchive**
|
- **InternetVideoArchive**
|
||||||
- **IPrima**
|
- **IPrima**
|
||||||
- **iqiyi**: 爱奇艺
|
- **iqiyi**: 爱奇艺
|
||||||
- **Ir90Tv**
|
- **Ir90Tv**
|
||||||
|
- **ITTF**
|
||||||
- **ITV**
|
- **ITV**
|
||||||
- **ivi**: ivi.ru
|
- **ivi**: ivi.ru
|
||||||
- **ivi:compilation**: ivi.ru compilations
|
- **ivi:compilation**: ivi.ru compilations
|
||||||
@ -374,8 +385,8 @@
|
|||||||
- **Jove**
|
- **Jove**
|
||||||
- **jpopsuki.tv**
|
- **jpopsuki.tv**
|
||||||
- **JWPlatform**
|
- **JWPlatform**
|
||||||
|
- **Kakao**
|
||||||
- **Kaltura**
|
- **Kaltura**
|
||||||
- **Kamcord**
|
|
||||||
- **KanalPlay**: Kanal 5/9/11 Play
|
- **KanalPlay**: Kanal 5/9/11 Play
|
||||||
- **Kankan**
|
- **Kankan**
|
||||||
- **Karaoketv**
|
- **Karaoketv**
|
||||||
@ -407,6 +418,7 @@
|
|||||||
- **Lecture2Go**
|
- **Lecture2Go**
|
||||||
- **LEGO**
|
- **LEGO**
|
||||||
- **Lemonde**
|
- **Lemonde**
|
||||||
|
- **Lenta**
|
||||||
- **LePlaylist**
|
- **LePlaylist**
|
||||||
- **LetvCloud**: 乐视云
|
- **LetvCloud**: 乐视云
|
||||||
- **Libsyn**
|
- **Libsyn**
|
||||||
@ -415,8 +427,10 @@
|
|||||||
- **limelight**
|
- **limelight**
|
||||||
- **limelight:channel**
|
- **limelight:channel**
|
||||||
- **limelight:channel_list**
|
- **limelight:channel_list**
|
||||||
|
- **LineTV**
|
||||||
- **LiTV**
|
- **LiTV**
|
||||||
- **LiveLeak**
|
- **LiveLeak**
|
||||||
|
- **LiveLeakEmbed**
|
||||||
- **livestream**
|
- **livestream**
|
||||||
- **livestream:original**
|
- **livestream:original**
|
||||||
- **LnkGo**
|
- **LnkGo**
|
||||||
@ -429,15 +443,20 @@
|
|||||||
- **m6**
|
- **m6**
|
||||||
- **macgamestore**: MacGameStore trailers
|
- **macgamestore**: MacGameStore trailers
|
||||||
- **mailru**: Видео@Mail.Ru
|
- **mailru**: Видео@Mail.Ru
|
||||||
|
- **mailru:music**: Музыка@Mail.Ru
|
||||||
|
- **mailru:music:search**: Музыка@Mail.Ru
|
||||||
- **MakersChannel**
|
- **MakersChannel**
|
||||||
- **MakerTV**
|
- **MakerTV**
|
||||||
- **mangomolo:live**
|
- **mangomolo:live**
|
||||||
- **mangomolo:video**
|
- **mangomolo:video**
|
||||||
|
- **ManyVids**
|
||||||
|
- **massengeschmack.tv**
|
||||||
- **MatchTV**
|
- **MatchTV**
|
||||||
- **MDR**: MDR.DE and KiKA
|
- **MDR**: MDR.DE and KiKA
|
||||||
- **media.ccc.de**
|
- **media.ccc.de**
|
||||||
- **Medialaan**
|
- **Medialaan**
|
||||||
- **Mediaset**
|
- **Mediaset**
|
||||||
|
- **Mediasite**
|
||||||
- **Medici**
|
- **Medici**
|
||||||
- **megaphone.fm**: megaphone.fm embedded players
|
- **megaphone.fm**: megaphone.fm embedded players
|
||||||
- **Meipai**: 美拍
|
- **Meipai**: 美拍
|
||||||
@ -467,6 +486,7 @@
|
|||||||
- **Moniker**: allmyvideos.net and vidspot.net
|
- **Moniker**: allmyvideos.net and vidspot.net
|
||||||
- **Morningstar**: morningstar.com
|
- **Morningstar**: morningstar.com
|
||||||
- **Motherless**
|
- **Motherless**
|
||||||
|
- **MotherlessGroup**
|
||||||
- **Motorsport**: motorsport.com
|
- **Motorsport**: motorsport.com
|
||||||
- **MovieClips**
|
- **MovieClips**
|
||||||
- **MovieFap**
|
- **MovieFap**
|
||||||
@ -489,8 +509,8 @@
|
|||||||
- **MySpace:album**
|
- **MySpace:album**
|
||||||
- **MySpass**
|
- **MySpass**
|
||||||
- **Myvi**
|
- **Myvi**
|
||||||
- **myvideo** (Currently broken)
|
|
||||||
- **MyVidster**
|
- **MyVidster**
|
||||||
|
- **MyviEmbed**
|
||||||
- **n-tv.de**
|
- **n-tv.de**
|
||||||
- **natgeo**
|
- **natgeo**
|
||||||
- **natgeo:episodeguide**
|
- **natgeo:episodeguide**
|
||||||
@ -499,7 +519,8 @@
|
|||||||
- **NBA**
|
- **NBA**
|
||||||
- **NBC**
|
- **NBC**
|
||||||
- **NBCNews**
|
- **NBCNews**
|
||||||
- **NBCOlympics**
|
- **nbcolympics**
|
||||||
|
- **nbcolympics:stream**
|
||||||
- **NBCSports**
|
- **NBCSports**
|
||||||
- **NBCSportsVPlayer**
|
- **NBCSportsVPlayer**
|
||||||
- **ndr**: NDR.de - Norddeutscher Rundfunk
|
- **ndr**: NDR.de - Norddeutscher Rundfunk
|
||||||
@ -532,6 +553,7 @@
|
|||||||
- **nhl.com:videocenter:category**: NHL videocenter category
|
- **nhl.com:videocenter:category**: NHL videocenter category
|
||||||
- **nick.com**
|
- **nick.com**
|
||||||
- **nick.de**
|
- **nick.de**
|
||||||
|
- **nickelodeon:br**
|
||||||
- **nickelodeonru**
|
- **nickelodeonru**
|
||||||
- **nicknight**
|
- **nicknight**
|
||||||
- **niconico**: ニコニコ動画
|
- **niconico**: ニコニコ動画
|
||||||
@ -550,8 +572,6 @@
|
|||||||
- **nowness**
|
- **nowness**
|
||||||
- **nowness:playlist**
|
- **nowness:playlist**
|
||||||
- **nowness:series**
|
- **nowness:series**
|
||||||
- **NowTV** (Currently broken)
|
|
||||||
- **NowTVList**
|
|
||||||
- **nowvideo**: NowVideo
|
- **nowvideo**: NowVideo
|
||||||
- **Noz**
|
- **Noz**
|
||||||
- **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
@ -587,6 +607,7 @@
|
|||||||
- **Openload**
|
- **Openload**
|
||||||
- **OraTV**
|
- **OraTV**
|
||||||
- **orf:fm4**: radio FM4
|
- **orf:fm4**: radio FM4
|
||||||
|
- **orf:fm4:story**: fm4.orf.at stories
|
||||||
- **orf:iptv**: iptv.ORF.at
|
- **orf:iptv**: iptv.ORF.at
|
||||||
- **orf:oe1**: Radio Österreich 1
|
- **orf:oe1**: Radio Österreich 1
|
||||||
- **orf:tvthek**: ORF TVthek
|
- **orf:tvthek**: ORF TVthek
|
||||||
@ -600,11 +621,14 @@
|
|||||||
- **pcmag**
|
- **pcmag**
|
||||||
- **PearVideo**
|
- **PearVideo**
|
||||||
- **People**
|
- **People**
|
||||||
|
- **PerformGroup**
|
||||||
- **periscope**: Periscope
|
- **periscope**: Periscope
|
||||||
- **periscope:user**: Periscope user videos
|
- **periscope:user**: Periscope user videos
|
||||||
- **PhilharmonieDeParis**: Philharmonie de Paris
|
- **PhilharmonieDeParis**: Philharmonie de Paris
|
||||||
- **phoenix.de**
|
- **phoenix.de**
|
||||||
- **Photobucket**
|
- **Photobucket**
|
||||||
|
- **Picarto**
|
||||||
|
- **PicartoVod**
|
||||||
- **Piksel**
|
- **Piksel**
|
||||||
- **Pinkbike**
|
- **Pinkbike**
|
||||||
- **Pladform**
|
- **Pladform**
|
||||||
@ -620,7 +644,9 @@
|
|||||||
- **Pokemon**
|
- **Pokemon**
|
||||||
- **PolskieRadio**
|
- **PolskieRadio**
|
||||||
- **PolskieRadioCategory**
|
- **PolskieRadioCategory**
|
||||||
|
- **PopcornTV**
|
||||||
- **PornCom**
|
- **PornCom**
|
||||||
|
- **PornerBros**
|
||||||
- **PornFlip**
|
- **PornFlip**
|
||||||
- **PornHd**
|
- **PornHd**
|
||||||
- **PornHub**: PornHub and Thumbzilla
|
- **PornHub**: PornHub and Thumbzilla
|
||||||
@ -629,6 +655,7 @@
|
|||||||
- **Pornotube**
|
- **Pornotube**
|
||||||
- **PornoVoisines**
|
- **PornoVoisines**
|
||||||
- **PornoXO**
|
- **PornoXO**
|
||||||
|
- **PornTube**
|
||||||
- **PressTV**
|
- **PressTV**
|
||||||
- **PrimeShareTV**
|
- **PrimeShareTV**
|
||||||
- **PromptFile**
|
- **PromptFile**
|
||||||
@ -640,6 +667,8 @@
|
|||||||
- **qqmusic:playlist**: QQ音乐 - 歌单
|
- **qqmusic:playlist**: QQ音乐 - 歌单
|
||||||
- **qqmusic:singer**: QQ音乐 - 歌手
|
- **qqmusic:singer**: QQ音乐 - 歌手
|
||||||
- **qqmusic:toplist**: QQ音乐 - 排行榜
|
- **qqmusic:toplist**: QQ音乐 - 排行榜
|
||||||
|
- **Quickline**
|
||||||
|
- **QuicklineLive**
|
||||||
- **R7**
|
- **R7**
|
||||||
- **R7Article**
|
- **R7Article**
|
||||||
- **radio.de**
|
- **radio.de**
|
||||||
@ -651,9 +680,13 @@
|
|||||||
- **Rai**
|
- **Rai**
|
||||||
- **RaiPlay**
|
- **RaiPlay**
|
||||||
- **RaiPlayLive**
|
- **RaiPlayLive**
|
||||||
|
- **RaiPlayPlaylist**
|
||||||
|
- **RayWenderlich**
|
||||||
- **RBMARadio**
|
- **RBMARadio**
|
||||||
- **RDS**: RDS.ca
|
- **RDS**: RDS.ca
|
||||||
- **RedBullTV**
|
- **RedBullTV**
|
||||||
|
- **Reddit**
|
||||||
|
- **RedditR**
|
||||||
- **RedTube**
|
- **RedTube**
|
||||||
- **RegioTV**
|
- **RegioTV**
|
||||||
- **RENTV**
|
- **RENTV**
|
||||||
@ -664,7 +697,6 @@
|
|||||||
- **revision**
|
- **revision**
|
||||||
- **revision3:embed**
|
- **revision3:embed**
|
||||||
- **RICE**
|
- **RICE**
|
||||||
- **RingTV**
|
|
||||||
- **RMCDecouverte**
|
- **RMCDecouverte**
|
||||||
- **RockstarGames**
|
- **RockstarGames**
|
||||||
- **RoosterTeeth**
|
- **RoosterTeeth**
|
||||||
@ -685,6 +717,7 @@
|
|||||||
- **rtve.es:live**: RTVE.es live streams
|
- **rtve.es:live**: RTVE.es live streams
|
||||||
- **rtve.es:television**
|
- **rtve.es:television**
|
||||||
- **RTVNH**
|
- **RTVNH**
|
||||||
|
- **RTVS**
|
||||||
- **Rudo**
|
- **Rudo**
|
||||||
- **RUHD**
|
- **RUHD**
|
||||||
- **RulePorn**
|
- **RulePorn**
|
||||||
@ -693,13 +726,13 @@
|
|||||||
- **rutube:embed**: Rutube embedded videos
|
- **rutube:embed**: Rutube embedded videos
|
||||||
- **rutube:movie**: Rutube movies
|
- **rutube:movie**: Rutube movies
|
||||||
- **rutube:person**: Rutube person videos
|
- **rutube:person**: Rutube person videos
|
||||||
|
- **rutube:playlist**: Rutube playlists
|
||||||
- **RUTV**: RUTV.RU
|
- **RUTV**: RUTV.RU
|
||||||
- **Ruutu**
|
- **Ruutu**
|
||||||
- **Ruv**
|
- **Ruv**
|
||||||
- **safari**: safaribooksonline.com online video
|
- **safari**: safaribooksonline.com online video
|
||||||
- **safari:api**
|
- **safari:api**
|
||||||
- **safari:course**: safaribooksonline.com online courses
|
- **safari:course**: safaribooksonline.com online courses
|
||||||
- **Sandia**: Sandia National Laboratories
|
|
||||||
- **Sapo**: SAPO Vídeos
|
- **Sapo**: SAPO Vídeos
|
||||||
- **savefrom.net**
|
- **savefrom.net**
|
||||||
- **SBS**: sbs.com.au
|
- **SBS**: sbs.com.au
|
||||||
@ -712,8 +745,12 @@
|
|||||||
- **SenateISVP**
|
- **SenateISVP**
|
||||||
- **SendtoNews**
|
- **SendtoNews**
|
||||||
- **ServingSys**
|
- **ServingSys**
|
||||||
|
- **Servus**
|
||||||
- **Sexu**
|
- **Sexu**
|
||||||
|
- **SeznamZpravy**
|
||||||
|
- **SeznamZpravyArticle**
|
||||||
- **Shahid**
|
- **Shahid**
|
||||||
|
- **ShahidShow**
|
||||||
- **Shared**: shared.sx
|
- **Shared**: shared.sx
|
||||||
- **ShowRoomLive**
|
- **ShowRoomLive**
|
||||||
- **Sina**
|
- **Sina**
|
||||||
@ -722,6 +759,7 @@
|
|||||||
- **skynewsarabia:video**
|
- **skynewsarabia:video**
|
||||||
- **SkySports**
|
- **SkySports**
|
||||||
- **Slideshare**
|
- **Slideshare**
|
||||||
|
- **SlidesLive**
|
||||||
- **Slutload**
|
- **Slutload**
|
||||||
- **smotri**: Smotri.com
|
- **smotri**: Smotri.com
|
||||||
- **smotri:broadcast**: Smotri.com broadcasts
|
- **smotri:broadcast**: Smotri.com broadcasts
|
||||||
@ -752,7 +790,7 @@
|
|||||||
- **Sport5**
|
- **Sport5**
|
||||||
- **SportBoxEmbed**
|
- **SportBoxEmbed**
|
||||||
- **SportDeutschland**
|
- **SportDeutschland**
|
||||||
- **Sportschau**
|
- **SpringboardPlatform**
|
||||||
- **Sprout**
|
- **Sprout**
|
||||||
- **sr:mediathek**: Saarländischer Rundfunk
|
- **sr:mediathek**: Saarländischer Rundfunk
|
||||||
- **SRGSSR**
|
- **SRGSSR**
|
||||||
@ -765,9 +803,11 @@
|
|||||||
- **streamcloud.eu**
|
- **streamcloud.eu**
|
||||||
- **StreamCZ**
|
- **StreamCZ**
|
||||||
- **StreetVoice**
|
- **StreetVoice**
|
||||||
|
- **StretchInternet**
|
||||||
- **SunPorno**
|
- **SunPorno**
|
||||||
- **SVT**
|
- **SVT**
|
||||||
- **SVTPlay**: SVT Play and Öppet arkiv
|
- **SVTPlay**: SVT Play and Öppet arkiv
|
||||||
|
- **SVTSeries**
|
||||||
- **SWRMediathek**
|
- **SWRMediathek**
|
||||||
- **Syfy**
|
- **Syfy**
|
||||||
- **SztvHu**
|
- **SztvHu**
|
||||||
@ -776,7 +816,7 @@
|
|||||||
- **tagesschau:player**
|
- **tagesschau:player**
|
||||||
- **Tass**
|
- **Tass**
|
||||||
- **TastyTrade**
|
- **TastyTrade**
|
||||||
- **TBS** (Currently broken)
|
- **TBS**
|
||||||
- **TDSLifeway**
|
- **TDSLifeway**
|
||||||
- **teachertube**: teachertube.com videos
|
- **teachertube**: teachertube.com videos
|
||||||
- **teachertube:user:collection**: teachertube.com user and collection videos
|
- **teachertube:user:collection**: teachertube.com user and collection videos
|
||||||
@ -791,8 +831,11 @@
|
|||||||
- **Telegraaf**
|
- **Telegraaf**
|
||||||
- **TeleMB**
|
- **TeleMB**
|
||||||
- **TeleQuebec**
|
- **TeleQuebec**
|
||||||
|
- **TeleQuebecEmission**
|
||||||
|
- **TeleQuebecLive**
|
||||||
- **TeleTask**
|
- **TeleTask**
|
||||||
- **Telewebion**
|
- **Telewebion**
|
||||||
|
- **TennisTV**
|
||||||
- **TF1**
|
- **TF1**
|
||||||
- **TFO**
|
- **TFO**
|
||||||
- **TheIntercept**
|
- **TheIntercept**
|
||||||
@ -800,7 +843,6 @@
|
|||||||
- **ThePlatform**
|
- **ThePlatform**
|
||||||
- **ThePlatformFeed**
|
- **ThePlatformFeed**
|
||||||
- **TheScene**
|
- **TheScene**
|
||||||
- **TheSixtyOne**
|
|
||||||
- **TheStar**
|
- **TheStar**
|
||||||
- **TheSun**
|
- **TheSun**
|
||||||
- **TheWeatherChannel**
|
- **TheWeatherChannel**
|
||||||
@ -847,6 +889,9 @@
|
|||||||
- **tvland.com**
|
- **tvland.com**
|
||||||
- **TVN24**
|
- **TVN24**
|
||||||
- **TVNoe**
|
- **TVNoe**
|
||||||
|
- **TVNow**
|
||||||
|
- **TVNowList**
|
||||||
|
- **TVNowShow**
|
||||||
- **tvp**: Telewizja Polska
|
- **tvp**: Telewizja Polska
|
||||||
- **tvp:embed**: Telewizja Polska
|
- **tvp:embed**: Telewizja Polska
|
||||||
- **tvp:series**
|
- **tvp:series**
|
||||||
@ -868,8 +913,11 @@
|
|||||||
- **udemy**
|
- **udemy**
|
||||||
- **udemy:course**
|
- **udemy:course**
|
||||||
- **UDNEmbed**: 聯合影音
|
- **UDNEmbed**: 聯合影音
|
||||||
|
- **UFCTV**
|
||||||
- **UKTVPlay**
|
- **UKTVPlay**
|
||||||
|
- **umg:de**: Universal Music Deutschland
|
||||||
- **Unistra**
|
- **Unistra**
|
||||||
|
- **Unity**
|
||||||
- **uol.com.br**
|
- **uol.com.br**
|
||||||
- **uplynk**
|
- **uplynk**
|
||||||
- **uplynk:preplay**
|
- **uplynk:preplay**
|
||||||
@ -897,7 +945,6 @@
|
|||||||
- **vice**
|
- **vice**
|
||||||
- **vice:article**
|
- **vice:article**
|
||||||
- **vice:show**
|
- **vice:show**
|
||||||
- **Viceland**
|
|
||||||
- **Vidbit**
|
- **Vidbit**
|
||||||
- **Viddler**
|
- **Viddler**
|
||||||
- **Videa**
|
- **Videa**
|
||||||
@ -913,6 +960,7 @@
|
|||||||
- **VideoPress**
|
- **VideoPress**
|
||||||
- **videoweed**: VideoWeed
|
- **videoweed**: VideoWeed
|
||||||
- **Vidio**
|
- **Vidio**
|
||||||
|
- **VidLii**
|
||||||
- **vidme**
|
- **vidme**
|
||||||
- **vidme:user**
|
- **vidme:user**
|
||||||
- **vidme:user:likes**
|
- **vidme:user:likes**
|
||||||
@ -953,10 +1001,12 @@
|
|||||||
- **VoiceRepublic**
|
- **VoiceRepublic**
|
||||||
- **Voot**
|
- **Voot**
|
||||||
- **VoxMedia**
|
- **VoxMedia**
|
||||||
|
- **VoxMediaVolume**
|
||||||
- **Vporn**
|
- **Vporn**
|
||||||
- **vpro**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **vpro**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
- **Vrak**
|
- **Vrak**
|
||||||
- **VRT**: deredactie.be, sporza.be, cobra.be and cobra.canvas.be
|
- **VRT**: deredactie.be, sporza.be, cobra.be and cobra.canvas.be
|
||||||
|
- **VrtNU**: VrtNU.be
|
||||||
- **vrv**
|
- **vrv**
|
||||||
- **vrv:series**
|
- **vrv:series**
|
||||||
- **VShare**
|
- **VShare**
|
||||||
@ -973,10 +1023,14 @@
|
|||||||
- **WatchIndianPorn**: Watch Indian Porn
|
- **WatchIndianPorn**: Watch Indian Porn
|
||||||
- **WDR**
|
- **WDR**
|
||||||
- **wdr:mobile**
|
- **wdr:mobile**
|
||||||
|
- **WDRElefant**
|
||||||
|
- **WDRPage**
|
||||||
- **Webcaster**
|
- **Webcaster**
|
||||||
- **WebcasterFeed**
|
- **WebcasterFeed**
|
||||||
- **WebOfStories**
|
- **WebOfStories**
|
||||||
- **WebOfStoriesPlaylist**
|
- **WebOfStoriesPlaylist**
|
||||||
|
- **Weibo**
|
||||||
|
- **WeiboMobile**
|
||||||
- **WeiqiTV**: WQTV
|
- **WeiqiTV**: WQTV
|
||||||
- **wholecloud**: WholeCloud
|
- **wholecloud**: WholeCloud
|
||||||
- **Wimp**
|
- **Wimp**
|
||||||
@ -996,6 +1050,8 @@
|
|||||||
- **xiami:artist**: 虾米音乐 - 歌手
|
- **xiami:artist**: 虾米音乐 - 歌手
|
||||||
- **xiami:collection**: 虾米音乐 - 精选集
|
- **xiami:collection**: 虾米音乐 - 精选集
|
||||||
- **xiami:song**: 虾米音乐
|
- **xiami:song**: 虾米音乐
|
||||||
|
- **ximalaya**: 喜马拉雅FM
|
||||||
|
- **ximalaya:album**: 喜马拉雅FM 专辑
|
||||||
- **XMinus**
|
- **XMinus**
|
||||||
- **XNXX**
|
- **XNXX**
|
||||||
- **Xstream**
|
- **Xstream**
|
||||||
@ -1009,12 +1065,16 @@
|
|||||||
- **yandexmusic:album**: Яндекс.Музыка - Альбом
|
- **yandexmusic:album**: Яндекс.Музыка - Альбом
|
||||||
- **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
|
- **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
|
||||||
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
||||||
|
- **YapFiles**
|
||||||
- **YesJapan**
|
- **YesJapan**
|
||||||
- **yinyuetai:video**: 音悦Tai
|
- **yinyuetai:video**: 音悦Tai
|
||||||
- **Ynet**
|
- **Ynet**
|
||||||
- **YouJizz**
|
- **YouJizz**
|
||||||
- **youku**: 优酷
|
- **youku**: 优酷
|
||||||
- **youku:show**
|
- **youku:show**
|
||||||
|
- **YouNowChannel**
|
||||||
|
- **YouNowLive**
|
||||||
|
- **YouNowMoment**
|
||||||
- **YouPorn**
|
- **YouPorn**
|
||||||
- **YourUpload**
|
- **YourUpload**
|
||||||
- **youtube**: YouTube.com
|
- **youtube**: YouTube.com
|
||||||
@ -1028,13 +1088,14 @@
|
|||||||
- **youtube:search**: YouTube.com searches
|
- **youtube:search**: YouTube.com searches
|
||||||
- **youtube:search:date**: YouTube.com searches, newest videos first
|
- **youtube:search:date**: YouTube.com searches, newest videos first
|
||||||
- **youtube:search_url**: YouTube.com search URLs
|
- **youtube:search_url**: YouTube.com search URLs
|
||||||
- **youtube:shared**
|
|
||||||
- **youtube:show**: YouTube.com (multi-season) shows
|
- **youtube:show**: YouTube.com (multi-season) shows
|
||||||
- **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)
|
- **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)
|
||||||
- **youtube:user**: YouTube.com user videos (URL or "ytuser" keyword)
|
- **youtube:user**: YouTube.com user videos (URL or "ytuser" keyword)
|
||||||
- **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
|
- **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
|
||||||
- **Zapiks**
|
- **Zapiks**
|
||||||
- **Zaq1**
|
- **Zaq1**
|
||||||
|
- **Zattoo**
|
||||||
|
- **ZattooLive**
|
||||||
- **ZDF**
|
- **ZDF**
|
||||||
- **ZDFChannel**
|
- **ZDFChannel**
|
||||||
- **zingmp3**: mp3.zing.vn
|
- **zingmp3**: mp3.zing.vn
|
||||||
|
@ -3,4 +3,4 @@ universal = True
|
|||||||
|
|
||||||
[flake8]
|
[flake8]
|
||||||
exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,devscripts/lazy_load_template.py,devscripts/make_issue_template.py,setup.py,build,.git
|
exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,devscripts/lazy_load_template.py,devscripts/make_issue_template.py,setup.py,build,.git
|
||||||
ignore = E402,E501,E731
|
ignore = E402,E501,E731,E741
|
||||||
|
1
setup.py
1
setup.py
@ -109,6 +109,7 @@ setup(
|
|||||||
author_email='ytdl@yt-dl.org',
|
author_email='ytdl@yt-dl.org',
|
||||||
maintainer='Sergey M.',
|
maintainer='Sergey M.',
|
||||||
maintainer_email='dstftw@gmail.com',
|
maintainer_email='dstftw@gmail.com',
|
||||||
|
license='Unlicense',
|
||||||
packages=[
|
packages=[
|
||||||
'youtube_dl',
|
'youtube_dl',
|
||||||
'youtube_dl.extractor', 'youtube_dl.downloader',
|
'youtube_dl.extractor', 'youtube_dl.downloader',
|
||||||
|
@ -10,6 +10,7 @@ import unittest
|
|||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
from test.helper import FakeYDL, expect_dict, expect_value
|
from test.helper import FakeYDL, expect_dict, expect_value
|
||||||
|
from youtube_dl.compat import compat_etree_fromstring
|
||||||
from youtube_dl.extractor.common import InfoExtractor
|
from youtube_dl.extractor.common import InfoExtractor
|
||||||
from youtube_dl.extractor import YoutubeIE, get_info_extractor
|
from youtube_dl.extractor import YoutubeIE, get_info_extractor
|
||||||
from youtube_dl.utils import encode_data_uri, strip_jsonp, ExtractorError, RegexNotFoundError
|
from youtube_dl.utils import encode_data_uri, strip_jsonp, ExtractorError, RegexNotFoundError
|
||||||
@ -488,6 +489,260 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
|||||||
self.ie._sort_formats(formats)
|
self.ie._sort_formats(formats)
|
||||||
expect_value(self, formats, expected_formats, None)
|
expect_value(self, formats, expected_formats, None)
|
||||||
|
|
||||||
|
def test_parse_mpd_formats(self):
|
||||||
|
_TEST_CASES = [
|
||||||
|
(
|
||||||
|
# https://github.com/rg3/youtube-dl/issues/13919
|
||||||
|
# Also tests duplicate representation ids, see
|
||||||
|
# https://github.com/rg3/youtube-dl/issues/15111
|
||||||
|
'float_duration',
|
||||||
|
'http://unknown/manifest.mpd',
|
||||||
|
[{
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'm4a',
|
||||||
|
'format_id': '318597',
|
||||||
|
'format_note': 'DASH audio',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'mp4a.40.2',
|
||||||
|
'vcodec': 'none',
|
||||||
|
'tbr': 61.587,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': '318597',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'none',
|
||||||
|
'vcodec': 'avc1.42001f',
|
||||||
|
'tbr': 318.597,
|
||||||
|
'width': 340,
|
||||||
|
'height': 192,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': '638590',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'none',
|
||||||
|
'vcodec': 'avc1.42001f',
|
||||||
|
'tbr': 638.59,
|
||||||
|
'width': 512,
|
||||||
|
'height': 288,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': '1022565',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'none',
|
||||||
|
'vcodec': 'avc1.4d001f',
|
||||||
|
'tbr': 1022.565,
|
||||||
|
'width': 688,
|
||||||
|
'height': 384,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': '2046506',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'none',
|
||||||
|
'vcodec': 'avc1.4d001f',
|
||||||
|
'tbr': 2046.506,
|
||||||
|
'width': 1024,
|
||||||
|
'height': 576,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': '3998017',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'none',
|
||||||
|
'vcodec': 'avc1.640029',
|
||||||
|
'tbr': 3998.017,
|
||||||
|
'width': 1280,
|
||||||
|
'height': 720,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': '5997485',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'none',
|
||||||
|
'vcodec': 'avc1.640032',
|
||||||
|
'tbr': 5997.485,
|
||||||
|
'width': 1920,
|
||||||
|
'height': 1080,
|
||||||
|
}]
|
||||||
|
), (
|
||||||
|
# https://github.com/rg3/youtube-dl/pull/14844
|
||||||
|
'urls_only',
|
||||||
|
'http://unknown/manifest.mpd',
|
||||||
|
[{
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': 'h264_aac_144p_m4s',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'mp4a.40.2',
|
||||||
|
'vcodec': 'avc3.42c01e',
|
||||||
|
'tbr': 200,
|
||||||
|
'width': 256,
|
||||||
|
'height': 144,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': 'h264_aac_240p_m4s',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'mp4a.40.2',
|
||||||
|
'vcodec': 'avc3.42c01e',
|
||||||
|
'tbr': 400,
|
||||||
|
'width': 424,
|
||||||
|
'height': 240,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': 'h264_aac_360p_m4s',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'mp4a.40.2',
|
||||||
|
'vcodec': 'avc3.42c01e',
|
||||||
|
'tbr': 800,
|
||||||
|
'width': 640,
|
||||||
|
'height': 360,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': 'h264_aac_480p_m4s',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'mp4a.40.2',
|
||||||
|
'vcodec': 'avc3.42c01e',
|
||||||
|
'tbr': 1200,
|
||||||
|
'width': 856,
|
||||||
|
'height': 480,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': 'h264_aac_576p_m4s',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'mp4a.40.2',
|
||||||
|
'vcodec': 'avc3.42c01e',
|
||||||
|
'tbr': 1600,
|
||||||
|
'width': 1024,
|
||||||
|
'height': 576,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': 'h264_aac_720p_m4s',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'mp4a.40.2',
|
||||||
|
'vcodec': 'avc3.42c01e',
|
||||||
|
'tbr': 2400,
|
||||||
|
'width': 1280,
|
||||||
|
'height': 720,
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'http://unknown/manifest.mpd',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'format_id': 'h264_aac_1080p_m4s',
|
||||||
|
'format_note': 'DASH video',
|
||||||
|
'protocol': 'http_dash_segments',
|
||||||
|
'acodec': 'mp4a.40.2',
|
||||||
|
'vcodec': 'avc3.42c01e',
|
||||||
|
'tbr': 4400,
|
||||||
|
'width': 1920,
|
||||||
|
'height': 1080,
|
||||||
|
}]
|
||||||
|
)
|
||||||
|
]
|
||||||
|
|
||||||
|
for mpd_file, mpd_url, expected_formats in _TEST_CASES:
|
||||||
|
with io.open('./test/testdata/mpd/%s.mpd' % mpd_file,
|
||||||
|
mode='r', encoding='utf-8') as f:
|
||||||
|
formats = self.ie._parse_mpd_formats(
|
||||||
|
compat_etree_fromstring(f.read().encode('utf-8')),
|
||||||
|
mpd_url=mpd_url)
|
||||||
|
self.ie._sort_formats(formats)
|
||||||
|
expect_value(self, formats, expected_formats, None)
|
||||||
|
|
||||||
|
def test_parse_f4m_formats(self):
|
||||||
|
_TEST_CASES = [
|
||||||
|
(
|
||||||
|
# https://github.com/rg3/youtube-dl/issues/14660
|
||||||
|
'custom_base_url',
|
||||||
|
'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m',
|
||||||
|
[{
|
||||||
|
'manifest_url': 'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m',
|
||||||
|
'ext': 'flv',
|
||||||
|
'format_id': '2148',
|
||||||
|
'protocol': 'f4m',
|
||||||
|
'tbr': 2148,
|
||||||
|
'width': 1280,
|
||||||
|
'height': 720,
|
||||||
|
}]
|
||||||
|
),
|
||||||
|
]
|
||||||
|
|
||||||
|
for f4m_file, f4m_url, expected_formats in _TEST_CASES:
|
||||||
|
with io.open('./test/testdata/f4m/%s.f4m' % f4m_file,
|
||||||
|
mode='r', encoding='utf-8') as f:
|
||||||
|
formats = self.ie._parse_f4m_formats(
|
||||||
|
compat_etree_fromstring(f.read().encode('utf-8')),
|
||||||
|
f4m_url, None)
|
||||||
|
self.ie._sort_formats(formats)
|
||||||
|
expect_value(self, formats, expected_formats, None)
|
||||||
|
|
||||||
|
def test_parse_xspf(self):
|
||||||
|
_TEST_CASES = [
|
||||||
|
(
|
||||||
|
'foo_xspf',
|
||||||
|
'https://example.org/src/foo_xspf.xspf',
|
||||||
|
[{
|
||||||
|
'id': 'foo_xspf',
|
||||||
|
'title': 'Pandemonium',
|
||||||
|
'description': 'Visit http://bigbrother404.bandcamp.com',
|
||||||
|
'duration': 202.416,
|
||||||
|
'formats': [{
|
||||||
|
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
|
||||||
|
'url': 'https://example.org/src/cd1/track%201.mp3',
|
||||||
|
}],
|
||||||
|
}, {
|
||||||
|
'id': 'foo_xspf',
|
||||||
|
'title': 'Final Cartridge (Nichico Twelve Remix)',
|
||||||
|
'description': 'Visit http://bigbrother404.bandcamp.com',
|
||||||
|
'duration': 255.857,
|
||||||
|
'formats': [{
|
||||||
|
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
|
||||||
|
'url': 'https://example.org/%E3%83%88%E3%83%A9%E3%83%83%E3%82%AF%E3%80%80%EF%BC%92.mp3',
|
||||||
|
}],
|
||||||
|
}, {
|
||||||
|
'id': 'foo_xspf',
|
||||||
|
'title': 'Rebuilding Nightingale',
|
||||||
|
'description': 'Visit http://bigbrother404.bandcamp.com',
|
||||||
|
'duration': 287.915,
|
||||||
|
'formats': [{
|
||||||
|
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
|
||||||
|
'url': 'https://example.org/src/track3.mp3',
|
||||||
|
}, {
|
||||||
|
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
|
||||||
|
'url': 'https://example.com/track3.mp3',
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
),
|
||||||
|
]
|
||||||
|
|
||||||
|
for xspf_file, xspf_url, expected_entries in _TEST_CASES:
|
||||||
|
with io.open('./test/testdata/xspf/%s.xspf' % xspf_file,
|
||||||
|
mode='r', encoding='utf-8') as f:
|
||||||
|
entries = self.ie._parse_xspf(
|
||||||
|
compat_etree_fromstring(f.read().encode('utf-8')),
|
||||||
|
xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)
|
||||||
|
expect_value(self, entries, expected_entries, None)
|
||||||
|
for i in range(len(entries)):
|
||||||
|
expect_dict(self, entries[i], expected_entries[i])
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
@ -466,12 +466,18 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
ydl = YDL({'simulate': True})
|
ydl = YDL({'simulate': True})
|
||||||
self.assertEqual(ydl._default_format_spec({}), 'bestvideo+bestaudio/best')
|
self.assertEqual(ydl._default_format_spec({}), 'bestvideo+bestaudio/best')
|
||||||
|
|
||||||
|
ydl = YDL({})
|
||||||
|
self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')
|
||||||
|
|
||||||
|
ydl = YDL({'simulate': True})
|
||||||
|
self.assertEqual(ydl._default_format_spec({'is_live': True}), 'bestvideo+bestaudio/best')
|
||||||
|
|
||||||
ydl = YDL({'outtmpl': '-'})
|
ydl = YDL({'outtmpl': '-'})
|
||||||
self.assertEqual(ydl._default_format_spec({}), 'best')
|
self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio')
|
||||||
|
|
||||||
ydl = YDL({})
|
ydl = YDL({})
|
||||||
self.assertEqual(ydl._default_format_spec({}, download=False), 'bestvideo+bestaudio/best')
|
self.assertEqual(ydl._default_format_spec({}, download=False), 'bestvideo+bestaudio/best')
|
||||||
self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best')
|
self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')
|
||||||
|
|
||||||
|
|
||||||
class TestYoutubeDL(unittest.TestCase):
|
class TestYoutubeDL(unittest.TestCase):
|
||||||
@ -770,6 +776,12 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
result = get_ids({'playlist_items': '10'})
|
result = get_ids({'playlist_items': '10'})
|
||||||
self.assertEqual(result, [])
|
self.assertEqual(result, [])
|
||||||
|
|
||||||
|
result = get_ids({'playlist_items': '3-10'})
|
||||||
|
self.assertEqual(result, [3, 4])
|
||||||
|
|
||||||
|
result = get_ids({'playlist_items': '2-4,3-4,3'})
|
||||||
|
self.assertEqual(result, [2, 3, 4])
|
||||||
|
|
||||||
def test_urlopen_no_file_protocol(self):
|
def test_urlopen_no_file_protocol(self):
|
||||||
# see https://github.com/rg3/youtube-dl/issues/8227
|
# see https://github.com/rg3/youtube-dl/issues/8227
|
||||||
ydl = YDL()
|
ydl = YDL()
|
||||||
|
@ -92,8 +92,8 @@ class TestDownload(unittest.TestCase):
|
|||||||
def generator(test_case, tname):
|
def generator(test_case, tname):
|
||||||
|
|
||||||
def test_template(self):
|
def test_template(self):
|
||||||
ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
|
ie = youtube_dl.extractor.get_info_extractor(test_case['name'])()
|
||||||
other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]
|
other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
|
||||||
is_playlist = any(k.startswith('playlist') for k in test_case)
|
is_playlist = any(k.startswith('playlist') for k in test_case)
|
||||||
test_cases = test_case.get(
|
test_cases = test_case.get(
|
||||||
'playlist', [] if is_playlist else [test_case])
|
'playlist', [] if is_playlist else [test_case])
|
||||||
|
125
test/test_downloader_http.py
Normal file
125
test/test_downloader_http.py
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
# Allow direct execution
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import unittest
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
from test.helper import try_rm
|
||||||
|
from youtube_dl import YoutubeDL
|
||||||
|
from youtube_dl.compat import compat_http_server
|
||||||
|
from youtube_dl.downloader.http import HttpFD
|
||||||
|
from youtube_dl.utils import encodeFilename
|
||||||
|
import ssl
|
||||||
|
import threading
|
||||||
|
|
||||||
|
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
|
||||||
|
|
||||||
|
def http_server_port(httpd):
|
||||||
|
if os.name == 'java' and isinstance(httpd.socket, ssl.SSLSocket):
|
||||||
|
# In Jython SSLSocket is not a subclass of socket.socket
|
||||||
|
sock = httpd.socket.sock
|
||||||
|
else:
|
||||||
|
sock = httpd.socket
|
||||||
|
return sock.getsockname()[1]
|
||||||
|
|
||||||
|
|
||||||
|
TEST_SIZE = 10 * 1024
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
|
||||||
|
def log_message(self, format, *args):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def send_content_range(self, total=None):
|
||||||
|
range_header = self.headers.get('Range')
|
||||||
|
start = end = None
|
||||||
|
if range_header:
|
||||||
|
mobj = re.search(r'^bytes=(\d+)-(\d+)', range_header)
|
||||||
|
if mobj:
|
||||||
|
start = int(mobj.group(1))
|
||||||
|
end = int(mobj.group(2))
|
||||||
|
valid_range = start is not None and end is not None
|
||||||
|
if valid_range:
|
||||||
|
content_range = 'bytes %d-%d' % (start, end)
|
||||||
|
if total:
|
||||||
|
content_range += '/%d' % total
|
||||||
|
self.send_header('Content-Range', content_range)
|
||||||
|
return (end - start + 1) if valid_range else total
|
||||||
|
|
||||||
|
def serve(self, range=True, content_length=True):
|
||||||
|
self.send_response(200)
|
||||||
|
self.send_header('Content-Type', 'video/mp4')
|
||||||
|
size = TEST_SIZE
|
||||||
|
if range:
|
||||||
|
size = self.send_content_range(TEST_SIZE)
|
||||||
|
if content_length:
|
||||||
|
self.send_header('Content-Length', size)
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(b'#' * size)
|
||||||
|
|
||||||
|
def do_GET(self):
|
||||||
|
if self.path == '/regular':
|
||||||
|
self.serve()
|
||||||
|
elif self.path == '/no-content-length':
|
||||||
|
self.serve(content_length=False)
|
||||||
|
elif self.path == '/no-range':
|
||||||
|
self.serve(range=False)
|
||||||
|
elif self.path == '/no-range-no-content-length':
|
||||||
|
self.serve(range=False, content_length=False)
|
||||||
|
else:
|
||||||
|
assert False
|
||||||
|
|
||||||
|
|
||||||
|
class FakeLogger(object):
|
||||||
|
def debug(self, msg):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def warning(self, msg):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def error(self, msg):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class TestHttpFD(unittest.TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self.httpd = compat_http_server.HTTPServer(
|
||||||
|
('127.0.0.1', 0), HTTPTestRequestHandler)
|
||||||
|
self.port = http_server_port(self.httpd)
|
||||||
|
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
|
||||||
|
self.server_thread.daemon = True
|
||||||
|
self.server_thread.start()
|
||||||
|
|
||||||
|
def download(self, params, ep):
|
||||||
|
params['logger'] = FakeLogger()
|
||||||
|
ydl = YoutubeDL(params)
|
||||||
|
downloader = HttpFD(ydl, params)
|
||||||
|
filename = 'testfile.mp4'
|
||||||
|
try_rm(encodeFilename(filename))
|
||||||
|
self.assertTrue(downloader.real_download(filename, {
|
||||||
|
'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
|
||||||
|
}))
|
||||||
|
self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE)
|
||||||
|
try_rm(encodeFilename(filename))
|
||||||
|
|
||||||
|
def download_all(self, params):
|
||||||
|
for ep in ('regular', 'no-content-length', 'no-range', 'no-range-no-content-length'):
|
||||||
|
self.download(params, ep)
|
||||||
|
|
||||||
|
def test_regular(self):
|
||||||
|
self.download_all({})
|
||||||
|
|
||||||
|
def test_chunked(self):
|
||||||
|
self.download_all({
|
||||||
|
'http_chunk_size': 1000,
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
@ -47,7 +47,7 @@ class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
|
|||||||
self.end_headers()
|
self.end_headers()
|
||||||
return
|
return
|
||||||
|
|
||||||
new_url = 'http://localhost:%d/中文.html' % http_server_port(self.server)
|
new_url = 'http://127.0.0.1:%d/中文.html' % http_server_port(self.server)
|
||||||
self.send_response(302)
|
self.send_response(302)
|
||||||
self.send_header(b'Location', new_url.encode('utf-8'))
|
self.send_header(b'Location', new_url.encode('utf-8'))
|
||||||
self.end_headers()
|
self.end_headers()
|
||||||
@ -74,7 +74,7 @@ class FakeLogger(object):
|
|||||||
class TestHTTP(unittest.TestCase):
|
class TestHTTP(unittest.TestCase):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
self.httpd = compat_http_server.HTTPServer(
|
self.httpd = compat_http_server.HTTPServer(
|
||||||
('localhost', 0), HTTPTestRequestHandler)
|
('127.0.0.1', 0), HTTPTestRequestHandler)
|
||||||
self.port = http_server_port(self.httpd)
|
self.port = http_server_port(self.httpd)
|
||||||
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
|
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
|
||||||
self.server_thread.daemon = True
|
self.server_thread.daemon = True
|
||||||
@ -86,15 +86,15 @@ class TestHTTP(unittest.TestCase):
|
|||||||
return
|
return
|
||||||
|
|
||||||
ydl = YoutubeDL({'logger': FakeLogger()})
|
ydl = YoutubeDL({'logger': FakeLogger()})
|
||||||
r = ydl.extract_info('http://localhost:%d/302' % self.port)
|
r = ydl.extract_info('http://127.0.0.1:%d/302' % self.port)
|
||||||
self.assertEqual(r['entries'][0]['url'], 'http://localhost:%d/vid.mp4' % self.port)
|
self.assertEqual(r['entries'][0]['url'], 'http://127.0.0.1:%d/vid.mp4' % self.port)
|
||||||
|
|
||||||
|
|
||||||
class TestHTTPS(unittest.TestCase):
|
class TestHTTPS(unittest.TestCase):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
certfn = os.path.join(TEST_DIR, 'testcert.pem')
|
certfn = os.path.join(TEST_DIR, 'testcert.pem')
|
||||||
self.httpd = compat_http_server.HTTPServer(
|
self.httpd = compat_http_server.HTTPServer(
|
||||||
('localhost', 0), HTTPTestRequestHandler)
|
('127.0.0.1', 0), HTTPTestRequestHandler)
|
||||||
self.httpd.socket = ssl.wrap_socket(
|
self.httpd.socket = ssl.wrap_socket(
|
||||||
self.httpd.socket, certfile=certfn, server_side=True)
|
self.httpd.socket, certfile=certfn, server_side=True)
|
||||||
self.port = http_server_port(self.httpd)
|
self.port = http_server_port(self.httpd)
|
||||||
@ -107,11 +107,11 @@ class TestHTTPS(unittest.TestCase):
|
|||||||
ydl = YoutubeDL({'logger': FakeLogger()})
|
ydl = YoutubeDL({'logger': FakeLogger()})
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
Exception,
|
Exception,
|
||||||
ydl.extract_info, 'https://localhost:%d/video.html' % self.port)
|
ydl.extract_info, 'https://127.0.0.1:%d/video.html' % self.port)
|
||||||
|
|
||||||
ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
|
ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
|
||||||
r = ydl.extract_info('https://localhost:%d/video.html' % self.port)
|
r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
|
||||||
self.assertEqual(r['entries'][0]['url'], 'https://localhost:%d/vid.mp4' % self.port)
|
self.assertEqual(r['entries'][0]['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
|
||||||
|
|
||||||
|
|
||||||
def _build_proxy_handler(name):
|
def _build_proxy_handler(name):
|
||||||
@ -132,23 +132,23 @@ def _build_proxy_handler(name):
|
|||||||
class TestProxy(unittest.TestCase):
|
class TestProxy(unittest.TestCase):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
self.proxy = compat_http_server.HTTPServer(
|
self.proxy = compat_http_server.HTTPServer(
|
||||||
('localhost', 0), _build_proxy_handler('normal'))
|
('127.0.0.1', 0), _build_proxy_handler('normal'))
|
||||||
self.port = http_server_port(self.proxy)
|
self.port = http_server_port(self.proxy)
|
||||||
self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)
|
self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)
|
||||||
self.proxy_thread.daemon = True
|
self.proxy_thread.daemon = True
|
||||||
self.proxy_thread.start()
|
self.proxy_thread.start()
|
||||||
|
|
||||||
self.geo_proxy = compat_http_server.HTTPServer(
|
self.geo_proxy = compat_http_server.HTTPServer(
|
||||||
('localhost', 0), _build_proxy_handler('geo'))
|
('127.0.0.1', 0), _build_proxy_handler('geo'))
|
||||||
self.geo_port = http_server_port(self.geo_proxy)
|
self.geo_port = http_server_port(self.geo_proxy)
|
||||||
self.geo_proxy_thread = threading.Thread(target=self.geo_proxy.serve_forever)
|
self.geo_proxy_thread = threading.Thread(target=self.geo_proxy.serve_forever)
|
||||||
self.geo_proxy_thread.daemon = True
|
self.geo_proxy_thread.daemon = True
|
||||||
self.geo_proxy_thread.start()
|
self.geo_proxy_thread.start()
|
||||||
|
|
||||||
def test_proxy(self):
|
def test_proxy(self):
|
||||||
geo_proxy = 'localhost:{0}'.format(self.geo_port)
|
geo_proxy = '127.0.0.1:{0}'.format(self.geo_port)
|
||||||
ydl = YoutubeDL({
|
ydl = YoutubeDL({
|
||||||
'proxy': 'localhost:{0}'.format(self.port),
|
'proxy': '127.0.0.1:{0}'.format(self.port),
|
||||||
'geo_verification_proxy': geo_proxy,
|
'geo_verification_proxy': geo_proxy,
|
||||||
})
|
})
|
||||||
url = 'http://foo.com/bar'
|
url = 'http://foo.com/bar'
|
||||||
@ -162,7 +162,7 @@ class TestProxy(unittest.TestCase):
|
|||||||
|
|
||||||
def test_proxy_with_idn(self):
|
def test_proxy_with_idn(self):
|
||||||
ydl = YoutubeDL({
|
ydl = YoutubeDL({
|
||||||
'proxy': 'localhost:{0}'.format(self.port),
|
'proxy': '127.0.0.1:{0}'.format(self.port),
|
||||||
})
|
})
|
||||||
url = 'http://中文.tw/'
|
url = 'http://中文.tw/'
|
||||||
response = ydl.urlopen(url).read().decode('utf-8')
|
response = ydl.urlopen(url).read().decode('utf-8')
|
||||||
|
@ -232,7 +232,7 @@ class TestNPOSubtitles(BaseTestSubtitles):
|
|||||||
|
|
||||||
|
|
||||||
class TestMTVSubtitles(BaseTestSubtitles):
|
class TestMTVSubtitles(BaseTestSubtitles):
|
||||||
url = 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother'
|
url = 'http://www.cc.com/video-clips/p63lk0/adam-devine-s-house-party-chasing-white-swans'
|
||||||
IE = ComedyCentralIE
|
IE = ComedyCentralIE
|
||||||
|
|
||||||
def getInfoDict(self):
|
def getInfoDict(self):
|
||||||
@ -243,7 +243,7 @@ class TestMTVSubtitles(BaseTestSubtitles):
|
|||||||
self.DL.params['allsubtitles'] = True
|
self.DL.params['allsubtitles'] = True
|
||||||
subtitles = self.getSubtitles()
|
subtitles = self.getSubtitles()
|
||||||
self.assertEqual(set(subtitles.keys()), set(['en']))
|
self.assertEqual(set(subtitles.keys()), set(['en']))
|
||||||
self.assertEqual(md5(subtitles['en']), 'b9f6ca22a6acf597ec76f61749765e65')
|
self.assertEqual(md5(subtitles['en']), '78206b8d8a0cfa9da64dc026eea48961')
|
||||||
|
|
||||||
|
|
||||||
class TestNRKSubtitles(BaseTestSubtitles):
|
class TestNRKSubtitles(BaseTestSubtitles):
|
||||||
|
@ -42,6 +42,7 @@ from youtube_dl.utils import (
|
|||||||
is_html,
|
is_html,
|
||||||
js_to_json,
|
js_to_json,
|
||||||
limit_length,
|
limit_length,
|
||||||
|
merge_dicts,
|
||||||
mimetype2ext,
|
mimetype2ext,
|
||||||
month_by_name,
|
month_by_name,
|
||||||
multipart_encode,
|
multipart_encode,
|
||||||
@ -53,10 +54,12 @@ from youtube_dl.utils import (
|
|||||||
parse_filesize,
|
parse_filesize,
|
||||||
parse_count,
|
parse_count,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
|
parse_resolution,
|
||||||
pkcs1pad,
|
pkcs1pad,
|
||||||
read_batch_urls,
|
read_batch_urls,
|
||||||
sanitize_filename,
|
sanitize_filename,
|
||||||
sanitize_path,
|
sanitize_path,
|
||||||
|
sanitize_url,
|
||||||
expand_path,
|
expand_path,
|
||||||
prepend_extension,
|
prepend_extension,
|
||||||
replace_extension,
|
replace_extension,
|
||||||
@ -219,6 +222,12 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(sanitize_path('./abc'), 'abc')
|
self.assertEqual(sanitize_path('./abc'), 'abc')
|
||||||
self.assertEqual(sanitize_path('./../abc'), '..\\abc')
|
self.assertEqual(sanitize_path('./../abc'), '..\\abc')
|
||||||
|
|
||||||
|
def test_sanitize_url(self):
|
||||||
|
self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar')
|
||||||
|
self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar')
|
||||||
|
self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar')
|
||||||
|
self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar')
|
||||||
|
|
||||||
def test_expand_path(self):
|
def test_expand_path(self):
|
||||||
def env(var):
|
def env(var):
|
||||||
return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var)
|
return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var)
|
||||||
@ -279,6 +288,7 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(unescapeHTML('/'), '/')
|
self.assertEqual(unescapeHTML('/'), '/')
|
||||||
self.assertEqual(unescapeHTML('é'), 'é')
|
self.assertEqual(unescapeHTML('é'), 'é')
|
||||||
self.assertEqual(unescapeHTML('�'), '�')
|
self.assertEqual(unescapeHTML('�'), '�')
|
||||||
|
self.assertEqual(unescapeHTML('&a"'), '&a"')
|
||||||
# HTML5 entities
|
# HTML5 entities
|
||||||
self.assertEqual(unescapeHTML('.''), '.\'')
|
self.assertEqual(unescapeHTML('.''), '.\'')
|
||||||
|
|
||||||
@ -342,6 +352,8 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100)
|
self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100)
|
||||||
self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
|
self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
|
||||||
self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
|
self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
|
||||||
|
self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
|
||||||
|
self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
|
||||||
|
|
||||||
def test_determine_ext(self):
|
def test_determine_ext(self):
|
||||||
self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
|
self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
|
||||||
@ -539,6 +551,7 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(parse_duration('87 Min.'), 5220)
|
self.assertEqual(parse_duration('87 Min.'), 5220)
|
||||||
self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
|
self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
|
||||||
self.assertEqual(parse_duration('PT00H03M30SZ'), 210)
|
self.assertEqual(parse_duration('PT00H03M30SZ'), 210)
|
||||||
|
self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88)
|
||||||
|
|
||||||
def test_fix_xml_ampersands(self):
|
def test_fix_xml_ampersands(self):
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
@ -657,6 +670,17 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
|
self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
|
||||||
self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
|
self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
|
||||||
|
|
||||||
|
def test_merge_dicts(self):
|
||||||
|
self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
|
||||||
|
self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
|
||||||
|
self.assertEqual(merge_dicts({'a': 1}, {'a': None}), {'a': 1})
|
||||||
|
self.assertEqual(merge_dicts({'a': 1}, {'a': ''}), {'a': 1})
|
||||||
|
self.assertEqual(merge_dicts({'a': 1}, {}), {'a': 1})
|
||||||
|
self.assertEqual(merge_dicts({'a': None}, {'a': 1}), {'a': 1})
|
||||||
|
self.assertEqual(merge_dicts({'a': ''}, {'a': 1}), {'a': ''})
|
||||||
|
self.assertEqual(merge_dicts({'a': ''}, {'a': 'abc'}), {'a': 'abc'})
|
||||||
|
self.assertEqual(merge_dicts({'a': None}, {'a': ''}, {'a': 'abc'}), {'a': 'abc'})
|
||||||
|
|
||||||
def test_encode_compat_str(self):
|
def test_encode_compat_str(self):
|
||||||
self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
|
self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
|
||||||
self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
|
self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
|
||||||
@ -811,6 +835,9 @@ class TestUtil(unittest.TestCase):
|
|||||||
inp = '''{"duration": "00:01:07"}'''
|
inp = '''{"duration": "00:01:07"}'''
|
||||||
self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')
|
self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')
|
||||||
|
|
||||||
|
inp = '''{segments: [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}'''
|
||||||
|
self.assertEqual(js_to_json(inp), '''{"segments": [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}''')
|
||||||
|
|
||||||
def test_js_to_json_edgecases(self):
|
def test_js_to_json_edgecases(self):
|
||||||
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
|
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
|
||||||
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
|
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
|
||||||
@ -882,6 +909,13 @@ class TestUtil(unittest.TestCase):
|
|||||||
on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
|
on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
|
||||||
self.assertEqual(json.loads(on), {'42': 42})
|
self.assertEqual(json.loads(on), {'42': 42})
|
||||||
|
|
||||||
|
on = js_to_json('{42:4.2e1}')
|
||||||
|
self.assertEqual(json.loads(on), {'42': 42.0})
|
||||||
|
|
||||||
|
def test_js_to_json_malformed(self):
|
||||||
|
self.assertEqual(js_to_json('42a1'), '42"a1"')
|
||||||
|
self.assertEqual(js_to_json('42a-1'), '42"a"-1')
|
||||||
|
|
||||||
def test_extract_attributes(self):
|
def test_extract_attributes(self):
|
||||||
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
||||||
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
|
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
|
||||||
@ -962,6 +996,16 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(parse_count('1.1kk '), 1100000)
|
self.assertEqual(parse_count('1.1kk '), 1100000)
|
||||||
self.assertEqual(parse_count('1.1kk views'), 1100000)
|
self.assertEqual(parse_count('1.1kk views'), 1100000)
|
||||||
|
|
||||||
|
def test_parse_resolution(self):
|
||||||
|
self.assertEqual(parse_resolution(None), {})
|
||||||
|
self.assertEqual(parse_resolution(''), {})
|
||||||
|
self.assertEqual(parse_resolution('1920x1080'), {'width': 1920, 'height': 1080})
|
||||||
|
self.assertEqual(parse_resolution('1920×1080'), {'width': 1920, 'height': 1080})
|
||||||
|
self.assertEqual(parse_resolution('1920 x 1080'), {'width': 1920, 'height': 1080})
|
||||||
|
self.assertEqual(parse_resolution('720p'), {'height': 720})
|
||||||
|
self.assertEqual(parse_resolution('4k'), {'height': 2160})
|
||||||
|
self.assertEqual(parse_resolution('8K'), {'height': 4320})
|
||||||
|
|
||||||
def test_version_tuple(self):
|
def test_version_tuple(self):
|
||||||
self.assertEqual(version_tuple('1'), (1,))
|
self.assertEqual(version_tuple('1'), (1,))
|
||||||
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
|
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
|
||||||
@ -1040,6 +1084,18 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
|
|||||||
self.assertFalse(match_str(
|
self.assertFalse(match_str(
|
||||||
'like_count > 100 & dislike_count <? 50 & description',
|
'like_count > 100 & dislike_count <? 50 & description',
|
||||||
{'like_count': 190, 'dislike_count': 10}))
|
{'like_count': 190, 'dislike_count': 10}))
|
||||||
|
self.assertTrue(match_str('is_live', {'is_live': True}))
|
||||||
|
self.assertFalse(match_str('is_live', {'is_live': False}))
|
||||||
|
self.assertFalse(match_str('is_live', {'is_live': None}))
|
||||||
|
self.assertFalse(match_str('is_live', {}))
|
||||||
|
self.assertFalse(match_str('!is_live', {'is_live': True}))
|
||||||
|
self.assertTrue(match_str('!is_live', {'is_live': False}))
|
||||||
|
self.assertTrue(match_str('!is_live', {'is_live': None}))
|
||||||
|
self.assertTrue(match_str('!is_live', {}))
|
||||||
|
self.assertTrue(match_str('title', {'title': 'abc'}))
|
||||||
|
self.assertTrue(match_str('title', {'title': ''}))
|
||||||
|
self.assertFalse(match_str('!title', {'title': 'abc'}))
|
||||||
|
self.assertFalse(match_str('!title', {'title': ''}))
|
||||||
|
|
||||||
def test_parse_dfxp_time_expr(self):
|
def test_parse_dfxp_time_expr(self):
|
||||||
self.assertEqual(parse_dfxp_time_expr(None), None)
|
self.assertEqual(parse_dfxp_time_expr(None), None)
|
||||||
@ -1063,7 +1119,7 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
|
|||||||
<p begin="3" dur="-1">Ignored, three</p>
|
<p begin="3" dur="-1">Ignored, three</p>
|
||||||
</div>
|
</div>
|
||||||
</body>
|
</body>
|
||||||
</tt>'''
|
</tt>'''.encode('utf-8')
|
||||||
srt_data = '''1
|
srt_data = '''1
|
||||||
00:00:00,000 --> 00:00:01,000
|
00:00:00,000 --> 00:00:01,000
|
||||||
The following line contains Chinese characters and special symbols
|
The following line contains Chinese characters and special symbols
|
||||||
@ -1088,7 +1144,7 @@ Line
|
|||||||
<p begin="0" end="1">The first line</p>
|
<p begin="0" end="1">The first line</p>
|
||||||
</div>
|
</div>
|
||||||
</body>
|
</body>
|
||||||
</tt>'''
|
</tt>'''.encode('utf-8')
|
||||||
srt_data = '''1
|
srt_data = '''1
|
||||||
00:00:00,000 --> 00:00:01,000
|
00:00:00,000 --> 00:00:01,000
|
||||||
The first line
|
The first line
|
||||||
@ -1114,7 +1170,7 @@ The first line
|
|||||||
<p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
|
<p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
|
||||||
</div>
|
</div>
|
||||||
</body>
|
</body>
|
||||||
</tt>'''
|
</tt>'''.encode('utf-8')
|
||||||
srt_data = '''1
|
srt_data = '''1
|
||||||
00:00:02,080 --> 00:00:05,839
|
00:00:02,080 --> 00:00:05,839
|
||||||
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
|
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
|
||||||
@ -1137,6 +1193,26 @@ part 3</font></u>
|
|||||||
'''
|
'''
|
||||||
self.assertEqual(dfxp2srt(dfxp_data_with_style), srt_data)
|
self.assertEqual(dfxp2srt(dfxp_data_with_style), srt_data)
|
||||||
|
|
||||||
|
dfxp_data_non_utf8 = '''<?xml version="1.0" encoding="UTF-16"?>
|
||||||
|
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
|
||||||
|
<body>
|
||||||
|
<div xml:lang="en">
|
||||||
|
<p begin="0" end="1">Line 1</p>
|
||||||
|
<p begin="1" end="2">第二行</p>
|
||||||
|
</div>
|
||||||
|
</body>
|
||||||
|
</tt>'''.encode('utf-16')
|
||||||
|
srt_data = '''1
|
||||||
|
00:00:00,000 --> 00:00:01,000
|
||||||
|
Line 1
|
||||||
|
|
||||||
|
2
|
||||||
|
00:00:01,000 --> 00:00:02,000
|
||||||
|
第二行
|
||||||
|
|
||||||
|
'''
|
||||||
|
self.assertEqual(dfxp2srt(dfxp_data_non_utf8), srt_data)
|
||||||
|
|
||||||
def test_cli_option(self):
|
def test_cli_option(self):
|
||||||
self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
|
self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
|
||||||
self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
|
self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
|
||||||
|
@ -61,7 +61,7 @@ class TestYoutubeLists(unittest.TestCase):
|
|||||||
dl = FakeYDL()
|
dl = FakeYDL()
|
||||||
dl.params['extract_flat'] = True
|
dl.params['extract_flat'] = True
|
||||||
ie = YoutubePlaylistIE(dl)
|
ie = YoutubePlaylistIE(dl)
|
||||||
result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
|
result = ie.extract('https://www.youtube.com/playlist?list=PL-KKIb8rvtMSrAO9YFbeM6UQrAqoFTUWv')
|
||||||
self.assertIsPlaylist(result)
|
self.assertIsPlaylist(result)
|
||||||
for entry in result['entries']:
|
for entry in result['entries']:
|
||||||
self.assertTrue(entry.get('title'))
|
self.assertTrue(entry.get('title'))
|
||||||
|
10
test/testdata/f4m/custom_base_url.f4m
vendored
Normal file
10
test/testdata/f4m/custom_base_url.f4m
vendored
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<manifest xmlns="http://ns.adobe.com/f4m/1.0">
|
||||||
|
<streamType>recorded</streamType>
|
||||||
|
<baseURL>http://vod.livestream.com/events/0000000000673980/</baseURL>
|
||||||
|
<duration>269.293</duration>
|
||||||
|
<bootstrapInfo profile="named" id="bootstrap_1">AAAAm2Fic3QAAAAAAAAAAQAAAAPoAAAAAAAEG+0AAAAAAAAAAAAAAAAAAQAAABlhc3J0AAAAAAAAAAABAAAAAQAAAC4BAAAAVmFmcnQAAAAAAAAD6AAAAAAEAAAAAQAAAAAAAAAAAAAXcAAAAC0AAAAAAAQHQAAAE5UAAAAuAAAAAAAEGtUAAAEYAAAAAAAAAAAAAAAAAAAAAAA=</bootstrapInfo>
|
||||||
|
<media url="b90f532f-b0f6-4f4e-8289-706d490b2fd8_2292" bootstrapInfoId="bootstrap_1" bitrate="2148" width="1280" height="720" videoCodec="avc1.4d401f" audioCodec="mp4a.40.2">
|
||||||
|
<metadata>AgAKb25NZXRhRGF0YQgAAAAIAAhkdXJhdGlvbgBAcNSwIMSbpgAFd2lkdGgAQJQAAAAAAAAABmhlaWdodABAhoAAAAAAAAAJZnJhbWVyYXRlAEA4/7DoLwW3AA12aWRlb2RhdGFyYXRlAECe1DLgjcobAAx2aWRlb2NvZGVjaWQAQBwAAAAAAAAADWF1ZGlvZGF0YXJhdGUAQGSimlvaPKQADGF1ZGlvY29kZWNpZABAJAAAAAAAAAAACQ==</metadata>
|
||||||
|
</media>
|
||||||
|
</manifest>
|
18
test/testdata/mpd/float_duration.mpd
vendored
Normal file
18
test/testdata/mpd/float_duration.mpd
vendored
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<MPD xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="urn:mpeg:dash:schema:mpd:2011" type="static" minBufferTime="PT2S" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT6014S">
|
||||||
|
<Period bitstreamSwitching="true">
|
||||||
|
<AdaptationSet mimeType="audio/mp4" codecs="mp4a.40.2" startWithSAP="1" segmentAlignment="true">
|
||||||
|
<SegmentTemplate timescale="1000000" presentationTimeOffset="0" initialization="ai_$RepresentationID$.mp4d" media="a_$RepresentationID$_$Number$.mp4d" duration="2000000.0" startNumber="0"></SegmentTemplate>
|
||||||
|
<Representation id="318597" bandwidth="61587"></Representation>
|
||||||
|
</AdaptationSet>
|
||||||
|
<AdaptationSet mimeType="video/mp4" startWithSAP="1" segmentAlignment="true">
|
||||||
|
<SegmentTemplate timescale="1000000" presentationTimeOffset="0" initialization="vi_$RepresentationID$.mp4d" media="v_$RepresentationID$_$Number$.mp4d" duration="2000000.0" startNumber="0"></SegmentTemplate>
|
||||||
|
<Representation id="318597" codecs="avc1.42001f" width="340" height="192" bandwidth="318597"></Representation>
|
||||||
|
<Representation id="638590" codecs="avc1.42001f" width="512" height="288" bandwidth="638590"></Representation>
|
||||||
|
<Representation id="1022565" codecs="avc1.4d001f" width="688" height="384" bandwidth="1022565"></Representation>
|
||||||
|
<Representation id="2046506" codecs="avc1.4d001f" width="1024" height="576" bandwidth="2046506"></Representation>
|
||||||
|
<Representation id="3998017" codecs="avc1.640029" width="1280" height="720" bandwidth="3998017"></Representation>
|
||||||
|
<Representation id="5997485" codecs="avc1.640032" width="1920" height="1080" bandwidth="5997485"></Representation>
|
||||||
|
</AdaptationSet>
|
||||||
|
</Period>
|
||||||
|
</MPD>
|
218
test/testdata/mpd/urls_only.mpd
vendored
Normal file
218
test/testdata/mpd/urls_only.mpd
vendored
Normal file
@ -0,0 +1,218 @@
|
|||||||
|
<?xml version="1.0" ?>
|
||||||
|
<MPD maxSegmentDuration="PT0H0M10.000S" mediaPresentationDuration="PT0H4M1.728S" minBufferTime="PT1.500S" profiles="urn:mpeg:dash:profile:isoff-main:2011" type="static" xmlns="urn:mpeg:dash:schema:mpd:2011">
|
||||||
|
<Period duration="PT0H4M1.728S">
|
||||||
|
<AdaptationSet bitstreamSwitching="true" lang="und" maxHeight="1080" maxWidth="1920" par="16:9" segmentAlignment="true">
|
||||||
|
<ContentComponent contentType="video" id="1"/>
|
||||||
|
<Representation audioSamplingRate="44100" bandwidth="200000" codecs="avc3.42c01e,mp4a.40.2" frameRate="25" height="144" id="h264_aac_144p_m4s" mimeType="video/mp4" sar="1:1" startWithSAP="1" width="256">
|
||||||
|
<SegmentList duration="10000" timescale="1000">
|
||||||
|
<Initialization sourceURL="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/init/432f65a0.mp4"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/0/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/1/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/2/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/3/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/4/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/5/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/6/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/7/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/8/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/9/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/10/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/11/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/12/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/13/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/14/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/15/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/16/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/17/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/18/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/19/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/20/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/21/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/22/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/23/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_144p_m4s/24/432f65a0.m4s"/>
|
||||||
|
</SegmentList>
|
||||||
|
</Representation>
|
||||||
|
<Representation audioSamplingRate="44100" bandwidth="400000" codecs="avc3.42c01e,mp4a.40.2" frameRate="25" height="240" id="h264_aac_240p_m4s" mimeType="video/mp4" sar="160:159" startWithSAP="1" width="424">
|
||||||
|
<SegmentList duration="10000" timescale="1000">
|
||||||
|
<Initialization sourceURL="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/init/432f65a0.mp4"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/0/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/1/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/2/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/3/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/4/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/5/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/6/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/7/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/8/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/9/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/10/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/11/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/12/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/13/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/14/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/15/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/16/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/17/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/18/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/19/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/20/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/21/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/22/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/23/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_240p_m4s/24/432f65a0.m4s"/>
|
||||||
|
</SegmentList>
|
||||||
|
</Representation>
|
||||||
|
<Representation audioSamplingRate="44100" bandwidth="800000" codecs="avc3.42c01e,mp4a.40.2" frameRate="25" height="360" id="h264_aac_360p_m4s" mimeType="video/mp4" sar="1:1" startWithSAP="1" width="640">
|
||||||
|
<SegmentList duration="10000" timescale="1000">
|
||||||
|
<Initialization sourceURL="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/init/432f65a0.mp4"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/0/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/1/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/2/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/3/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/4/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/5/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/6/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/7/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/8/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/9/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/10/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/11/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/12/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/13/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/14/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/15/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/16/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/17/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/18/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/19/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/20/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/21/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/22/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/23/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_360p_m4s/24/432f65a0.m4s"/>
|
||||||
|
</SegmentList>
|
||||||
|
</Representation>
|
||||||
|
<Representation audioSamplingRate="44100" bandwidth="1200000" codecs="avc3.42c01e,mp4a.40.2" frameRate="25" height="480" id="h264_aac_480p_m4s" mimeType="video/mp4" sar="320:321" startWithSAP="1" width="856">
|
||||||
|
<SegmentList duration="10000" timescale="1000">
|
||||||
|
<Initialization sourceURL="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/init/432f65a0.mp4"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/0/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/1/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/2/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/3/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/4/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/5/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/6/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/7/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/8/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/9/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/10/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/11/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/12/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/13/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/14/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/15/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/16/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/17/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/18/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/19/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/20/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/21/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/22/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/23/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_480p_m4s/24/432f65a0.m4s"/>
|
||||||
|
</SegmentList>
|
||||||
|
</Representation>
|
||||||
|
<Representation audioSamplingRate="44100" bandwidth="1600000" codecs="avc3.42c01e,mp4a.40.2" frameRate="25" height="576" id="h264_aac_576p_m4s" mimeType="video/mp4" sar="1:1" startWithSAP="1" width="1024">
|
||||||
|
<SegmentList duration="10000" timescale="1000">
|
||||||
|
<Initialization sourceURL="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/init/432f65a0.mp4"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/0/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/1/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/2/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/3/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/4/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/5/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/6/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/7/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/8/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/9/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/10/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/11/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/12/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/13/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/14/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/15/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/16/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/17/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/18/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/19/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/20/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/21/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/22/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/23/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_576p_m4s/24/432f65a0.m4s"/>
|
||||||
|
</SegmentList>
|
||||||
|
</Representation>
|
||||||
|
<Representation audioSamplingRate="44100" bandwidth="2400000" codecs="avc3.42c01e,mp4a.40.2" frameRate="25" height="720" id="h264_aac_720p_m4s" mimeType="video/mp4" sar="1:1" startWithSAP="1" width="1280">
|
||||||
|
<SegmentList duration="10000" timescale="1000">
|
||||||
|
<Initialization sourceURL="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/init/432f65a0.mp4"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/0/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/1/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/2/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/3/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/4/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/5/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/6/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/7/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/8/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/9/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/10/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/11/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/12/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/13/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/14/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/15/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/16/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/17/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/18/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/19/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/20/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/21/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/22/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/23/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_720p_m4s/24/432f65a0.m4s"/>
|
||||||
|
</SegmentList>
|
||||||
|
</Representation>
|
||||||
|
<Representation audioSamplingRate="44100" bandwidth="4400000" codecs="avc3.42c01e,mp4a.40.2" frameRate="25" height="1080" id="h264_aac_1080p_m4s" mimeType="video/mp4" sar="1:1" startWithSAP="1" width="1920">
|
||||||
|
<SegmentList duration="10000" timescale="1000">
|
||||||
|
<Initialization sourceURL="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/init/432f65a0.mp4"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/0/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/1/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/2/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/3/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/4/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/5/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/6/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/7/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/8/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/9/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/10/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/11/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/12/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/13/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/14/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/15/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/16/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/17/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/18/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/19/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/20/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/21/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/22/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/23/432f65a0.m4s"/>
|
||||||
|
<SegmentURL media="../vd_5999c902ea707c67d8e267a9_1503250723/h264_aac_1080p_m4s/24/432f65a0.m4s"/>
|
||||||
|
</SegmentList>
|
||||||
|
</Representation>
|
||||||
|
</AdaptationSet>
|
||||||
|
</Period>
|
||||||
|
</MPD>
|
34
test/testdata/xspf/foo_xspf.xspf
vendored
Normal file
34
test/testdata/xspf/foo_xspf.xspf
vendored
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<playlist version="1" xmlns="http://xspf.org/ns/0/">
|
||||||
|
<date>2018-03-09T18:01:43Z</date>
|
||||||
|
<trackList>
|
||||||
|
<track>
|
||||||
|
<location>cd1/track%201.mp3</location>
|
||||||
|
<title>Pandemonium</title>
|
||||||
|
<creator>Foilverb</creator>
|
||||||
|
<annotation>Visit http://bigbrother404.bandcamp.com</annotation>
|
||||||
|
<album>Pandemonium EP</album>
|
||||||
|
<trackNum>1</trackNum>
|
||||||
|
<duration>202416</duration>
|
||||||
|
</track>
|
||||||
|
<track>
|
||||||
|
<location>../%E3%83%88%E3%83%A9%E3%83%83%E3%82%AF%E3%80%80%EF%BC%92.mp3</location>
|
||||||
|
<title>Final Cartridge (Nichico Twelve Remix)</title>
|
||||||
|
<annotation>Visit http://bigbrother404.bandcamp.com</annotation>
|
||||||
|
<creator>Foilverb</creator>
|
||||||
|
<album>Pandemonium EP</album>
|
||||||
|
<trackNum>2</trackNum>
|
||||||
|
<duration>255857</duration>
|
||||||
|
</track>
|
||||||
|
<track>
|
||||||
|
<location>track3.mp3</location>
|
||||||
|
<location>https://example.com/track3.mp3</location>
|
||||||
|
<title>Rebuilding Nightingale</title>
|
||||||
|
<annotation>Visit http://bigbrother404.bandcamp.com</annotation>
|
||||||
|
<creator>Foilverb</creator>
|
||||||
|
<album>Pandemonium EP</album>
|
||||||
|
<trackNum>3</trackNum>
|
||||||
|
<duration>287915</duration>
|
||||||
|
</track>
|
||||||
|
</trackList>
|
||||||
|
</playlist>
|
@ -65,6 +65,7 @@ from .utils import (
|
|||||||
locked_file,
|
locked_file,
|
||||||
make_HTTPS_handler,
|
make_HTTPS_handler,
|
||||||
MaxDownloadsReached,
|
MaxDownloadsReached,
|
||||||
|
orderedSet,
|
||||||
PagedList,
|
PagedList,
|
||||||
parse_filesize,
|
parse_filesize,
|
||||||
PerRequestProxyHandler,
|
PerRequestProxyHandler,
|
||||||
@ -92,6 +93,7 @@ from .utils import (
|
|||||||
)
|
)
|
||||||
from .cache import Cache
|
from .cache import Cache
|
||||||
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
|
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
|
||||||
|
from .extractor.openload import PhantomJSwrapper
|
||||||
from .downloader import get_suitable_downloader
|
from .downloader import get_suitable_downloader
|
||||||
from .downloader.rtmp import rtmpdump_version
|
from .downloader.rtmp import rtmpdump_version
|
||||||
from .postprocessor import (
|
from .postprocessor import (
|
||||||
@ -296,13 +298,20 @@ class YoutubeDL(object):
|
|||||||
the downloader (see youtube_dl/downloader/common.py):
|
the downloader (see youtube_dl/downloader/common.py):
|
||||||
nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
|
nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
|
||||||
noresizebuffer, retries, continuedl, noprogress, consoletitle,
|
noresizebuffer, retries, continuedl, noprogress, consoletitle,
|
||||||
xattr_set_filesize, external_downloader_args, hls_use_mpegts.
|
xattr_set_filesize, external_downloader_args, hls_use_mpegts,
|
||||||
|
http_chunk_size.
|
||||||
|
|
||||||
The following options are used by the post processors:
|
The following options are used by the post processors:
|
||||||
prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
|
prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
|
||||||
otherwise prefer avconv.
|
otherwise prefer avconv.
|
||||||
postprocessor_args: A list of additional command-line arguments for the
|
postprocessor_args: A list of additional command-line arguments for the
|
||||||
postprocessor.
|
postprocessor.
|
||||||
|
|
||||||
|
The following options are used by the Youtube extractor:
|
||||||
|
youtube_include_dash_manifest: If True (default), DASH manifests and related
|
||||||
|
data will be downloaded and processed by extractor.
|
||||||
|
You can reduce network I/O by disabling it if you don't
|
||||||
|
care about DASH.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
_NUMERIC_FIELDS = set((
|
_NUMERIC_FIELDS = set((
|
||||||
@ -523,6 +532,8 @@ class YoutubeDL(object):
|
|||||||
def save_console_title(self):
|
def save_console_title(self):
|
||||||
if not self.params.get('consoletitle', False):
|
if not self.params.get('consoletitle', False):
|
||||||
return
|
return
|
||||||
|
if self.params.get('simulate', False):
|
||||||
|
return
|
||||||
if compat_os_name != 'nt' and 'TERM' in os.environ:
|
if compat_os_name != 'nt' and 'TERM' in os.environ:
|
||||||
# Save the title on stack
|
# Save the title on stack
|
||||||
self._write_string('\033[22;0t', self._screen_file)
|
self._write_string('\033[22;0t', self._screen_file)
|
||||||
@ -530,6 +541,8 @@ class YoutubeDL(object):
|
|||||||
def restore_console_title(self):
|
def restore_console_title(self):
|
||||||
if not self.params.get('consoletitle', False):
|
if not self.params.get('consoletitle', False):
|
||||||
return
|
return
|
||||||
|
if self.params.get('simulate', False):
|
||||||
|
return
|
||||||
if compat_os_name != 'nt' and 'TERM' in os.environ:
|
if compat_os_name != 'nt' and 'TERM' in os.environ:
|
||||||
# Restore the title from stack
|
# Restore the title from stack
|
||||||
self._write_string('\033[23;0t', self._screen_file)
|
self._write_string('\033[23;0t', self._screen_file)
|
||||||
@ -901,15 +914,25 @@ class YoutubeDL(object):
|
|||||||
yield int(item)
|
yield int(item)
|
||||||
else:
|
else:
|
||||||
yield int(string_segment)
|
yield int(string_segment)
|
||||||
playlistitems = iter_playlistitems(playlistitems_str)
|
playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
|
||||||
|
|
||||||
ie_entries = ie_result['entries']
|
ie_entries = ie_result['entries']
|
||||||
|
|
||||||
|
def make_playlistitems_entries(list_ie_entries):
|
||||||
|
num_entries = len(list_ie_entries)
|
||||||
|
return [
|
||||||
|
list_ie_entries[i - 1] for i in playlistitems
|
||||||
|
if -num_entries <= i - 1 < num_entries]
|
||||||
|
|
||||||
|
def report_download(num_entries):
|
||||||
|
self.to_screen(
|
||||||
|
'[%s] playlist %s: Downloading %d videos' %
|
||||||
|
(ie_result['extractor'], playlist, num_entries))
|
||||||
|
|
||||||
if isinstance(ie_entries, list):
|
if isinstance(ie_entries, list):
|
||||||
n_all_entries = len(ie_entries)
|
n_all_entries = len(ie_entries)
|
||||||
if playlistitems:
|
if playlistitems:
|
||||||
entries = [
|
entries = make_playlistitems_entries(ie_entries)
|
||||||
ie_entries[i - 1] for i in playlistitems
|
|
||||||
if -n_all_entries <= i - 1 < n_all_entries]
|
|
||||||
else:
|
else:
|
||||||
entries = ie_entries[playliststart:playlistend]
|
entries = ie_entries[playliststart:playlistend]
|
||||||
n_entries = len(entries)
|
n_entries = len(entries)
|
||||||
@ -927,20 +950,16 @@ class YoutubeDL(object):
|
|||||||
entries = ie_entries.getslice(
|
entries = ie_entries.getslice(
|
||||||
playliststart, playlistend)
|
playliststart, playlistend)
|
||||||
n_entries = len(entries)
|
n_entries = len(entries)
|
||||||
self.to_screen(
|
report_download(n_entries)
|
||||||
'[%s] playlist %s: Downloading %d videos' %
|
|
||||||
(ie_result['extractor'], playlist, n_entries))
|
|
||||||
else: # iterable
|
else: # iterable
|
||||||
if playlistitems:
|
if playlistitems:
|
||||||
entry_list = list(ie_entries)
|
entries = make_playlistitems_entries(list(itertools.islice(
|
||||||
entries = [entry_list[i - 1] for i in playlistitems]
|
ie_entries, 0, max(playlistitems))))
|
||||||
else:
|
else:
|
||||||
entries = list(itertools.islice(
|
entries = list(itertools.islice(
|
||||||
ie_entries, playliststart, playlistend))
|
ie_entries, playliststart, playlistend))
|
||||||
n_entries = len(entries)
|
n_entries = len(entries)
|
||||||
self.to_screen(
|
report_download(n_entries)
|
||||||
'[%s] playlist %s: Downloading %d videos' %
|
|
||||||
(ie_result['extractor'], playlist, n_entries))
|
|
||||||
|
|
||||||
if self.params.get('playlistreverse', False):
|
if self.params.get('playlistreverse', False):
|
||||||
entries = entries[::-1]
|
entries = entries[::-1]
|
||||||
@ -961,6 +980,8 @@ class YoutubeDL(object):
|
|||||||
'playlist': playlist,
|
'playlist': playlist,
|
||||||
'playlist_id': ie_result.get('id'),
|
'playlist_id': ie_result.get('id'),
|
||||||
'playlist_title': ie_result.get('title'),
|
'playlist_title': ie_result.get('title'),
|
||||||
|
'playlist_uploader': ie_result.get('uploader'),
|
||||||
|
'playlist_uploader_id': ie_result.get('uploader_id'),
|
||||||
'playlist_index': i + playliststart,
|
'playlist_index': i + playliststart,
|
||||||
'extractor': ie_result['extractor'],
|
'extractor': ie_result['extractor'],
|
||||||
'webpage_url': ie_result['webpage_url'],
|
'webpage_url': ie_result['webpage_url'],
|
||||||
@ -1016,7 +1037,7 @@ class YoutubeDL(object):
|
|||||||
'!=': operator.ne,
|
'!=': operator.ne,
|
||||||
}
|
}
|
||||||
operator_rex = re.compile(r'''(?x)\s*
|
operator_rex = re.compile(r'''(?x)\s*
|
||||||
(?P<key>width|height|tbr|abr|vbr|asr|filesize|fps)
|
(?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
|
||||||
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
|
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
|
||||||
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
|
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
|
||||||
$
|
$
|
||||||
@ -1065,22 +1086,27 @@ class YoutubeDL(object):
|
|||||||
return _filter
|
return _filter
|
||||||
|
|
||||||
def _default_format_spec(self, info_dict, download=True):
|
def _default_format_spec(self, info_dict, download=True):
|
||||||
req_format_list = []
|
|
||||||
|
|
||||||
def can_have_partial_formats():
|
def can_merge():
|
||||||
if self.params.get('simulate', False):
|
|
||||||
return True
|
|
||||||
if not download:
|
|
||||||
return True
|
|
||||||
if self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-':
|
|
||||||
return False
|
|
||||||
if info_dict.get('is_live'):
|
|
||||||
return False
|
|
||||||
merger = FFmpegMergerPP(self)
|
merger = FFmpegMergerPP(self)
|
||||||
return merger.available and merger.can_merge()
|
return merger.available and merger.can_merge()
|
||||||
if can_have_partial_formats():
|
|
||||||
req_format_list.append('bestvideo+bestaudio')
|
def prefer_best():
|
||||||
req_format_list.append('best')
|
if self.params.get('simulate', False):
|
||||||
|
return False
|
||||||
|
if not download:
|
||||||
|
return False
|
||||||
|
if self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-':
|
||||||
|
return True
|
||||||
|
if info_dict.get('is_live'):
|
||||||
|
return True
|
||||||
|
if not can_merge():
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
req_format_list = ['bestvideo+bestaudio', 'best']
|
||||||
|
if prefer_best():
|
||||||
|
req_format_list.reverse()
|
||||||
return '/'.join(req_format_list)
|
return '/'.join(req_format_list)
|
||||||
|
|
||||||
def build_format_selector(self, format_spec):
|
def build_format_selector(self, format_spec):
|
||||||
@ -1483,12 +1509,14 @@ class YoutubeDL(object):
|
|||||||
|
|
||||||
def is_wellformed(f):
|
def is_wellformed(f):
|
||||||
url = f.get('url')
|
url = f.get('url')
|
||||||
valid_url = url and isinstance(url, compat_str)
|
if not url:
|
||||||
if not valid_url:
|
|
||||||
self.report_warning(
|
self.report_warning(
|
||||||
'"url" field is missing or empty - skipping format, '
|
'"url" field is missing or empty - skipping format, '
|
||||||
'there is an error in extractor')
|
'there is an error in extractor')
|
||||||
return valid_url
|
return False
|
||||||
|
if isinstance(url, bytes):
|
||||||
|
sanitize_string_field(f, 'url')
|
||||||
|
return True
|
||||||
|
|
||||||
# Filter out malformed formats for better extraction robustness
|
# Filter out malformed formats for better extraction robustness
|
||||||
formats = list(filter(is_wellformed, formats))
|
formats = list(filter(is_wellformed, formats))
|
||||||
@ -1500,7 +1528,7 @@ class YoutubeDL(object):
|
|||||||
sanitize_string_field(format, 'format_id')
|
sanitize_string_field(format, 'format_id')
|
||||||
sanitize_numeric_fields(format)
|
sanitize_numeric_fields(format)
|
||||||
format['url'] = sanitize_url(format['url'])
|
format['url'] = sanitize_url(format['url'])
|
||||||
if format.get('format_id') is None:
|
if not format.get('format_id'):
|
||||||
format['format_id'] = compat_str(i)
|
format['format_id'] = compat_str(i)
|
||||||
else:
|
else:
|
||||||
# Sanitize format_id from characters used in format selector expression
|
# Sanitize format_id from characters used in format selector expression
|
||||||
@ -1708,12 +1736,17 @@ class YoutubeDL(object):
|
|||||||
if filename is None:
|
if filename is None:
|
||||||
return
|
return
|
||||||
|
|
||||||
try:
|
def ensure_dir_exists(path):
|
||||||
dn = os.path.dirname(sanitize_path(encodeFilename(filename)))
|
try:
|
||||||
if dn and not os.path.exists(dn):
|
dn = os.path.dirname(path)
|
||||||
os.makedirs(dn)
|
if dn and not os.path.exists(dn):
|
||||||
except (OSError, IOError) as err:
|
os.makedirs(dn)
|
||||||
self.report_error('unable to create directory ' + error_to_compat_str(err))
|
return True
|
||||||
|
except (OSError, IOError) as err:
|
||||||
|
self.report_error('unable to create directory ' + error_to_compat_str(err))
|
||||||
|
return False
|
||||||
|
|
||||||
|
if not ensure_dir_exists(sanitize_path(encodeFilename(filename))):
|
||||||
return
|
return
|
||||||
|
|
||||||
if self.params.get('writedescription', False):
|
if self.params.get('writedescription', False):
|
||||||
@ -1756,29 +1789,30 @@ class YoutubeDL(object):
|
|||||||
ie = self.get_info_extractor(info_dict['extractor_key'])
|
ie = self.get_info_extractor(info_dict['extractor_key'])
|
||||||
for sub_lang, sub_info in subtitles.items():
|
for sub_lang, sub_info in subtitles.items():
|
||||||
sub_format = sub_info['ext']
|
sub_format = sub_info['ext']
|
||||||
if sub_info.get('data') is not None:
|
sub_filename = subtitles_filename(filename, sub_lang, sub_format)
|
||||||
sub_data = sub_info['data']
|
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
|
||||||
|
self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
|
||||||
else:
|
else:
|
||||||
try:
|
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
|
||||||
sub_data = ie._download_webpage(
|
if sub_info.get('data') is not None:
|
||||||
sub_info['url'], info_dict['id'], note=False)
|
try:
|
||||||
except ExtractorError as err:
|
# Use newline='' to prevent conversion of newline characters
|
||||||
self.report_warning('Unable to download subtitle for "%s": %s' %
|
# See https://github.com/rg3/youtube-dl/issues/10268
|
||||||
(sub_lang, error_to_compat_str(err.cause)))
|
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
|
||||||
continue
|
subfile.write(sub_info['data'])
|
||||||
try:
|
except (OSError, IOError):
|
||||||
sub_filename = subtitles_filename(filename, sub_lang, sub_format)
|
self.report_error('Cannot write subtitles file ' + sub_filename)
|
||||||
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
|
return
|
||||||
self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
|
|
||||||
else:
|
else:
|
||||||
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
|
try:
|
||||||
# Use newline='' to prevent conversion of newline characters
|
sub_data = ie._request_webpage(
|
||||||
# See https://github.com/rg3/youtube-dl/issues/10268
|
sub_info['url'], info_dict['id'], note=False).read()
|
||||||
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
|
with io.open(encodeFilename(sub_filename), 'wb') as subfile:
|
||||||
subfile.write(sub_data)
|
subfile.write(sub_data)
|
||||||
except (OSError, IOError):
|
except (ExtractorError, IOError, OSError, ValueError) as err:
|
||||||
self.report_error('Cannot write subtitles file ' + sub_filename)
|
self.report_warning('Unable to download subtitle for "%s": %s' %
|
||||||
return
|
(sub_lang, error_to_compat_str(err)))
|
||||||
|
continue
|
||||||
|
|
||||||
if self.params.get('writeinfojson', False):
|
if self.params.get('writeinfojson', False):
|
||||||
infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
|
infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
|
||||||
@ -1819,7 +1853,7 @@ class YoutubeDL(object):
|
|||||||
def compatible_formats(formats):
|
def compatible_formats(formats):
|
||||||
video, audio = formats
|
video, audio = formats
|
||||||
# Check extension
|
# Check extension
|
||||||
video_ext, audio_ext = audio.get('ext'), video.get('ext')
|
video_ext, audio_ext = video.get('ext'), audio.get('ext')
|
||||||
if video_ext and audio_ext:
|
if video_ext and audio_ext:
|
||||||
COMPATIBLE_EXTS = (
|
COMPATIBLE_EXTS = (
|
||||||
('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
|
('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
|
||||||
@ -1851,8 +1885,11 @@ class YoutubeDL(object):
|
|||||||
for f in requested_formats:
|
for f in requested_formats:
|
||||||
new_info = dict(info_dict)
|
new_info = dict(info_dict)
|
||||||
new_info.update(f)
|
new_info.update(f)
|
||||||
fname = self.prepare_filename(new_info)
|
fname = prepend_extension(
|
||||||
fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext'])
|
self.prepare_filename(new_info),
|
||||||
|
'f%s' % f['format_id'], new_info['ext'])
|
||||||
|
if not ensure_dir_exists(fname):
|
||||||
|
return
|
||||||
downloaded.append(fname)
|
downloaded.append(fname)
|
||||||
partial_success = dl(fname, new_info)
|
partial_success = dl(fname, new_info)
|
||||||
success = success and partial_success
|
success = success and partial_success
|
||||||
@ -2201,11 +2238,20 @@ class YoutubeDL(object):
|
|||||||
sys.exc_clear()
|
sys.exc_clear()
|
||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
self._write_string('[debug] Python version %s - %s\n' % (
|
|
||||||
platform.python_version(), platform_name()))
|
def python_implementation():
|
||||||
|
impl_name = platform.python_implementation()
|
||||||
|
if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
|
||||||
|
return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
|
||||||
|
return impl_name
|
||||||
|
|
||||||
|
self._write_string('[debug] Python version %s (%s) - %s\n' % (
|
||||||
|
platform.python_version(), python_implementation(),
|
||||||
|
platform_name()))
|
||||||
|
|
||||||
exe_versions = FFmpegPostProcessor.get_versions(self)
|
exe_versions = FFmpegPostProcessor.get_versions(self)
|
||||||
exe_versions['rtmpdump'] = rtmpdump_version()
|
exe_versions['rtmpdump'] = rtmpdump_version()
|
||||||
|
exe_versions['phantomjs'] = PhantomJSwrapper._version()
|
||||||
exe_str = ', '.join(
|
exe_str = ', '.join(
|
||||||
'%s %s' % (exe, v)
|
'%s %s' % (exe, v)
|
||||||
for exe, v in sorted(exe_versions.items())
|
for exe, v in sorted(exe_versions.items())
|
||||||
|
@ -191,6 +191,11 @@ def _real_main(argv=None):
|
|||||||
if numeric_buffersize is None:
|
if numeric_buffersize is None:
|
||||||
parser.error('invalid buffer size specified')
|
parser.error('invalid buffer size specified')
|
||||||
opts.buffersize = numeric_buffersize
|
opts.buffersize = numeric_buffersize
|
||||||
|
if opts.http_chunk_size is not None:
|
||||||
|
numeric_chunksize = FileDownloader.parse_bytes(opts.http_chunk_size)
|
||||||
|
if not numeric_chunksize:
|
||||||
|
parser.error('invalid http chunk size specified')
|
||||||
|
opts.http_chunk_size = numeric_chunksize
|
||||||
if opts.playliststart <= 0:
|
if opts.playliststart <= 0:
|
||||||
raise ValueError('Playlist start must be positive')
|
raise ValueError('Playlist start must be positive')
|
||||||
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
|
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
|
||||||
@ -206,7 +211,7 @@ def _real_main(argv=None):
|
|||||||
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
|
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
|
||||||
parser.error('invalid video recode format specified')
|
parser.error('invalid video recode format specified')
|
||||||
if opts.convertsubtitles is not None:
|
if opts.convertsubtitles is not None:
|
||||||
if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
|
if opts.convertsubtitles not in ['srt', 'vtt', 'ass', 'lrc']:
|
||||||
parser.error('invalid subtitle format specified')
|
parser.error('invalid subtitle format specified')
|
||||||
|
|
||||||
if opts.date is not None:
|
if opts.date is not None:
|
||||||
@ -346,6 +351,7 @@ def _real_main(argv=None):
|
|||||||
'keep_fragments': opts.keep_fragments,
|
'keep_fragments': opts.keep_fragments,
|
||||||
'buffersize': opts.buffersize,
|
'buffersize': opts.buffersize,
|
||||||
'noresizebuffer': opts.noresizebuffer,
|
'noresizebuffer': opts.noresizebuffer,
|
||||||
|
'http_chunk_size': opts.http_chunk_size,
|
||||||
'continuedl': opts.continue_dl,
|
'continuedl': opts.continue_dl,
|
||||||
'noprogress': opts.noprogress,
|
'noprogress': opts.noprogress,
|
||||||
'progress_with_newline': opts.progress_with_newline,
|
'progress_with_newline': opts.progress_with_newline,
|
||||||
|
@ -1,8 +1,8 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import base64
|
|
||||||
from math import ceil
|
from math import ceil
|
||||||
|
|
||||||
|
from .compat import compat_b64decode
|
||||||
from .utils import bytes_to_intlist, intlist_to_bytes
|
from .utils import bytes_to_intlist, intlist_to_bytes
|
||||||
|
|
||||||
BLOCK_SIZE_BYTES = 16
|
BLOCK_SIZE_BYTES = 16
|
||||||
@ -180,7 +180,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
|
|||||||
"""
|
"""
|
||||||
NONCE_LENGTH_BYTES = 8
|
NONCE_LENGTH_BYTES = 8
|
||||||
|
|
||||||
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
|
data = bytes_to_intlist(compat_b64decode(data))
|
||||||
password = bytes_to_intlist(password.encode('utf-8'))
|
password = bytes_to_intlist(password.encode('utf-8'))
|
||||||
|
|
||||||
key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
|
key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
|
||||||
|
@ -1,13 +1,17 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import base64
|
||||||
import binascii
|
import binascii
|
||||||
import collections
|
import collections
|
||||||
|
import ctypes
|
||||||
import email
|
import email
|
||||||
import getpass
|
import getpass
|
||||||
import io
|
import io
|
||||||
|
import itertools
|
||||||
import optparse
|
import optparse
|
||||||
import os
|
import os
|
||||||
|
import platform
|
||||||
import re
|
import re
|
||||||
import shlex
|
import shlex
|
||||||
import shutil
|
import shutil
|
||||||
@ -15,7 +19,6 @@ import socket
|
|||||||
import struct
|
import struct
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
import itertools
|
|
||||||
import xml.etree.ElementTree
|
import xml.etree.ElementTree
|
||||||
|
|
||||||
|
|
||||||
@ -2894,19 +2897,72 @@ except TypeError:
|
|||||||
if isinstance(spec, compat_str):
|
if isinstance(spec, compat_str):
|
||||||
spec = spec.encode('ascii')
|
spec = spec.encode('ascii')
|
||||||
return struct.unpack(spec, *args)
|
return struct.unpack(spec, *args)
|
||||||
|
|
||||||
|
class compat_Struct(struct.Struct):
|
||||||
|
def __init__(self, fmt):
|
||||||
|
if isinstance(fmt, compat_str):
|
||||||
|
fmt = fmt.encode('ascii')
|
||||||
|
super(compat_Struct, self).__init__(fmt)
|
||||||
else:
|
else:
|
||||||
compat_struct_pack = struct.pack
|
compat_struct_pack = struct.pack
|
||||||
compat_struct_unpack = struct.unpack
|
compat_struct_unpack = struct.unpack
|
||||||
|
if platform.python_implementation() == 'IronPython' and sys.version_info < (2, 7, 8):
|
||||||
|
class compat_Struct(struct.Struct):
|
||||||
|
def unpack(self, string):
|
||||||
|
if not isinstance(string, buffer): # noqa: F821
|
||||||
|
string = buffer(string) # noqa: F821
|
||||||
|
return super(compat_Struct, self).unpack(string)
|
||||||
|
else:
|
||||||
|
compat_Struct = struct.Struct
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from future_builtins import zip as compat_zip
|
||||||
|
except ImportError: # not 2.6+ or is 3.x
|
||||||
|
try:
|
||||||
|
from itertools import izip as compat_zip # < 2.5 or 3.x
|
||||||
|
except ImportError:
|
||||||
|
compat_zip = zip
|
||||||
|
|
||||||
|
|
||||||
|
if sys.version_info < (3, 3):
|
||||||
|
def compat_b64decode(s, *args, **kwargs):
|
||||||
|
if isinstance(s, compat_str):
|
||||||
|
s = s.encode('ascii')
|
||||||
|
return base64.b64decode(s, *args, **kwargs)
|
||||||
|
else:
|
||||||
|
compat_b64decode = base64.b64decode
|
||||||
|
|
||||||
|
|
||||||
|
if platform.python_implementation() == 'PyPy' and sys.pypy_version_info < (5, 4, 0):
|
||||||
|
# PyPy2 prior to version 5.4.0 expects byte strings as Windows function
|
||||||
|
# names, see the original PyPy issue [1] and the youtube-dl one [2].
|
||||||
|
# 1. https://bitbucket.org/pypy/pypy/issues/2360/windows-ctypescdll-typeerror-function-name
|
||||||
|
# 2. https://github.com/rg3/youtube-dl/pull/4392
|
||||||
|
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
|
||||||
|
real = ctypes.WINFUNCTYPE(*args, **kwargs)
|
||||||
|
|
||||||
|
def resf(tpl, *args, **kwargs):
|
||||||
|
funcname, dll = tpl
|
||||||
|
return real((str(funcname), dll), *args, **kwargs)
|
||||||
|
|
||||||
|
return resf
|
||||||
|
else:
|
||||||
|
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
|
||||||
|
return ctypes.WINFUNCTYPE(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
__all__ = [
|
__all__ = [
|
||||||
'compat_HTMLParseError',
|
'compat_HTMLParseError',
|
||||||
'compat_HTMLParser',
|
'compat_HTMLParser',
|
||||||
'compat_HTTPError',
|
'compat_HTTPError',
|
||||||
|
'compat_Struct',
|
||||||
|
'compat_b64decode',
|
||||||
'compat_basestring',
|
'compat_basestring',
|
||||||
'compat_chr',
|
'compat_chr',
|
||||||
'compat_cookiejar',
|
'compat_cookiejar',
|
||||||
'compat_cookies',
|
'compat_cookies',
|
||||||
|
'compat_ctypes_WINFUNCTYPE',
|
||||||
'compat_etree_fromstring',
|
'compat_etree_fromstring',
|
||||||
'compat_etree_register_namespace',
|
'compat_etree_register_namespace',
|
||||||
'compat_expanduser',
|
'compat_expanduser',
|
||||||
@ -2948,5 +3004,6 @@ __all__ = [
|
|||||||
'compat_urlretrieve',
|
'compat_urlretrieve',
|
||||||
'compat_xml_parse_error',
|
'compat_xml_parse_error',
|
||||||
'compat_xpath',
|
'compat_xpath',
|
||||||
|
'compat_zip',
|
||||||
'workaround_optparse_bug9161',
|
'workaround_optparse_bug9161',
|
||||||
]
|
]
|
||||||
|
@ -49,6 +49,9 @@ class FileDownloader(object):
|
|||||||
external_downloader_args: A list of additional command-line arguments for the
|
external_downloader_args: A list of additional command-line arguments for the
|
||||||
external downloader.
|
external downloader.
|
||||||
hls_use_mpegts: Use the mpegts container for HLS videos.
|
hls_use_mpegts: Use the mpegts container for HLS videos.
|
||||||
|
http_chunk_size: Size of a chunk for chunk-based HTTP downloading. May be
|
||||||
|
useful for bypassing bandwidth throttling imposed by
|
||||||
|
a webserver (experimental)
|
||||||
|
|
||||||
Subclasses of this one must re-define the real_download method.
|
Subclasses of this one must re-define the real_download method.
|
||||||
"""
|
"""
|
||||||
@ -246,12 +249,13 @@ class FileDownloader(object):
|
|||||||
if self.params.get('noprogress', False):
|
if self.params.get('noprogress', False):
|
||||||
self.to_screen('[download] Download completed')
|
self.to_screen('[download] Download completed')
|
||||||
else:
|
else:
|
||||||
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
|
msg_template = '100%%'
|
||||||
|
if s.get('total_bytes') is not None:
|
||||||
|
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
|
||||||
|
msg_template += ' of %(_total_bytes_str)s'
|
||||||
if s.get('elapsed') is not None:
|
if s.get('elapsed') is not None:
|
||||||
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
|
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
|
||||||
msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
|
msg_template += ' in %(_elapsed_str)s'
|
||||||
else:
|
|
||||||
msg_template = '100%% of %(_total_bytes_str)s'
|
|
||||||
self._report_progress_status(
|
self._report_progress_status(
|
||||||
msg_template % s, is_last_line=True)
|
msg_template % s, is_last_line=True)
|
||||||
|
|
||||||
@ -304,11 +308,11 @@ class FileDownloader(object):
|
|||||||
"""Report attempt to resume at given byte."""
|
"""Report attempt to resume at given byte."""
|
||||||
self.to_screen('[download] Resuming download at byte %s' % resume_len)
|
self.to_screen('[download] Resuming download at byte %s' % resume_len)
|
||||||
|
|
||||||
def report_retry(self, count, retries):
|
def report_retry(self, err, count, retries):
|
||||||
"""Report retry in case of HTTP error 5xx"""
|
"""Report retry in case of HTTP error 5xx"""
|
||||||
self.to_screen(
|
self.to_screen(
|
||||||
'[download] Got server HTTP error. Retrying (attempt %d of %s)...'
|
'[download] Got server HTTP error: %s. Retrying (attempt %d of %s)...'
|
||||||
% (count, self.format_retries(retries)))
|
% (error_to_compat_str(err), count, self.format_retries(retries)))
|
||||||
|
|
||||||
def report_file_already_downloaded(self, file_name):
|
def report_file_already_downloaded(self, file_name):
|
||||||
"""Report file has already been fully downloaded."""
|
"""Report file has already been fully downloaded."""
|
||||||
|
@ -1,9 +1,10 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import os.path
|
import os.path
|
||||||
|
import re
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
import re
|
import time
|
||||||
|
|
||||||
from .common import FileDownloader
|
from .common import FileDownloader
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
@ -30,6 +31,7 @@ class ExternalFD(FileDownloader):
|
|||||||
tmpfilename = self.temp_name(filename)
|
tmpfilename = self.temp_name(filename)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
started = time.time()
|
||||||
retval = self._call_downloader(tmpfilename, info_dict)
|
retval = self._call_downloader(tmpfilename, info_dict)
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
if not info_dict.get('is_live'):
|
if not info_dict.get('is_live'):
|
||||||
@ -41,15 +43,20 @@ class ExternalFD(FileDownloader):
|
|||||||
self.to_screen('[%s] Interrupted by user' % self.get_basename())
|
self.to_screen('[%s] Interrupted by user' % self.get_basename())
|
||||||
|
|
||||||
if retval == 0:
|
if retval == 0:
|
||||||
fsize = os.path.getsize(encodeFilename(tmpfilename))
|
status = {
|
||||||
self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize))
|
|
||||||
self.try_rename(tmpfilename, filename)
|
|
||||||
self._hook_progress({
|
|
||||||
'downloaded_bytes': fsize,
|
|
||||||
'total_bytes': fsize,
|
|
||||||
'filename': filename,
|
'filename': filename,
|
||||||
'status': 'finished',
|
'status': 'finished',
|
||||||
})
|
'elapsed': time.time() - started,
|
||||||
|
}
|
||||||
|
if filename != '-':
|
||||||
|
fsize = os.path.getsize(encodeFilename(tmpfilename))
|
||||||
|
self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize))
|
||||||
|
self.try_rename(tmpfilename, filename)
|
||||||
|
status.update({
|
||||||
|
'downloaded_bytes': fsize,
|
||||||
|
'total_bytes': fsize,
|
||||||
|
})
|
||||||
|
self._hook_progress(status)
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
self.to_stderr('\n')
|
self.to_stderr('\n')
|
||||||
|
@ -1,12 +1,12 @@
|
|||||||
from __future__ import division, unicode_literals
|
from __future__ import division, unicode_literals
|
||||||
|
|
||||||
import base64
|
|
||||||
import io
|
import io
|
||||||
import itertools
|
import itertools
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from .fragment import FragmentFD
|
from .fragment import FragmentFD
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
|
compat_b64decode,
|
||||||
compat_etree_fromstring,
|
compat_etree_fromstring,
|
||||||
compat_urlparse,
|
compat_urlparse,
|
||||||
compat_urllib_error,
|
compat_urllib_error,
|
||||||
@ -243,8 +243,17 @@ def remove_encrypted_media(media):
|
|||||||
media))
|
media))
|
||||||
|
|
||||||
|
|
||||||
def _add_ns(prop):
|
def _add_ns(prop, ver=1):
|
||||||
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
|
return '{http://ns.adobe.com/f4m/%d.0}%s' % (ver, prop)
|
||||||
|
|
||||||
|
|
||||||
|
def get_base_url(manifest):
|
||||||
|
base_url = xpath_text(
|
||||||
|
manifest, [_add_ns('baseURL'), _add_ns('baseURL', 2)],
|
||||||
|
'base URL', default=None)
|
||||||
|
if base_url:
|
||||||
|
base_url = base_url.strip()
|
||||||
|
return base_url
|
||||||
|
|
||||||
|
|
||||||
class F4mFD(FragmentFD):
|
class F4mFD(FragmentFD):
|
||||||
@ -303,7 +312,7 @@ class F4mFD(FragmentFD):
|
|||||||
boot_info = self._get_bootstrap_from_url(bootstrap_url)
|
boot_info = self._get_bootstrap_from_url(bootstrap_url)
|
||||||
else:
|
else:
|
||||||
bootstrap_url = None
|
bootstrap_url = None
|
||||||
bootstrap = base64.b64decode(node.text.encode('ascii'))
|
bootstrap = compat_b64decode(node.text)
|
||||||
boot_info = read_bootstrap_info(bootstrap)
|
boot_info = read_bootstrap_info(bootstrap)
|
||||||
return boot_info, bootstrap_url
|
return boot_info, bootstrap_url
|
||||||
|
|
||||||
@ -330,17 +339,17 @@ class F4mFD(FragmentFD):
|
|||||||
rate, media = list(filter(
|
rate, media = list(filter(
|
||||||
lambda f: int(f[0]) == requested_bitrate, formats))[0]
|
lambda f: int(f[0]) == requested_bitrate, formats))[0]
|
||||||
|
|
||||||
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
|
# Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
|
||||||
|
man_base_url = get_base_url(doc) or man_url
|
||||||
|
|
||||||
|
base_url = compat_urlparse.urljoin(man_base_url, media.attrib['url'])
|
||||||
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
|
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
|
||||||
# From Adobe F4M 3.0 spec:
|
boot_info, bootstrap_url = self._parse_bootstrap_node(
|
||||||
# The <baseURL> element SHALL be the base URL for all relative
|
bootstrap_node, man_base_url)
|
||||||
# (HTTP-based) URLs in the manifest. If <baseURL> is not present, said
|
|
||||||
# URLs should be relative to the location of the containing document.
|
|
||||||
boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, man_url)
|
|
||||||
live = boot_info['live']
|
live = boot_info['live']
|
||||||
metadata_node = media.find(_add_ns('metadata'))
|
metadata_node = media.find(_add_ns('metadata'))
|
||||||
if metadata_node is not None:
|
if metadata_node is not None:
|
||||||
metadata = base64.b64decode(metadata_node.text.encode('ascii'))
|
metadata = compat_b64decode(metadata_node.text)
|
||||||
else:
|
else:
|
||||||
metadata = None
|
metadata = None
|
||||||
|
|
||||||
|
@ -74,9 +74,14 @@ class FragmentFD(FileDownloader):
|
|||||||
return not ctx['live'] and not ctx['tmpfilename'] == '-'
|
return not ctx['live'] and not ctx['tmpfilename'] == '-'
|
||||||
|
|
||||||
def _read_ytdl_file(self, ctx):
|
def _read_ytdl_file(self, ctx):
|
||||||
|
assert 'ytdl_corrupt' not in ctx
|
||||||
stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
|
stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
|
||||||
ctx['fragment_index'] = json.loads(stream.read())['downloader']['current_fragment']['index']
|
try:
|
||||||
stream.close()
|
ctx['fragment_index'] = json.loads(stream.read())['downloader']['current_fragment']['index']
|
||||||
|
except Exception:
|
||||||
|
ctx['ytdl_corrupt'] = True
|
||||||
|
finally:
|
||||||
|
stream.close()
|
||||||
|
|
||||||
def _write_ytdl_file(self, ctx):
|
def _write_ytdl_file(self, ctx):
|
||||||
frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
|
frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
|
||||||
@ -107,19 +112,26 @@ class FragmentFD(FileDownloader):
|
|||||||
def _append_fragment(self, ctx, frag_content):
|
def _append_fragment(self, ctx, frag_content):
|
||||||
try:
|
try:
|
||||||
ctx['dest_stream'].write(frag_content)
|
ctx['dest_stream'].write(frag_content)
|
||||||
|
ctx['dest_stream'].flush()
|
||||||
finally:
|
finally:
|
||||||
if self.__do_ytdl_file(ctx):
|
if self.__do_ytdl_file(ctx):
|
||||||
self._write_ytdl_file(ctx)
|
self._write_ytdl_file(ctx)
|
||||||
if not self.params.get('keep_fragments', False):
|
if not self.params.get('keep_fragments', False):
|
||||||
os.remove(ctx['fragment_filename_sanitized'])
|
os.remove(encodeFilename(ctx['fragment_filename_sanitized']))
|
||||||
del ctx['fragment_filename_sanitized']
|
del ctx['fragment_filename_sanitized']
|
||||||
|
|
||||||
def _prepare_frag_download(self, ctx):
|
def _prepare_frag_download(self, ctx):
|
||||||
if 'live' not in ctx:
|
if 'live' not in ctx:
|
||||||
ctx['live'] = False
|
ctx['live'] = False
|
||||||
|
if not ctx['live']:
|
||||||
|
total_frags_str = '%d' % ctx['total_frags']
|
||||||
|
ad_frags = ctx.get('ad_frags', 0)
|
||||||
|
if ad_frags:
|
||||||
|
total_frags_str += ' (not including %d ad)' % ad_frags
|
||||||
|
else:
|
||||||
|
total_frags_str = 'unknown (live)'
|
||||||
self.to_screen(
|
self.to_screen(
|
||||||
'[%s] Total fragments: %s'
|
'[%s] Total fragments: %s' % (self.FD_NAME, total_frags_str))
|
||||||
% (self.FD_NAME, ctx['total_frags'] if not ctx['live'] else 'unknown (live)'))
|
|
||||||
self.report_destination(ctx['filename'])
|
self.report_destination(ctx['filename'])
|
||||||
dl = HttpQuietDownloader(
|
dl = HttpQuietDownloader(
|
||||||
self.ydl,
|
self.ydl,
|
||||||
@ -151,10 +163,21 @@ class FragmentFD(FileDownloader):
|
|||||||
if self.__do_ytdl_file(ctx):
|
if self.__do_ytdl_file(ctx):
|
||||||
if os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename']))):
|
if os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename']))):
|
||||||
self._read_ytdl_file(ctx)
|
self._read_ytdl_file(ctx)
|
||||||
|
is_corrupt = ctx.get('ytdl_corrupt') is True
|
||||||
|
is_inconsistent = ctx['fragment_index'] > 0 and resume_len == 0
|
||||||
|
if is_corrupt or is_inconsistent:
|
||||||
|
message = (
|
||||||
|
'.ytdl file is corrupt' if is_corrupt else
|
||||||
|
'Inconsistent state of incomplete fragment download')
|
||||||
|
self.report_warning(
|
||||||
|
'%s. Restarting from the beginning...' % message)
|
||||||
|
ctx['fragment_index'] = resume_len = 0
|
||||||
|
if 'ytdl_corrupt' in ctx:
|
||||||
|
del ctx['ytdl_corrupt']
|
||||||
|
self._write_ytdl_file(ctx)
|
||||||
else:
|
else:
|
||||||
self._write_ytdl_file(ctx)
|
self._write_ytdl_file(ctx)
|
||||||
if ctx['fragment_index'] > 0:
|
assert ctx['fragment_index'] == 0
|
||||||
assert resume_len > 0
|
|
||||||
|
|
||||||
dest_stream, tmpfilename = sanitize_open(tmpfilename, open_mode)
|
dest_stream, tmpfilename = sanitize_open(tmpfilename, open_mode)
|
||||||
|
|
||||||
@ -229,12 +252,16 @@ class FragmentFD(FileDownloader):
|
|||||||
if os.path.isfile(ytdl_filename):
|
if os.path.isfile(ytdl_filename):
|
||||||
os.remove(ytdl_filename)
|
os.remove(ytdl_filename)
|
||||||
elapsed = time.time() - ctx['started']
|
elapsed = time.time() - ctx['started']
|
||||||
self.try_rename(ctx['tmpfilename'], ctx['filename'])
|
|
||||||
fsize = os.path.getsize(encodeFilename(ctx['filename']))
|
if ctx['tmpfilename'] == '-':
|
||||||
|
downloaded_bytes = ctx['complete_frags_downloaded_bytes']
|
||||||
|
else:
|
||||||
|
self.try_rename(ctx['tmpfilename'], ctx['filename'])
|
||||||
|
downloaded_bytes = os.path.getsize(encodeFilename(ctx['filename']))
|
||||||
|
|
||||||
self._hook_progress({
|
self._hook_progress({
|
||||||
'downloaded_bytes': fsize,
|
'downloaded_bytes': downloaded_bytes,
|
||||||
'total_bytes': fsize,
|
'total_bytes': downloaded_bytes,
|
||||||
'filename': ctx['filename'],
|
'filename': ctx['filename'],
|
||||||
'status': 'finished',
|
'status': 'finished',
|
||||||
'elapsed': elapsed,
|
'elapsed': elapsed,
|
||||||
|
@ -75,15 +75,31 @@ class HlsFD(FragmentFD):
|
|||||||
fd.add_progress_hook(ph)
|
fd.add_progress_hook(ph)
|
||||||
return fd.real_download(filename, info_dict)
|
return fd.real_download(filename, info_dict)
|
||||||
|
|
||||||
total_frags = 0
|
def is_ad_fragment(s):
|
||||||
|
return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s or
|
||||||
|
s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
|
||||||
|
|
||||||
|
media_frags = 0
|
||||||
|
ad_frags = 0
|
||||||
|
ad_frag_next = False
|
||||||
for line in s.splitlines():
|
for line in s.splitlines():
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
if line and not line.startswith('#'):
|
if not line:
|
||||||
total_frags += 1
|
continue
|
||||||
|
if line.startswith('#'):
|
||||||
|
if is_ad_fragment(line):
|
||||||
|
ad_frags += 1
|
||||||
|
ad_frag_next = True
|
||||||
|
continue
|
||||||
|
if ad_frag_next:
|
||||||
|
ad_frag_next = False
|
||||||
|
continue
|
||||||
|
media_frags += 1
|
||||||
|
|
||||||
ctx = {
|
ctx = {
|
||||||
'filename': filename,
|
'filename': filename,
|
||||||
'total_frags': total_frags,
|
'total_frags': media_frags,
|
||||||
|
'ad_frags': ad_frags,
|
||||||
}
|
}
|
||||||
|
|
||||||
self._prepare_and_start_frag_download(ctx)
|
self._prepare_and_start_frag_download(ctx)
|
||||||
@ -101,10 +117,14 @@ class HlsFD(FragmentFD):
|
|||||||
decrypt_info = {'METHOD': 'NONE'}
|
decrypt_info = {'METHOD': 'NONE'}
|
||||||
byte_range = {}
|
byte_range = {}
|
||||||
frag_index = 0
|
frag_index = 0
|
||||||
|
ad_frag_next = False
|
||||||
for line in s.splitlines():
|
for line in s.splitlines():
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
if line:
|
if line:
|
||||||
if not line.startswith('#'):
|
if not line.startswith('#'):
|
||||||
|
if ad_frag_next:
|
||||||
|
ad_frag_next = False
|
||||||
|
continue
|
||||||
frag_index += 1
|
frag_index += 1
|
||||||
if frag_index <= ctx['fragment_index']:
|
if frag_index <= ctx['fragment_index']:
|
||||||
continue
|
continue
|
||||||
@ -144,7 +164,8 @@ class HlsFD(FragmentFD):
|
|||||||
return False
|
return False
|
||||||
if decrypt_info['METHOD'] == 'AES-128':
|
if decrypt_info['METHOD'] == 'AES-128':
|
||||||
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
|
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
|
||||||
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(decrypt_info['URI']).read()
|
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
|
||||||
|
self._prepare_url(info_dict, decrypt_info['URI'])).read()
|
||||||
frag_content = AES.new(
|
frag_content = AES.new(
|
||||||
decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
|
decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
|
||||||
self._append_fragment(ctx, frag_content)
|
self._append_fragment(ctx, frag_content)
|
||||||
@ -175,6 +196,8 @@ class HlsFD(FragmentFD):
|
|||||||
'start': sub_range_start,
|
'start': sub_range_start,
|
||||||
'end': sub_range_start + int(splitted_byte_range[0]),
|
'end': sub_range_start + int(splitted_byte_range[0]),
|
||||||
}
|
}
|
||||||
|
elif is_ad_fragment(line):
|
||||||
|
ad_frag_next = True
|
||||||
|
|
||||||
self._finish_frag_download(ctx)
|
self._finish_frag_download(ctx)
|
||||||
|
|
||||||
|
@ -4,13 +4,18 @@ import errno
|
|||||||
import os
|
import os
|
||||||
import socket
|
import socket
|
||||||
import time
|
import time
|
||||||
|
import random
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import FileDownloader
|
from .common import FileDownloader
|
||||||
from ..compat import compat_urllib_error
|
from ..compat import (
|
||||||
|
compat_str,
|
||||||
|
compat_urllib_error,
|
||||||
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ContentTooShortError,
|
ContentTooShortError,
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
|
int_or_none,
|
||||||
sanitize_open,
|
sanitize_open,
|
||||||
sanitized_Request,
|
sanitized_Request,
|
||||||
write_xattr,
|
write_xattr,
|
||||||
@ -22,79 +27,133 @@ from ..utils import (
|
|||||||
class HttpFD(FileDownloader):
|
class HttpFD(FileDownloader):
|
||||||
def real_download(self, filename, info_dict):
|
def real_download(self, filename, info_dict):
|
||||||
url = info_dict['url']
|
url = info_dict['url']
|
||||||
tmpfilename = self.temp_name(filename)
|
|
||||||
stream = None
|
class DownloadContext(dict):
|
||||||
|
__getattr__ = dict.get
|
||||||
|
__setattr__ = dict.__setitem__
|
||||||
|
__delattr__ = dict.__delitem__
|
||||||
|
|
||||||
|
ctx = DownloadContext()
|
||||||
|
ctx.filename = filename
|
||||||
|
ctx.tmpfilename = self.temp_name(filename)
|
||||||
|
ctx.stream = None
|
||||||
|
|
||||||
# Do not include the Accept-Encoding header
|
# Do not include the Accept-Encoding header
|
||||||
headers = {'Youtubedl-no-compression': 'True'}
|
headers = {'Youtubedl-no-compression': 'True'}
|
||||||
add_headers = info_dict.get('http_headers')
|
add_headers = info_dict.get('http_headers')
|
||||||
if add_headers:
|
if add_headers:
|
||||||
headers.update(add_headers)
|
headers.update(add_headers)
|
||||||
basic_request = sanitized_Request(url, None, headers)
|
|
||||||
request = sanitized_Request(url, None, headers)
|
|
||||||
|
|
||||||
is_test = self.params.get('test', False)
|
is_test = self.params.get('test', False)
|
||||||
|
chunk_size = self._TEST_FILE_SIZE if is_test else (
|
||||||
|
info_dict.get('downloader_options', {}).get('http_chunk_size') or
|
||||||
|
self.params.get('http_chunk_size') or 0)
|
||||||
|
|
||||||
if is_test:
|
ctx.open_mode = 'wb'
|
||||||
request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))
|
ctx.resume_len = 0
|
||||||
|
ctx.data_len = None
|
||||||
|
ctx.block_size = self.params.get('buffersize', 1024)
|
||||||
|
ctx.start_time = time.time()
|
||||||
|
ctx.chunk_size = None
|
||||||
|
|
||||||
# Establish possible resume length
|
if self.params.get('continuedl', True):
|
||||||
if os.path.isfile(encodeFilename(tmpfilename)):
|
# Establish possible resume length
|
||||||
resume_len = os.path.getsize(encodeFilename(tmpfilename))
|
if os.path.isfile(encodeFilename(ctx.tmpfilename)):
|
||||||
else:
|
ctx.resume_len = os.path.getsize(
|
||||||
resume_len = 0
|
encodeFilename(ctx.tmpfilename))
|
||||||
|
|
||||||
open_mode = 'wb'
|
ctx.is_resume = ctx.resume_len > 0
|
||||||
if resume_len != 0:
|
|
||||||
if self.params.get('continuedl', True):
|
|
||||||
self.report_resuming_byte(resume_len)
|
|
||||||
request.add_header('Range', 'bytes=%d-' % resume_len)
|
|
||||||
open_mode = 'ab'
|
|
||||||
else:
|
|
||||||
resume_len = 0
|
|
||||||
|
|
||||||
count = 0
|
count = 0
|
||||||
retries = self.params.get('retries', 0)
|
retries = self.params.get('retries', 0)
|
||||||
while count <= retries:
|
|
||||||
|
class SucceedDownload(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
class RetryDownload(Exception):
|
||||||
|
def __init__(self, source_error):
|
||||||
|
self.source_error = source_error
|
||||||
|
|
||||||
|
class NextFragment(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def set_range(req, start, end):
|
||||||
|
range_header = 'bytes=%d-' % start
|
||||||
|
if end:
|
||||||
|
range_header += compat_str(end)
|
||||||
|
req.add_header('Range', range_header)
|
||||||
|
|
||||||
|
def establish_connection():
|
||||||
|
ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size)
|
||||||
|
if not is_test and chunk_size else chunk_size)
|
||||||
|
if ctx.resume_len > 0:
|
||||||
|
range_start = ctx.resume_len
|
||||||
|
if ctx.is_resume:
|
||||||
|
self.report_resuming_byte(ctx.resume_len)
|
||||||
|
ctx.open_mode = 'ab'
|
||||||
|
elif ctx.chunk_size > 0:
|
||||||
|
range_start = 0
|
||||||
|
else:
|
||||||
|
range_start = None
|
||||||
|
ctx.is_resume = False
|
||||||
|
range_end = range_start + ctx.chunk_size - 1 if ctx.chunk_size else None
|
||||||
|
if range_end and ctx.data_len is not None and range_end >= ctx.data_len:
|
||||||
|
range_end = ctx.data_len - 1
|
||||||
|
has_range = range_start is not None
|
||||||
|
ctx.has_range = has_range
|
||||||
|
request = sanitized_Request(url, None, headers)
|
||||||
|
if has_range:
|
||||||
|
set_range(request, range_start, range_end)
|
||||||
# Establish connection
|
# Establish connection
|
||||||
try:
|
try:
|
||||||
data = self.ydl.urlopen(request)
|
ctx.data = self.ydl.urlopen(request)
|
||||||
# When trying to resume, Content-Range HTTP header of response has to be checked
|
# When trying to resume, Content-Range HTTP header of response has to be checked
|
||||||
# to match the value of requested Range HTTP header. This is due to a webservers
|
# to match the value of requested Range HTTP header. This is due to a webservers
|
||||||
# that don't support resuming and serve a whole file with no Content-Range
|
# that don't support resuming and serve a whole file with no Content-Range
|
||||||
# set in response despite of requested Range (see
|
# set in response despite of requested Range (see
|
||||||
# https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
|
# https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
|
||||||
if resume_len > 0:
|
if has_range:
|
||||||
content_range = data.headers.get('Content-Range')
|
content_range = ctx.data.headers.get('Content-Range')
|
||||||
if content_range:
|
if content_range:
|
||||||
content_range_m = re.search(r'bytes (\d+)-', content_range)
|
content_range_m = re.search(r'bytes (\d+)-(\d+)?(?:/(\d+))?', content_range)
|
||||||
# Content-Range is present and matches requested Range, resume is possible
|
# Content-Range is present and matches requested Range, resume is possible
|
||||||
if content_range_m and resume_len == int(content_range_m.group(1)):
|
if content_range_m:
|
||||||
break
|
if range_start == int(content_range_m.group(1)):
|
||||||
|
content_range_end = int_or_none(content_range_m.group(2))
|
||||||
|
content_len = int_or_none(content_range_m.group(3))
|
||||||
|
accept_content_len = (
|
||||||
|
# Non-chunked download
|
||||||
|
not ctx.chunk_size or
|
||||||
|
# Chunked download and requested piece or
|
||||||
|
# its part is promised to be served
|
||||||
|
content_range_end == range_end or
|
||||||
|
content_len < range_end)
|
||||||
|
if accept_content_len:
|
||||||
|
ctx.data_len = content_len
|
||||||
|
return
|
||||||
# Content-Range is either not present or invalid. Assuming remote webserver is
|
# Content-Range is either not present or invalid. Assuming remote webserver is
|
||||||
# trying to send the whole file, resume is not possible, so wiping the local file
|
# trying to send the whole file, resume is not possible, so wiping the local file
|
||||||
# and performing entire redownload
|
# and performing entire redownload
|
||||||
self.report_unable_to_resume()
|
self.report_unable_to_resume()
|
||||||
resume_len = 0
|
ctx.resume_len = 0
|
||||||
open_mode = 'wb'
|
ctx.open_mode = 'wb'
|
||||||
break
|
ctx.data_len = int_or_none(ctx.data.info().get('Content-length', None))
|
||||||
|
return
|
||||||
except (compat_urllib_error.HTTPError, ) as err:
|
except (compat_urllib_error.HTTPError, ) as err:
|
||||||
if (err.code < 500 or err.code >= 600) and err.code != 416:
|
if err.code == 416:
|
||||||
# Unexpected HTTP error
|
|
||||||
raise
|
|
||||||
elif err.code == 416:
|
|
||||||
# Unable to resume (requested range not satisfiable)
|
# Unable to resume (requested range not satisfiable)
|
||||||
try:
|
try:
|
||||||
# Open the connection again without the range header
|
# Open the connection again without the range header
|
||||||
data = self.ydl.urlopen(basic_request)
|
ctx.data = self.ydl.urlopen(
|
||||||
content_length = data.info()['Content-Length']
|
sanitized_Request(url, None, headers))
|
||||||
|
content_length = ctx.data.info()['Content-Length']
|
||||||
except (compat_urllib_error.HTTPError, ) as err:
|
except (compat_urllib_error.HTTPError, ) as err:
|
||||||
if err.code < 500 or err.code >= 600:
|
if err.code < 500 or err.code >= 600:
|
||||||
raise
|
raise
|
||||||
else:
|
else:
|
||||||
# Examine the reported length
|
# Examine the reported length
|
||||||
if (content_length is not None and
|
if (content_length is not None and
|
||||||
(resume_len - 100 < int(content_length) < resume_len + 100)):
|
(ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
|
||||||
# The file had already been fully downloaded.
|
# The file had already been fully downloaded.
|
||||||
# Explanation to the above condition: in issue #175 it was revealed that
|
# Explanation to the above condition: in issue #175 it was revealed that
|
||||||
# YouTube sometimes adds or removes a few bytes from the end of the file,
|
# YouTube sometimes adds or removes a few bytes from the end of the file,
|
||||||
@ -102,152 +161,193 @@ class HttpFD(FileDownloader):
|
|||||||
# I decided to implement a suggested change and consider the file
|
# I decided to implement a suggested change and consider the file
|
||||||
# completely downloaded if the file size differs less than 100 bytes from
|
# completely downloaded if the file size differs less than 100 bytes from
|
||||||
# the one in the hard drive.
|
# the one in the hard drive.
|
||||||
self.report_file_already_downloaded(filename)
|
self.report_file_already_downloaded(ctx.filename)
|
||||||
self.try_rename(tmpfilename, filename)
|
self.try_rename(ctx.tmpfilename, ctx.filename)
|
||||||
self._hook_progress({
|
self._hook_progress({
|
||||||
'filename': filename,
|
'filename': ctx.filename,
|
||||||
'status': 'finished',
|
'status': 'finished',
|
||||||
'downloaded_bytes': resume_len,
|
'downloaded_bytes': ctx.resume_len,
|
||||||
'total_bytes': resume_len,
|
'total_bytes': ctx.resume_len,
|
||||||
})
|
})
|
||||||
return True
|
raise SucceedDownload()
|
||||||
else:
|
else:
|
||||||
# The length does not match, we start the download over
|
# The length does not match, we start the download over
|
||||||
self.report_unable_to_resume()
|
self.report_unable_to_resume()
|
||||||
resume_len = 0
|
ctx.resume_len = 0
|
||||||
open_mode = 'wb'
|
ctx.open_mode = 'wb'
|
||||||
break
|
return
|
||||||
except socket.error as e:
|
elif err.code < 500 or err.code >= 600:
|
||||||
if e.errno != errno.ECONNRESET:
|
# Unexpected HTTP error
|
||||||
|
raise
|
||||||
|
raise RetryDownload(err)
|
||||||
|
except socket.error as err:
|
||||||
|
if err.errno != errno.ECONNRESET:
|
||||||
# Connection reset is no problem, just retry
|
# Connection reset is no problem, just retry
|
||||||
raise
|
raise
|
||||||
|
raise RetryDownload(err)
|
||||||
|
|
||||||
# Retry
|
def download():
|
||||||
count += 1
|
data_len = ctx.data.info().get('Content-length', None)
|
||||||
if count <= retries:
|
|
||||||
self.report_retry(count, retries)
|
|
||||||
|
|
||||||
if count > retries:
|
# Range HTTP header may be ignored/unsupported by a webserver
|
||||||
self.report_error('giving up after %s retries' % retries)
|
# (e.g. extractor/scivee.py, extractor/bambuser.py).
|
||||||
return False
|
# However, for a test we still would like to download just a piece of a file.
|
||||||
|
# To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
|
||||||
|
# block size when downloading a file.
|
||||||
|
if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
|
||||||
|
data_len = self._TEST_FILE_SIZE
|
||||||
|
|
||||||
data_len = data.info().get('Content-length', None)
|
if data_len is not None:
|
||||||
|
data_len = int(data_len) + ctx.resume_len
|
||||||
# Range HTTP header may be ignored/unsupported by a webserver
|
min_data_len = self.params.get('min_filesize')
|
||||||
# (e.g. extractor/scivee.py, extractor/bambuser.py).
|
max_data_len = self.params.get('max_filesize')
|
||||||
# However, for a test we still would like to download just a piece of a file.
|
if min_data_len is not None and data_len < min_data_len:
|
||||||
# To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
|
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
|
||||||
# block size when downloading a file.
|
return False
|
||||||
if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
|
if max_data_len is not None and data_len > max_data_len:
|
||||||
data_len = self._TEST_FILE_SIZE
|
self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
|
||||||
|
|
||||||
if data_len is not None:
|
|
||||||
data_len = int(data_len) + resume_len
|
|
||||||
min_data_len = self.params.get('min_filesize')
|
|
||||||
max_data_len = self.params.get('max_filesize')
|
|
||||||
if min_data_len is not None and data_len < min_data_len:
|
|
||||||
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
|
|
||||||
return False
|
|
||||||
if max_data_len is not None and data_len > max_data_len:
|
|
||||||
self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
|
|
||||||
return False
|
|
||||||
|
|
||||||
byte_counter = 0 + resume_len
|
|
||||||
block_size = self.params.get('buffersize', 1024)
|
|
||||||
start = time.time()
|
|
||||||
|
|
||||||
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
|
|
||||||
now = None # needed for slow_down() in the first loop run
|
|
||||||
before = start # start measuring
|
|
||||||
while True:
|
|
||||||
|
|
||||||
# Download and write
|
|
||||||
data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
|
|
||||||
byte_counter += len(data_block)
|
|
||||||
|
|
||||||
# exit loop when download is finished
|
|
||||||
if len(data_block) == 0:
|
|
||||||
break
|
|
||||||
|
|
||||||
# Open destination file just in time
|
|
||||||
if stream is None:
|
|
||||||
try:
|
|
||||||
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
|
|
||||||
assert stream is not None
|
|
||||||
filename = self.undo_temp_name(tmpfilename)
|
|
||||||
self.report_destination(filename)
|
|
||||||
except (OSError, IOError) as err:
|
|
||||||
self.report_error('unable to open for writing: %s' % str(err))
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if self.params.get('xattr_set_filesize', False) and data_len is not None:
|
byte_counter = 0 + ctx.resume_len
|
||||||
|
block_size = ctx.block_size
|
||||||
|
start = time.time()
|
||||||
|
|
||||||
|
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
|
||||||
|
now = None # needed for slow_down() in the first loop run
|
||||||
|
before = start # start measuring
|
||||||
|
|
||||||
|
def retry(e):
|
||||||
|
if ctx.tmpfilename != '-':
|
||||||
|
ctx.stream.close()
|
||||||
|
ctx.stream = None
|
||||||
|
ctx.resume_len = os.path.getsize(encodeFilename(ctx.tmpfilename))
|
||||||
|
raise RetryDownload(e)
|
||||||
|
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
# Download and write
|
||||||
|
data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
|
||||||
|
# socket.timeout is a subclass of socket.error but may not have
|
||||||
|
# errno set
|
||||||
|
except socket.timeout as e:
|
||||||
|
retry(e)
|
||||||
|
except socket.error as e:
|
||||||
|
if e.errno not in (errno.ECONNRESET, errno.ETIMEDOUT):
|
||||||
|
raise
|
||||||
|
retry(e)
|
||||||
|
|
||||||
|
byte_counter += len(data_block)
|
||||||
|
|
||||||
|
# exit loop when download is finished
|
||||||
|
if len(data_block) == 0:
|
||||||
|
break
|
||||||
|
|
||||||
|
# Open destination file just in time
|
||||||
|
if ctx.stream is None:
|
||||||
try:
|
try:
|
||||||
write_xattr(tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
|
ctx.stream, ctx.tmpfilename = sanitize_open(
|
||||||
except (XAttrUnavailableError, XAttrMetadataError) as err:
|
ctx.tmpfilename, ctx.open_mode)
|
||||||
self.report_error('unable to set filesize xattr: %s' % str(err))
|
assert ctx.stream is not None
|
||||||
|
ctx.filename = self.undo_temp_name(ctx.tmpfilename)
|
||||||
|
self.report_destination(ctx.filename)
|
||||||
|
except (OSError, IOError) as err:
|
||||||
|
self.report_error('unable to open for writing: %s' % str(err))
|
||||||
|
return False
|
||||||
|
|
||||||
try:
|
if self.params.get('xattr_set_filesize', False) and data_len is not None:
|
||||||
stream.write(data_block)
|
try:
|
||||||
except (IOError, OSError) as err:
|
write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
|
||||||
|
except (XAttrUnavailableError, XAttrMetadataError) as err:
|
||||||
|
self.report_error('unable to set filesize xattr: %s' % str(err))
|
||||||
|
|
||||||
|
try:
|
||||||
|
ctx.stream.write(data_block)
|
||||||
|
except (IOError, OSError) as err:
|
||||||
|
self.to_stderr('\n')
|
||||||
|
self.report_error('unable to write data: %s' % str(err))
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Apply rate limit
|
||||||
|
self.slow_down(start, now, byte_counter - ctx.resume_len)
|
||||||
|
|
||||||
|
# end measuring of one loop run
|
||||||
|
now = time.time()
|
||||||
|
after = now
|
||||||
|
|
||||||
|
# Adjust block size
|
||||||
|
if not self.params.get('noresizebuffer', False):
|
||||||
|
block_size = self.best_block_size(after - before, len(data_block))
|
||||||
|
|
||||||
|
before = after
|
||||||
|
|
||||||
|
# Progress message
|
||||||
|
speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
|
||||||
|
if ctx.data_len is None:
|
||||||
|
eta = None
|
||||||
|
else:
|
||||||
|
eta = self.calc_eta(start, time.time(), ctx.data_len - ctx.resume_len, byte_counter - ctx.resume_len)
|
||||||
|
|
||||||
|
self._hook_progress({
|
||||||
|
'status': 'downloading',
|
||||||
|
'downloaded_bytes': byte_counter,
|
||||||
|
'total_bytes': ctx.data_len,
|
||||||
|
'tmpfilename': ctx.tmpfilename,
|
||||||
|
'filename': ctx.filename,
|
||||||
|
'eta': eta,
|
||||||
|
'speed': speed,
|
||||||
|
'elapsed': now - ctx.start_time,
|
||||||
|
})
|
||||||
|
|
||||||
|
if is_test and byte_counter == data_len:
|
||||||
|
break
|
||||||
|
|
||||||
|
if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
|
||||||
|
ctx.resume_len = byte_counter
|
||||||
|
# ctx.block_size = block_size
|
||||||
|
raise NextFragment()
|
||||||
|
|
||||||
|
if ctx.stream is None:
|
||||||
self.to_stderr('\n')
|
self.to_stderr('\n')
|
||||||
self.report_error('unable to write data: %s' % str(err))
|
self.report_error('Did not get any data blocks')
|
||||||
return False
|
return False
|
||||||
|
if ctx.tmpfilename != '-':
|
||||||
|
ctx.stream.close()
|
||||||
|
|
||||||
# Apply rate limit
|
if data_len is not None and byte_counter != data_len:
|
||||||
self.slow_down(start, now, byte_counter - resume_len)
|
err = ContentTooShortError(byte_counter, int(data_len))
|
||||||
|
if count <= retries:
|
||||||
|
retry(err)
|
||||||
|
raise err
|
||||||
|
|
||||||
# end measuring of one loop run
|
self.try_rename(ctx.tmpfilename, ctx.filename)
|
||||||
now = time.time()
|
|
||||||
after = now
|
|
||||||
|
|
||||||
# Adjust block size
|
# Update file modification time
|
||||||
if not self.params.get('noresizebuffer', False):
|
if self.params.get('updatetime', True):
|
||||||
block_size = self.best_block_size(after - before, len(data_block))
|
info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.info().get('last-modified', None))
|
||||||
|
|
||||||
before = after
|
|
||||||
|
|
||||||
# Progress message
|
|
||||||
speed = self.calc_speed(start, now, byte_counter - resume_len)
|
|
||||||
if data_len is None:
|
|
||||||
eta = None
|
|
||||||
else:
|
|
||||||
eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
|
|
||||||
|
|
||||||
self._hook_progress({
|
self._hook_progress({
|
||||||
'status': 'downloading',
|
|
||||||
'downloaded_bytes': byte_counter,
|
'downloaded_bytes': byte_counter,
|
||||||
'total_bytes': data_len,
|
'total_bytes': byte_counter,
|
||||||
'tmpfilename': tmpfilename,
|
'filename': ctx.filename,
|
||||||
'filename': filename,
|
'status': 'finished',
|
||||||
'eta': eta,
|
'elapsed': time.time() - ctx.start_time,
|
||||||
'speed': speed,
|
|
||||||
'elapsed': now - start,
|
|
||||||
})
|
})
|
||||||
|
|
||||||
if is_test and byte_counter == data_len:
|
return True
|
||||||
break
|
|
||||||
|
|
||||||
if stream is None:
|
while count <= retries:
|
||||||
self.to_stderr('\n')
|
try:
|
||||||
self.report_error('Did not get any data blocks')
|
establish_connection()
|
||||||
return False
|
return download()
|
||||||
if tmpfilename != '-':
|
except RetryDownload as e:
|
||||||
stream.close()
|
count += 1
|
||||||
|
if count <= retries:
|
||||||
|
self.report_retry(e.source_error, count, retries)
|
||||||
|
continue
|
||||||
|
except NextFragment:
|
||||||
|
continue
|
||||||
|
except SucceedDownload:
|
||||||
|
return True
|
||||||
|
|
||||||
if data_len is not None and byte_counter != data_len:
|
self.report_error('giving up after %s retries' % retries)
|
||||||
raise ContentTooShortError(byte_counter, int(data_len))
|
return False
|
||||||
self.try_rename(tmpfilename, filename)
|
|
||||||
|
|
||||||
# Update file modification time
|
|
||||||
if self.params.get('updatetime', True):
|
|
||||||
info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
|
|
||||||
|
|
||||||
self._hook_progress({
|
|
||||||
'downloaded_bytes': byte_counter,
|
|
||||||
'total_bytes': byte_counter,
|
|
||||||
'filename': filename,
|
|
||||||
'status': 'finished',
|
|
||||||
'elapsed': time.time() - start,
|
|
||||||
})
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
@ -1,25 +1,27 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import time
|
import time
|
||||||
import struct
|
|
||||||
import binascii
|
import binascii
|
||||||
import io
|
import io
|
||||||
|
|
||||||
from .fragment import FragmentFD
|
from .fragment import FragmentFD
|
||||||
from ..compat import compat_urllib_error
|
from ..compat import (
|
||||||
|
compat_Struct,
|
||||||
|
compat_urllib_error,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
u8 = struct.Struct(b'>B')
|
u8 = compat_Struct('>B')
|
||||||
u88 = struct.Struct(b'>Bx')
|
u88 = compat_Struct('>Bx')
|
||||||
u16 = struct.Struct(b'>H')
|
u16 = compat_Struct('>H')
|
||||||
u1616 = struct.Struct(b'>Hxx')
|
u1616 = compat_Struct('>Hxx')
|
||||||
u32 = struct.Struct(b'>I')
|
u32 = compat_Struct('>I')
|
||||||
u64 = struct.Struct(b'>Q')
|
u64 = compat_Struct('>Q')
|
||||||
|
|
||||||
s88 = struct.Struct(b'>bx')
|
s88 = compat_Struct('>bx')
|
||||||
s16 = struct.Struct(b'>h')
|
s16 = compat_Struct('>h')
|
||||||
s1616 = struct.Struct(b'>hxx')
|
s1616 = compat_Struct('>hxx')
|
||||||
s32 = struct.Struct(b'>i')
|
s32 = compat_Struct('>i')
|
||||||
|
|
||||||
unity_matrix = (s32.pack(0x10000) + s32.pack(0) * 3) * 2 + s32.pack(0x40000000)
|
unity_matrix = (s32.pack(0x10000) + s32.pack(0) * 3) * 2 + s32.pack(0x40000000)
|
||||||
|
|
||||||
@ -139,7 +141,7 @@ def write_piff_header(stream, params):
|
|||||||
sample_entry_payload += u16.pack(0x18) # depth
|
sample_entry_payload += u16.pack(0x18) # depth
|
||||||
sample_entry_payload += s16.pack(-1) # pre defined
|
sample_entry_payload += s16.pack(-1) # pre defined
|
||||||
|
|
||||||
codec_private_data = binascii.unhexlify(params['codec_private_data'])
|
codec_private_data = binascii.unhexlify(params['codec_private_data'].encode('utf-8'))
|
||||||
if fourcc in ('H264', 'AVC1'):
|
if fourcc in ('H264', 'AVC1'):
|
||||||
sps, pps = codec_private_data.split(u32.pack(1))[1:]
|
sps, pps = codec_private_data.split(u32.pack(1))[1:]
|
||||||
avcc_payload = u8.pack(1) # configuration version
|
avcc_payload = u8.pack(1) # configuration version
|
||||||
|
@ -1,6 +1,9 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import hashlib
|
||||||
|
import hmac
|
||||||
import re
|
import re
|
||||||
|
import time
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_str
|
from ..compat import compat_str
|
||||||
@ -10,6 +13,8 @@ from ..utils import (
|
|||||||
int_or_none,
|
int_or_none,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
try_get,
|
try_get,
|
||||||
|
unescapeHTML,
|
||||||
|
update_url_query,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -101,21 +106,25 @@ class ABCIE(InfoExtractor):
|
|||||||
class ABCIViewIE(InfoExtractor):
|
class ABCIViewIE(InfoExtractor):
|
||||||
IE_NAME = 'abc.net.au:iview'
|
IE_NAME = 'abc.net.au:iview'
|
||||||
_VALID_URL = r'https?://iview\.abc\.net\.au/programs/[^/]+/(?P<id>[^/?#]+)'
|
_VALID_URL = r'https?://iview\.abc\.net\.au/programs/[^/]+/(?P<id>[^/?#]+)'
|
||||||
|
_GEO_COUNTRIES = ['AU']
|
||||||
|
|
||||||
# ABC iview programs are normally available for 14 days only.
|
# ABC iview programs are normally available for 14 days only.
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://iview.abc.net.au/programs/diaries-of-a-broken-mind/ZX9735A001S00',
|
'url': 'https://iview.abc.net.au/programs/ben-and-hollys-little-kingdom/ZY9247A021S00',
|
||||||
'md5': 'cde42d728b3b7c2b32b1b94b4a548afc',
|
'md5': 'cde42d728b3b7c2b32b1b94b4a548afc',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'ZX9735A001S00',
|
'id': 'ZY9247A021S00',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Diaries Of A Broken Mind',
|
'title': "Gaston's Visit",
|
||||||
'description': 'md5:7de3903874b7a1be279fe6b68718fc9e',
|
'series': "Ben And Holly's Little Kingdom",
|
||||||
'upload_date': '20161010',
|
'description': 'md5:18db170ad71cf161e006a4c688e33155',
|
||||||
'uploader_id': 'abc2',
|
'upload_date': '20180318',
|
||||||
'timestamp': 1476064920,
|
'uploader_id': 'abc4kids',
|
||||||
|
'timestamp': 1521400959,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
'skip': 'Video gone',
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -126,20 +135,30 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
title = video_params.get('title') or video_params['seriesTitle']
|
title = video_params.get('title') or video_params['seriesTitle']
|
||||||
stream = next(s for s in video_params['playlist'] if s.get('type') == 'program')
|
stream = next(s for s in video_params['playlist'] if s.get('type') == 'program')
|
||||||
|
|
||||||
format_urls = [
|
house_number = video_params.get('episodeHouseNumber')
|
||||||
try_get(stream, lambda x: x['hds-unmetered'], compat_str)]
|
path = '/auth/hls/sign?ts={0}&hn={1}&d=android-mobile'.format(
|
||||||
|
int(time.time()), house_number)
|
||||||
|
sig = hmac.new(
|
||||||
|
'android.content.res.Resources'.encode('utf-8'),
|
||||||
|
path.encode('utf-8'), hashlib.sha256).hexdigest()
|
||||||
|
token = self._download_webpage(
|
||||||
|
'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id)
|
||||||
|
|
||||||
# May have higher quality video
|
def tokenize_url(url, token):
|
||||||
sd_url = try_get(
|
return update_url_query(url, {
|
||||||
stream, lambda x: x['streams']['hds']['sd'], compat_str)
|
'hdnea': token,
|
||||||
if sd_url:
|
})
|
||||||
format_urls.append(sd_url.replace('metered', 'um'))
|
|
||||||
|
|
||||||
formats = []
|
for sd in ('sd', 'sd-low'):
|
||||||
for format_url in format_urls:
|
sd_url = try_get(
|
||||||
if format_url:
|
stream, lambda x: x['streams']['hls'][sd], compat_str)
|
||||||
formats.extend(
|
if not sd_url:
|
||||||
self._extract_akamai_formats(format_url, video_id))
|
continue
|
||||||
|
formats = self._extract_m3u8_formats(
|
||||||
|
tokenize_url(sd_url, token), video_id, 'mp4',
|
||||||
|
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
|
||||||
|
if formats:
|
||||||
|
break
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
@ -152,12 +171,12 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': unescapeHTML(title),
|
||||||
'description': self._html_search_meta(['og:description', 'twitter:description'], webpage),
|
'description': self._html_search_meta(['og:description', 'twitter:description'], webpage),
|
||||||
'thumbnail': self._html_search_meta(['og:image', 'twitter:image:src'], webpage),
|
'thumbnail': self._html_search_meta(['og:image', 'twitter:image:src'], webpage),
|
||||||
'duration': int_or_none(video_params.get('eventDuration')),
|
'duration': int_or_none(video_params.get('eventDuration')),
|
||||||
'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
|
'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
|
||||||
'series': video_params.get('seriesTitle'),
|
'series': unescapeHTML(video_params.get('seriesTitle')),
|
||||||
'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
|
'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
|
||||||
'episode_number': int_or_none(self._html_search_meta('episodeNumber', webpage, default=None)),
|
'episode_number': int_or_none(self._html_search_meta('episodeNumber', webpage, default=None)),
|
||||||
'episode': self._html_search_meta('episode_title', webpage, default=None),
|
'episode': self._html_search_meta('episode_title', webpage, default=None),
|
||||||
|
@ -7,6 +7,7 @@ import time
|
|||||||
|
|
||||||
from .amp import AMPIE
|
from .amp import AMPIE
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from .youtube import YoutubeIE
|
||||||
from ..compat import compat_urlparse
|
from ..compat import compat_urlparse
|
||||||
|
|
||||||
|
|
||||||
@ -65,7 +66,7 @@ class AbcNewsIE(InfoExtractor):
|
|||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://abcnews.go.com/Blotter/News/dramatic-video-rare-death-job-america/story?id=10498713#.UIhwosWHLjY',
|
'url': 'http://abcnews.go.com/Blotter/News/dramatic-video-rare-death-job-america/story?id=10498713#.UIhwosWHLjY',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '10498713',
|
'id': '10505354',
|
||||||
'ext': 'flv',
|
'ext': 'flv',
|
||||||
'display_id': 'dramatic-video-rare-death-job-america',
|
'display_id': 'dramatic-video-rare-death-job-america',
|
||||||
'title': 'Occupational Hazards',
|
'title': 'Occupational Hazards',
|
||||||
@ -78,7 +79,7 @@ class AbcNewsIE(InfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'http://abcnews.go.com/Entertainment/justin-timberlake-performs-stop-feeling-eurovision-2016/story?id=39125818',
|
'url': 'http://abcnews.go.com/Entertainment/justin-timberlake-performs-stop-feeling-eurovision-2016/story?id=39125818',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '39125818',
|
'id': '38897857',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'display_id': 'justin-timberlake-performs-stop-feeling-eurovision-2016',
|
'display_id': 'justin-timberlake-performs-stop-feeling-eurovision-2016',
|
||||||
'title': 'Justin Timberlake Drops Hints For Secret Single',
|
'title': 'Justin Timberlake Drops Hints For Secret Single',
|
||||||
@ -108,9 +109,7 @@ class AbcNewsIE(InfoExtractor):
|
|||||||
r'window\.abcnvideo\.url\s*=\s*"([^"]+)"', webpage, 'video URL')
|
r'window\.abcnvideo\.url\s*=\s*"([^"]+)"', webpage, 'video URL')
|
||||||
full_video_url = compat_urlparse.urljoin(url, video_url)
|
full_video_url = compat_urlparse.urljoin(url, video_url)
|
||||||
|
|
||||||
youtube_url = self._html_search_regex(
|
youtube_url = YoutubeIE._extract_url(webpage)
|
||||||
r'<iframe[^>]+src="(https://www\.youtube\.com/embed/[^"]+)"',
|
|
||||||
webpage, 'YouTube URL', default=None)
|
|
||||||
|
|
||||||
timestamp = None
|
timestamp = None
|
||||||
date_str = self._html_search_regex(
|
date_str = self._html_search_regex(
|
||||||
@ -140,7 +139,7 @@ class AbcNewsIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
if youtube_url:
|
if youtube_url:
|
||||||
entries = [entry, self.url_result(youtube_url, 'Youtube')]
|
entries = [entry, self.url_result(youtube_url, ie=YoutubeIE.ie_key())]
|
||||||
return self.playlist_result(entries)
|
return self.playlist_result(entries)
|
||||||
|
|
||||||
return entry
|
return entry
|
||||||
|
@ -7,8 +7,10 @@ import functools
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_str
|
from ..compat import compat_str
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_iso8601,
|
try_get,
|
||||||
|
unified_timestamp,
|
||||||
OnDemandPagedList,
|
OnDemandPagedList,
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -24,39 +26,58 @@ class ACastIE(InfoExtractor):
|
|||||||
'id': '57de3baa-4bb0-487e-9418-2692c1277a34',
|
'id': '57de3baa-4bb0-487e-9418-2692c1277a34',
|
||||||
'ext': 'mp3',
|
'ext': 'mp3',
|
||||||
'title': '"Where Are You?": Taipei 101, Taiwan',
|
'title': '"Where Are You?": Taipei 101, Taiwan',
|
||||||
|
'description': 'md5:a0b4ef3634e63866b542e5b1199a1a0e',
|
||||||
'timestamp': 1196172000,
|
'timestamp': 1196172000,
|
||||||
'upload_date': '20071127',
|
'upload_date': '20071127',
|
||||||
'description': 'md5:a0b4ef3634e63866b542e5b1199a1a0e',
|
|
||||||
'duration': 211,
|
'duration': 211,
|
||||||
|
'creator': 'Concierge',
|
||||||
|
'series': 'Condé Nast Traveler Podcast',
|
||||||
|
'episode': '"Where Are You?": Taipei 101, Taiwan',
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
# test with multiple blings
|
# test with multiple blings
|
||||||
'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
|
'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
|
||||||
'md5': '55c0097badd7095f494c99a172f86501',
|
'md5': 'a02393c74f3bdb1801c3ec2695577ce0',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
|
'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
|
||||||
'ext': 'mp3',
|
'ext': 'mp3',
|
||||||
'title': '2. Raggarmordet - Röster ur det förflutna',
|
'title': '2. Raggarmordet - Röster ur det förflutna',
|
||||||
|
'description': 'md5:4f81f6d8cf2e12ee21a321d8bca32db4',
|
||||||
'timestamp': 1477346700,
|
'timestamp': 1477346700,
|
||||||
'upload_date': '20161024',
|
'upload_date': '20161024',
|
||||||
'description': 'md5:4f81f6d8cf2e12ee21a321d8bca32db4',
|
'duration': 2766.602563,
|
||||||
'duration': 2797,
|
'creator': 'Anton Berg & Martin Johnson',
|
||||||
|
'series': 'Spår',
|
||||||
|
'episode': '2. Raggarmordet - Röster ur det förflutna',
|
||||||
}
|
}
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
channel, display_id = re.match(self._VALID_URL, url).groups()
|
channel, display_id = re.match(self._VALID_URL, url).groups()
|
||||||
|
s = self._download_json(
|
||||||
|
'https://play-api.acast.com/stitch/%s/%s' % (channel, display_id),
|
||||||
|
display_id)['result']
|
||||||
|
media_url = s['url']
|
||||||
cast_data = self._download_json(
|
cast_data = self._download_json(
|
||||||
'https://embed.acast.com/api/acasts/%s/%s' % (channel, display_id), display_id)
|
'https://play-api.acast.com/splash/%s/%s' % (channel, display_id),
|
||||||
|
display_id)['result']
|
||||||
|
e = cast_data['episode']
|
||||||
|
title = e['name']
|
||||||
return {
|
return {
|
||||||
'id': compat_str(cast_data['id']),
|
'id': compat_str(e['id']),
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
'url': [b['audio'] for b in cast_data['blings'] if b['type'] == 'BlingAudio'][0],
|
'url': media_url,
|
||||||
'title': cast_data['name'],
|
'title': title,
|
||||||
'description': cast_data.get('description'),
|
'description': e.get('description') or e.get('summary'),
|
||||||
'thumbnail': cast_data.get('image'),
|
'thumbnail': e.get('image'),
|
||||||
'timestamp': parse_iso8601(cast_data.get('publishingDate')),
|
'timestamp': unified_timestamp(e.get('publishingDate')),
|
||||||
'duration': int_or_none(cast_data.get('duration')),
|
'duration': float_or_none(s.get('duration') or e.get('duration')),
|
||||||
|
'filesize': int_or_none(e.get('contentLength')),
|
||||||
|
'creator': try_get(cast_data, lambda x: x['show']['author'], compat_str),
|
||||||
|
'series': try_get(cast_data, lambda x: x['show']['name'], compat_str),
|
||||||
|
'season_number': int_or_none(e.get('seasonNumber')),
|
||||||
|
'episode': title,
|
||||||
|
'episode_number': int_or_none(e.get('episodeNumber')),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,13 +1,15 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import base64
|
|
||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..aes import aes_cbc_decrypt
|
from ..aes import aes_cbc_decrypt
|
||||||
from ..compat import compat_ord
|
from ..compat import (
|
||||||
|
compat_b64decode,
|
||||||
|
compat_ord,
|
||||||
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
bytes_to_intlist,
|
bytes_to_intlist,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
@ -48,9 +50,9 @@ class ADNIE(InfoExtractor):
|
|||||||
|
|
||||||
# http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
|
# http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
|
||||||
dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(
|
dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(
|
||||||
bytes_to_intlist(base64.b64decode(enc_subtitles[24:])),
|
bytes_to_intlist(compat_b64decode(enc_subtitles[24:])),
|
||||||
bytes_to_intlist(b'\x1b\xe0\x29\x61\x38\x94\x24\x00\x12\xbd\xc5\x80\xac\xce\xbe\xb0'),
|
bytes_to_intlist(b'\xc8\x6e\x06\xbc\xbe\xc6\x49\xf5\x88\x0d\xc8\x47\xc4\x27\x0c\x60'),
|
||||||
bytes_to_intlist(base64.b64decode(enc_subtitles[:24]))
|
bytes_to_intlist(compat_b64decode(enc_subtitles[:24]))
|
||||||
))
|
))
|
||||||
subtitles_json = self._parse_json(
|
subtitles_json = self._parse_json(
|
||||||
dec_subtitles[:-compat_ord(dec_subtitles[-1])].decode(),
|
dec_subtitles[:-compat_ord(dec_subtitles[-1])].decode(),
|
||||||
@ -105,15 +107,18 @@ class ADNIE(InfoExtractor):
|
|||||||
|
|
||||||
options = player_config.get('options') or {}
|
options = player_config.get('options') or {}
|
||||||
metas = options.get('metas') or {}
|
metas = options.get('metas') or {}
|
||||||
title = metas.get('title') or video_info['title']
|
|
||||||
links = player_config.get('links') or {}
|
links = player_config.get('links') or {}
|
||||||
|
sub_path = player_config.get('subtitles')
|
||||||
error = None
|
error = None
|
||||||
if not links:
|
if not links:
|
||||||
links_url = player_config['linksurl']
|
links_url = player_config.get('linksurl') or options['videoUrl']
|
||||||
links_data = self._download_json(urljoin(
|
links_data = self._download_json(urljoin(
|
||||||
self._BASE_URL, links_url), video_id)
|
self._BASE_URL, links_url), video_id)
|
||||||
links = links_data.get('links') or {}
|
links = links_data.get('links') or {}
|
||||||
|
metas = metas or links_data.get('meta') or {}
|
||||||
|
sub_path = sub_path or links_data.get('subtitles')
|
||||||
error = links_data.get('error')
|
error = links_data.get('error')
|
||||||
|
title = metas.get('title') or video_info['title']
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for format_id, qualities in links.items():
|
for format_id, qualities in links.items():
|
||||||
@ -144,7 +149,7 @@ class ADNIE(InfoExtractor):
|
|||||||
'description': strip_or_none(metas.get('summary') or video_info.get('resume')),
|
'description': strip_or_none(metas.get('summary') or video_info.get('resume')),
|
||||||
'thumbnail': video_info.get('image'),
|
'thumbnail': video_info.get('image'),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'subtitles': self.extract_subtitles(player_config.get('subtitles'), video_id),
|
'subtitles': self.extract_subtitles(sub_path, video_id),
|
||||||
'episode': metas.get('subtitle') or video_info.get('videoTitle'),
|
'episode': metas.get('subtitle') or video_info.get('videoTitle'),
|
||||||
'series': video_info.get('playlistTitle'),
|
'series': video_info.get('playlistTitle'),
|
||||||
}
|
}
|
||||||
|
@ -122,7 +122,8 @@ class AENetworksIE(AENetworksBaseIE):
|
|||||||
|
|
||||||
query = {
|
query = {
|
||||||
'mbr': 'true',
|
'mbr': 'true',
|
||||||
'assetTypes': 'high_video_s3'
|
'assetTypes': 'high_video_ak',
|
||||||
|
'switch': 'hls_high_ak',
|
||||||
}
|
}
|
||||||
video_id = self._html_search_meta('aetn:VideoID', webpage)
|
video_id = self._html_search_meta('aetn:VideoID', webpage)
|
||||||
media_url = self._search_regex(
|
media_url = self._search_regex(
|
||||||
@ -131,7 +132,7 @@ class AENetworksIE(AENetworksBaseIE):
|
|||||||
r'data-media-url=(["\'])(?P<url>(?:(?!\1).)+?)\1'],
|
r'data-media-url=(["\'])(?P<url>(?:(?!\1).)+?)\1'],
|
||||||
webpage, 'video url', group='url')
|
webpage, 'video url', group='url')
|
||||||
theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
|
theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
|
||||||
r'https?://link.theplatform.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
|
r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
|
||||||
info = self._parse_theplatform_metadata(theplatform_metadata)
|
info = self._parse_theplatform_metadata(theplatform_metadata)
|
||||||
if theplatform_metadata.get('AETN$isBehindWall'):
|
if theplatform_metadata.get('AETN$isBehindWall'):
|
||||||
requestor_id = self._DOMAIN_TO_REQUESTOR_ID[domain]
|
requestor_id = self._DOMAIN_TO_REQUESTOR_ID[domain]
|
||||||
|
@ -9,6 +9,7 @@ from ..utils import (
|
|||||||
determine_ext,
|
determine_ext,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
urlencode_postdata,
|
||||||
xpath_text,
|
xpath_text,
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -28,6 +29,7 @@ class AfreecaTVIE(InfoExtractor):
|
|||||||
)
|
)
|
||||||
(?P<id>\d+)
|
(?P<id>\d+)
|
||||||
'''
|
'''
|
||||||
|
_NETRC_MACHINE = 'afreecatv'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=',
|
'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=',
|
||||||
'md5': 'f72c89fe7ecc14c1b5ce506c4996046e',
|
'md5': 'f72c89fe7ecc14c1b5ce506c4996046e',
|
||||||
@ -138,6 +140,23 @@ class AfreecaTVIE(InfoExtractor):
|
|||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
# PARTIAL_ADULT
|
||||||
|
'url': 'http://vod.afreecatv.com/PLAYER/STATION/32028439',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '20180327_27901457_202289533_1',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '[생]빨개요♥ (part 1)',
|
||||||
|
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
|
||||||
|
'uploader': '[SA]서아',
|
||||||
|
'uploader_id': 'bjdyrksu',
|
||||||
|
'upload_date': '20180327',
|
||||||
|
'duration': 3601,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
'expected_warnings': ['adult content'],
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.afreecatv.com/player/Player.swf?szType=szBjId=djleegoon&nStationNo=11273158&nBbsNo=13161095&nTitleNo=36327652',
|
'url': 'http://www.afreecatv.com/player/Player.swf?szType=szBjId=djleegoon&nStationNo=11273158&nBbsNo=13161095&nTitleNo=36327652',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
@ -155,17 +174,107 @@ class AfreecaTVIE(InfoExtractor):
|
|||||||
video_key['part'] = int(m.group('part'))
|
video_key['part'] = int(m.group('part'))
|
||||||
return video_key
|
return video_key
|
||||||
|
|
||||||
|
def _real_initialize(self):
|
||||||
|
self._login()
|
||||||
|
|
||||||
|
def _login(self):
|
||||||
|
username, password = self._get_login_info()
|
||||||
|
if username is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
login_form = {
|
||||||
|
'szWork': 'login',
|
||||||
|
'szType': 'json',
|
||||||
|
'szUid': username,
|
||||||
|
'szPassword': password,
|
||||||
|
'isSaveId': 'false',
|
||||||
|
'szScriptVar': 'oLoginRet',
|
||||||
|
'szAction': '',
|
||||||
|
}
|
||||||
|
|
||||||
|
response = self._download_json(
|
||||||
|
'https://login.afreecatv.com/app/LoginAction.php', None,
|
||||||
|
'Logging in', data=urlencode_postdata(login_form))
|
||||||
|
|
||||||
|
_ERRORS = {
|
||||||
|
-4: 'Your account has been suspended due to a violation of our terms and policies.',
|
||||||
|
-5: 'https://member.afreecatv.com/app/user_delete_progress.php',
|
||||||
|
-6: 'https://login.afreecatv.com/membership/changeMember.php',
|
||||||
|
-8: "Hello! AfreecaTV here.\nThe username you have entered belongs to \n an account that requires a legal guardian's consent. \nIf you wish to use our services without restriction, \nplease make sure to go through the necessary verification process.",
|
||||||
|
-9: 'https://member.afreecatv.com/app/pop_login_block.php',
|
||||||
|
-11: 'https://login.afreecatv.com/afreeca/second_login.php',
|
||||||
|
-12: 'https://member.afreecatv.com/app/user_security.php',
|
||||||
|
0: 'The username does not exist or you have entered the wrong password.',
|
||||||
|
-1: 'The username does not exist or you have entered the wrong password.',
|
||||||
|
-3: 'You have entered your username/password incorrectly.',
|
||||||
|
-7: 'You cannot use your Global AfreecaTV account to access Korean AfreecaTV.',
|
||||||
|
-10: 'Sorry for the inconvenience. \nYour account has been blocked due to an unauthorized access. \nPlease contact our Help Center for assistance.',
|
||||||
|
-32008: 'You have failed to log in. Please contact our Help Center.',
|
||||||
|
}
|
||||||
|
|
||||||
|
result = int_or_none(response.get('RESULT'))
|
||||||
|
if result != 1:
|
||||||
|
error = _ERRORS.get(result, 'You have failed to log in.')
|
||||||
|
raise ExtractorError(
|
||||||
|
'Unable to login: %s said: %s' % (self.IE_NAME, error),
|
||||||
|
expected=True)
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
video_xml = self._download_xml(
|
webpage = self._download_webpage(url, video_id)
|
||||||
'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
|
|
||||||
video_id, query={'nTitleNo': video_id})
|
|
||||||
|
|
||||||
video_element = video_xml.findall(compat_xpath('./track/video'))[1]
|
if re.search(r'alert\(["\']This video has been deleted', webpage):
|
||||||
|
raise ExtractorError(
|
||||||
|
'Video %s has been deleted' % video_id, expected=True)
|
||||||
|
|
||||||
|
station_id = self._search_regex(
|
||||||
|
r'nStationNo\s*=\s*(\d+)', webpage, 'station')
|
||||||
|
bbs_id = self._search_regex(
|
||||||
|
r'nBbsNo\s*=\s*(\d+)', webpage, 'bbs')
|
||||||
|
video_id = self._search_regex(
|
||||||
|
r'nTitleNo\s*=\s*(\d+)', webpage, 'title', default=video_id)
|
||||||
|
|
||||||
|
partial_view = False
|
||||||
|
for _ in range(2):
|
||||||
|
query = {
|
||||||
|
'nTitleNo': video_id,
|
||||||
|
'nStationNo': station_id,
|
||||||
|
'nBbsNo': bbs_id,
|
||||||
|
}
|
||||||
|
if partial_view:
|
||||||
|
query['partialView'] = 'SKIP_ADULT'
|
||||||
|
video_xml = self._download_xml(
|
||||||
|
'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
|
||||||
|
video_id, 'Downloading video info XML%s'
|
||||||
|
% (' (skipping adult)' if partial_view else ''),
|
||||||
|
video_id, headers={
|
||||||
|
'Referer': url,
|
||||||
|
}, query=query)
|
||||||
|
|
||||||
|
flag = xpath_text(video_xml, './track/flag', 'flag', default=None)
|
||||||
|
if flag and flag == 'SUCCEED':
|
||||||
|
break
|
||||||
|
if flag == 'PARTIAL_ADULT':
|
||||||
|
self._downloader.report_warning(
|
||||||
|
'In accordance with local laws and regulations, underage users are restricted from watching adult content. '
|
||||||
|
'Only content suitable for all ages will be downloaded. '
|
||||||
|
'Provide account credentials if you wish to download restricted content.')
|
||||||
|
partial_view = True
|
||||||
|
continue
|
||||||
|
elif flag == 'ADULT':
|
||||||
|
error = 'Only users older than 19 are able to watch this video. Provide account credentials to download this content.'
|
||||||
|
else:
|
||||||
|
error = flag
|
||||||
|
raise ExtractorError(
|
||||||
|
'%s said: %s' % (self.IE_NAME, error), expected=True)
|
||||||
|
else:
|
||||||
|
raise ExtractorError('Unable to download video info')
|
||||||
|
|
||||||
|
video_element = video_xml.findall(compat_xpath('./track/video'))[-1]
|
||||||
if video_element is None or video_element.text is None:
|
if video_element is None or video_element.text is None:
|
||||||
raise ExtractorError('Specified AfreecaTV video does not exist',
|
raise ExtractorError(
|
||||||
expected=True)
|
'Video %s video does not exist' % video_id, expected=True)
|
||||||
|
|
||||||
video_url = video_element.text.strip()
|
video_url = video_element.text.strip()
|
||||||
|
|
||||||
@ -203,10 +312,19 @@ class AfreecaTVIE(InfoExtractor):
|
|||||||
r'^(\d{8})_', key, 'upload date', default=None)
|
r'^(\d{8})_', key, 'upload date', default=None)
|
||||||
file_duration = int_or_none(file_element.get('duration'))
|
file_duration = int_or_none(file_element.get('duration'))
|
||||||
format_id = key if key else '%s_%s' % (video_id, file_num)
|
format_id = key if key else '%s_%s' % (video_id, file_num)
|
||||||
formats = self._extract_m3u8_formats(
|
if determine_ext(file_url) == 'm3u8':
|
||||||
file_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
formats = self._extract_m3u8_formats(
|
||||||
m3u8_id='hls',
|
file_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||||
note='Downloading part %d m3u8 information' % file_num)
|
m3u8_id='hls',
|
||||||
|
note='Downloading part %d m3u8 information' % file_num)
|
||||||
|
else:
|
||||||
|
formats = [{
|
||||||
|
'url': file_url,
|
||||||
|
'format_id': 'http',
|
||||||
|
}]
|
||||||
|
if not formats:
|
||||||
|
continue
|
||||||
|
self._sort_formats(formats)
|
||||||
file_info = common_entry.copy()
|
file_info = common_entry.copy()
|
||||||
file_info.update({
|
file_info.update({
|
||||||
'id': format_id,
|
'id': format_id,
|
||||||
@ -246,107 +364,3 @@ class AfreecaTVIE(InfoExtractor):
|
|||||||
})
|
})
|
||||||
|
|
||||||
return info
|
return info
|
||||||
|
|
||||||
|
|
||||||
class AfreecaTVGlobalIE(AfreecaTVIE):
|
|
||||||
IE_NAME = 'afreecatv:global'
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?afreeca\.tv/(?P<channel_id>\d+)(?:/v/(?P<video_id>\d+))?'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://afreeca.tv/36853014/v/58301',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '58301',
|
|
||||||
'title': 'tryhard top100',
|
|
||||||
'uploader_id': '36853014',
|
|
||||||
'uploader': 'makgi Hearthstone Live!',
|
|
||||||
},
|
|
||||||
'playlist_count': 3,
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
channel_id, video_id = re.match(self._VALID_URL, url).groups()
|
|
||||||
video_type = 'video' if video_id else 'live'
|
|
||||||
query = {
|
|
||||||
'pt': 'view',
|
|
||||||
'bid': channel_id,
|
|
||||||
}
|
|
||||||
if video_id:
|
|
||||||
query['vno'] = video_id
|
|
||||||
video_data = self._download_json(
|
|
||||||
'http://api.afreeca.tv/%s/view_%s.php' % (video_type, video_type),
|
|
||||||
video_id or channel_id, query=query)['channel']
|
|
||||||
|
|
||||||
if video_data.get('result') != 1:
|
|
||||||
raise ExtractorError('%s said: %s' % (self.IE_NAME, video_data['remsg']))
|
|
||||||
|
|
||||||
title = video_data['title']
|
|
||||||
|
|
||||||
info = {
|
|
||||||
'thumbnail': video_data.get('thumb'),
|
|
||||||
'view_count': int_or_none(video_data.get('vcnt')),
|
|
||||||
'age_limit': int_or_none(video_data.get('grade')),
|
|
||||||
'uploader_id': channel_id,
|
|
||||||
'uploader': video_data.get('cname'),
|
|
||||||
}
|
|
||||||
|
|
||||||
if video_id:
|
|
||||||
entries = []
|
|
||||||
for i, f in enumerate(video_data.get('flist', [])):
|
|
||||||
video_key = self.parse_video_key(f.get('key', ''))
|
|
||||||
f_url = f.get('file')
|
|
||||||
if not video_key or not f_url:
|
|
||||||
continue
|
|
||||||
entries.append({
|
|
||||||
'id': '%s_%s' % (video_id, video_key.get('part', i + 1)),
|
|
||||||
'title': title,
|
|
||||||
'upload_date': video_key.get('upload_date'),
|
|
||||||
'duration': int_or_none(f.get('length')),
|
|
||||||
'url': f_url,
|
|
||||||
'protocol': 'm3u8_native',
|
|
||||||
'ext': 'mp4',
|
|
||||||
})
|
|
||||||
|
|
||||||
info.update({
|
|
||||||
'id': video_id,
|
|
||||||
'title': title,
|
|
||||||
'duration': int_or_none(video_data.get('length')),
|
|
||||||
})
|
|
||||||
if len(entries) > 1:
|
|
||||||
info['_type'] = 'multi_video'
|
|
||||||
info['entries'] = entries
|
|
||||||
elif len(entries) == 1:
|
|
||||||
i = entries[0].copy()
|
|
||||||
i.update(info)
|
|
||||||
info = i
|
|
||||||
else:
|
|
||||||
formats = []
|
|
||||||
for s in video_data.get('strm', []):
|
|
||||||
s_url = s.get('purl')
|
|
||||||
if not s_url:
|
|
||||||
continue
|
|
||||||
stype = s.get('stype')
|
|
||||||
if stype == 'HLS':
|
|
||||||
formats.extend(self._extract_m3u8_formats(
|
|
||||||
s_url, channel_id, 'mp4', m3u8_id=stype, fatal=False))
|
|
||||||
elif stype == 'RTMP':
|
|
||||||
format_id = [stype]
|
|
||||||
label = s.get('label')
|
|
||||||
if label:
|
|
||||||
format_id.append(label)
|
|
||||||
formats.append({
|
|
||||||
'format_id': '-'.join(format_id),
|
|
||||||
'url': s_url,
|
|
||||||
'tbr': int_or_none(s.get('bps')),
|
|
||||||
'height': int_or_none(s.get('brt')),
|
|
||||||
'ext': 'flv',
|
|
||||||
'rtmp_live': True,
|
|
||||||
})
|
|
||||||
self._sort_formats(formats)
|
|
||||||
|
|
||||||
info.update({
|
|
||||||
'id': channel_id,
|
|
||||||
'title': self._live_title(title),
|
|
||||||
'is_live': True,
|
|
||||||
'formats': formats,
|
|
||||||
})
|
|
||||||
|
|
||||||
return info
|
|
||||||
|
53
youtube_dl/extractor/aliexpress.py
Normal file
53
youtube_dl/extractor/aliexpress.py
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_str
|
||||||
|
from ..utils import (
|
||||||
|
float_or_none,
|
||||||
|
try_get,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class AliExpressLiveIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://live\.aliexpress\.com/live/(?P<id>\d+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://live.aliexpress.com/live/2800002704436634',
|
||||||
|
'md5': 'e729e25d47c5e557f2630eaf99b740a5',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2800002704436634',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'CASIMA7.22',
|
||||||
|
'thumbnail': r're:http://.*\.jpg',
|
||||||
|
'uploader': 'CASIMA Official Store',
|
||||||
|
'timestamp': 1500717600,
|
||||||
|
'upload_date': '20170722',
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
data = self._parse_json(
|
||||||
|
self._search_regex(
|
||||||
|
r'(?s)runParams\s*=\s*({.+?})\s*;?\s*var',
|
||||||
|
webpage, 'runParams'),
|
||||||
|
video_id)
|
||||||
|
|
||||||
|
title = data['title']
|
||||||
|
|
||||||
|
formats = self._extract_m3u8_formats(
|
||||||
|
data['replyStreamUrl'], video_id, 'mp4',
|
||||||
|
entry_protocol='m3u8_native', m3u8_id='hls')
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'thumbnail': data.get('coverUrl'),
|
||||||
|
'uploader': try_get(
|
||||||
|
data, lambda x: x['followBar']['name'], compat_str),
|
||||||
|
'timestamp': float_or_none(data.get('startTimeLong'), scale=1000),
|
||||||
|
'formats': formats,
|
||||||
|
}
|
@ -11,7 +11,7 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class AMCNetworksIE(ThePlatformIE):
|
class AMCNetworksIE(ThePlatformIE):
|
||||||
_VALID_URL = r'https?://(?:www\.)?(?:amc|bbcamerica|ifc|wetv)\.com/(?:movies|shows(?:/[^/]+)+)/(?P<id>[^/?#]+)'
|
_VALID_URL = r'https?://(?:www\.)?(?:amc|bbcamerica|ifc|(?:we|sundance)tv)\.com/(?:movies|shows(?:/[^/]+)+)/(?P<id>[^/?#]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.ifc.com/shows/maron/season-04/episode-01/step-1',
|
'url': 'http://www.ifc.com/shows/maron/season-04/episode-01/step-1',
|
||||||
'md5': '',
|
'md5': '',
|
||||||
@ -51,6 +51,9 @@ class AMCNetworksIE(ThePlatformIE):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'http://www.wetv.com/shows/la-hair/videos/season-05/episode-09-episode-9-2/episode-9-sneak-peek-3',
|
'url': 'http://www.wetv.com/shows/la-hair/videos/season-05/episode-09-episode-9-2/episode-9-sneak-peek-3',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.sundancetv.com/shows/riviera/full-episodes/season-1/episode-01-episode-1',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
85
youtube_dl/extractor/americastestkitchen.py
Normal file
85
youtube_dl/extractor/americastestkitchen.py
Normal file
@ -0,0 +1,85 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
clean_html,
|
||||||
|
int_or_none,
|
||||||
|
try_get,
|
||||||
|
unified_strdate,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class AmericasTestKitchenIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?americastestkitchen\.com/(?:episode|videos)/(?P<id>\d+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.americastestkitchen.com/episode/548-summer-dinner-party',
|
||||||
|
'md5': 'b861c3e365ac38ad319cfd509c30577f',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1_5g5zua6e',
|
||||||
|
'title': 'Summer Dinner Party',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'description': 'md5:858d986e73a4826979b6a5d9f8f6a1ec',
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg',
|
||||||
|
'timestamp': 1497285541,
|
||||||
|
'upload_date': '20170612',
|
||||||
|
'uploader_id': 'roger.metcalf@americastestkitchen.com',
|
||||||
|
'release_date': '20170617',
|
||||||
|
'series': "America's Test Kitchen",
|
||||||
|
'season_number': 17,
|
||||||
|
'episode': 'Summer Dinner Party',
|
||||||
|
'episode_number': 24,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.americastestkitchen.com/videos/3420-pan-seared-salmon',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
partner_id = self._search_regex(
|
||||||
|
r'src=["\'](?:https?:)?//(?:[^/]+\.)kaltura\.com/(?:[^/]+/)*(?:p|partner_id)/(\d+)',
|
||||||
|
webpage, 'kaltura partner id')
|
||||||
|
|
||||||
|
video_data = self._parse_json(
|
||||||
|
self._search_regex(
|
||||||
|
r'window\.__INITIAL_STATE__\s*=\s*({.+?})\s*;\s*</script>',
|
||||||
|
webpage, 'initial context'),
|
||||||
|
video_id)
|
||||||
|
|
||||||
|
ep_data = try_get(
|
||||||
|
video_data,
|
||||||
|
(lambda x: x['episodeDetail']['content']['data'],
|
||||||
|
lambda x: x['videoDetail']['content']['data']), dict)
|
||||||
|
ep_meta = ep_data.get('full_video', {})
|
||||||
|
external_id = ep_data.get('external_id') or ep_meta['external_id']
|
||||||
|
|
||||||
|
title = ep_data.get('title') or ep_meta.get('title')
|
||||||
|
description = clean_html(ep_meta.get('episode_description') or ep_data.get(
|
||||||
|
'description') or ep_meta.get('description'))
|
||||||
|
thumbnail = try_get(ep_meta, lambda x: x['photo']['image_url'])
|
||||||
|
release_date = unified_strdate(ep_data.get('aired_at'))
|
||||||
|
|
||||||
|
season_number = int_or_none(ep_meta.get('season_number'))
|
||||||
|
episode = ep_meta.get('title')
|
||||||
|
episode_number = int_or_none(ep_meta.get('episode_number'))
|
||||||
|
|
||||||
|
return {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': 'kaltura:%s:%s' % (partner_id, external_id),
|
||||||
|
'ie_key': 'Kaltura',
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
'release_date': release_date,
|
||||||
|
'series': "America's Test Kitchen",
|
||||||
|
'season_number': season_number,
|
||||||
|
'episode': episode,
|
||||||
|
'episode_number': episode_number,
|
||||||
|
}
|
@ -3,16 +3,13 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import (
|
from ..compat import compat_str
|
||||||
compat_urlparse,
|
|
||||||
compat_str,
|
|
||||||
)
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
determine_ext,
|
determine_ext,
|
||||||
extract_attributes,
|
extract_attributes,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
sanitized_Request,
|
|
||||||
urlencode_postdata,
|
urlencode_postdata,
|
||||||
|
urljoin,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -21,6 +18,8 @@ class AnimeOnDemandIE(InfoExtractor):
|
|||||||
_LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
|
_LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
|
||||||
_APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
|
_APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
|
||||||
_NETRC_MACHINE = 'animeondemand'
|
_NETRC_MACHINE = 'animeondemand'
|
||||||
|
# German-speaking countries of Europe
|
||||||
|
_GEO_COUNTRIES = ['AT', 'CH', 'DE', 'LI', 'LU']
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# jap, OmU
|
# jap, OmU
|
||||||
'url': 'https://www.anime-on-demand.de/anime/161',
|
'url': 'https://www.anime-on-demand.de/anime/161',
|
||||||
@ -46,6 +45,10 @@ class AnimeOnDemandIE(InfoExtractor):
|
|||||||
# Full length film, non-series, ger/jap, Dub/OmU, account required
|
# Full length film, non-series, ger/jap, Dub/OmU, account required
|
||||||
'url': 'https://www.anime-on-demand.de/anime/185',
|
'url': 'https://www.anime-on-demand.de/anime/185',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
# Flash videos
|
||||||
|
'url': 'https://www.anime-on-demand.de/anime/12',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _login(self):
|
def _login(self):
|
||||||
@ -72,19 +75,18 @@ class AnimeOnDemandIE(InfoExtractor):
|
|||||||
'post url', default=self._LOGIN_URL, group='url')
|
'post url', default=self._LOGIN_URL, group='url')
|
||||||
|
|
||||||
if not post_url.startswith('http'):
|
if not post_url.startswith('http'):
|
||||||
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
|
post_url = urljoin(self._LOGIN_URL, post_url)
|
||||||
|
|
||||||
request = sanitized_Request(
|
|
||||||
post_url, urlencode_postdata(login_form))
|
|
||||||
request.add_header('Referer', self._LOGIN_URL)
|
|
||||||
|
|
||||||
response = self._download_webpage(
|
response = self._download_webpage(
|
||||||
request, None, 'Logging in as %s' % username)
|
post_url, None, 'Logging in',
|
||||||
|
data=urlencode_postdata(login_form), headers={
|
||||||
|
'Referer': self._LOGIN_URL,
|
||||||
|
})
|
||||||
|
|
||||||
if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
|
if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
|
||||||
error = self._search_regex(
|
error = self._search_regex(
|
||||||
r'<p class="alert alert-danger">(.+?)</p>',
|
r'<p[^>]+\bclass=(["\'])(?:(?!\1).)*\balert\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</p>',
|
||||||
response, 'error', default=None)
|
response, 'error', default=None, group='error')
|
||||||
if error:
|
if error:
|
||||||
raise ExtractorError('Unable to login: %s' % error, expected=True)
|
raise ExtractorError('Unable to login: %s' % error, expected=True)
|
||||||
raise ExtractorError('Unable to log in')
|
raise ExtractorError('Unable to log in')
|
||||||
@ -120,10 +122,11 @@ class AnimeOnDemandIE(InfoExtractor):
|
|||||||
formats = []
|
formats = []
|
||||||
|
|
||||||
for input_ in re.findall(
|
for input_ in re.findall(
|
||||||
r'<input[^>]+class=["\'].*?streamstarter_html5[^>]+>', html):
|
r'<input[^>]+class=["\'].*?streamstarter[^>]+>', html):
|
||||||
attributes = extract_attributes(input_)
|
attributes = extract_attributes(input_)
|
||||||
|
title = attributes.get('data-dialog-header')
|
||||||
playlist_urls = []
|
playlist_urls = []
|
||||||
for playlist_key in ('data-playlist', 'data-otherplaylist'):
|
for playlist_key in ('data-playlist', 'data-otherplaylist', 'data-stream'):
|
||||||
playlist_url = attributes.get(playlist_key)
|
playlist_url = attributes.get(playlist_key)
|
||||||
if isinstance(playlist_url, compat_str) and re.match(
|
if isinstance(playlist_url, compat_str) and re.match(
|
||||||
r'/?[\da-zA-Z]+', playlist_url):
|
r'/?[\da-zA-Z]+', playlist_url):
|
||||||
@ -147,19 +150,38 @@ class AnimeOnDemandIE(InfoExtractor):
|
|||||||
format_id_list.append(compat_str(num))
|
format_id_list.append(compat_str(num))
|
||||||
format_id = '-'.join(format_id_list)
|
format_id = '-'.join(format_id_list)
|
||||||
format_note = ', '.join(filter(None, (kind, lang_note)))
|
format_note = ', '.join(filter(None, (kind, lang_note)))
|
||||||
request = sanitized_Request(
|
item_id_list = []
|
||||||
compat_urlparse.urljoin(url, playlist_url),
|
if format_id:
|
||||||
|
item_id_list.append(format_id)
|
||||||
|
item_id_list.append('videomaterial')
|
||||||
|
playlist = self._download_json(
|
||||||
|
urljoin(url, playlist_url), video_id,
|
||||||
|
'Downloading %s JSON' % ' '.join(item_id_list),
|
||||||
headers={
|
headers={
|
||||||
'X-Requested-With': 'XMLHttpRequest',
|
'X-Requested-With': 'XMLHttpRequest',
|
||||||
'X-CSRF-Token': csrf_token,
|
'X-CSRF-Token': csrf_token,
|
||||||
'Referer': url,
|
'Referer': url,
|
||||||
'Accept': 'application/json, text/javascript, */*; q=0.01',
|
'Accept': 'application/json, text/javascript, */*; q=0.01',
|
||||||
})
|
}, fatal=False)
|
||||||
playlist = self._download_json(
|
|
||||||
request, video_id, 'Downloading %s playlist JSON' % format_id,
|
|
||||||
fatal=False)
|
|
||||||
if not playlist:
|
if not playlist:
|
||||||
continue
|
continue
|
||||||
|
stream_url = playlist.get('streamurl')
|
||||||
|
if stream_url:
|
||||||
|
rtmp = re.search(
|
||||||
|
r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+/))(?P<playpath>mp[34]:.+)',
|
||||||
|
stream_url)
|
||||||
|
if rtmp:
|
||||||
|
formats.append({
|
||||||
|
'url': rtmp.group('url'),
|
||||||
|
'app': rtmp.group('app'),
|
||||||
|
'play_path': rtmp.group('playpath'),
|
||||||
|
'page_url': url,
|
||||||
|
'player_url': 'https://www.anime-on-demand.de/assets/jwplayer.flash-55abfb34080700304d49125ce9ffb4a6.swf',
|
||||||
|
'rtmp_real_time': True,
|
||||||
|
'format_id': 'rtmp',
|
||||||
|
'ext': 'flv',
|
||||||
|
})
|
||||||
|
continue
|
||||||
start_video = playlist.get('startvideo', 0)
|
start_video = playlist.get('startvideo', 0)
|
||||||
playlist = playlist.get('playlist')
|
playlist = playlist.get('playlist')
|
||||||
if not playlist or not isinstance(playlist, list):
|
if not playlist or not isinstance(playlist, list):
|
||||||
@ -222,7 +244,7 @@ class AnimeOnDemandIE(InfoExtractor):
|
|||||||
f.update({
|
f.update({
|
||||||
'id': '%s-%s' % (f['id'], m.group('kind').lower()),
|
'id': '%s-%s' % (f['id'], m.group('kind').lower()),
|
||||||
'title': m.group('title'),
|
'title': m.group('title'),
|
||||||
'url': compat_urlparse.urljoin(url, m.group('href')),
|
'url': urljoin(url, m.group('href')),
|
||||||
})
|
})
|
||||||
entries.append(f)
|
entries.append(f)
|
||||||
|
|
||||||
|
@ -18,6 +18,7 @@ from ..utils import (
|
|||||||
int_or_none,
|
int_or_none,
|
||||||
strip_jsonp,
|
strip_jsonp,
|
||||||
unescapeHTML,
|
unescapeHTML,
|
||||||
|
unsmuggle_url,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -197,12 +198,16 @@ class AnvatoIE(InfoExtractor):
|
|||||||
'tbr': tbr if tbr != 0 else None,
|
'tbr': tbr if tbr != 0 else None,
|
||||||
}
|
}
|
||||||
|
|
||||||
if ext == 'm3u8' or media_format in ('m3u8', 'm3u8-variant'):
|
if media_format == 'm3u8' and tbr is not None:
|
||||||
if tbr is not None:
|
a_format.update({
|
||||||
a_format.update({
|
'format_id': '-'.join(filter(None, ['hls', compat_str(tbr)])),
|
||||||
'format_id': '-'.join(filter(None, ['hls', compat_str(tbr)])),
|
'ext': 'mp4',
|
||||||
'ext': 'mp4',
|
})
|
||||||
})
|
elif media_format == 'm3u8-variant' or ext == 'm3u8':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||||
|
m3u8_id='hls', fatal=False))
|
||||||
|
continue
|
||||||
elif ext == 'mp3' or media_format == 'mp3':
|
elif ext == 'mp3' or media_format == 'mp3':
|
||||||
a_format['vcodec'] = 'none'
|
a_format['vcodec'] = 'none'
|
||||||
else:
|
else:
|
||||||
@ -271,6 +276,9 @@ class AnvatoIE(InfoExtractor):
|
|||||||
anvplayer_data['accessKey'], anvplayer_data['video'])
|
anvplayer_data['accessKey'], anvplayer_data['video'])
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
url, smuggled_data = unsmuggle_url(url, {})
|
||||||
|
self._initialize_geo_bypass(smuggled_data.get('geo_countries'))
|
||||||
|
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
access_key, video_id = mobj.group('access_key_or_mcp', 'id')
|
access_key, video_id = mobj.group('access_key_or_mcp', 'id')
|
||||||
if access_key not in self._ANVACK_TABLE:
|
if access_key not in self._ANVACK_TABLE:
|
||||||
|
@ -3,13 +3,13 @@ from __future__ import unicode_literals
|
|||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
int_or_none,
|
||||||
HEADRequest,
|
mimetype2ext,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class AparatIE(InfoExtractor):
|
class AparatIE(InfoExtractor):
|
||||||
_VALID_URL = r'^https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'
|
_VALID_URL = r'https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.aparat.com/v/wP8On',
|
'url': 'http://www.aparat.com/v/wP8On',
|
||||||
@ -29,30 +29,41 @@ class AparatIE(InfoExtractor):
|
|||||||
# Note: There is an easier-to-parse configuration at
|
# Note: There is an easier-to-parse configuration at
|
||||||
# http://www.aparat.com/video/video/config/videohash/%video_id
|
# http://www.aparat.com/video/video/config/videohash/%video_id
|
||||||
# but the URL in there does not work
|
# but the URL in there does not work
|
||||||
embed_url = 'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/' + video_id
|
webpage = self._download_webpage(
|
||||||
webpage = self._download_webpage(embed_url, video_id)
|
'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/' + video_id,
|
||||||
|
video_id)
|
||||||
file_list = self._parse_json(self._search_regex(
|
|
||||||
r'fileList\s*=\s*JSON\.parse\(\'([^\']+)\'\)', webpage, 'file list'), video_id)
|
|
||||||
for i, item in enumerate(file_list[0]):
|
|
||||||
video_url = item['file']
|
|
||||||
req = HEADRequest(video_url)
|
|
||||||
res = self._request_webpage(
|
|
||||||
req, video_id, note='Testing video URL %d' % i, errnote=False)
|
|
||||||
if res:
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
raise ExtractorError('No working video URLs found')
|
|
||||||
|
|
||||||
title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title')
|
title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title')
|
||||||
|
|
||||||
|
file_list = self._parse_json(
|
||||||
|
self._search_regex(
|
||||||
|
r'fileList\s*=\s*JSON\.parse\(\'([^\']+)\'\)', webpage,
|
||||||
|
'file list'),
|
||||||
|
video_id)
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for item in file_list[0]:
|
||||||
|
file_url = item.get('file')
|
||||||
|
if not file_url:
|
||||||
|
continue
|
||||||
|
ext = mimetype2ext(item.get('type'))
|
||||||
|
label = item.get('label')
|
||||||
|
formats.append({
|
||||||
|
'url': file_url,
|
||||||
|
'ext': ext,
|
||||||
|
'format_id': label or ext,
|
||||||
|
'height': int_or_none(self._search_regex(
|
||||||
|
r'(\d+)[pP]', label or '', 'height', default=None)),
|
||||||
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
thumbnail = self._search_regex(
|
thumbnail = self._search_regex(
|
||||||
r'image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
|
r'image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'url': video_url,
|
|
||||||
'ext': 'mp4',
|
|
||||||
'thumbnail': thumbnail,
|
'thumbnail': thumbnail,
|
||||||
'age_limit': self._family_friendly_search(webpage),
|
'age_limit': self._family_friendly_search(webpage),
|
||||||
|
'formats': formats,
|
||||||
}
|
}
|
||||||
|
@ -117,7 +117,7 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
continue
|
continue
|
||||||
formats.append({
|
formats.append({
|
||||||
'format_id': '%s-%s' % (version, size),
|
'format_id': '%s-%s' % (version, size),
|
||||||
'url': re.sub(r'_(\d+p.mov)', r'_h\1', src),
|
'url': re.sub(r'_(\d+p\.mov)', r'_h\1', src),
|
||||||
'width': int_or_none(size_data.get('width')),
|
'width': int_or_none(size_data.get('width')),
|
||||||
'height': int_or_none(size_data.get('height')),
|
'height': int_or_none(size_data.get('height')),
|
||||||
'language': version[:2],
|
'language': version[:2],
|
||||||
@ -179,7 +179,7 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
formats = []
|
formats = []
|
||||||
for format in settings['metadata']['sizes']:
|
for format in settings['metadata']['sizes']:
|
||||||
# The src is a file pointing to the real video file
|
# The src is a file pointing to the real video file
|
||||||
format_url = re.sub(r'_(\d*p.mov)', r'_h\1', format['src'])
|
format_url = re.sub(r'_(\d*p\.mov)', r'_h\1', format['src'])
|
||||||
formats.append({
|
formats.append({
|
||||||
'url': format_url,
|
'url': format_url,
|
||||||
'format': format['type'],
|
'format': format['type'],
|
||||||
|
@ -41,7 +41,7 @@ class ArchiveOrgIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(
|
webpage = self._download_webpage(
|
||||||
'http://archive.org/embed/' + video_id, video_id)
|
'http://archive.org/embed/' + video_id, video_id)
|
||||||
jwplayer_playlist = self._parse_json(self._search_regex(
|
jwplayer_playlist = self._parse_json(self._search_regex(
|
||||||
r"(?s)Play\('[^']+'\s*,\s*(\[.+\])\s*,\s*{.*?}\);",
|
r"(?s)Play\('[^']+'\s*,\s*(\[.+\])\s*,\s*{.*?}\)",
|
||||||
webpage, 'jwplayer playlist'), video_id)
|
webpage, 'jwplayer playlist'), video_id)
|
||||||
info = self._parse_jwplayer_data(
|
info = self._parse_jwplayer_data(
|
||||||
{'playlist': jwplayer_playlist}, video_id, base_url=url)
|
{'playlist': jwplayer_playlist}, video_id, base_url=url)
|
||||||
|
@ -5,6 +5,7 @@ import re
|
|||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from .generic import GenericIE
|
from .generic import GenericIE
|
||||||
|
from ..compat import compat_str
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
determine_ext,
|
determine_ext,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
@ -23,57 +24,30 @@ class ARDMediathekIE(InfoExtractor):
|
|||||||
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.(?:daserste|rbb-online)\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
|
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.(?:daserste|rbb-online)\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.ardmediathek.de/tv/Dokumentation-und-Reportage/Ich-liebe-das-Leben-trotzdem/rbb-Fernsehen/Video?documentId=29582122&bcastId=3822114',
|
# available till 26.07.2022
|
||||||
|
'url': 'http://www.ardmediathek.de/tv/S%C3%9CDLICHT/Was-ist-die-Kunst-der-Zukunft-liebe-Ann/BR-Fernsehen/Video?bcastId=34633636&documentId=44726822',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '29582122',
|
'id': '44726822',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Ich liebe das Leben trotzdem',
|
'title': 'Was ist die Kunst der Zukunft, liebe Anna McCarthy?',
|
||||||
'description': 'md5:45e4c225c72b27993314b31a84a5261c',
|
'description': 'md5:4ada28b3e3b5df01647310e41f3a62f5',
|
||||||
'duration': 4557,
|
'duration': 1740,
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
# m3u8 download
|
# m3u8 download
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
},
|
}
|
||||||
'skip': 'HTTP Error 404: Not Found',
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.ardmediathek.de/tv/Tatort/Tatort-Scheinwelten-H%C3%B6rfassung-Video/Das-Erste/Video?documentId=29522730&bcastId=602916',
|
|
||||||
'md5': 'f4d98b10759ac06c0072bbcd1f0b9e3e',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '29522730',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Tatort: Scheinwelten - Hörfassung (Video tgl. ab 20 Uhr)',
|
|
||||||
'description': 'md5:196392e79876d0ac94c94e8cdb2875f1',
|
|
||||||
'duration': 5252,
|
|
||||||
},
|
|
||||||
'skip': 'HTTP Error 404: Not Found',
|
|
||||||
}, {
|
}, {
|
||||||
# audio
|
# audio
|
||||||
'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
|
'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
|
||||||
'md5': '219d94d8980b4f538c7fcb0865eb7f2c',
|
'only_matching': True,
|
||||||
'info_dict': {
|
|
||||||
'id': '28488308',
|
|
||||||
'ext': 'mp3',
|
|
||||||
'title': 'Tod eines Fußballers',
|
|
||||||
'description': 'md5:f6e39f3461f0e1f54bfa48c8875c86ef',
|
|
||||||
'duration': 3240,
|
|
||||||
},
|
|
||||||
'skip': 'HTTP Error 404: Not Found',
|
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
|
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
}, {
|
||||||
# audio
|
# audio
|
||||||
'url': 'http://mediathek.rbb-online.de/radio/Hörspiel/Vor-dem-Fest/kulturradio/Audio?documentId=30796318&topRessort=radio&bcastId=9839158',
|
'url': 'http://mediathek.rbb-online.de/radio/Hörspiel/Vor-dem-Fest/kulturradio/Audio?documentId=30796318&topRessort=radio&bcastId=9839158',
|
||||||
'md5': '4e8f00631aac0395fee17368ac0e9867',
|
'only_matching': True,
|
||||||
'info_dict': {
|
|
||||||
'id': '30796318',
|
|
||||||
'ext': 'mp3',
|
|
||||||
'title': 'Vor dem Fest',
|
|
||||||
'description': 'md5:c0c1c8048514deaed2a73b3a60eecacb',
|
|
||||||
'duration': 3287,
|
|
||||||
},
|
|
||||||
'skip': 'Video is no longer available',
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _extract_media_info(self, media_info_url, webpage, video_id):
|
def _extract_media_info(self, media_info_url, webpage, video_id):
|
||||||
@ -126,6 +100,8 @@ class ARDMediathekIE(InfoExtractor):
|
|||||||
quality = stream.get('_quality')
|
quality = stream.get('_quality')
|
||||||
server = stream.get('_server')
|
server = stream.get('_server')
|
||||||
for stream_url in stream_urls:
|
for stream_url in stream_urls:
|
||||||
|
if not isinstance(stream_url, compat_str) or '//' not in stream_url:
|
||||||
|
continue
|
||||||
ext = determine_ext(stream_url)
|
ext = determine_ext(stream_url)
|
||||||
if quality != 'auto' and ext in ('f4m', 'm3u8'):
|
if quality != 'auto' and ext in ('f4m', 'm3u8'):
|
||||||
continue
|
continue
|
||||||
@ -146,13 +122,11 @@ class ARDMediathekIE(InfoExtractor):
|
|||||||
'play_path': stream_url,
|
'play_path': stream_url,
|
||||||
'format_id': 'a%s-rtmp-%s' % (num, quality),
|
'format_id': 'a%s-rtmp-%s' % (num, quality),
|
||||||
}
|
}
|
||||||
elif stream_url.startswith('http'):
|
else:
|
||||||
f = {
|
f = {
|
||||||
'url': stream_url,
|
'url': stream_url,
|
||||||
'format_id': 'a%s-%s-%s' % (num, ext, quality)
|
'format_id': 'a%s-%s-%s' % (num, ext, quality)
|
||||||
}
|
}
|
||||||
else:
|
|
||||||
continue
|
|
||||||
m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
|
m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
|
||||||
if m:
|
if m:
|
||||||
f.update({
|
f.update({
|
||||||
@ -195,7 +169,7 @@ class ARDMediathekIE(InfoExtractor):
|
|||||||
|
|
||||||
title = self._html_search_regex(
|
title = self._html_search_regex(
|
||||||
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
|
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
|
||||||
r'<meta name="dcterms.title" content="(.*?)"/>',
|
r'<meta name="dcterms\.title" content="(.*?)"/>',
|
||||||
r'<h4 class="headline">(.*?)</h4>'],
|
r'<h4 class="headline">(.*?)</h4>'],
|
||||||
webpage, 'title')
|
webpage, 'title')
|
||||||
description = self._html_search_meta(
|
description = self._html_search_meta(
|
||||||
@ -251,20 +225,23 @@ class ARDMediathekIE(InfoExtractor):
|
|||||||
|
|
||||||
class ARDIE(InfoExtractor):
|
class ARDIE(InfoExtractor):
|
||||||
_VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
|
_VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
|
||||||
_TEST = {
|
_TESTS = [{
|
||||||
'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
|
# available till 14.02.2019
|
||||||
'md5': 'd216c3a86493f9322545e045ddc3eb35',
|
'url': 'http://www.daserste.de/information/talk/maischberger/videos/das-groko-drama-zerlegen-sich-die-volksparteien-video-102.html',
|
||||||
|
'md5': '8e4ec85f31be7c7fc08a26cdbc5a1f49',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'display_id': 'die-story-im-ersten-mission-unter-falscher-flagge',
|
'display_id': 'das-groko-drama-zerlegen-sich-die-volksparteien-video',
|
||||||
'id': '100',
|
'id': '102',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'duration': 2600,
|
'duration': 4435.0,
|
||||||
'title': 'Die Story im Ersten: Mission unter falscher Flagge',
|
'title': 'Das GroKo-Drama: Zerlegen sich die Volksparteien?',
|
||||||
'upload_date': '20140804',
|
'upload_date': '20180214',
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
},
|
},
|
||||||
'skip': 'HTTP Error 404: Not Found',
|
}, {
|
||||||
}
|
'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
@ -6,15 +6,18 @@ import re
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
compat_parse_qs,
|
compat_parse_qs,
|
||||||
|
compat_str,
|
||||||
compat_urllib_parse_urlparse,
|
compat_urllib_parse_urlparse,
|
||||||
)
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
find_xpath_attr,
|
find_xpath_attr,
|
||||||
unified_strdate,
|
|
||||||
get_element_by_attribute,
|
get_element_by_attribute,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
NO_DEFAULT,
|
NO_DEFAULT,
|
||||||
qualities,
|
qualities,
|
||||||
|
try_get,
|
||||||
|
unified_strdate,
|
||||||
)
|
)
|
||||||
|
|
||||||
# There are different sources of video in arte.tv, the extraction process
|
# There are different sources of video in arte.tv, the extraction process
|
||||||
@ -79,6 +82,16 @@ class ArteTVBaseIE(InfoExtractor):
|
|||||||
info = self._download_json(json_url, video_id)
|
info = self._download_json(json_url, video_id)
|
||||||
player_info = info['videoJsonPlayer']
|
player_info = info['videoJsonPlayer']
|
||||||
|
|
||||||
|
vsr = try_get(player_info, lambda x: x['VSR'], dict)
|
||||||
|
if not vsr:
|
||||||
|
error = None
|
||||||
|
if try_get(player_info, lambda x: x['custom_msg']['type']) == 'error':
|
||||||
|
error = try_get(
|
||||||
|
player_info, lambda x: x['custom_msg']['msg'], compat_str)
|
||||||
|
if not error:
|
||||||
|
error = 'Video %s is not available' % player_info.get('VID') or video_id
|
||||||
|
raise ExtractorError(error, expected=True)
|
||||||
|
|
||||||
upload_date_str = player_info.get('shootingDate')
|
upload_date_str = player_info.get('shootingDate')
|
||||||
if not upload_date_str:
|
if not upload_date_str:
|
||||||
upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
|
upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
|
||||||
@ -107,7 +120,7 @@ class ArteTVBaseIE(InfoExtractor):
|
|||||||
langcode = LANGS.get(lang, lang)
|
langcode = LANGS.get(lang, lang)
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for format_id, format_dict in player_info['VSR'].items():
|
for format_id, format_dict in vsr.items():
|
||||||
f = dict(format_dict)
|
f = dict(format_dict)
|
||||||
versionCode = f.get('versionCode')
|
versionCode = f.get('versionCode')
|
||||||
l = re.escape(langcode)
|
l = re.escape(langcode)
|
||||||
|
@ -87,7 +87,7 @@ class AtresPlayerIE(InfoExtractor):
|
|||||||
self._LOGIN_URL, urlencode_postdata(login_form))
|
self._LOGIN_URL, urlencode_postdata(login_form))
|
||||||
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||||
response = self._download_webpage(
|
response = self._download_webpage(
|
||||||
request, None, 'Logging in as %s' % username)
|
request, None, 'Logging in')
|
||||||
|
|
||||||
error = self._html_search_regex(
|
error = self._html_search_regex(
|
||||||
r'(?s)<ul[^>]+class="[^"]*\blist_error\b[^"]*">(.+?)</ul>',
|
r'(?s)<ul[^>]+class="[^"]*\blist_error\b[^"]*">(.+?)</ul>',
|
||||||
|
78
youtube_dl/extractor/aws.py
Normal file
78
youtube_dl/extractor/aws.py
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import hashlib
|
||||||
|
import hmac
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_urllib_parse_urlencode
|
||||||
|
|
||||||
|
|
||||||
|
class AWSIE(InfoExtractor):
|
||||||
|
_AWS_ALGORITHM = 'AWS4-HMAC-SHA256'
|
||||||
|
_AWS_REGION = 'us-east-1'
|
||||||
|
|
||||||
|
def _aws_execute_api(self, aws_dict, video_id, query=None):
|
||||||
|
query = query or {}
|
||||||
|
amz_date = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
|
||||||
|
date = amz_date[:8]
|
||||||
|
headers = {
|
||||||
|
'Accept': 'application/json',
|
||||||
|
'Host': self._AWS_PROXY_HOST,
|
||||||
|
'X-Amz-Date': amz_date,
|
||||||
|
'X-Api-Key': self._AWS_API_KEY
|
||||||
|
}
|
||||||
|
session_token = aws_dict.get('session_token')
|
||||||
|
if session_token:
|
||||||
|
headers['X-Amz-Security-Token'] = session_token
|
||||||
|
|
||||||
|
def aws_hash(s):
|
||||||
|
return hashlib.sha256(s.encode('utf-8')).hexdigest()
|
||||||
|
|
||||||
|
# Task 1: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
|
||||||
|
canonical_querystring = compat_urllib_parse_urlencode(query)
|
||||||
|
canonical_headers = ''
|
||||||
|
for header_name, header_value in sorted(headers.items()):
|
||||||
|
canonical_headers += '%s:%s\n' % (header_name.lower(), header_value)
|
||||||
|
signed_headers = ';'.join([header.lower() for header in sorted(headers.keys())])
|
||||||
|
canonical_request = '\n'.join([
|
||||||
|
'GET',
|
||||||
|
aws_dict['uri'],
|
||||||
|
canonical_querystring,
|
||||||
|
canonical_headers,
|
||||||
|
signed_headers,
|
||||||
|
aws_hash('')
|
||||||
|
])
|
||||||
|
|
||||||
|
# Task 2: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
|
||||||
|
credential_scope_list = [date, self._AWS_REGION, 'execute-api', 'aws4_request']
|
||||||
|
credential_scope = '/'.join(credential_scope_list)
|
||||||
|
string_to_sign = '\n'.join([self._AWS_ALGORITHM, amz_date, credential_scope, aws_hash(canonical_request)])
|
||||||
|
|
||||||
|
# Task 3: http://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
|
||||||
|
def aws_hmac(key, msg):
|
||||||
|
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256)
|
||||||
|
|
||||||
|
def aws_hmac_digest(key, msg):
|
||||||
|
return aws_hmac(key, msg).digest()
|
||||||
|
|
||||||
|
def aws_hmac_hexdigest(key, msg):
|
||||||
|
return aws_hmac(key, msg).hexdigest()
|
||||||
|
|
||||||
|
k_signing = ('AWS4' + aws_dict['secret_key']).encode('utf-8')
|
||||||
|
for value in credential_scope_list:
|
||||||
|
k_signing = aws_hmac_digest(k_signing, value)
|
||||||
|
|
||||||
|
signature = aws_hmac_hexdigest(k_signing, string_to_sign)
|
||||||
|
|
||||||
|
# Task 4: http://docs.aws.amazon.com/general/latest/gr/sigv4-add-signature-to-request.html
|
||||||
|
headers['Authorization'] = ', '.join([
|
||||||
|
'%s Credential=%s/%s' % (self._AWS_ALGORITHM, aws_dict['access_key'], credential_scope),
|
||||||
|
'SignedHeaders=%s' % signed_headers,
|
||||||
|
'Signature=%s' % signature,
|
||||||
|
])
|
||||||
|
|
||||||
|
return self._download_json(
|
||||||
|
'https://%s%s%s' % (self._AWS_PROXY_HOST, aws_dict['uri'], '?' + canonical_querystring if canonical_querystring else ''),
|
||||||
|
video_id, headers=headers)
|
@ -47,7 +47,7 @@ class AZMedienIE(AZMedienBaseIE):
|
|||||||
'url': 'http://www.telezueri.ch/62-show-zuerinews/13772-episode-sonntag-18-dezember-2016/32419-segment-massenabweisungen-beim-hiltl-club-wegen-pelzboom',
|
'url': 'http://www.telezueri.ch/62-show-zuerinews/13772-episode-sonntag-18-dezember-2016/32419-segment-massenabweisungen-beim-hiltl-club-wegen-pelzboom',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1_2444peh4',
|
'id': '1_2444peh4',
|
||||||
'ext': 'mov',
|
'ext': 'mp4',
|
||||||
'title': 'Massenabweisungen beim Hiltl Club wegen Pelzboom',
|
'title': 'Massenabweisungen beim Hiltl Club wegen Pelzboom',
|
||||||
'description': 'md5:9ea9dd1b159ad65b36ddcf7f0d7c76a8',
|
'description': 'md5:9ea9dd1b159ad65b36ddcf7f0d7c76a8',
|
||||||
'uploader_id': 'TeleZ?ri',
|
'uploader_id': 'TeleZ?ri',
|
||||||
|
@ -59,7 +59,7 @@ class BambuserIE(InfoExtractor):
|
|||||||
self._LOGIN_URL, urlencode_postdata(login_form))
|
self._LOGIN_URL, urlencode_postdata(login_form))
|
||||||
request.add_header('Referer', self._LOGIN_URL)
|
request.add_header('Referer', self._LOGIN_URL)
|
||||||
response = self._download_webpage(
|
response = self._download_webpage(
|
||||||
request, None, 'Logging in as %s' % username)
|
request, None, 'Logging in')
|
||||||
|
|
||||||
login_error = self._html_search_regex(
|
login_error = self._html_search_regex(
|
||||||
r'(?s)<div class="messages error">(.+?)</div>',
|
r'(?s)<div class="messages error">(.+?)</div>',
|
||||||
|
@ -242,7 +242,12 @@ class BandcampAlbumIE(InfoExtractor):
|
|||||||
raise ExtractorError('The page doesn\'t contain any tracks')
|
raise ExtractorError('The page doesn\'t contain any tracks')
|
||||||
# Only tracks with duration info have songs
|
# Only tracks with duration info have songs
|
||||||
entries = [
|
entries = [
|
||||||
self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
|
self.url_result(
|
||||||
|
compat_urlparse.urljoin(url, t_path),
|
||||||
|
ie=BandcampIE.ie_key(),
|
||||||
|
video_title=self._search_regex(
|
||||||
|
r'<span\b[^>]+\bitemprop=["\']name["\'][^>]*>([^<]+)',
|
||||||
|
elem_content, 'track title', fatal=False))
|
||||||
for elem_content, t_path in track_elements
|
for elem_content, t_path in track_elements
|
||||||
if self._html_search_meta('duration', elem_content, default=None)]
|
if self._html_search_meta('duration', elem_content, default=None)]
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@ from ..compat import (
|
|||||||
class BBCCoUkIE(InfoExtractor):
|
class BBCCoUkIE(InfoExtractor):
|
||||||
IE_NAME = 'bbc.co.uk'
|
IE_NAME = 'bbc.co.uk'
|
||||||
IE_DESC = 'BBC iPlayer'
|
IE_DESC = 'BBC iPlayer'
|
||||||
_ID_REGEX = r'[pb][\da-z]{7}'
|
_ID_REGEX = r'[pbw][\da-z]{7}'
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'''(?x)
|
||||||
https?://
|
https?://
|
||||||
(?:www\.)?bbc\.co\.uk/
|
(?:www\.)?bbc\.co\.uk/
|
||||||
@ -37,7 +37,8 @@ class BBCCoUkIE(InfoExtractor):
|
|||||||
programmes/(?!articles/)|
|
programmes/(?!articles/)|
|
||||||
iplayer(?:/[^/]+)?/(?:episode/|playlist/)|
|
iplayer(?:/[^/]+)?/(?:episode/|playlist/)|
|
||||||
music/(?:clips|audiovideo/popular)[/#]|
|
music/(?:clips|audiovideo/popular)[/#]|
|
||||||
radio/player/
|
radio/player/|
|
||||||
|
events/[^/]+/play/[^/]+/
|
||||||
)
|
)
|
||||||
(?P<id>%s)(?!/(?:episodes|broadcasts|clips))
|
(?P<id>%s)(?!/(?:episodes|broadcasts|clips))
|
||||||
''' % _ID_REGEX
|
''' % _ID_REGEX
|
||||||
@ -232,6 +233,9 @@ class BBCCoUkIE(InfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'https://www.bbc.co.uk/music/audiovideo/popular#p055bc55',
|
'url': 'https://www.bbc.co.uk/music/audiovideo/popular#p055bc55',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/programmes/w3csv1y9',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
_USP_RE = r'/([^/]+?)\.ism(?:\.hlsv2\.ism)?/[^/]+\.m3u8'
|
_USP_RE = r'/([^/]+?)\.ism(?:\.hlsv2\.ism)?/[^/]+\.m3u8'
|
||||||
|
@ -9,6 +9,7 @@ from ..compat import (
|
|||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
|
urljoin,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -36,9 +37,11 @@ class BeegIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
cpl_url = self._search_regex(
|
cpl_url = self._search_regex(
|
||||||
r'<script[^>]+src=(["\'])(?P<url>(?:https?:)?//static\.beeg\.com/cpl/\d+\.js.*?)\1',
|
r'<script[^>]+src=(["\'])(?P<url>(?:/static|(?:https?:)?//static\.beeg\.com)/cpl/\d+\.js.*?)\1',
|
||||||
webpage, 'cpl', default=None, group='url')
|
webpage, 'cpl', default=None, group='url')
|
||||||
|
|
||||||
|
cpl_url = urljoin(url, cpl_url)
|
||||||
|
|
||||||
beeg_version, beeg_salt = [None] * 2
|
beeg_version, beeg_salt = [None] * 2
|
||||||
|
|
||||||
if cpl_url:
|
if cpl_url:
|
||||||
@ -54,12 +57,16 @@ class BeegIE(InfoExtractor):
|
|||||||
r'beeg_salt\s*=\s*(["\'])(?P<beeg_salt>.+?)\1', cpl, 'beeg salt',
|
r'beeg_salt\s*=\s*(["\'])(?P<beeg_salt>.+?)\1', cpl, 'beeg salt',
|
||||||
default=None, group='beeg_salt')
|
default=None, group='beeg_salt')
|
||||||
|
|
||||||
beeg_version = beeg_version or '2000'
|
beeg_version = beeg_version or '2185'
|
||||||
beeg_salt = beeg_salt or 'pmweAkq8lAYKdfWcFCUj0yoVgoPlinamH5UE1CB3H'
|
beeg_salt = beeg_salt or 'pmweAkq8lAYKdfWcFCUj0yoVgoPlinamH5UE1CB3H'
|
||||||
|
|
||||||
video = self._download_json(
|
for api_path in ('', 'api.'):
|
||||||
'https://api.beeg.com/api/v6/%s/video/%s' % (beeg_version, video_id),
|
video = self._download_json(
|
||||||
video_id)
|
'https://%sbeeg.com/api/v6/%s/video/%s'
|
||||||
|
% (api_path, beeg_version, video_id), video_id,
|
||||||
|
fatal=api_path == 'api.')
|
||||||
|
if video:
|
||||||
|
break
|
||||||
|
|
||||||
def split(o, e):
|
def split(o, e):
|
||||||
def cut(s, x):
|
def cut(s, x):
|
||||||
|
@ -1,11 +1,13 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import base64
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_urllib_parse_unquote
|
from ..compat import (
|
||||||
|
compat_b64decode,
|
||||||
|
compat_urllib_parse_unquote,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class BigflixIE(InfoExtractor):
|
class BigflixIE(InfoExtractor):
|
||||||
@ -39,8 +41,8 @@ class BigflixIE(InfoExtractor):
|
|||||||
webpage, 'title')
|
webpage, 'title')
|
||||||
|
|
||||||
def decode_url(quoted_b64_url):
|
def decode_url(quoted_b64_url):
|
||||||
return base64.b64decode(compat_urllib_parse_unquote(
|
return compat_b64decode(compat_urllib_parse_unquote(
|
||||||
quoted_b64_url).encode('ascii')).decode('utf-8')
|
quoted_b64_url)).decode('utf-8')
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for height, encoded_url in re.findall(
|
for height, encoded_url in re.findall(
|
||||||
|
@ -27,14 +27,14 @@ class BiliBiliIE(InfoExtractor):
|
|||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.bilibili.tv/video/av1074402/',
|
'url': 'http://www.bilibili.tv/video/av1074402/',
|
||||||
'md5': '9fa226fe2b8a9a4d5a69b4c6a183417e',
|
'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1074402',
|
'id': '1074402',
|
||||||
'ext': 'mp4',
|
'ext': 'flv',
|
||||||
'title': '【金坷垃】金泡沫',
|
'title': '【金坷垃】金泡沫',
|
||||||
'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
|
'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
|
||||||
'duration': 308.315,
|
'duration': 308.067,
|
||||||
'timestamp': 1398012660,
|
'timestamp': 1398012678,
|
||||||
'upload_date': '20140420',
|
'upload_date': '20140420',
|
||||||
'thumbnail': r're:^https?://.+\.jpg',
|
'thumbnail': r're:^https?://.+\.jpg',
|
||||||
'uploader': '菊子桑',
|
'uploader': '菊子桑',
|
||||||
@ -59,17 +59,38 @@ class BiliBiliIE(InfoExtractor):
|
|||||||
'url': 'http://www.bilibili.com/video/av8903802/',
|
'url': 'http://www.bilibili.com/video/av8903802/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '8903802',
|
'id': '8903802',
|
||||||
'ext': 'mp4',
|
|
||||||
'title': '阿滴英文|英文歌分享#6 "Closer',
|
'title': '阿滴英文|英文歌分享#6 "Closer',
|
||||||
'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
|
'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
|
||||||
'uploader': '阿滴英文',
|
|
||||||
'uploader_id': '65880958',
|
|
||||||
'timestamp': 1488382620,
|
|
||||||
'upload_date': '20170301',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True, # Test metadata only
|
|
||||||
},
|
},
|
||||||
|
'playlist': [{
|
||||||
|
'info_dict': {
|
||||||
|
'id': '8903802_part1',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': '阿滴英文|英文歌分享#6 "Closer',
|
||||||
|
'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
|
||||||
|
'uploader': '阿滴英文',
|
||||||
|
'uploader_id': '65880958',
|
||||||
|
'timestamp': 1488382634,
|
||||||
|
'upload_date': '20170301',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True, # Test metadata only
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'info_dict': {
|
||||||
|
'id': '8903802_part2',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': '阿滴英文|英文歌分享#6 "Closer',
|
||||||
|
'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
|
||||||
|
'uploader': '阿滴英文',
|
||||||
|
'uploader_id': '65880958',
|
||||||
|
'timestamp': 1488382634,
|
||||||
|
'upload_date': '20170301',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True, # Test metadata only
|
||||||
|
},
|
||||||
|
}]
|
||||||
}]
|
}]
|
||||||
|
|
||||||
_APP_KEY = '84956560bc028eb7'
|
_APP_KEY = '84956560bc028eb7'
|
||||||
@ -92,8 +113,12 @@ class BiliBiliIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
if 'anime/' not in url:
|
if 'anime/' not in url:
|
||||||
cid = compat_parse_qs(self._search_regex(
|
cid = self._search_regex(
|
||||||
|
r'cid(?:["\']:|=)(\d+)', webpage, 'cid',
|
||||||
|
default=None
|
||||||
|
) or compat_parse_qs(self._search_regex(
|
||||||
[r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
|
[r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
|
||||||
|
r'EmbedPlayer\([^)]+,\s*\\"([^"]+)\\"\)',
|
||||||
r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
|
r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
|
||||||
webpage, 'player parameters'))['cid'][0]
|
webpage, 'player parameters'))['cid'][0]
|
||||||
else:
|
else:
|
||||||
@ -102,6 +127,7 @@ class BiliBiliIE(InfoExtractor):
|
|||||||
video_id, anime_id, compat_urlparse.urljoin(url, '//bangumi.bilibili.com/anime/%s' % anime_id)))
|
video_id, anime_id, compat_urlparse.urljoin(url, '//bangumi.bilibili.com/anime/%s' % anime_id)))
|
||||||
headers = {
|
headers = {
|
||||||
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
|
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
|
||||||
|
'Referer': url
|
||||||
}
|
}
|
||||||
headers.update(self.geo_verification_headers())
|
headers.update(self.geo_verification_headers())
|
||||||
|
|
||||||
@ -113,48 +139,66 @@ class BiliBiliIE(InfoExtractor):
|
|||||||
self._report_error(js)
|
self._report_error(js)
|
||||||
cid = js['result']['cid']
|
cid = js['result']['cid']
|
||||||
|
|
||||||
payload = 'appkey=%s&cid=%s&otype=json&quality=2&type=mp4' % (self._APP_KEY, cid)
|
headers = {
|
||||||
sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()
|
'Referer': url
|
||||||
|
}
|
||||||
video_info = self._download_json(
|
headers.update(self.geo_verification_headers())
|
||||||
'http://interface.bilibili.com/playurl?%s&sign=%s' % (payload, sign),
|
|
||||||
video_id, note='Downloading video info page',
|
|
||||||
headers=self.geo_verification_headers())
|
|
||||||
|
|
||||||
if 'durl' not in video_info:
|
|
||||||
self._report_error(video_info)
|
|
||||||
|
|
||||||
entries = []
|
entries = []
|
||||||
|
|
||||||
for idx, durl in enumerate(video_info['durl']):
|
RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4')
|
||||||
formats = [{
|
for num, rendition in enumerate(RENDITIONS, start=1):
|
||||||
'url': durl['url'],
|
payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition)
|
||||||
'filesize': int_or_none(durl['size']),
|
sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()
|
||||||
}]
|
|
||||||
for backup_url in durl.get('backup_url', []):
|
video_info = self._download_json(
|
||||||
formats.append({
|
'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
|
||||||
'url': backup_url,
|
video_id, note='Downloading video info page',
|
||||||
# backup URLs have lower priorities
|
headers=headers, fatal=num == len(RENDITIONS))
|
||||||
'preference': -2 if 'hd.mp4' in backup_url else -3,
|
|
||||||
|
if not video_info:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if 'durl' not in video_info:
|
||||||
|
if num < len(RENDITIONS):
|
||||||
|
continue
|
||||||
|
self._report_error(video_info)
|
||||||
|
|
||||||
|
for idx, durl in enumerate(video_info['durl']):
|
||||||
|
formats = [{
|
||||||
|
'url': durl['url'],
|
||||||
|
'filesize': int_or_none(durl['size']),
|
||||||
|
}]
|
||||||
|
for backup_url in durl.get('backup_url', []):
|
||||||
|
formats.append({
|
||||||
|
'url': backup_url,
|
||||||
|
# backup URLs have lower priorities
|
||||||
|
'preference': -2 if 'hd.mp4' in backup_url else -3,
|
||||||
|
})
|
||||||
|
|
||||||
|
for a_format in formats:
|
||||||
|
a_format.setdefault('http_headers', {}).update({
|
||||||
|
'Referer': url,
|
||||||
|
})
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
entries.append({
|
||||||
|
'id': '%s_part%s' % (video_id, idx),
|
||||||
|
'duration': float_or_none(durl.get('length'), 1000),
|
||||||
|
'formats': formats,
|
||||||
})
|
})
|
||||||
|
break
|
||||||
|
|
||||||
for a_format in formats:
|
title = self._html_search_regex(
|
||||||
a_format.setdefault('http_headers', {}).update({
|
('<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
|
||||||
'Referer': url,
|
'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
|
||||||
})
|
group='title')
|
||||||
|
|
||||||
self._sort_formats(formats)
|
|
||||||
|
|
||||||
entries.append({
|
|
||||||
'id': '%s_part%s' % (video_id, idx),
|
|
||||||
'duration': float_or_none(durl.get('length'), 1000),
|
|
||||||
'formats': formats,
|
|
||||||
})
|
|
||||||
|
|
||||||
title = self._html_search_regex('<h1[^>]*>([^<]+)</h1>', webpage, 'title')
|
|
||||||
description = self._html_search_meta('description', webpage)
|
description = self._html_search_meta('description', webpage)
|
||||||
timestamp = unified_timestamp(self._html_search_regex(
|
timestamp = unified_timestamp(self._html_search_regex(
|
||||||
r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time', default=None))
|
r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time',
|
||||||
|
default=None) or self._html_search_meta(
|
||||||
|
'uploadDate', webpage, 'timestamp', default=None))
|
||||||
thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
|
thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
|
||||||
|
|
||||||
# TODO 'view_count' requires deobfuscating Javascript
|
# TODO 'view_count' requires deobfuscating Javascript
|
||||||
@ -168,13 +212,16 @@ class BiliBiliIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
uploader_mobj = re.search(
|
uploader_mobj = re.search(
|
||||||
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]+title="(?P<name>[^"]+)"',
|
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>(?P<name>[^<]+)',
|
||||||
webpage)
|
webpage)
|
||||||
if uploader_mobj:
|
if uploader_mobj:
|
||||||
info.update({
|
info.update({
|
||||||
'uploader': uploader_mobj.group('name'),
|
'uploader': uploader_mobj.group('name'),
|
||||||
'uploader_id': uploader_mobj.group('id'),
|
'uploader_id': uploader_mobj.group('id'),
|
||||||
})
|
})
|
||||||
|
if not info.get('uploader'):
|
||||||
|
info['uploader'] = self._html_search_meta(
|
||||||
|
'author', webpage, 'uploader', default=None)
|
||||||
|
|
||||||
for entry in entries:
|
for entry in entries:
|
||||||
entry.update(info)
|
entry.update(info)
|
||||||
|
@ -33,13 +33,18 @@ class BpbIE(InfoExtractor):
|
|||||||
title = self._html_search_regex(
|
title = self._html_search_regex(
|
||||||
r'<h2 class="white">(.*?)</h2>', webpage, 'title')
|
r'<h2 class="white">(.*?)</h2>', webpage, 'title')
|
||||||
video_info_dicts = re.findall(
|
video_info_dicts = re.findall(
|
||||||
r"({\s*src:\s*'http://film\.bpb\.de/[^}]+})", webpage)
|
r"({\s*src\s*:\s*'https?://film\.bpb\.de/[^}]+})", webpage)
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for video_info in video_info_dicts:
|
for video_info in video_info_dicts:
|
||||||
video_info = self._parse_json(video_info, video_id, transform_source=js_to_json)
|
video_info = self._parse_json(
|
||||||
quality = video_info['quality']
|
video_info, video_id, transform_source=js_to_json, fatal=False)
|
||||||
video_url = video_info['src']
|
if not video_info:
|
||||||
|
continue
|
||||||
|
video_url = video_info.get('src')
|
||||||
|
if not video_url:
|
||||||
|
continue
|
||||||
|
quality = 'high' if '_high' in video_url else 'low'
|
||||||
formats.append({
|
formats.append({
|
||||||
'url': video_url,
|
'url': video_url,
|
||||||
'preference': 10 if quality == 'high' else 0,
|
'preference': 10 if quality == 'high' else 0,
|
||||||
|
@ -1,20 +1,23 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import json
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
determine_ext,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
|
parse_iso8601,
|
||||||
xpath_element,
|
xpath_element,
|
||||||
xpath_text,
|
xpath_text,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class BRIE(InfoExtractor):
|
class BRIE(InfoExtractor):
|
||||||
IE_DESC = 'Bayerischer Rundfunk Mediathek'
|
IE_DESC = 'Bayerischer Rundfunk'
|
||||||
_VALID_URL = r'(?P<base_url>https?://(?:www\.)?br(?:-klassik)?\.de)/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html'
|
_VALID_URL = r'(?P<base_url>https?://(?:www\.)?br(?:-klassik)?\.de)/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html'
|
||||||
|
|
||||||
_TESTS = [
|
_TESTS = [
|
||||||
@ -123,10 +126,10 @@ class BRIE(InfoExtractor):
|
|||||||
for asset in assets.findall('asset'):
|
for asset in assets.findall('asset'):
|
||||||
format_url = xpath_text(asset, ['downloadUrl', 'url'])
|
format_url = xpath_text(asset, ['downloadUrl', 'url'])
|
||||||
asset_type = asset.get('type')
|
asset_type = asset.get('type')
|
||||||
if asset_type == 'HDS':
|
if asset_type.startswith('HDS'):
|
||||||
formats.extend(self._extract_f4m_formats(
|
formats.extend(self._extract_f4m_formats(
|
||||||
format_url + '?hdcore=3.2.0', media_id, f4m_id='hds', fatal=False))
|
format_url + '?hdcore=3.2.0', media_id, f4m_id='hds', fatal=False))
|
||||||
elif asset_type == 'HLS':
|
elif asset_type.startswith('HLS'):
|
||||||
formats.extend(self._extract_m3u8_formats(
|
formats.extend(self._extract_m3u8_formats(
|
||||||
format_url, media_id, 'mp4', 'm3u8_native', m3u8_id='hds', fatal=False))
|
format_url, media_id, 'mp4', 'm3u8_native', m3u8_id='hds', fatal=False))
|
||||||
else:
|
else:
|
||||||
@ -169,3 +172,140 @@ class BRIE(InfoExtractor):
|
|||||||
} for variant in variants.findall('variant') if xpath_text(variant, 'url')]
|
} for variant in variants.findall('variant') if xpath_text(variant, 'url')]
|
||||||
thumbnails.sort(key=lambda x: x['width'] * x['height'], reverse=True)
|
thumbnails.sort(key=lambda x: x['width'] * x['height'], reverse=True)
|
||||||
return thumbnails
|
return thumbnails
|
||||||
|
|
||||||
|
|
||||||
|
class BRMediathekIE(InfoExtractor):
|
||||||
|
IE_DESC = 'Bayerischer Rundfunk Mediathek'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?br\.de/mediathek/video/[^/?&#]*?-(?P<id>av:[0-9a-f]{24})'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.br.de/mediathek/video/gesundheit-die-sendung-vom-28112017-av:5a1e6a6e8fce6d001871cc8e',
|
||||||
|
'md5': 'fdc3d485835966d1622587d08ba632ec',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'av:5a1e6a6e8fce6d001871cc8e',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Die Sendung vom 28.11.2017',
|
||||||
|
'description': 'md5:6000cdca5912ab2277e5b7339f201ccc',
|
||||||
|
'timestamp': 1511942766,
|
||||||
|
'upload_date': '20171129',
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
clip_id = self._match_id(url)
|
||||||
|
|
||||||
|
clip = self._download_json(
|
||||||
|
'https://proxy-base.master.mango.express/graphql',
|
||||||
|
clip_id, data=json.dumps({
|
||||||
|
"query": """{
|
||||||
|
viewer {
|
||||||
|
clip(id: "%s") {
|
||||||
|
title
|
||||||
|
description
|
||||||
|
duration
|
||||||
|
createdAt
|
||||||
|
ageRestriction
|
||||||
|
videoFiles {
|
||||||
|
edges {
|
||||||
|
node {
|
||||||
|
publicLocation
|
||||||
|
fileSize
|
||||||
|
videoProfile {
|
||||||
|
width
|
||||||
|
height
|
||||||
|
bitrate
|
||||||
|
encoding
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
captionFiles {
|
||||||
|
edges {
|
||||||
|
node {
|
||||||
|
publicLocation
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
teaserImages {
|
||||||
|
edges {
|
||||||
|
node {
|
||||||
|
imageFiles {
|
||||||
|
edges {
|
||||||
|
node {
|
||||||
|
publicLocation
|
||||||
|
width
|
||||||
|
height
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}""" % clip_id}).encode(), headers={
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
})['data']['viewer']['clip']
|
||||||
|
title = clip['title']
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for edge in clip.get('videoFiles', {}).get('edges', []):
|
||||||
|
node = edge.get('node', {})
|
||||||
|
n_url = node.get('publicLocation')
|
||||||
|
if not n_url:
|
||||||
|
continue
|
||||||
|
ext = determine_ext(n_url)
|
||||||
|
if ext == 'm3u8':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
n_url, clip_id, 'mp4', 'm3u8_native',
|
||||||
|
m3u8_id='hls', fatal=False))
|
||||||
|
else:
|
||||||
|
video_profile = node.get('videoProfile', {})
|
||||||
|
tbr = int_or_none(video_profile.get('bitrate'))
|
||||||
|
format_id = 'http'
|
||||||
|
if tbr:
|
||||||
|
format_id += '-%d' % tbr
|
||||||
|
formats.append({
|
||||||
|
'format_id': format_id,
|
||||||
|
'url': n_url,
|
||||||
|
'width': int_or_none(video_profile.get('width')),
|
||||||
|
'height': int_or_none(video_profile.get('height')),
|
||||||
|
'tbr': tbr,
|
||||||
|
'filesize': int_or_none(node.get('fileSize')),
|
||||||
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
subtitles = {}
|
||||||
|
for edge in clip.get('captionFiles', {}).get('edges', []):
|
||||||
|
node = edge.get('node', {})
|
||||||
|
n_url = node.get('publicLocation')
|
||||||
|
if not n_url:
|
||||||
|
continue
|
||||||
|
subtitles.setdefault('de', []).append({
|
||||||
|
'url': n_url,
|
||||||
|
})
|
||||||
|
|
||||||
|
thumbnails = []
|
||||||
|
for edge in clip.get('teaserImages', {}).get('edges', []):
|
||||||
|
for image_edge in edge.get('node', {}).get('imageFiles', {}).get('edges', []):
|
||||||
|
node = image_edge.get('node', {})
|
||||||
|
n_url = node.get('publicLocation')
|
||||||
|
if not n_url:
|
||||||
|
continue
|
||||||
|
thumbnails.append({
|
||||||
|
'url': n_url,
|
||||||
|
'width': int_or_none(node.get('width')),
|
||||||
|
'height': int_or_none(node.get('height')),
|
||||||
|
})
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': clip_id,
|
||||||
|
'title': title,
|
||||||
|
'description': clip.get('description'),
|
||||||
|
'duration': int_or_none(clip.get('duration')),
|
||||||
|
'timestamp': parse_iso8601(clip.get('createdAt')),
|
||||||
|
'age_limit': int_or_none(clip.get('ageRestriction')),
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
'thumbnails': thumbnails,
|
||||||
|
}
|
||||||
|
@ -3,15 +3,13 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from .youtube import YoutubeIE
|
||||||
from ..compat import compat_str
|
from ..compat import compat_str
|
||||||
from ..utils import (
|
from ..utils import int_or_none
|
||||||
int_or_none,
|
|
||||||
parse_age_limit,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class BreakIE(InfoExtractor):
|
class BreakIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?(?P<site>break|screenjunkies)\.com/video/(?P<display_id>[^/]+?)(?:-(?P<id>\d+))?(?:[/?#&]|$)'
|
_VALID_URL = r'https?://(?:www\.)?break\.com/video/(?P<display_id>[^/]+?)(?:-(?P<id>\d+))?(?:[/?#&]|$)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
|
'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@ -19,125 +17,73 @@ class BreakIE(InfoExtractor):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'When Girls Act Like D-Bags',
|
'title': 'When Girls Act Like D-Bags',
|
||||||
'age_limit': 13,
|
'age_limit': 13,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
# youtube embed
|
||||||
|
'url': 'http://www.break.com/video/someone-forgot-boat-brakes-work',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'RrrDLdeL2HQ',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Whale Watching Boat Crashing Into San Diego Dock',
|
||||||
|
'description': 'md5:afc1b2772f0a8468be51dd80eb021069',
|
||||||
|
'upload_date': '20160331',
|
||||||
|
'uploader': 'Steve Holden',
|
||||||
|
'uploader_id': 'sdholden07',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
}
|
}
|
||||||
}, {
|
|
||||||
'url': 'http://www.screenjunkies.com/video/best-quentin-tarantino-movie-2841915',
|
|
||||||
'md5': '5c2b686bec3d43de42bde9ec047536b0',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '2841915',
|
|
||||||
'display_id': 'best-quentin-tarantino-movie',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Best Quentin Tarantino Movie',
|
|
||||||
'thumbnail': r're:^https?://.*\.jpg',
|
|
||||||
'duration': 3671,
|
|
||||||
'age_limit': 13,
|
|
||||||
'tags': list,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.screenjunkies.com/video/honest-trailers-the-dark-knight',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '2348808',
|
|
||||||
'display_id': 'honest-trailers-the-dark-knight',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Honest Trailers - The Dark Knight',
|
|
||||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
|
||||||
'age_limit': 10,
|
|
||||||
'tags': list,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
# requires subscription but worked around
|
|
||||||
'url': 'http://www.screenjunkies.com/video/knocking-dead-ep-1-the-show-so-far-3003285',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '3003285',
|
|
||||||
'display_id': 'knocking-dead-ep-1-the-show-so-far',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'State of The Dead Recap: Knocking Dead Pilot',
|
|
||||||
'thumbnail': r're:^https?://.*\.jpg',
|
|
||||||
'duration': 3307,
|
|
||||||
'age_limit': 13,
|
|
||||||
'tags': list,
|
|
||||||
},
|
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
|
'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
_DEFAULT_BITRATES = (48, 150, 320, 496, 864, 2240, 3264)
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
site, display_id, video_id = re.match(self._VALID_URL, url).groups()
|
display_id, video_id = re.match(self._VALID_URL, url).groups()
|
||||||
|
|
||||||
if not video_id:
|
webpage = self._download_webpage(url, display_id)
|
||||||
webpage = self._download_webpage(url, display_id)
|
|
||||||
video_id = self._search_regex(
|
|
||||||
(r'src=["\']/embed/(\d+)', r'data-video-content-id=["\'](\d+)'),
|
|
||||||
webpage, 'video id')
|
|
||||||
|
|
||||||
webpage = self._download_webpage(
|
youtube_url = YoutubeIE._extract_url(webpage)
|
||||||
'http://www.%s.com/embed/%s' % (site, video_id),
|
if youtube_url:
|
||||||
display_id, 'Downloading video embed page')
|
return self.url_result(youtube_url, ie=YoutubeIE.ie_key())
|
||||||
embed_vars = self._parse_json(
|
|
||||||
|
content = self._parse_json(
|
||||||
self._search_regex(
|
self._search_regex(
|
||||||
r'(?s)embedVars\s*=\s*({.+?})\s*</script>', webpage, 'embed vars'),
|
r'(?s)content["\']\s*:\s*(\[.+?\])\s*[,\n]', webpage,
|
||||||
|
'content'),
|
||||||
display_id)
|
display_id)
|
||||||
|
|
||||||
youtube_id = embed_vars.get('youtubeId')
|
|
||||||
if youtube_id:
|
|
||||||
return self.url_result(youtube_id, 'Youtube')
|
|
||||||
|
|
||||||
title = embed_vars['contentName']
|
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
bitrates = []
|
for video in content:
|
||||||
for f in embed_vars.get('media', []):
|
video_url = video.get('url')
|
||||||
if not f.get('uri') or f.get('mediaPurpose') != 'play':
|
if not video_url or not isinstance(video_url, compat_str):
|
||||||
continue
|
continue
|
||||||
bitrate = int_or_none(f.get('bitRate'))
|
bitrate = int_or_none(self._search_regex(
|
||||||
if bitrate:
|
r'(\d+)_kbps', video_url, 'tbr', default=None))
|
||||||
bitrates.append(bitrate)
|
|
||||||
formats.append({
|
formats.append({
|
||||||
'url': f['uri'],
|
'url': video_url,
|
||||||
'format_id': 'http-%d' % bitrate if bitrate else 'http',
|
'format_id': 'http-%d' % bitrate if bitrate else 'http',
|
||||||
'width': int_or_none(f.get('width')),
|
|
||||||
'height': int_or_none(f.get('height')),
|
|
||||||
'tbr': bitrate,
|
'tbr': bitrate,
|
||||||
'format': 'mp4',
|
|
||||||
})
|
})
|
||||||
|
|
||||||
if not bitrates:
|
|
||||||
# When subscriptionLevel > 0, i.e. plus subscription is required
|
|
||||||
# media list will be empty. However, hds and hls uris are still
|
|
||||||
# available. We can grab them assuming bitrates to be default.
|
|
||||||
bitrates = self._DEFAULT_BITRATES
|
|
||||||
|
|
||||||
auth_token = embed_vars.get('AuthToken')
|
|
||||||
|
|
||||||
def construct_manifest_url(base_url, ext):
|
|
||||||
pieces = [base_url]
|
|
||||||
pieces.extend([compat_str(b) for b in bitrates])
|
|
||||||
pieces.append('_kbps.mp4.%s?%s' % (ext, auth_token))
|
|
||||||
return ','.join(pieces)
|
|
||||||
|
|
||||||
if bitrates and auth_token:
|
|
||||||
hds_url = embed_vars.get('hdsUri')
|
|
||||||
if hds_url:
|
|
||||||
formats.extend(self._extract_f4m_formats(
|
|
||||||
construct_manifest_url(hds_url, 'f4m'),
|
|
||||||
display_id, f4m_id='hds', fatal=False))
|
|
||||||
hls_url = embed_vars.get('hlsUri')
|
|
||||||
if hls_url:
|
|
||||||
formats.extend(self._extract_m3u8_formats(
|
|
||||||
construct_manifest_url(hls_url, 'm3u8'),
|
|
||||||
display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
title = self._search_regex(
|
||||||
|
(r'title["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
|
||||||
|
r'<h1[^>]*>(?P<value>[^<]+)'), webpage, 'title', group='value')
|
||||||
|
|
||||||
|
def get(key, name):
|
||||||
|
return int_or_none(self._search_regex(
|
||||||
|
r'%s["\']\s*:\s*["\'](\d+)' % key, webpage, name,
|
||||||
|
default=None))
|
||||||
|
|
||||||
|
age_limit = get('ratings', 'age limit')
|
||||||
|
video_id = video_id or get('pid', 'video id') or display_id
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'thumbnail': embed_vars.get('thumbUri'),
|
'thumbnail': self._og_search_thumbnail(webpage),
|
||||||
'duration': int_or_none(embed_vars.get('videoLengthInSeconds')) or None,
|
'age_limit': age_limit,
|
||||||
'age_limit': parse_age_limit(embed_vars.get('audienceRating')),
|
|
||||||
'tags': embed_vars.get('tags', '').split(','),
|
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
}
|
}
|
||||||
|
@ -464,7 +464,7 @@ class BrightcoveNewIE(AdobePassIE):
|
|||||||
'timestamp': 1441391203,
|
'timestamp': 1441391203,
|
||||||
'upload_date': '20150904',
|
'upload_date': '20150904',
|
||||||
'uploader_id': '929656772001',
|
'uploader_id': '929656772001',
|
||||||
'formats': 'mincount:22',
|
'formats': 'mincount:20',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
# with rtmp streams
|
# with rtmp streams
|
||||||
@ -478,7 +478,7 @@ class BrightcoveNewIE(AdobePassIE):
|
|||||||
'timestamp': 1433556729,
|
'timestamp': 1433556729,
|
||||||
'upload_date': '20150606',
|
'upload_date': '20150606',
|
||||||
'uploader_id': '4036320279001',
|
'uploader_id': '4036320279001',
|
||||||
'formats': 'mincount:41',
|
'formats': 'mincount:39',
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
# m3u8 download
|
# m3u8 download
|
||||||
@ -564,59 +564,7 @@ class BrightcoveNewIE(AdobePassIE):
|
|||||||
|
|
||||||
return entries
|
return entries
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _parse_brightcove_metadata(self, json_data, video_id, headers={}):
|
||||||
url, smuggled_data = unsmuggle_url(url, {})
|
|
||||||
self._initialize_geo_bypass(smuggled_data.get('geo_countries'))
|
|
||||||
|
|
||||||
account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
|
|
||||||
|
|
||||||
webpage = self._download_webpage(
|
|
||||||
'http://players.brightcove.net/%s/%s_%s/index.min.js'
|
|
||||||
% (account_id, player_id, embed), video_id)
|
|
||||||
|
|
||||||
policy_key = None
|
|
||||||
|
|
||||||
catalog = self._search_regex(
|
|
||||||
r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
|
|
||||||
if catalog:
|
|
||||||
catalog = self._parse_json(
|
|
||||||
js_to_json(catalog), video_id, fatal=False)
|
|
||||||
if catalog:
|
|
||||||
policy_key = catalog.get('policyKey')
|
|
||||||
|
|
||||||
if not policy_key:
|
|
||||||
policy_key = self._search_regex(
|
|
||||||
r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
|
|
||||||
webpage, 'policy key', group='pk')
|
|
||||||
|
|
||||||
api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account_id, video_id)
|
|
||||||
try:
|
|
||||||
json_data = self._download_json(api_url, video_id, headers={
|
|
||||||
'Accept': 'application/json;pk=%s' % policy_key
|
|
||||||
})
|
|
||||||
except ExtractorError as e:
|
|
||||||
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
|
|
||||||
json_data = self._parse_json(e.cause.read().decode(), video_id)[0]
|
|
||||||
message = json_data.get('message') or json_data['error_code']
|
|
||||||
if json_data.get('error_subcode') == 'CLIENT_GEO':
|
|
||||||
self.raise_geo_restricted(msg=message)
|
|
||||||
raise ExtractorError(message, expected=True)
|
|
||||||
raise
|
|
||||||
|
|
||||||
errors = json_data.get('errors')
|
|
||||||
if errors and errors[0].get('error_subcode') == 'TVE_AUTH':
|
|
||||||
custom_fields = json_data['custom_fields']
|
|
||||||
tve_token = self._extract_mvpd_auth(
|
|
||||||
smuggled_data['source_url'], video_id,
|
|
||||||
custom_fields['bcadobepassrequestorid'],
|
|
||||||
custom_fields['bcadobepassresourceid'])
|
|
||||||
json_data = self._download_json(
|
|
||||||
api_url, video_id, headers={
|
|
||||||
'Accept': 'application/json;pk=%s' % policy_key
|
|
||||||
}, query={
|
|
||||||
'tveToken': tve_token,
|
|
||||||
})
|
|
||||||
|
|
||||||
title = json_data['name'].strip()
|
title = json_data['name'].strip()
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
@ -682,6 +630,7 @@ class BrightcoveNewIE(AdobePassIE):
|
|||||||
})
|
})
|
||||||
formats.append(f)
|
formats.append(f)
|
||||||
|
|
||||||
|
errors = json_data.get('errors')
|
||||||
if not formats and errors:
|
if not formats and errors:
|
||||||
error = errors[0]
|
error = errors[0]
|
||||||
raise ExtractorError(
|
raise ExtractorError(
|
||||||
@ -689,6 +638,9 @@ class BrightcoveNewIE(AdobePassIE):
|
|||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
for f in formats:
|
||||||
|
f.setdefault('http_headers', {}).update(headers)
|
||||||
|
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
for text_track in json_data.get('text_tracks', []):
|
for text_track in json_data.get('text_tracks', []):
|
||||||
if text_track.get('src'):
|
if text_track.get('src'):
|
||||||
@ -708,9 +660,72 @@ class BrightcoveNewIE(AdobePassIE):
|
|||||||
'thumbnail': json_data.get('thumbnail') or json_data.get('poster'),
|
'thumbnail': json_data.get('thumbnail') or json_data.get('poster'),
|
||||||
'duration': duration,
|
'duration': duration,
|
||||||
'timestamp': parse_iso8601(json_data.get('published_at')),
|
'timestamp': parse_iso8601(json_data.get('published_at')),
|
||||||
'uploader_id': account_id,
|
'uploader_id': json_data.get('account_id'),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
'tags': json_data.get('tags', []),
|
'tags': json_data.get('tags', []),
|
||||||
'is_live': is_live,
|
'is_live': is_live,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
url, smuggled_data = unsmuggle_url(url, {})
|
||||||
|
self._initialize_geo_bypass(smuggled_data.get('geo_countries'))
|
||||||
|
|
||||||
|
account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
|
||||||
|
|
||||||
|
webpage = self._download_webpage(
|
||||||
|
'http://players.brightcove.net/%s/%s_%s/index.min.js'
|
||||||
|
% (account_id, player_id, embed), video_id)
|
||||||
|
|
||||||
|
policy_key = None
|
||||||
|
|
||||||
|
catalog = self._search_regex(
|
||||||
|
r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
|
||||||
|
if catalog:
|
||||||
|
catalog = self._parse_json(
|
||||||
|
js_to_json(catalog), video_id, fatal=False)
|
||||||
|
if catalog:
|
||||||
|
policy_key = catalog.get('policyKey')
|
||||||
|
|
||||||
|
if not policy_key:
|
||||||
|
policy_key = self._search_regex(
|
||||||
|
r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
|
||||||
|
webpage, 'policy key', group='pk')
|
||||||
|
|
||||||
|
api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account_id, video_id)
|
||||||
|
headers = {
|
||||||
|
'Accept': 'application/json;pk=%s' % policy_key,
|
||||||
|
}
|
||||||
|
referrer = smuggled_data.get('referrer')
|
||||||
|
if referrer:
|
||||||
|
headers.update({
|
||||||
|
'Referer': referrer,
|
||||||
|
'Origin': re.search(r'https?://[^/]+', referrer).group(0),
|
||||||
|
})
|
||||||
|
try:
|
||||||
|
json_data = self._download_json(api_url, video_id, headers=headers)
|
||||||
|
except ExtractorError as e:
|
||||||
|
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
|
||||||
|
json_data = self._parse_json(e.cause.read().decode(), video_id)[0]
|
||||||
|
message = json_data.get('message') or json_data['error_code']
|
||||||
|
if json_data.get('error_subcode') == 'CLIENT_GEO':
|
||||||
|
self.raise_geo_restricted(msg=message)
|
||||||
|
raise ExtractorError(message, expected=True)
|
||||||
|
raise
|
||||||
|
|
||||||
|
errors = json_data.get('errors')
|
||||||
|
if errors and errors[0].get('error_subcode') == 'TVE_AUTH':
|
||||||
|
custom_fields = json_data['custom_fields']
|
||||||
|
tve_token = self._extract_mvpd_auth(
|
||||||
|
smuggled_data['source_url'], video_id,
|
||||||
|
custom_fields['bcadobepassrequestorid'],
|
||||||
|
custom_fields['bcadobepassresourceid'])
|
||||||
|
json_data = self._download_json(
|
||||||
|
api_url, video_id, headers={
|
||||||
|
'Accept': 'application/json;pk=%s' % policy_key
|
||||||
|
}, query={
|
||||||
|
'tveToken': tve_token,
|
||||||
|
})
|
||||||
|
|
||||||
|
return self._parse_brightcove_metadata(
|
||||||
|
json_data, video_id, headers=headers)
|
||||||
|
@ -3,20 +3,19 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import ExtractorError
|
|
||||||
|
|
||||||
|
|
||||||
class BYUtvIE(InfoExtractor):
|
class BYUtvIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?byutv\.org/watch/(?!event/)(?P<id>[0-9a-f-]+)(?:/(?P<display_id>[^/?#&]+))?'
|
_VALID_URL = r'https?://(?:www\.)?byutv\.org/(?:watch|player)/(?!event/)(?P<id>[0-9a-f-]+)(?:/(?P<display_id>[^/?#&]+))?'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
|
'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '6587b9a3-89d2-42a6-a7f7-fd2f81840a7d',
|
'id': 'ZvanRocTpW-G5_yZFeltTAMv6jxOU9KH',
|
||||||
'display_id': 'studio-c-season-5-episode-5',
|
'display_id': 'studio-c-season-5-episode-5',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Season 5 Episode 5',
|
'title': 'Season 5 Episode 5',
|
||||||
'description': 'md5:e07269172baff037f8e8bf9956bc9747',
|
'description': 'md5:1d31dc18ef4f075b28f6a65937d22c65',
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
'thumbnail': r're:^https?://.*',
|
||||||
'duration': 1486.486,
|
'duration': 1486.486,
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
@ -26,6 +25,9 @@ class BYUtvIE(InfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d',
|
'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.byutv.org/player/27741493-dc83-40b0-8420-e7ae38a2ae98/byu-football-toledo-vs-byu-93016?listid=4fe0fee5-0d3c-4a29-b725-e4948627f472&listindex=0&q=toledo',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -33,16 +35,16 @@ class BYUtvIE(InfoExtractor):
|
|||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
display_id = mobj.group('display_id') or video_id
|
display_id = mobj.group('display_id') or video_id
|
||||||
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
ep = self._download_json(
|
||||||
episode_code = self._search_regex(
|
'https://api.byutv.org/api3/catalog/getvideosforcontent', video_id,
|
||||||
r'(?s)episode:(.*?\}),\s*\n', webpage, 'episode information')
|
query={
|
||||||
|
'contentid': video_id,
|
||||||
ep = self._parse_json(
|
'channel': 'byutv',
|
||||||
episode_code, display_id, transform_source=lambda s:
|
'x-byutv-context': 'web$US',
|
||||||
re.sub(r'(\n\s+)([a-zA-Z]+):\s+\'(.*?)\'', r'\1"\2": "\3"', s))
|
}, headers={
|
||||||
|
'x-byutv-context': 'web$US',
|
||||||
if ep['providerType'] != 'Ooyala':
|
'x-byutv-platformkey': 'xsaaw9c7y5',
|
||||||
raise ExtractorError('Unsupported provider %s' % ep['provider'])
|
})['ooyalaVOD']
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'_type': 'url_transparent',
|
'_type': 'url_transparent',
|
||||||
@ -50,44 +52,7 @@ class BYUtvIE(InfoExtractor):
|
|||||||
'url': 'ooyala:%s' % ep['providerId'],
|
'url': 'ooyala:%s' % ep['providerId'],
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
'title': ep['title'],
|
'title': ep.get('title'),
|
||||||
'description': ep.get('description'),
|
'description': ep.get('description'),
|
||||||
'thumbnail': ep.get('imageThumbnail'),
|
'thumbnail': ep.get('imageThumbnail'),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class BYUtvEventIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?byutv\.org/watch/event/(?P<id>[0-9a-f-]+)'
|
|
||||||
_TEST = {
|
|
||||||
'url': 'http://www.byutv.org/watch/event/29941b9b-8bf6-48d2-aebf-7a87add9e34b',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '29941b9b-8bf6-48d2-aebf-7a87add9e34b',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Toledo vs. BYU (9/30/16)',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'add_ie': ['Ooyala'],
|
|
||||||
}
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
|
||||||
|
|
||||||
ooyala_id = self._search_regex(
|
|
||||||
r'providerId\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
|
|
||||||
webpage, 'ooyala id', group='id')
|
|
||||||
|
|
||||||
title = self._search_regex(
|
|
||||||
r'class=["\']description["\'][^>]*>\s*<h1>([^<]+)</h1>', webpage,
|
|
||||||
'title').strip()
|
|
||||||
|
|
||||||
return {
|
|
||||||
'_type': 'url_transparent',
|
|
||||||
'ie_key': 'Ooyala',
|
|
||||||
'url': 'ooyala:%s' % ooyala_id,
|
|
||||||
'id': video_id,
|
|
||||||
'title': title,
|
|
||||||
}
|
|
||||||
|
@ -31,6 +31,10 @@ class Canalc2IE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(
|
webpage = self._download_webpage(
|
||||||
'http://www.canalc2.tv/video/%s' % video_id, video_id)
|
'http://www.canalc2.tv/video/%s' % video_id, video_id)
|
||||||
|
|
||||||
|
title = self._html_search_regex(
|
||||||
|
r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.+?)</h3>',
|
||||||
|
webpage, 'title')
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for _, video_url in re.findall(r'file\s*=\s*(["\'])(.+?)\1', webpage):
|
for _, video_url in re.findall(r'file\s*=\s*(["\'])(.+?)\1', webpage):
|
||||||
if video_url.startswith('rtmp://'):
|
if video_url.startswith('rtmp://'):
|
||||||
@ -49,17 +53,21 @@ class Canalc2IE(InfoExtractor):
|
|||||||
'url': video_url,
|
'url': video_url,
|
||||||
'format_id': 'http',
|
'format_id': 'http',
|
||||||
})
|
})
|
||||||
self._sort_formats(formats)
|
|
||||||
|
|
||||||
title = self._html_search_regex(
|
if formats:
|
||||||
r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.*?)</h3>', webpage, 'title')
|
info = {
|
||||||
duration = parse_duration(self._search_regex(
|
'formats': formats,
|
||||||
r'id=["\']video_duree["\'][^>]*>([^<]+)',
|
}
|
||||||
webpage, 'duration', fatal=False))
|
else:
|
||||||
|
info = self._parse_html5_media_entries(url, webpage, url)[0]
|
||||||
|
|
||||||
return {
|
self._sort_formats(info['formats'])
|
||||||
|
|
||||||
|
info.update({
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'duration': duration,
|
'duration': parse_duration(self._search_regex(
|
||||||
'formats': formats,
|
r'id=["\']video_duree["\'][^>]*>([^<]+)',
|
||||||
}
|
webpage, 'duration', fatal=False)),
|
||||||
|
})
|
||||||
|
return info
|
||||||
|
@ -4,59 +4,36 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_urllib_parse_urlparse
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
dict_get,
|
|
||||||
# ExtractorError,
|
# ExtractorError,
|
||||||
# HEADRequest,
|
# HEADRequest,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
qualities,
|
qualities,
|
||||||
remove_end,
|
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class CanalplusIE(InfoExtractor):
|
class CanalplusIE(InfoExtractor):
|
||||||
IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
|
IE_DESC = 'mycanal.fr and piwiplus.fr'
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'https?://(?:www\.)?(?P<site>mycanal|piwiplus)\.fr/(?:[^/]+/)*(?P<display_id>[^?/]+)(?:\.html\?.*\bvid=|/p/)(?P<id>\d+)'
|
||||||
https?://
|
|
||||||
(?:
|
|
||||||
(?:
|
|
||||||
(?:(?:www|m)\.)?canalplus\.fr|
|
|
||||||
(?:www\.)?piwiplus\.fr|
|
|
||||||
(?:www\.)?d8\.tv|
|
|
||||||
(?:www\.)?c8\.fr|
|
|
||||||
(?:www\.)?d17\.tv|
|
|
||||||
(?:(?:football|www)\.)?cstar\.fr|
|
|
||||||
(?:www\.)?itele\.fr
|
|
||||||
)/(?:(?:[^/]+/)*(?P<display_id>[^/?#&]+))?(?:\?.*\bvid=(?P<vid>\d+))?|
|
|
||||||
player\.canalplus\.fr/#/(?P<id>\d+)
|
|
||||||
)
|
|
||||||
|
|
||||||
'''
|
|
||||||
_VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s?format=json'
|
_VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s?format=json'
|
||||||
_SITE_ID_MAP = {
|
_SITE_ID_MAP = {
|
||||||
'canalplus': 'cplus',
|
'mycanal': 'cplus',
|
||||||
'piwiplus': 'teletoon',
|
'piwiplus': 'teletoon',
|
||||||
'd8': 'd8',
|
|
||||||
'c8': 'd8',
|
|
||||||
'd17': 'd17',
|
|
||||||
'cstar': 'd17',
|
|
||||||
'itele': 'itele',
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Only works for direct mp4 URLs
|
# Only works for direct mp4 URLs
|
||||||
_GEO_COUNTRIES = ['FR']
|
_GEO_COUNTRIES = ['FR']
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.canalplus.fr/c-emissions/pid1830-c-zapping.html?vid=1192814',
|
'url': 'https://www.mycanal.fr/d17-emissions/lolywood/p/1397061',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1405510',
|
'id': '1397061',
|
||||||
'display_id': 'pid1830-c-zapping',
|
'display_id': 'lolywood',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Zapping - 02/07/2016',
|
'title': 'Euro 2016 : Je préfère te prévenir - Lolywood - Episode 34',
|
||||||
'description': 'Le meilleur de toutes les chaînes, tous les jours',
|
'description': 'md5:7d97039d455cb29cdba0d652a0efaa5e',
|
||||||
'upload_date': '20160702',
|
'upload_date': '20160602',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
# geo restricted, bypassed
|
# geo restricted, bypassed
|
||||||
@ -70,64 +47,12 @@ class CanalplusIE(InfoExtractor):
|
|||||||
'upload_date': '20140724',
|
'upload_date': '20140724',
|
||||||
},
|
},
|
||||||
'expected_warnings': ['HTTP Error 403: Forbidden'],
|
'expected_warnings': ['HTTP Error 403: Forbidden'],
|
||||||
}, {
|
|
||||||
# geo restricted, bypassed
|
|
||||||
'url': 'http://www.c8.fr/c8-divertissement/ms-touche-pas-a-mon-poste/pid6318-videos-integrales.html?vid=1443684',
|
|
||||||
'md5': 'bb6f9f343296ab7ebd88c97b660ecf8d',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '1443684',
|
|
||||||
'display_id': 'pid6318-videos-integrales',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Guess my iep ! - TPMP - 07/04/2017',
|
|
||||||
'description': 'md5:6f005933f6e06760a9236d9b3b5f17fa',
|
|
||||||
'upload_date': '20170407',
|
|
||||||
},
|
|
||||||
'expected_warnings': ['HTTP Error 403: Forbidden'],
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.itele.fr/chroniques/invite-michael-darmon/rachida-dati-nicolas-sarkozy-est-le-plus-en-phase-avec-les-inquietudes-des-francais-171510',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '1420176',
|
|
||||||
'display_id': 'rachida-dati-nicolas-sarkozy-est-le-plus-en-phase-avec-les-inquietudes-des-francais-171510',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'L\'invité de Michaël Darmon du 14/10/2016 - ',
|
|
||||||
'description': 'Chaque matin du lundi au vendredi, Michaël Darmon reçoit un invité politique à 8h25.',
|
|
||||||
'upload_date': '20161014',
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://football.cstar.fr/cstar-minisite-foot/pid7566-feminines-videos.html?vid=1416769',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '1416769',
|
|
||||||
'display_id': 'pid7566-feminines-videos',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'France - Albanie : les temps forts de la soirée - 20/09/2016',
|
|
||||||
'description': 'md5:c3f30f2aaac294c1c969b3294de6904e',
|
|
||||||
'upload_date': '20160921',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://m.canalplus.fr/?vid=1398231',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.d17.tv/emissions/pid8303-lolywood.html?vid=1397061',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
site, display_id, video_id = re.match(self._VALID_URL, url).groups()
|
||||||
|
|
||||||
site_id = self._SITE_ID_MAP[compat_urllib_parse_urlparse(url).netloc.rsplit('.', 2)[-2]]
|
site_id = self._SITE_ID_MAP[site]
|
||||||
|
|
||||||
# Beware, some subclasses do not define an id group
|
|
||||||
display_id = remove_end(dict_get(mobj.groupdict(), ('display_id', 'id', 'vid')), '.html')
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
|
||||||
video_id = self._search_regex(
|
|
||||||
[r'<canal:player[^>]+?videoId=(["\'])(?P<id>\d+)',
|
|
||||||
r'id=["\']canal_video_player(?P<id>\d+)',
|
|
||||||
r'data-video=["\'](?P<id>\d+)'],
|
|
||||||
webpage, 'video id', default=mobj.group('vid'), group='id')
|
|
||||||
|
|
||||||
info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
|
info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
|
||||||
video_data = self._download_json(info_url, video_id, 'Downloading video JSON')
|
video_data = self._download_json(info_url, video_id, 'Downloading video JSON')
|
||||||
@ -161,7 +86,7 @@ class CanalplusIE(InfoExtractor):
|
|||||||
format_url + '?hdcore=2.11.3', video_id, f4m_id=format_id, fatal=False))
|
format_url + '?hdcore=2.11.3', video_id, f4m_id=format_id, fatal=False))
|
||||||
else:
|
else:
|
||||||
formats.append({
|
formats.append({
|
||||||
# the secret extracted ya function in http://player.canalplus.fr/common/js/canalPlayer.js
|
# the secret extracted from ya function in http://player.canalplus.fr/common/js/canalPlayer.js
|
||||||
'url': format_url + '?secret=pqzerjlsmdkjfoiuerhsdlfknaes',
|
'url': format_url + '?secret=pqzerjlsmdkjfoiuerhsdlfknaes',
|
||||||
'format_id': format_id,
|
'format_id': format_id,
|
||||||
'preference': preference(format_id),
|
'preference': preference(format_id),
|
||||||
|
@ -1,26 +1,112 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
import json
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import float_or_none
|
from .gigya import GigyaBaseIE
|
||||||
|
from ..compat import compat_HTTPError
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
strip_or_none,
|
||||||
|
float_or_none,
|
||||||
|
int_or_none,
|
||||||
|
parse_iso8601,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class CanvasIE(InfoExtractor):
|
class CanvasIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrtvideo)/assets/(?P<id>[^/?#&]+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
|
||||||
|
'md5': '90139b746a0a9bd7bb631283f6e2a64e',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
|
||||||
|
'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'Nachtwacht: De Greystook',
|
||||||
|
'description': 'md5:1db3f5dc4c7109c821261e7512975be7',
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
'duration': 1468.03,
|
||||||
|
},
|
||||||
|
'expected_warnings': ['is not a supported codec', 'Unknown MIME type'],
|
||||||
|
}, {
|
||||||
|
'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
site_id, video_id = mobj.group('site_id'), mobj.group('id')
|
||||||
|
|
||||||
|
data = self._download_json(
|
||||||
|
'https://mediazone.vrt.be/api/v1/%s/assets/%s'
|
||||||
|
% (site_id, video_id), video_id)
|
||||||
|
|
||||||
|
title = data['title']
|
||||||
|
description = data.get('description')
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for target in data['targetUrls']:
|
||||||
|
format_url, format_type = target.get('url'), target.get('type')
|
||||||
|
if not format_url or not format_type:
|
||||||
|
continue
|
||||||
|
if format_type == 'HLS':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||||
|
m3u8_id=format_type, fatal=False))
|
||||||
|
elif format_type == 'HDS':
|
||||||
|
formats.extend(self._extract_f4m_formats(
|
||||||
|
format_url, video_id, f4m_id=format_type, fatal=False))
|
||||||
|
elif format_type == 'MPEG_DASH':
|
||||||
|
formats.extend(self._extract_mpd_formats(
|
||||||
|
format_url, video_id, mpd_id=format_type, fatal=False))
|
||||||
|
elif format_type == 'HSS':
|
||||||
|
formats.extend(self._extract_ism_formats(
|
||||||
|
format_url, video_id, ism_id='mss', fatal=False))
|
||||||
|
else:
|
||||||
|
formats.append({
|
||||||
|
'format_id': format_type,
|
||||||
|
'url': format_url,
|
||||||
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
subtitles = {}
|
||||||
|
subtitle_urls = data.get('subtitleUrls')
|
||||||
|
if isinstance(subtitle_urls, list):
|
||||||
|
for subtitle in subtitle_urls:
|
||||||
|
subtitle_url = subtitle.get('url')
|
||||||
|
if subtitle_url and subtitle.get('type') == 'CLOSED':
|
||||||
|
subtitles.setdefault('nl', []).append({'url': subtitle_url})
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'display_id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'formats': formats,
|
||||||
|
'duration': float_or_none(data.get('duration'), 1000),
|
||||||
|
'thumbnail': data.get('posterImageUrl'),
|
||||||
|
'subtitles': subtitles,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class CanvasEenIE(InfoExtractor):
|
||||||
IE_DESC = 'canvas.be and een.be'
|
IE_DESC = 'canvas.be and een.be'
|
||||||
_VALID_URL = r'https?://(?:www\.)?(?P<site_id>canvas|een)\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://(?:www\.)?(?P<site_id>canvas|een)\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
|
'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
|
||||||
'md5': 'ea838375a547ac787d4064d8c7860a6c',
|
'md5': 'ed66976748d12350b118455979cca293',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
|
'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
|
||||||
'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
|
'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
|
||||||
'ext': 'mp4',
|
'ext': 'flv',
|
||||||
'title': 'De afspraak veilt voor de Warmste Week',
|
'title': 'De afspraak veilt voor de Warmste Week',
|
||||||
'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
|
'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
'duration': 49.02,
|
'duration': 49.02,
|
||||||
}
|
},
|
||||||
|
'expected_warnings': ['is not a supported codec'],
|
||||||
}, {
|
}, {
|
||||||
# with subtitles
|
# with subtitles
|
||||||
'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
|
'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
|
||||||
@ -40,7 +126,8 @@ class CanvasIE(InfoExtractor):
|
|||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
}
|
},
|
||||||
|
'skip': 'Pagina niet gevonden',
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.een.be/sorry-voor-alles/herbekijk-sorry-voor-alles',
|
'url': 'https://www.een.be/sorry-voor-alles/herbekijk-sorry-voor-alles',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@ -54,7 +141,8 @@ class CanvasIE(InfoExtractor):
|
|||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
}
|
},
|
||||||
|
'skip': 'Episode no longer available',
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend',
|
'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
@ -66,55 +154,157 @@ class CanvasIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
title = (self._search_regex(
|
title = strip_or_none(self._search_regex(
|
||||||
r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
|
r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
|
||||||
webpage, 'title', default=None) or self._og_search_title(
|
webpage, 'title', default=None) or self._og_search_title(
|
||||||
webpage)).strip()
|
webpage, default=None))
|
||||||
|
|
||||||
video_id = self._html_search_regex(
|
video_id = self._html_search_regex(
|
||||||
r'data-video=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id', group='id')
|
r'data-video=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
|
||||||
|
group='id')
|
||||||
data = self._download_json(
|
|
||||||
'https://mediazone.vrt.be/api/v1/%s/assets/%s'
|
|
||||||
% (site_id, video_id), display_id)
|
|
||||||
|
|
||||||
formats = []
|
|
||||||
for target in data['targetUrls']:
|
|
||||||
format_url, format_type = target.get('url'), target.get('type')
|
|
||||||
if not format_url or not format_type:
|
|
||||||
continue
|
|
||||||
if format_type == 'HLS':
|
|
||||||
formats.extend(self._extract_m3u8_formats(
|
|
||||||
format_url, display_id, entry_protocol='m3u8_native',
|
|
||||||
ext='mp4', preference=0, fatal=False, m3u8_id=format_type))
|
|
||||||
elif format_type == 'HDS':
|
|
||||||
formats.extend(self._extract_f4m_formats(
|
|
||||||
format_url, display_id, f4m_id=format_type, fatal=False))
|
|
||||||
elif format_type == 'MPEG_DASH':
|
|
||||||
formats.extend(self._extract_mpd_formats(
|
|
||||||
format_url, display_id, mpd_id=format_type, fatal=False))
|
|
||||||
else:
|
|
||||||
formats.append({
|
|
||||||
'format_id': format_type,
|
|
||||||
'url': format_url,
|
|
||||||
})
|
|
||||||
self._sort_formats(formats)
|
|
||||||
|
|
||||||
subtitles = {}
|
|
||||||
subtitle_urls = data.get('subtitleUrls')
|
|
||||||
if isinstance(subtitle_urls, list):
|
|
||||||
for subtitle in subtitle_urls:
|
|
||||||
subtitle_url = subtitle.get('url')
|
|
||||||
if subtitle_url and subtitle.get('type') == 'CLOSED':
|
|
||||||
subtitles.setdefault('nl', []).append({'url': subtitle_url})
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (site_id, video_id),
|
||||||
|
'ie_key': CanvasIE.ie_key(),
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'description': self._og_search_description(webpage),
|
'description': self._og_search_description(webpage),
|
||||||
'formats': formats,
|
}
|
||||||
'duration': float_or_none(data.get('duration'), 1000),
|
|
||||||
'thumbnail': data.get('posterImageUrl'),
|
|
||||||
'subtitles': subtitles,
|
class VrtNUIE(GigyaBaseIE):
|
||||||
|
IE_DESC = 'VrtNU.be'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?vrt\.be/(?P<site_id>vrtnu)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1/postbus-x-s1a1/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'pbs-pub-2e2d8c27-df26-45c9-9dc6-90c78153044d$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'De zwarte weduwe',
|
||||||
|
'description': 'md5:d90c21dced7db869a85db89a623998d4',
|
||||||
|
'duration': 1457.04,
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
'season': '1',
|
||||||
|
'season_number': 1,
|
||||||
|
'episode_number': 1,
|
||||||
|
},
|
||||||
|
'skip': 'This video is only available for registered users'
|
||||||
|
}]
|
||||||
|
_NETRC_MACHINE = 'vrtnu'
|
||||||
|
_APIKEY = '3_0Z2HujMtiWq_pkAjgnS2Md2E11a1AwZjYiBETtwNE-EoEHDINgtnvcAOpNgmrVGy'
|
||||||
|
_CONTEXT_ID = 'R3595707040'
|
||||||
|
|
||||||
|
def _real_initialize(self):
|
||||||
|
self._login()
|
||||||
|
|
||||||
|
def _login(self):
|
||||||
|
username, password = self._get_login_info()
|
||||||
|
if username is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
auth_data = {
|
||||||
|
'APIKey': self._APIKEY,
|
||||||
|
'targetEnv': 'jssdk',
|
||||||
|
'loginID': username,
|
||||||
|
'password': password,
|
||||||
|
'authMode': 'cookie',
|
||||||
|
}
|
||||||
|
|
||||||
|
auth_info = self._gigya_login(auth_data)
|
||||||
|
|
||||||
|
# Sometimes authentication fails for no good reason, retry
|
||||||
|
login_attempt = 1
|
||||||
|
while login_attempt <= 3:
|
||||||
|
try:
|
||||||
|
# When requesting a token, no actual token is returned, but the
|
||||||
|
# necessary cookies are set.
|
||||||
|
self._request_webpage(
|
||||||
|
'https://token.vrt.be',
|
||||||
|
None, note='Requesting a token', errnote='Could not get a token',
|
||||||
|
headers={
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
'Referer': 'https://www.vrt.be/vrtnu/',
|
||||||
|
},
|
||||||
|
data=json.dumps({
|
||||||
|
'uid': auth_info['UID'],
|
||||||
|
'uidsig': auth_info['UIDSignature'],
|
||||||
|
'ts': auth_info['signatureTimestamp'],
|
||||||
|
'email': auth_info['profile']['email'],
|
||||||
|
}).encode('utf-8'))
|
||||||
|
except ExtractorError as e:
|
||||||
|
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
|
||||||
|
login_attempt += 1
|
||||||
|
self.report_warning('Authentication failed')
|
||||||
|
self._sleep(1, None, msg_template='Waiting for %(timeout)s seconds before trying again')
|
||||||
|
else:
|
||||||
|
raise e
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
display_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage, urlh = self._download_webpage_handle(url, display_id)
|
||||||
|
|
||||||
|
title = self._html_search_regex(
|
||||||
|
r'(?ms)<h1 class="content__heading">(.+?)</h1>',
|
||||||
|
webpage, 'title').strip()
|
||||||
|
|
||||||
|
description = self._html_search_regex(
|
||||||
|
r'(?ms)<div class="content__description">(.+?)</div>',
|
||||||
|
webpage, 'description', default=None)
|
||||||
|
|
||||||
|
season = self._html_search_regex(
|
||||||
|
[r'''(?xms)<div\ class="tabs__tab\ tabs__tab--active">\s*
|
||||||
|
<span>seizoen\ (.+?)</span>\s*
|
||||||
|
</div>''',
|
||||||
|
r'<option value="seizoen (\d{1,3})" data-href="[^"]+?" selected>'],
|
||||||
|
webpage, 'season', default=None)
|
||||||
|
|
||||||
|
season_number = int_or_none(season)
|
||||||
|
|
||||||
|
episode_number = int_or_none(self._html_search_regex(
|
||||||
|
r'''(?xms)<div\ class="content__episode">\s*
|
||||||
|
<abbr\ title="aflevering">afl</abbr>\s*<span>(\d+)</span>
|
||||||
|
</div>''',
|
||||||
|
webpage, 'episode_number', default=None))
|
||||||
|
|
||||||
|
release_date = parse_iso8601(self._html_search_regex(
|
||||||
|
r'(?ms)<div class="content__broadcastdate">\s*<time\ datetime="(.+?)"',
|
||||||
|
webpage, 'release_date', default=None))
|
||||||
|
|
||||||
|
# If there's a ? or a # in the URL, remove them and everything after
|
||||||
|
clean_url = urlh.geturl().split('?')[0].split('#')[0].strip('/')
|
||||||
|
securevideo_url = clean_url + '.mssecurevideo.json'
|
||||||
|
|
||||||
|
try:
|
||||||
|
video = self._download_json(securevideo_url, display_id)
|
||||||
|
except ExtractorError as e:
|
||||||
|
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
|
||||||
|
self.raise_login_required()
|
||||||
|
raise
|
||||||
|
|
||||||
|
# We are dealing with a '../<show>.relevant' URL
|
||||||
|
redirect_url = video.get('url')
|
||||||
|
if redirect_url:
|
||||||
|
return self.url_result(self._proto_relative_url(redirect_url, 'https:'))
|
||||||
|
|
||||||
|
# There is only one entry, but with an unknown key, so just get
|
||||||
|
# the first one
|
||||||
|
video_id = list(video.values())[0].get('videoid')
|
||||||
|
|
||||||
|
return {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
|
||||||
|
'ie_key': CanvasIE.ie_key(),
|
||||||
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'season': season,
|
||||||
|
'season_number': season_number,
|
||||||
|
'episode_number': episode_number,
|
||||||
|
'release_date': release_date,
|
||||||
}
|
}
|
||||||
|
@ -31,7 +31,7 @@ class CartoonNetworkIE(TurnerBaseIE):
|
|||||||
'http://www.cartoonnetwork.com/video-seo-svc/episodeservices/getCvpPlaylist?networkName=CN2&' + query, video_id, {
|
'http://www.cartoonnetwork.com/video-seo-svc/episodeservices/getCvpPlaylist?networkName=CN2&' + query, video_id, {
|
||||||
'secure': {
|
'secure': {
|
||||||
'media_src': 'http://androidhls-secure.cdn.turner.com/toon/big',
|
'media_src': 'http://androidhls-secure.cdn.turner.com/toon/big',
|
||||||
'tokenizer_src': 'http://www.cartoonnetwork.com/cntv/mvpd/processors/services/token_ipadAdobe.do',
|
'tokenizer_src': 'https://token.vgtf.net/token/token_mobile',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': url,
|
'url': url,
|
||||||
|
@ -1,10 +1,14 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import json
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_str
|
from ..compat import (
|
||||||
|
compat_str,
|
||||||
|
compat_HTTPError,
|
||||||
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
js_to_json,
|
js_to_json,
|
||||||
smuggle_url,
|
smuggle_url,
|
||||||
@ -13,6 +17,7 @@ from ..utils import (
|
|||||||
xpath_element,
|
xpath_element,
|
||||||
xpath_with_ns,
|
xpath_with_ns,
|
||||||
find_xpath_attr,
|
find_xpath_attr,
|
||||||
|
parse_duration,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
parse_age_limit,
|
parse_age_limit,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
@ -200,33 +205,52 @@ class CBCWatchBaseIE(InfoExtractor):
|
|||||||
'media': 'http://search.yahoo.com/mrss/',
|
'media': 'http://search.yahoo.com/mrss/',
|
||||||
'clearleap': 'http://www.clearleap.com/namespace/clearleap/1.0/',
|
'clearleap': 'http://www.clearleap.com/namespace/clearleap/1.0/',
|
||||||
}
|
}
|
||||||
|
_GEO_COUNTRIES = ['CA']
|
||||||
|
|
||||||
def _call_api(self, path, video_id):
|
def _call_api(self, path, video_id):
|
||||||
url = path if path.startswith('http') else self._API_BASE_URL + path
|
url = path if path.startswith('http') else self._API_BASE_URL + path
|
||||||
result = self._download_xml(url, video_id, headers={
|
for _ in range(2):
|
||||||
'X-Clearleap-DeviceId': self._device_id,
|
try:
|
||||||
'X-Clearleap-DeviceToken': self._device_token,
|
result = self._download_xml(url, video_id, headers={
|
||||||
})
|
'X-Clearleap-DeviceId': self._device_id,
|
||||||
|
'X-Clearleap-DeviceToken': self._device_token,
|
||||||
|
})
|
||||||
|
except ExtractorError as e:
|
||||||
|
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
|
||||||
|
# Device token has expired, re-acquiring device token
|
||||||
|
self._register_device()
|
||||||
|
continue
|
||||||
|
raise
|
||||||
error_message = xpath_text(result, 'userMessage') or xpath_text(result, 'systemMessage')
|
error_message = xpath_text(result, 'userMessage') or xpath_text(result, 'systemMessage')
|
||||||
if error_message:
|
if error_message:
|
||||||
raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message))
|
raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message))
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def _real_initialize(self):
|
def _real_initialize(self):
|
||||||
if not self._device_id or not self._device_token:
|
if self._valid_device_token():
|
||||||
device = self._downloader.cache.load('cbcwatch', 'device') or {}
|
return
|
||||||
self._device_id, self._device_token = device.get('id'), device.get('token')
|
device = self._downloader.cache.load('cbcwatch', 'device') or {}
|
||||||
if not self._device_id or not self._device_token:
|
self._device_id, self._device_token = device.get('id'), device.get('token')
|
||||||
result = self._download_xml(
|
if self._valid_device_token():
|
||||||
self._API_BASE_URL + 'device/register',
|
return
|
||||||
None, data=b'<device><type>web</type></device>')
|
self._register_device()
|
||||||
self._device_id = xpath_text(result, 'deviceId', fatal=True)
|
|
||||||
self._device_token = xpath_text(result, 'deviceToken', fatal=True)
|
def _valid_device_token(self):
|
||||||
self._downloader.cache.store(
|
return self._device_id and self._device_token
|
||||||
'cbcwatch', 'device', {
|
|
||||||
'id': self._device_id,
|
def _register_device(self):
|
||||||
'token': self._device_token,
|
self._device_id = self._device_token = None
|
||||||
})
|
result = self._download_xml(
|
||||||
|
self._API_BASE_URL + 'device/register',
|
||||||
|
None, 'Acquiring device token',
|
||||||
|
data=b'<device><type>web</type></device>')
|
||||||
|
self._device_id = xpath_text(result, 'deviceId', fatal=True)
|
||||||
|
self._device_token = xpath_text(result, 'deviceToken', fatal=True)
|
||||||
|
self._downloader.cache.store(
|
||||||
|
'cbcwatch', 'device', {
|
||||||
|
'id': self._device_id,
|
||||||
|
'token': self._device_token,
|
||||||
|
})
|
||||||
|
|
||||||
def _parse_rss_feed(self, rss):
|
def _parse_rss_feed(self, rss):
|
||||||
channel = xpath_element(rss, 'channel', fatal=True)
|
channel = xpath_element(rss, 'channel', fatal=True)
|
||||||
@ -287,6 +311,11 @@ class CBCWatchBaseIE(InfoExtractor):
|
|||||||
class CBCWatchVideoIE(CBCWatchBaseIE):
|
class CBCWatchVideoIE(CBCWatchBaseIE):
|
||||||
IE_NAME = 'cbc.ca:watch:video'
|
IE_NAME = 'cbc.ca:watch:video'
|
||||||
_VALID_URL = r'https?://api-cbc\.cloud\.clearleap\.com/cloffice/client/web/play/?\?.*?\bcontentId=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
|
_VALID_URL = r'https?://api-cbc\.cloud\.clearleap\.com/cloffice/client/web/play/?\?.*?\bcontentId=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
|
||||||
|
_TEST = {
|
||||||
|
# geo-restricted to Canada, bypassable
|
||||||
|
'url': 'https://api-cbc.cloud.clearleap.com/cloffice/client/web/play/?contentId=3c84472a-1eea-4dee-9267-2655d5055dcf&categoryId=ebc258f5-ee40-4cca-b66b-ba6bd55b7235',
|
||||||
|
'only_matching': True,
|
||||||
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
@ -323,9 +352,10 @@ class CBCWatchIE(CBCWatchBaseIE):
|
|||||||
IE_NAME = 'cbc.ca:watch'
|
IE_NAME = 'cbc.ca:watch'
|
||||||
_VALID_URL = r'https?://watch\.cbc\.ca/(?:[^/]+/)+(?P<id>[0-9a-f-]+)'
|
_VALID_URL = r'https?://watch\.cbc\.ca/(?:[^/]+/)+(?P<id>[0-9a-f-]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
|
# geo-restricted to Canada, bypassable
|
||||||
'url': 'http://watch.cbc.ca/doc-zone/season-6/customer-disservice/38e815a-009e3ab12e4',
|
'url': 'http://watch.cbc.ca/doc-zone/season-6/customer-disservice/38e815a-009e3ab12e4',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '38e815a-009e3ab12e4',
|
'id': '9673749a-5e77-484c-8b62-a1092a6b5168',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Customer (Dis)Service',
|
'title': 'Customer (Dis)Service',
|
||||||
'description': 'md5:8bdd6913a0fe03d4b2a17ebe169c7c87',
|
'description': 'md5:8bdd6913a0fe03d4b2a17ebe169c7c87',
|
||||||
@ -337,8 +367,8 @@ class CBCWatchIE(CBCWatchBaseIE):
|
|||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
'format': 'bestvideo',
|
'format': 'bestvideo',
|
||||||
},
|
},
|
||||||
'skip': 'Geo-restricted to Canada',
|
|
||||||
}, {
|
}, {
|
||||||
|
# geo-restricted to Canada, bypassable
|
||||||
'url': 'http://watch.cbc.ca/arthur/all/1ed4b385-cd84-49cf-95f0-80f004680057',
|
'url': 'http://watch.cbc.ca/arthur/all/1ed4b385-cd84-49cf-95f0-80f004680057',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1ed4b385-cd84-49cf-95f0-80f004680057',
|
'id': '1ed4b385-cd84-49cf-95f0-80f004680057',
|
||||||
@ -346,10 +376,69 @@ class CBCWatchIE(CBCWatchBaseIE):
|
|||||||
'description': 'Arthur, the sweetest 8-year-old aardvark, and his pals solve all kinds of problems with humour, kindness and teamwork.',
|
'description': 'Arthur, the sweetest 8-year-old aardvark, and his pals solve all kinds of problems with humour, kindness and teamwork.',
|
||||||
},
|
},
|
||||||
'playlist_mincount': 30,
|
'playlist_mincount': 30,
|
||||||
'skip': 'Geo-restricted to Canada',
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
rss = self._call_api('web/browse/' + video_id, video_id)
|
rss = self._call_api('web/browse/' + video_id, video_id)
|
||||||
return self._parse_rss_feed(rss)
|
return self._parse_rss_feed(rss)
|
||||||
|
|
||||||
|
|
||||||
|
class CBCOlympicsIE(InfoExtractor):
|
||||||
|
IE_NAME = 'cbc.ca:olympics'
|
||||||
|
_VALID_URL = r'https?://olympics\.cbc\.ca/video/[^/]+/(?P<id>[^/?#]+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://olympics.cbc.ca/video/whats-on-tv/olympic-morning-featuring-the-opening-ceremony/',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
display_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
video_id = self._hidden_inputs(webpage)['videoId']
|
||||||
|
video_doc = self._download_xml(
|
||||||
|
'https://olympics.cbc.ca/videodata/%s.xml' % video_id, video_id)
|
||||||
|
title = xpath_text(video_doc, 'title', fatal=True)
|
||||||
|
is_live = xpath_text(video_doc, 'kind') == 'Live'
|
||||||
|
if is_live:
|
||||||
|
title = self._live_title(title)
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for video_source in video_doc.findall('videoSources/videoSource'):
|
||||||
|
uri = xpath_text(video_source, 'uri')
|
||||||
|
if not uri:
|
||||||
|
continue
|
||||||
|
tokenize = self._download_json(
|
||||||
|
'https://olympics.cbc.ca/api/api-akamai/tokenize',
|
||||||
|
video_id, data=json.dumps({
|
||||||
|
'VideoSource': uri,
|
||||||
|
}).encode(), headers={
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
'Referer': url,
|
||||||
|
# d3.VideoPlayer._init in https://olympics.cbc.ca/components/script/base.js
|
||||||
|
'Cookie': '_dvp=TK:C0ObxjerU', # AKAMAI CDN cookie
|
||||||
|
}, fatal=False)
|
||||||
|
if not tokenize:
|
||||||
|
continue
|
||||||
|
content_url = tokenize['ContentUrl']
|
||||||
|
video_source_format = video_source.get('format')
|
||||||
|
if video_source_format == 'IIS':
|
||||||
|
formats.extend(self._extract_ism_formats(
|
||||||
|
content_url, video_id, ism_id=video_source_format, fatal=False))
|
||||||
|
else:
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
content_url, video_id, 'mp4',
|
||||||
|
'm3u8' if is_live else 'm3u8_native',
|
||||||
|
m3u8_id=video_source_format, fatal=False))
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
|
'title': title,
|
||||||
|
'description': xpath_text(video_doc, 'description'),
|
||||||
|
'thumbnail': xpath_text(video_doc, 'thumbnailUrl'),
|
||||||
|
'duration': parse_duration(xpath_text(video_doc, 'duration')),
|
||||||
|
'formats': formats,
|
||||||
|
'is_live': is_live,
|
||||||
|
}
|
||||||
|
@ -2,6 +2,7 @@ from __future__ import unicode_literals
|
|||||||
|
|
||||||
from .theplatform import ThePlatformFeedIE
|
from .theplatform import ThePlatformFeedIE
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
find_xpath_attr,
|
find_xpath_attr,
|
||||||
xpath_element,
|
xpath_element,
|
||||||
@ -61,9 +62,10 @@ class CBSIE(CBSBaseIE):
|
|||||||
asset_types = []
|
asset_types = []
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
formats = []
|
formats = []
|
||||||
|
last_e = None
|
||||||
for item in items_data.findall('.//item'):
|
for item in items_data.findall('.//item'):
|
||||||
asset_type = xpath_text(item, 'assetType')
|
asset_type = xpath_text(item, 'assetType')
|
||||||
if not asset_type or asset_type in asset_types:
|
if not asset_type or asset_type in asset_types or asset_type in ('HLS_FPS', 'DASH_CENC'):
|
||||||
continue
|
continue
|
||||||
asset_types.append(asset_type)
|
asset_types.append(asset_type)
|
||||||
query = {
|
query = {
|
||||||
@ -74,11 +76,17 @@ class CBSIE(CBSBaseIE):
|
|||||||
query['formats'] = 'MPEG4,M3U'
|
query['formats'] = 'MPEG4,M3U'
|
||||||
elif asset_type in ('RTMP', 'WIFI', '3G'):
|
elif asset_type in ('RTMP', 'WIFI', '3G'):
|
||||||
query['formats'] = 'MPEG4,FLV'
|
query['formats'] = 'MPEG4,FLV'
|
||||||
tp_formats, tp_subtitles = self._extract_theplatform_smil(
|
try:
|
||||||
update_url_query(tp_release_url, query), content_id,
|
tp_formats, tp_subtitles = self._extract_theplatform_smil(
|
||||||
'Downloading %s SMIL data' % asset_type)
|
update_url_query(tp_release_url, query), content_id,
|
||||||
|
'Downloading %s SMIL data' % asset_type)
|
||||||
|
except ExtractorError as e:
|
||||||
|
last_e = e
|
||||||
|
continue
|
||||||
formats.extend(tp_formats)
|
formats.extend(tp_formats)
|
||||||
subtitles = self._merge_subtitles(subtitles, tp_subtitles)
|
subtitles = self._merge_subtitles(subtitles, tp_subtitles)
|
||||||
|
if last_e and not formats:
|
||||||
|
raise last_e
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
info = self._extract_theplatform_metadata(tp_path, content_id)
|
info = self._extract_theplatform_metadata(tp_path, content_id)
|
||||||
|
@ -75,10 +75,10 @@ class CBSInteractiveIE(CBSIE):
|
|||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
data_json = self._html_search_regex(
|
data_json = self._html_search_regex(
|
||||||
r"data-(?:cnet|zdnet)-video(?:-uvp(?:js)?)?-options='([^']+)'",
|
r"data(?:-(?:cnet|zdnet))?-video(?:-(?:uvp(?:js)?|player))?-options='([^']+)'",
|
||||||
webpage, 'data json')
|
webpage, 'data json')
|
||||||
data = self._parse_json(data_json, display_id)
|
data = self._parse_json(data_json, display_id)
|
||||||
vdata = data.get('video') or data['videos'][0]
|
vdata = data.get('video') or (data.get('videos') or data.get('playlist'))[0]
|
||||||
|
|
||||||
video_id = vdata['mpxRefId']
|
video_id = vdata['mpxRefId']
|
||||||
|
|
||||||
|
@ -91,12 +91,10 @@ class CBSLocalIE(AnvatoIE):
|
|||||||
|
|
||||||
info_dict = self._extract_anvato_videos(webpage, display_id)
|
info_dict = self._extract_anvato_videos(webpage, display_id)
|
||||||
|
|
||||||
time_str = self._html_search_regex(
|
timestamp = unified_timestamp(self._html_search_regex(
|
||||||
r'class="entry-date">([^<]+)<', webpage, 'released date', default=None)
|
r'class="(?:entry|post)-date"[^>]*>([^<]+)', webpage,
|
||||||
if time_str:
|
'released date', default=None)) or parse_iso8601(
|
||||||
timestamp = unified_timestamp(time_str)
|
self._html_search_meta('uploadDate', webpage))
|
||||||
else:
|
|
||||||
timestamp = parse_iso8601(self._html_search_meta('uploadDate', webpage))
|
|
||||||
|
|
||||||
info_dict.update({
|
info_dict.update({
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
|
@ -4,28 +4,35 @@ from .cbs import CBSBaseIE
|
|||||||
|
|
||||||
|
|
||||||
class CBSSportsIE(CBSBaseIE):
|
class CBSSportsIE(CBSBaseIE):
|
||||||
_VALID_URL = r'https?://(?:www\.)?cbssports\.com/video/player/[^/]+/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/(?:video|news)/(?P<id>[^/?#&]+)'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.cbssports.com/video/player/videos/708337219968/0/ben-simmons-the-next-lebron?-not-so-fast',
|
'url': 'https://www.cbssports.com/nba/video/donovan-mitchell-flashes-star-potential-in-game-2-victory-over-thunder/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '708337219968',
|
'id': '1214315075735',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Ben Simmons the next LeBron? Not so fast',
|
'title': 'Donovan Mitchell flashes star potential in Game 2 victory over Thunder',
|
||||||
'description': 'md5:854294f627921baba1f4b9a990d87197',
|
'description': 'md5:df6f48622612c2d6bd2e295ddef58def',
|
||||||
'timestamp': 1466293740,
|
'timestamp': 1524111457,
|
||||||
'upload_date': '20160618',
|
'upload_date': '20180419',
|
||||||
'uploader': 'CBSI-NEW',
|
'uploader': 'CBSI-NEW',
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
# m3u8 download
|
# m3u8 download
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
}
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.cbssports.com/nba/news/nba-playoffs-2018-watch-76ers-vs-heat-game-3-series-schedule-tv-channel-online-stream/',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _extract_video_info(self, filter_query, video_id):
|
def _extract_video_info(self, filter_query, video_id):
|
||||||
return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id)
|
return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id)
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
display_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
video_id = self._search_regex(
|
||||||
|
[r'(?:=|%26)pcid%3D(\d+)', r'embedVideo(?:Container)?_(\d+)'],
|
||||||
|
webpage, 'video id')
|
||||||
return self._extract_video_info('byId=%s' % video_id, video_id)
|
return self._extract_video_info('byId=%s' % video_id, video_id)
|
||||||
|
@ -4,11 +4,13 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_str
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
clean_html,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
clean_html,
|
parse_resolution,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -40,34 +42,42 @@ class CCMAIE(InfoExtractor):
|
|||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
media_type, media_id = re.match(self._VALID_URL, url).groups()
|
media_type, media_id = re.match(self._VALID_URL, url).groups()
|
||||||
media_data = {}
|
|
||||||
formats = []
|
media = self._download_json(
|
||||||
profiles = ['pc'] if media_type == 'audio' else ['mobil', 'pc']
|
'http://dinamics.ccma.cat/pvideo/media.jsp', media_id, query={
|
||||||
for i, profile in enumerate(profiles):
|
|
||||||
md = self._download_json('http://dinamics.ccma.cat/pvideo/media.jsp', media_id, query={
|
|
||||||
'media': media_type,
|
'media': media_type,
|
||||||
'idint': media_id,
|
'idint': media_id,
|
||||||
'profile': profile,
|
})
|
||||||
}, fatal=False)
|
|
||||||
if md:
|
formats = []
|
||||||
media_data = md
|
media_url = media['media']['url']
|
||||||
media_url = media_data.get('media', {}).get('url')
|
if isinstance(media_url, list):
|
||||||
if media_url:
|
for format_ in media_url:
|
||||||
formats.append({
|
format_url = format_.get('file')
|
||||||
'format_id': profile,
|
if not format_url or not isinstance(format_url, compat_str):
|
||||||
'url': media_url,
|
continue
|
||||||
'quality': i,
|
label = format_.get('label')
|
||||||
})
|
f = parse_resolution(label)
|
||||||
|
f.update({
|
||||||
|
'url': format_url,
|
||||||
|
'format_id': label,
|
||||||
|
})
|
||||||
|
formats.append(f)
|
||||||
|
else:
|
||||||
|
formats.append({
|
||||||
|
'url': media_url,
|
||||||
|
'vcodec': 'none' if media_type == 'audio' else None,
|
||||||
|
})
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
informacio = media_data['informacio']
|
informacio = media['informacio']
|
||||||
title = informacio['titol']
|
title = informacio['titol']
|
||||||
durada = informacio.get('durada', {})
|
durada = informacio.get('durada', {})
|
||||||
duration = int_or_none(durada.get('milisegons'), 1000) or parse_duration(durada.get('text'))
|
duration = int_or_none(durada.get('milisegons'), 1000) or parse_duration(durada.get('text'))
|
||||||
timestamp = parse_iso8601(informacio.get('data_emissio', {}).get('utc'))
|
timestamp = parse_iso8601(informacio.get('data_emissio', {}).get('utc'))
|
||||||
|
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
subtitols = media_data.get('subtitols', {})
|
subtitols = media.get('subtitols', {})
|
||||||
if subtitols:
|
if subtitols:
|
||||||
sub_url = subtitols.get('url')
|
sub_url = subtitols.get('url')
|
||||||
if sub_url:
|
if sub_url:
|
||||||
@ -77,7 +87,7 @@ class CCMAIE(InfoExtractor):
|
|||||||
})
|
})
|
||||||
|
|
||||||
thumbnails = []
|
thumbnails = []
|
||||||
imatges = media_data.get('imatges', {})
|
imatges = media.get('imatges', {})
|
||||||
if imatges:
|
if imatges:
|
||||||
thumbnail_url = imatges.get('url')
|
thumbnail_url = imatges.get('url')
|
||||||
if thumbnail_url:
|
if thumbnail_url:
|
||||||
@ -93,7 +103,7 @@ class CCMAIE(InfoExtractor):
|
|||||||
'description': clean_html(informacio.get('descripcio')),
|
'description': clean_html(informacio.get('descripcio')),
|
||||||
'duration': duration,
|
'duration': duration,
|
||||||
'timestamp': timestamp,
|
'timestamp': timestamp,
|
||||||
'thumnails': thumbnails,
|
'thumbnails': thumbnails,
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
}
|
}
|
||||||
|
2
youtube_dl/extractor/cda.py
Executable file → Normal file
2
youtube_dl/extractor/cda.py
Executable file → Normal file
@ -124,7 +124,7 @@ class CDAIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def extract_format(page, version):
|
def extract_format(page, version):
|
||||||
json_str = self._search_regex(
|
json_str = self._html_search_regex(
|
||||||
r'player_data=(\\?["\'])(?P<player_data>.+?)\1', page,
|
r'player_data=(\\?["\'])(?P<player_data>.+?)\1', page,
|
||||||
'%s player_json' % version, fatal=False, group='player_data')
|
'%s player_json' % version, fatal=False, group='player_data')
|
||||||
if not json_str:
|
if not json_str:
|
||||||
|
@ -13,6 +13,7 @@ from ..utils import (
|
|||||||
float_or_none,
|
float_or_none,
|
||||||
sanitized_Request,
|
sanitized_Request,
|
||||||
unescapeHTML,
|
unescapeHTML,
|
||||||
|
update_url_query,
|
||||||
urlencode_postdata,
|
urlencode_postdata,
|
||||||
USER_AGENTS,
|
USER_AGENTS,
|
||||||
)
|
)
|
||||||
@ -265,6 +266,10 @@ class CeskaTelevizePoradyIE(InfoExtractor):
|
|||||||
# m3u8 download
|
# m3u8 download
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
# iframe embed
|
||||||
|
'url': 'http://www.ceskatelevize.cz/porady/10614999031-neviditelni/21251212048/',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -272,8 +277,11 @@ class CeskaTelevizePoradyIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
data_url = unescapeHTML(self._search_regex(
|
data_url = update_url_query(unescapeHTML(self._search_regex(
|
||||||
r'<span[^>]*\bdata-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
|
(r'<span[^>]*\bdata-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
|
||||||
webpage, 'iframe player url', group='url'))
|
r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?ceskatelevize\.cz/ivysilani/embed/iFramePlayer\.php.*?)\1'),
|
||||||
|
webpage, 'iframe player url', group='url')), query={
|
||||||
|
'autoStart': 'true',
|
||||||
|
})
|
||||||
|
|
||||||
return self.url_result(data_url, ie=CeskaTelevizeIE.ie_key())
|
return self.url_result(data_url, ie=CeskaTelevizeIE.ie_key())
|
||||||
|
@ -81,6 +81,12 @@ class Channel9IE(InfoExtractor):
|
|||||||
|
|
||||||
_RSS_URL = 'http://channel9.msdn.com/%s/RSS'
|
_RSS_URL = 'http://channel9.msdn.com/%s/RSS'
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _extract_urls(webpage):
|
||||||
|
return re.findall(
|
||||||
|
r'<iframe[^>]+src=["\'](https?://channel9\.msdn\.com/(?:[^/]+/)+)player\b',
|
||||||
|
webpage)
|
||||||
|
|
||||||
def _extract_list(self, video_id, rss_url=None):
|
def _extract_list(self, video_id, rss_url=None):
|
||||||
if not rss_url:
|
if not rss_url:
|
||||||
rss_url = self._RSS_URL % video_id
|
rss_url = self._RSS_URL % video_id
|
||||||
|
@ -5,7 +5,7 @@ from ..utils import remove_end
|
|||||||
|
|
||||||
|
|
||||||
class CharlieRoseIE(InfoExtractor):
|
class CharlieRoseIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?charlierose\.com/video(?:s|/player)/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?charlierose\.com/(?:video|episode)(?:s|/player)/(?P<id>\d+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://charlierose.com/videos/27996',
|
'url': 'https://charlierose.com/videos/27996',
|
||||||
'md5': 'fda41d49e67d4ce7c2411fd2c4702e09',
|
'md5': 'fda41d49e67d4ce7c2411fd2c4702e09',
|
||||||
@ -24,6 +24,9 @@ class CharlieRoseIE(InfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'https://charlierose.com/videos/27996',
|
'url': 'https://charlierose.com/videos/27996',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://charlierose.com/episodes/30887?autoplay=true',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
_PLAYER_BASE = 'https://charlierose.com/video/player/%s'
|
_PLAYER_BASE = 'https://charlierose.com/video/player/%s'
|
||||||
|
@ -1,10 +1,11 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
import base64
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from .youtube import YoutubeIE
|
||||||
|
from ..compat import compat_b64decode
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
clean_html,
|
clean_html,
|
||||||
ExtractorError
|
ExtractorError
|
||||||
@ -57,7 +58,7 @@ class ChilloutzoneIE(InfoExtractor):
|
|||||||
|
|
||||||
base64_video_info = self._html_search_regex(
|
base64_video_info = self._html_search_regex(
|
||||||
r'var cozVidData = "(.+?)";', webpage, 'video data')
|
r'var cozVidData = "(.+?)";', webpage, 'video data')
|
||||||
decoded_video_info = base64.b64decode(base64_video_info.encode('utf-8')).decode('utf-8')
|
decoded_video_info = compat_b64decode(base64_video_info).decode('utf-8')
|
||||||
video_info_dict = json.loads(decoded_video_info)
|
video_info_dict = json.loads(decoded_video_info)
|
||||||
|
|
||||||
# get video information from dict
|
# get video information from dict
|
||||||
@ -70,11 +71,9 @@ class ChilloutzoneIE(InfoExtractor):
|
|||||||
|
|
||||||
# If nativePlatform is None a fallback mechanism is used (i.e. youtube embed)
|
# If nativePlatform is None a fallback mechanism is used (i.e. youtube embed)
|
||||||
if native_platform is None:
|
if native_platform is None:
|
||||||
youtube_url = self._html_search_regex(
|
youtube_url = YoutubeIE._extract_url(webpage)
|
||||||
r'<iframe.* src="((?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
|
if youtube_url:
|
||||||
webpage, 'fallback video URL', default=None)
|
return self.url_result(youtube_url, ie=YoutubeIE.ie_key())
|
||||||
if youtube_url is not None:
|
|
||||||
return self.url_result(youtube_url, ie='Youtube')
|
|
||||||
|
|
||||||
# Non Fallback: Decide to use native source (e.g. youtube or vimeo) or
|
# Non Fallback: Decide to use native source (e.g. youtube or vimeo) or
|
||||||
# the own CDN
|
# the own CDN
|
||||||
|
@ -1,10 +1,10 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import base64
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_b64decode
|
||||||
from ..utils import parse_duration
|
from ..utils import parse_duration
|
||||||
|
|
||||||
|
|
||||||
@ -44,8 +44,7 @@ class ChirbitIE(InfoExtractor):
|
|||||||
|
|
||||||
# Reverse engineered from https://chirb.it/js/chirbit.player.js (look
|
# Reverse engineered from https://chirb.it/js/chirbit.player.js (look
|
||||||
# for soundURL)
|
# for soundURL)
|
||||||
audio_url = base64.b64decode(
|
audio_url = compat_b64decode(data_fd[::-1]).decode('utf-8')
|
||||||
data_fd[::-1].encode('ascii')).decode('utf-8')
|
|
||||||
|
|
||||||
title = self._search_regex(
|
title = self._search_regex(
|
||||||
r'class=["\']chirbit-title["\'][^>]*>([^<]+)', webpage, 'title')
|
r'class=["\']chirbit-title["\'][^>]*>([^<]+)', webpage, 'title')
|
||||||
|
74
youtube_dl/extractor/clippit.py
Normal file
74
youtube_dl/extractor/clippit.py
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
parse_iso8601,
|
||||||
|
qualities,
|
||||||
|
)
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
class ClippitIE(InfoExtractor):
|
||||||
|
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?clippituser\.tv/c/(?P<id>[a-z]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://www.clippituser.tv/c/evmgm',
|
||||||
|
'md5': '963ae7a59a2ec4572ab8bf2f2d2c5f09',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'evmgm',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Bye bye Brutus. #BattleBots - Clippit',
|
||||||
|
'uploader': 'lizllove',
|
||||||
|
'uploader_url': 'https://www.clippituser.tv/p/lizllove',
|
||||||
|
'timestamp': 1472183818,
|
||||||
|
'upload_date': '20160826',
|
||||||
|
'description': 'BattleBots | ABC',
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
title = self._html_search_regex(r'<title.*>(.+?)</title>', webpage, 'title')
|
||||||
|
|
||||||
|
FORMATS = ('sd', 'hd')
|
||||||
|
quality = qualities(FORMATS)
|
||||||
|
formats = []
|
||||||
|
for format_id in FORMATS:
|
||||||
|
url = self._html_search_regex(r'data-%s-file="(.+?)"' % format_id,
|
||||||
|
webpage, 'url', fatal=False)
|
||||||
|
if not url:
|
||||||
|
continue
|
||||||
|
match = re.search(r'/(?P<height>\d+)\.mp4', url)
|
||||||
|
formats.append({
|
||||||
|
'url': url,
|
||||||
|
'format_id': format_id,
|
||||||
|
'quality': quality(format_id),
|
||||||
|
'height': int(match.group('height')) if match else None,
|
||||||
|
})
|
||||||
|
|
||||||
|
uploader = self._html_search_regex(r'class="username".*>\s+(.+?)\n',
|
||||||
|
webpage, 'uploader', fatal=False)
|
||||||
|
uploader_url = ('https://www.clippituser.tv/p/' + uploader
|
||||||
|
if uploader else None)
|
||||||
|
|
||||||
|
timestamp = self._html_search_regex(r'datetime="(.+?)"',
|
||||||
|
webpage, 'date', fatal=False)
|
||||||
|
thumbnail = self._html_search_regex(r'data-image="(.+?)"',
|
||||||
|
webpage, 'thumbnail', fatal=False)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'formats': formats,
|
||||||
|
'uploader': uploader,
|
||||||
|
'uploader_url': uploader_url,
|
||||||
|
'timestamp': parse_iso8601(timestamp),
|
||||||
|
'description': self._og_search_description(webpage),
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
}
|
@ -1,93 +0,0 @@
|
|||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
import json
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import (
|
|
||||||
float_or_none,
|
|
||||||
int_or_none,
|
|
||||||
sanitized_Request,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class CollegeRamaIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'https?://collegerama\.tudelft\.nl/Mediasite/Play/(?P<id>[\da-f]+)'
|
|
||||||
_TESTS = [
|
|
||||||
{
|
|
||||||
'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d',
|
|
||||||
'md5': '481fda1c11f67588c0d9d8fbdced4e39',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '585a43626e544bdd97aeb71a0ec907a01d',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.',
|
|
||||||
'description': '',
|
|
||||||
'thumbnail': r're:^https?://.*\.jpg(?:\?.*?)?$',
|
|
||||||
'duration': 7713.088,
|
|
||||||
'timestamp': 1413309600,
|
|
||||||
'upload_date': '20141014',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
'url': 'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4',
|
|
||||||
'md5': 'ef1fdded95bdf19b12c5999949419c92',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '86a9ea9f53e149079fbdb4202b521ed21d',
|
|
||||||
'ext': 'wmv',
|
|
||||||
'title': '64ste Vakantiecursus: Afvalwater',
|
|
||||||
'description': 'md5:7fd774865cc69d972f542b157c328305',
|
|
||||||
'thumbnail': r're:^https?://.*\.jpg(?:\?.*?)?$',
|
|
||||||
'duration': 10853,
|
|
||||||
'timestamp': 1326446400,
|
|
||||||
'upload_date': '20120113',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
|
|
||||||
player_options_request = {
|
|
||||||
'getPlayerOptionsRequest': {
|
|
||||||
'ResourceId': video_id,
|
|
||||||
'QueryString': '',
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
request = sanitized_Request(
|
|
||||||
'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
|
|
||||||
json.dumps(player_options_request))
|
|
||||||
request.add_header('Content-Type', 'application/json')
|
|
||||||
|
|
||||||
player_options = self._download_json(request, video_id)
|
|
||||||
|
|
||||||
presentation = player_options['d']['Presentation']
|
|
||||||
title = presentation['Title']
|
|
||||||
description = presentation.get('Description')
|
|
||||||
thumbnail = None
|
|
||||||
duration = float_or_none(presentation.get('Duration'), 1000)
|
|
||||||
timestamp = int_or_none(presentation.get('UnixTime'), 1000)
|
|
||||||
|
|
||||||
formats = []
|
|
||||||
for stream in presentation['Streams']:
|
|
||||||
for video in stream['VideoUrls']:
|
|
||||||
thumbnail_url = stream.get('ThumbnailUrl')
|
|
||||||
if thumbnail_url:
|
|
||||||
thumbnail = 'http://collegerama.tudelft.nl' + thumbnail_url
|
|
||||||
format_id = video['MediaType']
|
|
||||||
if format_id == 'SS':
|
|
||||||
continue
|
|
||||||
formats.append({
|
|
||||||
'url': video['Location'],
|
|
||||||
'format_id': format_id,
|
|
||||||
})
|
|
||||||
self._sort_formats(formats)
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': video_id,
|
|
||||||
'title': title,
|
|
||||||
'description': description,
|
|
||||||
'thumbnail': thumbnail,
|
|
||||||
'duration': duration,
|
|
||||||
'timestamp': timestamp,
|
|
||||||
'formats': formats,
|
|
||||||
}
|
|
@ -120,13 +120,16 @@ class ComedyCentralTVIE(MTVServicesInfoExtractor):
|
|||||||
|
|
||||||
|
|
||||||
class ComedyCentralShortnameIE(InfoExtractor):
|
class ComedyCentralShortnameIE(InfoExtractor):
|
||||||
_VALID_URL = r'^:(?P<id>tds|thedailyshow)$'
|
_VALID_URL = r'^:(?P<id>tds|thedailyshow|theopposition)$'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': ':tds',
|
'url': ':tds',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
}, {
|
||||||
'url': ':thedailyshow',
|
'url': ':thedailyshow',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': ':theopposition',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -134,5 +137,6 @@ class ComedyCentralShortnameIE(InfoExtractor):
|
|||||||
shortcut_map = {
|
shortcut_map = {
|
||||||
'tds': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
|
'tds': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
|
||||||
'thedailyshow': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
|
'thedailyshow': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
|
||||||
|
'theopposition': 'http://www.cc.com/shows/the-opposition-with-jordan-klepper/full-episodes',
|
||||||
}
|
}
|
||||||
return self.url_result(shortcut_map[video_id])
|
return self.url_result(shortcut_map[video_id])
|
||||||
|
@ -27,8 +27,12 @@ from ..compat import (
|
|||||||
compat_urllib_parse_urlencode,
|
compat_urllib_parse_urlencode,
|
||||||
compat_urllib_request,
|
compat_urllib_request,
|
||||||
compat_urlparse,
|
compat_urlparse,
|
||||||
|
compat_xml_parse_error,
|
||||||
|
)
|
||||||
|
from ..downloader.f4m import (
|
||||||
|
get_base_url,
|
||||||
|
remove_encrypted_media,
|
||||||
)
|
)
|
||||||
from ..downloader.f4m import remove_encrypted_media
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
NO_DEFAULT,
|
NO_DEFAULT,
|
||||||
age_restricted,
|
age_restricted,
|
||||||
@ -170,6 +174,8 @@ class InfoExtractor(object):
|
|||||||
width : height ratio as float.
|
width : height ratio as float.
|
||||||
* no_resume The server does not support resuming the
|
* no_resume The server does not support resuming the
|
||||||
(HTTP or RTMP) download. Boolean.
|
(HTTP or RTMP) download. Boolean.
|
||||||
|
* downloader_options A dictionary of downloader options as
|
||||||
|
described in FileDownloader
|
||||||
|
|
||||||
url: Final video URL.
|
url: Final video URL.
|
||||||
ext: Video filename extension.
|
ext: Video filename extension.
|
||||||
@ -297,8 +303,9 @@ class InfoExtractor(object):
|
|||||||
There must be a key "entries", which is a list, an iterable, or a PagedList
|
There must be a key "entries", which is a list, an iterable, or a PagedList
|
||||||
object, each element of which is a valid dictionary by this specification.
|
object, each element of which is a valid dictionary by this specification.
|
||||||
|
|
||||||
Additionally, playlists can have "title", "description" and "id" attributes
|
Additionally, playlists can have "id", "title", "description", "uploader",
|
||||||
with the same semantics as videos (see above).
|
"uploader_id", "uploader_url" attributes with the same semantics as videos
|
||||||
|
(see above).
|
||||||
|
|
||||||
|
|
||||||
_type "multi_video" indicates that there are multiple videos that
|
_type "multi_video" indicates that there are multiple videos that
|
||||||
@ -490,6 +497,16 @@ class InfoExtractor(object):
|
|||||||
self.to_screen('%s' % (note,))
|
self.to_screen('%s' % (note,))
|
||||||
else:
|
else:
|
||||||
self.to_screen('%s: %s' % (video_id, note))
|
self.to_screen('%s: %s' % (video_id, note))
|
||||||
|
|
||||||
|
# Some sites check X-Forwarded-For HTTP header in order to figure out
|
||||||
|
# the origin of the client behind proxy. This allows bypassing geo
|
||||||
|
# restriction by faking this header's value to IP that belongs to some
|
||||||
|
# geo unrestricted country. We will do so once we encounter any
|
||||||
|
# geo restriction error.
|
||||||
|
if self._x_forwarded_for_ip:
|
||||||
|
if 'X-Forwarded-For' not in headers:
|
||||||
|
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
|
||||||
|
|
||||||
if isinstance(url_or_request, compat_urllib_request.Request):
|
if isinstance(url_or_request, compat_urllib_request.Request):
|
||||||
url_or_request = update_Request(
|
url_or_request = update_Request(
|
||||||
url_or_request, data=data, headers=headers, query=query)
|
url_or_request, data=data, headers=headers, query=query)
|
||||||
@ -519,15 +536,6 @@ class InfoExtractor(object):
|
|||||||
if isinstance(url_or_request, (compat_str, str)):
|
if isinstance(url_or_request, (compat_str, str)):
|
||||||
url_or_request = url_or_request.partition('#')[0]
|
url_or_request = url_or_request.partition('#')[0]
|
||||||
|
|
||||||
# Some sites check X-Forwarded-For HTTP header in order to figure out
|
|
||||||
# the origin of the client behind proxy. This allows bypassing geo
|
|
||||||
# restriction by faking this header's value to IP that belongs to some
|
|
||||||
# geo unrestricted country. We will do so once we encounter any
|
|
||||||
# geo restriction error.
|
|
||||||
if self._x_forwarded_for_ip:
|
|
||||||
if 'X-Forwarded-For' not in headers:
|
|
||||||
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
|
|
||||||
|
|
||||||
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
|
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
|
||||||
if urlh is False:
|
if urlh is False:
|
||||||
assert not fatal
|
assert not fatal
|
||||||
@ -588,19 +596,11 @@ class InfoExtractor(object):
|
|||||||
if not encoding:
|
if not encoding:
|
||||||
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
|
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
|
||||||
if self._downloader.params.get('dump_intermediate_pages', False):
|
if self._downloader.params.get('dump_intermediate_pages', False):
|
||||||
try:
|
self.to_screen('Dumping request to ' + urlh.geturl())
|
||||||
url = url_or_request.get_full_url()
|
|
||||||
except AttributeError:
|
|
||||||
url = url_or_request
|
|
||||||
self.to_screen('Dumping request to ' + url)
|
|
||||||
dump = base64.b64encode(webpage_bytes).decode('ascii')
|
dump = base64.b64encode(webpage_bytes).decode('ascii')
|
||||||
self._downloader.to_screen(dump)
|
self._downloader.to_screen(dump)
|
||||||
if self._downloader.params.get('write_pages', False):
|
if self._downloader.params.get('write_pages', False):
|
||||||
try:
|
basen = '%s_%s' % (video_id, urlh.geturl())
|
||||||
url = url_or_request.get_full_url()
|
|
||||||
except AttributeError:
|
|
||||||
url = url_or_request
|
|
||||||
basen = '%s_%s' % (video_id, url)
|
|
||||||
if len(basen) > 240:
|
if len(basen) > 240:
|
||||||
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
|
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
|
||||||
basen = basen[:240 - len(h)] + h
|
basen = basen[:240 - len(h)] + h
|
||||||
@ -644,30 +644,68 @@ class InfoExtractor(object):
|
|||||||
content, _ = res
|
content, _ = res
|
||||||
return content
|
return content
|
||||||
|
|
||||||
def _download_xml(self, url_or_request, video_id,
|
def _download_xml_handle(
|
||||||
note='Downloading XML', errnote='Unable to download XML',
|
self, url_or_request, video_id, note='Downloading XML',
|
||||||
transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
|
errnote='Unable to download XML', transform_source=None,
|
||||||
"""Return the xml as an xml.etree.ElementTree.Element"""
|
fatal=True, encoding=None, data=None, headers={}, query={}):
|
||||||
xml_string = self._download_webpage(
|
"""Return a tuple (xml as an xml.etree.ElementTree.Element, URL handle)"""
|
||||||
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
|
res = self._download_webpage_handle(
|
||||||
if xml_string is False:
|
|
||||||
return xml_string
|
|
||||||
if transform_source:
|
|
||||||
xml_string = transform_source(xml_string)
|
|
||||||
return compat_etree_fromstring(xml_string.encode('utf-8'))
|
|
||||||
|
|
||||||
def _download_json(self, url_or_request, video_id,
|
|
||||||
note='Downloading JSON metadata',
|
|
||||||
errnote='Unable to download JSON metadata',
|
|
||||||
transform_source=None,
|
|
||||||
fatal=True, encoding=None, data=None, headers={}, query={}):
|
|
||||||
json_string = self._download_webpage(
|
|
||||||
url_or_request, video_id, note, errnote, fatal=fatal,
|
url_or_request, video_id, note, errnote, fatal=fatal,
|
||||||
encoding=encoding, data=data, headers=headers, query=query)
|
encoding=encoding, data=data, headers=headers, query=query)
|
||||||
if (not fatal) and json_string is False:
|
if res is False:
|
||||||
return None
|
return res
|
||||||
|
xml_string, urlh = res
|
||||||
|
return self._parse_xml(
|
||||||
|
xml_string, video_id, transform_source=transform_source,
|
||||||
|
fatal=fatal), urlh
|
||||||
|
|
||||||
|
def _download_xml(self, url_or_request, video_id,
|
||||||
|
note='Downloading XML', errnote='Unable to download XML',
|
||||||
|
transform_source=None, fatal=True, encoding=None,
|
||||||
|
data=None, headers={}, query={}):
|
||||||
|
"""Return the xml as an xml.etree.ElementTree.Element"""
|
||||||
|
res = self._download_xml_handle(
|
||||||
|
url_or_request, video_id, note=note, errnote=errnote,
|
||||||
|
transform_source=transform_source, fatal=fatal, encoding=encoding,
|
||||||
|
data=data, headers=headers, query=query)
|
||||||
|
return res if res is False else res[0]
|
||||||
|
|
||||||
|
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
|
||||||
|
if transform_source:
|
||||||
|
xml_string = transform_source(xml_string)
|
||||||
|
try:
|
||||||
|
return compat_etree_fromstring(xml_string.encode('utf-8'))
|
||||||
|
except compat_xml_parse_error as ve:
|
||||||
|
errmsg = '%s: Failed to parse XML ' % video_id
|
||||||
|
if fatal:
|
||||||
|
raise ExtractorError(errmsg, cause=ve)
|
||||||
|
else:
|
||||||
|
self.report_warning(errmsg + str(ve))
|
||||||
|
|
||||||
|
def _download_json_handle(
|
||||||
|
self, url_or_request, video_id, note='Downloading JSON metadata',
|
||||||
|
errnote='Unable to download JSON metadata', transform_source=None,
|
||||||
|
fatal=True, encoding=None, data=None, headers={}, query={}):
|
||||||
|
"""Return a tuple (JSON object, URL handle)"""
|
||||||
|
res = self._download_webpage_handle(
|
||||||
|
url_or_request, video_id, note, errnote, fatal=fatal,
|
||||||
|
encoding=encoding, data=data, headers=headers, query=query)
|
||||||
|
if res is False:
|
||||||
|
return res
|
||||||
|
json_string, urlh = res
|
||||||
return self._parse_json(
|
return self._parse_json(
|
||||||
json_string, video_id, transform_source=transform_source, fatal=fatal)
|
json_string, video_id, transform_source=transform_source,
|
||||||
|
fatal=fatal), urlh
|
||||||
|
|
||||||
|
def _download_json(
|
||||||
|
self, url_or_request, video_id, note='Downloading JSON metadata',
|
||||||
|
errnote='Unable to download JSON metadata', transform_source=None,
|
||||||
|
fatal=True, encoding=None, data=None, headers={}, query={}):
|
||||||
|
res = self._download_json_handle(
|
||||||
|
url_or_request, video_id, note=note, errnote=errnote,
|
||||||
|
transform_source=transform_source, fatal=fatal, encoding=encoding,
|
||||||
|
data=data, headers=headers, query=query)
|
||||||
|
return res if res is False else res[0]
|
||||||
|
|
||||||
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
|
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
|
||||||
if transform_source:
|
if transform_source:
|
||||||
@ -940,7 +978,8 @@ class InfoExtractor(object):
|
|||||||
|
|
||||||
def _family_friendly_search(self, html):
|
def _family_friendly_search(self, html):
|
||||||
# See http://schema.org/VideoObject
|
# See http://schema.org/VideoObject
|
||||||
family_friendly = self._html_search_meta('isFamilyFriendly', html)
|
family_friendly = self._html_search_meta(
|
||||||
|
'isFamilyFriendly', html, default=None)
|
||||||
|
|
||||||
if not family_friendly:
|
if not family_friendly:
|
||||||
return None
|
return None
|
||||||
@ -981,6 +1020,40 @@ class InfoExtractor(object):
|
|||||||
if isinstance(json_ld, dict):
|
if isinstance(json_ld, dict):
|
||||||
json_ld = [json_ld]
|
json_ld = [json_ld]
|
||||||
|
|
||||||
|
INTERACTION_TYPE_MAP = {
|
||||||
|
'CommentAction': 'comment',
|
||||||
|
'AgreeAction': 'like',
|
||||||
|
'DisagreeAction': 'dislike',
|
||||||
|
'LikeAction': 'like',
|
||||||
|
'DislikeAction': 'dislike',
|
||||||
|
'ListenAction': 'view',
|
||||||
|
'WatchAction': 'view',
|
||||||
|
'ViewAction': 'view',
|
||||||
|
}
|
||||||
|
|
||||||
|
def extract_interaction_statistic(e):
|
||||||
|
interaction_statistic = e.get('interactionStatistic')
|
||||||
|
if not isinstance(interaction_statistic, list):
|
||||||
|
return
|
||||||
|
for is_e in interaction_statistic:
|
||||||
|
if not isinstance(is_e, dict):
|
||||||
|
continue
|
||||||
|
if is_e.get('@type') != 'InteractionCounter':
|
||||||
|
continue
|
||||||
|
interaction_type = is_e.get('interactionType')
|
||||||
|
if not isinstance(interaction_type, compat_str):
|
||||||
|
continue
|
||||||
|
interaction_count = int_or_none(is_e.get('userInteractionCount'))
|
||||||
|
if interaction_count is None:
|
||||||
|
continue
|
||||||
|
count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
|
||||||
|
if not count_kind:
|
||||||
|
continue
|
||||||
|
count_key = '%s_count' % count_kind
|
||||||
|
if info.get(count_key) is not None:
|
||||||
|
continue
|
||||||
|
info[count_key] = interaction_count
|
||||||
|
|
||||||
def extract_video_object(e):
|
def extract_video_object(e):
|
||||||
assert e['@type'] == 'VideoObject'
|
assert e['@type'] == 'VideoObject'
|
||||||
info.update({
|
info.update({
|
||||||
@ -996,9 +1069,10 @@ class InfoExtractor(object):
|
|||||||
'height': int_or_none(e.get('height')),
|
'height': int_or_none(e.get('height')),
|
||||||
'view_count': int_or_none(e.get('interactionCount')),
|
'view_count': int_or_none(e.get('interactionCount')),
|
||||||
})
|
})
|
||||||
|
extract_interaction_statistic(e)
|
||||||
|
|
||||||
for e in json_ld:
|
for e in json_ld:
|
||||||
if e.get('@context') == 'http://schema.org':
|
if isinstance(e.get('@context'), compat_str) and re.match(r'^https?://schema.org/?$', e.get('@context')):
|
||||||
item_type = e.get('@type')
|
item_type = e.get('@type')
|
||||||
if expected_type is not None and expected_type != item_type:
|
if expected_type is not None and expected_type != item_type:
|
||||||
return info
|
return info
|
||||||
@ -1014,7 +1088,7 @@ class InfoExtractor(object):
|
|||||||
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
|
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
|
||||||
if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
|
if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
|
||||||
info['series'] = unescapeHTML(part_of_series.get('name'))
|
info['series'] = unescapeHTML(part_of_series.get('name'))
|
||||||
elif item_type == 'Article':
|
elif item_type in ('Article', 'NewsArticle'):
|
||||||
info.update({
|
info.update({
|
||||||
'timestamp': parse_iso8601(e.get('datePublished')),
|
'timestamp': parse_iso8601(e.get('datePublished')),
|
||||||
'title': unescapeHTML(e.get('headline')),
|
'title': unescapeHTML(e.get('headline')),
|
||||||
@ -1223,11 +1297,8 @@ class InfoExtractor(object):
|
|||||||
media_nodes = remove_encrypted_media(media_nodes)
|
media_nodes = remove_encrypted_media(media_nodes)
|
||||||
if not media_nodes:
|
if not media_nodes:
|
||||||
return formats
|
return formats
|
||||||
base_url = xpath_text(
|
|
||||||
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
|
manifest_base_url = get_base_url(manifest)
|
||||||
'base URL', default=None)
|
|
||||||
if base_url:
|
|
||||||
base_url = base_url.strip()
|
|
||||||
|
|
||||||
bootstrap_info = xpath_element(
|
bootstrap_info = xpath_element(
|
||||||
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
|
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
|
||||||
@ -1259,7 +1330,7 @@ class InfoExtractor(object):
|
|||||||
continue
|
continue
|
||||||
manifest_url = (
|
manifest_url = (
|
||||||
media_url if media_url.startswith('http://') or media_url.startswith('https://')
|
media_url if media_url.startswith('http://') or media_url.startswith('https://')
|
||||||
else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
|
else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
|
||||||
# If media_url is itself a f4m manifest do the recursive extraction
|
# If media_url is itself a f4m manifest do the recursive extraction
|
||||||
# since bitrates in parent manifest (this one) and media_url manifest
|
# since bitrates in parent manifest (this one) and media_url manifest
|
||||||
# may differ leading to inability to resolve the format by requested
|
# may differ leading to inability to resolve the format by requested
|
||||||
@ -1294,6 +1365,7 @@ class InfoExtractor(object):
|
|||||||
'url': manifest_url,
|
'url': manifest_url,
|
||||||
'manifest_url': manifest_url,
|
'manifest_url': manifest_url,
|
||||||
'ext': 'flv' if bootstrap_info is not None else None,
|
'ext': 'flv' if bootstrap_info is not None else None,
|
||||||
|
'protocol': 'f4m',
|
||||||
'tbr': tbr,
|
'tbr': tbr,
|
||||||
'width': width,
|
'width': width,
|
||||||
'height': height,
|
'height': height,
|
||||||
@ -1339,6 +1411,9 @@ class InfoExtractor(object):
|
|||||||
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
|
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
|
||||||
return []
|
return []
|
||||||
|
|
||||||
|
if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay
|
||||||
|
return []
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
|
|
||||||
format_url = lambda u: (
|
format_url = lambda u: (
|
||||||
@ -1385,7 +1460,7 @@ class InfoExtractor(object):
|
|||||||
media_url = media.get('URI')
|
media_url = media.get('URI')
|
||||||
if media_url:
|
if media_url:
|
||||||
format_id = []
|
format_id = []
|
||||||
for v in (group_id, name):
|
for v in (m3u8_id, group_id, name):
|
||||||
if v:
|
if v:
|
||||||
format_id.append(v)
|
format_id.append(v)
|
||||||
f = {
|
f = {
|
||||||
@ -1678,22 +1753,24 @@ class InfoExtractor(object):
|
|||||||
})
|
})
|
||||||
return subtitles
|
return subtitles
|
||||||
|
|
||||||
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
|
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
|
||||||
xspf = self._download_xml(
|
xspf = self._download_xml(
|
||||||
playlist_url, playlist_id, 'Downloading xpsf playlist',
|
xspf_url, playlist_id, 'Downloading xpsf playlist',
|
||||||
'Unable to download xspf manifest', fatal=fatal)
|
'Unable to download xspf manifest', fatal=fatal)
|
||||||
if xspf is False:
|
if xspf is False:
|
||||||
return []
|
return []
|
||||||
return self._parse_xspf(xspf, playlist_id)
|
return self._parse_xspf(
|
||||||
|
xspf, playlist_id, xspf_url=xspf_url,
|
||||||
|
xspf_base_url=base_url(xspf_url))
|
||||||
|
|
||||||
def _parse_xspf(self, playlist, playlist_id):
|
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
|
||||||
NS_MAP = {
|
NS_MAP = {
|
||||||
'xspf': 'http://xspf.org/ns/0/',
|
'xspf': 'http://xspf.org/ns/0/',
|
||||||
's1': 'http://static.streamone.nl/player/ns/0',
|
's1': 'http://static.streamone.nl/player/ns/0',
|
||||||
}
|
}
|
||||||
|
|
||||||
entries = []
|
entries = []
|
||||||
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
|
for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
|
||||||
title = xpath_text(
|
title = xpath_text(
|
||||||
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
|
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
|
||||||
description = xpath_text(
|
description = xpath_text(
|
||||||
@ -1703,12 +1780,18 @@ class InfoExtractor(object):
|
|||||||
duration = float_or_none(
|
duration = float_or_none(
|
||||||
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
|
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
|
||||||
|
|
||||||
formats = [{
|
formats = []
|
||||||
'url': location.text,
|
for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
|
||||||
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
|
format_url = urljoin(xspf_base_url, location.text)
|
||||||
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
|
if not format_url:
|
||||||
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
|
continue
|
||||||
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
|
formats.append({
|
||||||
|
'url': format_url,
|
||||||
|
'manifest_url': xspf_url,
|
||||||
|
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
|
||||||
|
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
|
||||||
|
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
|
||||||
|
})
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
entries.append({
|
entries.append({
|
||||||
@ -1722,18 +1805,18 @@ class InfoExtractor(object):
|
|||||||
return entries
|
return entries
|
||||||
|
|
||||||
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
|
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
|
||||||
res = self._download_webpage_handle(
|
res = self._download_xml_handle(
|
||||||
mpd_url, video_id,
|
mpd_url, video_id,
|
||||||
note=note or 'Downloading MPD manifest',
|
note=note or 'Downloading MPD manifest',
|
||||||
errnote=errnote or 'Failed to download MPD manifest',
|
errnote=errnote or 'Failed to download MPD manifest',
|
||||||
fatal=fatal)
|
fatal=fatal)
|
||||||
if res is False:
|
if res is False:
|
||||||
return []
|
return []
|
||||||
mpd, urlh = res
|
mpd_doc, urlh = res
|
||||||
mpd_base_url = base_url(urlh.geturl())
|
mpd_base_url = base_url(urlh.geturl())
|
||||||
|
|
||||||
return self._parse_mpd_formats(
|
return self._parse_mpd_formats(
|
||||||
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
|
mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
|
||||||
formats_dict=formats_dict, mpd_url=mpd_url)
|
formats_dict=formats_dict, mpd_url=mpd_url)
|
||||||
|
|
||||||
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
|
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
|
||||||
@ -1785,7 +1868,7 @@ class InfoExtractor(object):
|
|||||||
ms_info['timescale'] = int(timescale)
|
ms_info['timescale'] = int(timescale)
|
||||||
segment_duration = source.get('duration')
|
segment_duration = source.get('duration')
|
||||||
if segment_duration:
|
if segment_duration:
|
||||||
ms_info['segment_duration'] = int(segment_duration)
|
ms_info['segment_duration'] = float(segment_duration)
|
||||||
|
|
||||||
def extract_Initialization(source):
|
def extract_Initialization(source):
|
||||||
initialization = source.find(_add_ns('Initialization'))
|
initialization = source.find(_add_ns('Initialization'))
|
||||||
@ -1866,6 +1949,7 @@ class InfoExtractor(object):
|
|||||||
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
|
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
|
||||||
'format_note': 'DASH %s' % content_type,
|
'format_note': 'DASH %s' % content_type,
|
||||||
'filesize': filesize,
|
'filesize': filesize,
|
||||||
|
'container': mimetype2ext(mime_type) + '_dash',
|
||||||
}
|
}
|
||||||
f.update(parse_codecs(representation_attrib.get('codecs')))
|
f.update(parse_codecs(representation_attrib.get('codecs')))
|
||||||
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
|
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
|
||||||
@ -1904,7 +1988,7 @@ class InfoExtractor(object):
|
|||||||
# can't be used at the same time
|
# can't be used at the same time
|
||||||
if '%(Number' in media_template and 's' not in representation_ms_info:
|
if '%(Number' in media_template and 's' not in representation_ms_info:
|
||||||
segment_duration = None
|
segment_duration = None
|
||||||
if 'total_number' not in representation_ms_info and 'segment_duration':
|
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
|
||||||
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
|
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
|
||||||
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
|
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
|
||||||
representation_ms_info['fragments'] = [{
|
representation_ms_info['fragments'] = [{
|
||||||
@ -1963,6 +2047,22 @@ class InfoExtractor(object):
|
|||||||
})
|
})
|
||||||
segment_index += 1
|
segment_index += 1
|
||||||
representation_ms_info['fragments'] = fragments
|
representation_ms_info['fragments'] = fragments
|
||||||
|
elif 'segment_urls' in representation_ms_info:
|
||||||
|
# Segment URLs with no SegmentTimeline
|
||||||
|
# Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
|
||||||
|
# https://github.com/rg3/youtube-dl/pull/14844
|
||||||
|
fragments = []
|
||||||
|
segment_duration = float_or_none(
|
||||||
|
representation_ms_info['segment_duration'],
|
||||||
|
representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
|
||||||
|
for segment_url in representation_ms_info['segment_urls']:
|
||||||
|
fragment = {
|
||||||
|
location_key(segment_url): segment_url,
|
||||||
|
}
|
||||||
|
if segment_duration:
|
||||||
|
fragment['duration'] = segment_duration
|
||||||
|
fragments.append(fragment)
|
||||||
|
representation_ms_info['fragments'] = fragments
|
||||||
# NB: MPD manifest may contain direct URLs to unfragmented media.
|
# NB: MPD manifest may contain direct URLs to unfragmented media.
|
||||||
# No fragments key is present in this case.
|
# No fragments key is present in this case.
|
||||||
if 'fragments' in representation_ms_info:
|
if 'fragments' in representation_ms_info:
|
||||||
@ -1977,32 +2077,29 @@ class InfoExtractor(object):
|
|||||||
f['url'] = initialization_url
|
f['url'] = initialization_url
|
||||||
f['fragments'].append({location_key(initialization_url): initialization_url})
|
f['fragments'].append({location_key(initialization_url): initialization_url})
|
||||||
f['fragments'].extend(representation_ms_info['fragments'])
|
f['fragments'].extend(representation_ms_info['fragments'])
|
||||||
try:
|
# According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
|
||||||
existing_format = next(
|
# is not necessarily unique within a Period thus formats with
|
||||||
fo for fo in formats
|
# the same `format_id` are quite possible. There are numerous examples
|
||||||
if fo['format_id'] == representation_id)
|
# of such manifests (see https://github.com/rg3/youtube-dl/issues/15111,
|
||||||
except StopIteration:
|
# https://github.com/rg3/youtube-dl/issues/13919)
|
||||||
full_info = formats_dict.get(representation_id, {}).copy()
|
full_info = formats_dict.get(representation_id, {}).copy()
|
||||||
full_info.update(f)
|
full_info.update(f)
|
||||||
formats.append(full_info)
|
formats.append(full_info)
|
||||||
else:
|
|
||||||
existing_format.update(f)
|
|
||||||
else:
|
else:
|
||||||
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
|
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
|
||||||
return formats
|
return formats
|
||||||
|
|
||||||
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
|
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
|
||||||
res = self._download_webpage_handle(
|
res = self._download_xml_handle(
|
||||||
ism_url, video_id,
|
ism_url, video_id,
|
||||||
note=note or 'Downloading ISM manifest',
|
note=note or 'Downloading ISM manifest',
|
||||||
errnote=errnote or 'Failed to download ISM manifest',
|
errnote=errnote or 'Failed to download ISM manifest',
|
||||||
fatal=fatal)
|
fatal=fatal)
|
||||||
if res is False:
|
if res is False:
|
||||||
return []
|
return []
|
||||||
ism, urlh = res
|
ism_doc, urlh = res
|
||||||
|
|
||||||
return self._parse_ism_formats(
|
return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
|
||||||
compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id)
|
|
||||||
|
|
||||||
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
|
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
|
||||||
"""
|
"""
|
||||||
@ -2026,7 +2123,7 @@ class InfoExtractor(object):
|
|||||||
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
|
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
|
||||||
stream_name = stream.get('Name')
|
stream_name = stream.get('Name')
|
||||||
for track in stream.findall('QualityLevel'):
|
for track in stream.findall('QualityLevel'):
|
||||||
fourcc = track.get('FourCC')
|
fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
|
||||||
# TODO: add support for WVC1 and WMAP
|
# TODO: add support for WVC1 and WMAP
|
||||||
if fourcc not in ('H264', 'AVC1', 'AACL'):
|
if fourcc not in ('H264', 'AVC1', 'AACL'):
|
||||||
self.report_warning('%s is not a supported codec' % fourcc)
|
self.report_warning('%s is not a supported codec' % fourcc)
|
||||||
@ -2100,8 +2197,8 @@ class InfoExtractor(object):
|
|||||||
return formats
|
return formats
|
||||||
|
|
||||||
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
|
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
|
||||||
def absolute_url(video_url):
|
def absolute_url(item_url):
|
||||||
return compat_urlparse.urljoin(base_url, video_url)
|
return urljoin(base_url, item_url)
|
||||||
|
|
||||||
def parse_content_type(content_type):
|
def parse_content_type(content_type):
|
||||||
if not content_type:
|
if not content_type:
|
||||||
@ -2114,19 +2211,19 @@ class InfoExtractor(object):
|
|||||||
return f
|
return f
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
def _media_formats(src, cur_media_type):
|
def _media_formats(src, cur_media_type, type_info={}):
|
||||||
full_url = absolute_url(src)
|
full_url = absolute_url(src)
|
||||||
ext = determine_ext(full_url)
|
ext = type_info.get('ext') or determine_ext(full_url)
|
||||||
if ext == 'm3u8':
|
if ext == 'm3u8':
|
||||||
is_plain_url = False
|
is_plain_url = False
|
||||||
formats = self._extract_m3u8_formats(
|
formats = self._extract_m3u8_formats(
|
||||||
full_url, video_id, ext='mp4',
|
full_url, video_id, ext='mp4',
|
||||||
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
|
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
|
||||||
preference=preference)
|
preference=preference, fatal=False)
|
||||||
elif ext == 'mpd':
|
elif ext == 'mpd':
|
||||||
is_plain_url = False
|
is_plain_url = False
|
||||||
formats = self._extract_mpd_formats(
|
formats = self._extract_mpd_formats(
|
||||||
full_url, video_id, mpd_id=mpd_id)
|
full_url, video_id, mpd_id=mpd_id, fatal=False)
|
||||||
else:
|
else:
|
||||||
is_plain_url = True
|
is_plain_url = True
|
||||||
formats = [{
|
formats = [{
|
||||||
@ -2158,16 +2255,22 @@ class InfoExtractor(object):
|
|||||||
if src:
|
if src:
|
||||||
_, formats = _media_formats(src, media_type)
|
_, formats = _media_formats(src, media_type)
|
||||||
media_info['formats'].extend(formats)
|
media_info['formats'].extend(formats)
|
||||||
media_info['thumbnail'] = media_attributes.get('poster')
|
media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
|
||||||
if media_content:
|
if media_content:
|
||||||
for source_tag in re.findall(r'<source[^>]+>', media_content):
|
for source_tag in re.findall(r'<source[^>]+>', media_content):
|
||||||
source_attributes = extract_attributes(source_tag)
|
source_attributes = extract_attributes(source_tag)
|
||||||
src = source_attributes.get('src')
|
src = source_attributes.get('src')
|
||||||
if not src:
|
if not src:
|
||||||
continue
|
continue
|
||||||
is_plain_url, formats = _media_formats(src, media_type)
|
f = parse_content_type(source_attributes.get('type'))
|
||||||
|
is_plain_url, formats = _media_formats(src, media_type, f)
|
||||||
if is_plain_url:
|
if is_plain_url:
|
||||||
f = parse_content_type(source_attributes.get('type'))
|
# res attribute is not standard but seen several times
|
||||||
|
# in the wild
|
||||||
|
f.update({
|
||||||
|
'height': int_or_none(source_attributes.get('res')),
|
||||||
|
'format_id': source_attributes.get('label'),
|
||||||
|
})
|
||||||
f.update(formats[0])
|
f.update(formats[0])
|
||||||
media_info['formats'].append(f)
|
media_info['formats'].append(f)
|
||||||
else:
|
else:
|
||||||
@ -2211,27 +2314,36 @@ class InfoExtractor(object):
|
|||||||
return formats
|
return formats
|
||||||
|
|
||||||
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
|
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
|
||||||
|
query = compat_urlparse.urlparse(url).query
|
||||||
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
|
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
|
||||||
url_base = self._search_regex(
|
mobj = re.search(
|
||||||
r'(?:(?:https?|rtmp|rtsp):)?(//[^?]+)', url, 'format url')
|
r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
|
||||||
http_base_url = '%s:%s' % ('http', url_base)
|
url_base = mobj.group('url')
|
||||||
|
http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
|
||||||
formats = []
|
formats = []
|
||||||
|
|
||||||
|
def manifest_url(manifest):
|
||||||
|
m_url = '%s/%s' % (http_base_url, manifest)
|
||||||
|
if query:
|
||||||
|
m_url += '?%s' % query
|
||||||
|
return m_url
|
||||||
|
|
||||||
if 'm3u8' not in skip_protocols:
|
if 'm3u8' not in skip_protocols:
|
||||||
formats.extend(self._extract_m3u8_formats(
|
formats.extend(self._extract_m3u8_formats(
|
||||||
http_base_url + '/playlist.m3u8', video_id, 'mp4',
|
manifest_url('playlist.m3u8'), video_id, 'mp4',
|
||||||
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
|
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
|
||||||
if 'f4m' not in skip_protocols:
|
if 'f4m' not in skip_protocols:
|
||||||
formats.extend(self._extract_f4m_formats(
|
formats.extend(self._extract_f4m_formats(
|
||||||
http_base_url + '/manifest.f4m',
|
manifest_url('manifest.f4m'),
|
||||||
video_id, f4m_id='hds', fatal=False))
|
video_id, f4m_id='hds', fatal=False))
|
||||||
if 'dash' not in skip_protocols:
|
if 'dash' not in skip_protocols:
|
||||||
formats.extend(self._extract_mpd_formats(
|
formats.extend(self._extract_mpd_formats(
|
||||||
http_base_url + '/manifest.mpd',
|
manifest_url('manifest.mpd'),
|
||||||
video_id, mpd_id='dash', fatal=False))
|
video_id, mpd_id='dash', fatal=False))
|
||||||
if re.search(r'(?:/smil:|\.smil)', url_base):
|
if re.search(r'(?:/smil:|\.smil)', url_base):
|
||||||
if 'smil' not in skip_protocols:
|
if 'smil' not in skip_protocols:
|
||||||
rtmp_formats = self._extract_smil_formats(
|
rtmp_formats = self._extract_smil_formats(
|
||||||
http_base_url + '/jwplayer.smil',
|
manifest_url('jwplayer.smil'),
|
||||||
video_id, fatal=False)
|
video_id, fatal=False)
|
||||||
for rtmp_format in rtmp_formats:
|
for rtmp_format in rtmp_formats:
|
||||||
rtsp_format = rtmp_format.copy()
|
rtsp_format = rtmp_format.copy()
|
||||||
@ -2300,7 +2412,6 @@ class InfoExtractor(object):
|
|||||||
formats = self._parse_jwplayer_formats(
|
formats = self._parse_jwplayer_formats(
|
||||||
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
|
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
|
||||||
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
|
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
|
||||||
self._sort_formats(formats)
|
|
||||||
|
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
tracks = video_data.get('tracks')
|
tracks = video_data.get('tracks')
|
||||||
@ -2308,7 +2419,10 @@ class InfoExtractor(object):
|
|||||||
for track in tracks:
|
for track in tracks:
|
||||||
if not isinstance(track, dict):
|
if not isinstance(track, dict):
|
||||||
continue
|
continue
|
||||||
if track.get('kind') != 'captions':
|
track_kind = track.get('kind')
|
||||||
|
if not track_kind or not isinstance(track_kind, compat_str):
|
||||||
|
continue
|
||||||
|
if track_kind.lower() not in ('captions', 'subtitles'):
|
||||||
continue
|
continue
|
||||||
track_url = urljoin(base_url, track.get('file'))
|
track_url = urljoin(base_url, track.get('file'))
|
||||||
if not track_url:
|
if not track_url:
|
||||||
@ -2317,16 +2431,25 @@ class InfoExtractor(object):
|
|||||||
'url': self._proto_relative_url(track_url)
|
'url': self._proto_relative_url(track_url)
|
||||||
})
|
})
|
||||||
|
|
||||||
entries.append({
|
entry = {
|
||||||
'id': this_video_id,
|
'id': this_video_id,
|
||||||
'title': video_data['title'] if require_title else video_data.get('title'),
|
'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
|
||||||
'description': video_data.get('description'),
|
'description': video_data.get('description'),
|
||||||
'thumbnail': self._proto_relative_url(video_data.get('image')),
|
'thumbnail': self._proto_relative_url(video_data.get('image')),
|
||||||
'timestamp': int_or_none(video_data.get('pubdate')),
|
'timestamp': int_or_none(video_data.get('pubdate')),
|
||||||
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
|
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
'formats': formats,
|
}
|
||||||
})
|
# https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
|
||||||
|
if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
|
||||||
|
entry.update({
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': formats[0]['url'],
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
self._sort_formats(formats)
|
||||||
|
entry['formats'] = formats
|
||||||
|
entries.append(entry)
|
||||||
if len(entries) == 1:
|
if len(entries) == 1:
|
||||||
return entries[0]
|
return entries[0]
|
||||||
else:
|
else:
|
||||||
@ -2353,7 +2476,7 @@ class InfoExtractor(object):
|
|||||||
formats.extend(self._extract_m3u8_formats(
|
formats.extend(self._extract_m3u8_formats(
|
||||||
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||||
m3u8_id=m3u8_id, fatal=False))
|
m3u8_id=m3u8_id, fatal=False))
|
||||||
elif ext == 'mpd':
|
elif source_type == 'dash' or ext == 'mpd':
|
||||||
formats.extend(self._extract_mpd_formats(
|
formats.extend(self._extract_mpd_formats(
|
||||||
source_url, video_id, mpd_id=mpd_id, fatal=False))
|
source_url, video_id, mpd_id=mpd_id, fatal=False))
|
||||||
elif ext == 'smil':
|
elif ext == 'smil':
|
||||||
@ -2427,10 +2550,12 @@ class InfoExtractor(object):
|
|||||||
self._downloader.report_warning(msg)
|
self._downloader.report_warning(msg)
|
||||||
return res
|
return res
|
||||||
|
|
||||||
def _set_cookie(self, domain, name, value, expire_time=None):
|
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
|
||||||
|
path='/', secure=False, discard=False, rest={}, **kwargs):
|
||||||
cookie = compat_cookiejar.Cookie(
|
cookie = compat_cookiejar.Cookie(
|
||||||
0, name, value, None, None, domain, None,
|
0, name, value, port, port is not None, domain, True,
|
||||||
None, '/', True, False, expire_time, '', None, None, None)
|
domain.startswith('.'), path, True, secure, expire_time,
|
||||||
|
discard, None, None, rest)
|
||||||
self._downloader.cookiejar.set_cookie(cookie)
|
self._downloader.cookiejar.set_cookie(cookie)
|
||||||
|
|
||||||
def _get_cookies(self, url):
|
def _get_cookies(self, url):
|
||||||
|
@ -116,16 +116,16 @@ class CondeNastIE(InfoExtractor):
|
|||||||
entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
|
entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
|
||||||
return self.playlist_result(entries, playlist_title=title)
|
return self.playlist_result(entries, playlist_title=title)
|
||||||
|
|
||||||
def _extract_video_params(self, webpage):
|
def _extract_video_params(self, webpage, display_id):
|
||||||
query = {}
|
query = self._parse_json(
|
||||||
params = self._search_regex(
|
self._search_regex(
|
||||||
r'(?s)var params = {(.+?)}[;,]', webpage, 'player params', default=None)
|
r'(?s)var\s+params\s*=\s*({.+?})[;,]', webpage, 'player params',
|
||||||
if params:
|
default='{}'),
|
||||||
query.update({
|
display_id, transform_source=js_to_json, fatal=False)
|
||||||
'videoId': self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id'),
|
if query:
|
||||||
'playerId': self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id'),
|
query['videoId'] = self._search_regex(
|
||||||
'target': self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target'),
|
r'(?:data-video-id=|currentVideoId\s*=\s*)["\']([\da-f]+)',
|
||||||
})
|
webpage, 'video id', default=None)
|
||||||
else:
|
else:
|
||||||
params = extract_attributes(self._search_regex(
|
params = extract_attributes(self._search_regex(
|
||||||
r'(<[^>]+data-js="video-player"[^>]+>)',
|
r'(<[^>]+data-js="video-player"[^>]+>)',
|
||||||
@ -141,17 +141,27 @@ class CondeNastIE(InfoExtractor):
|
|||||||
video_id = params['videoId']
|
video_id = params['videoId']
|
||||||
|
|
||||||
video_info = None
|
video_info = None
|
||||||
if params.get('playerId'):
|
|
||||||
info_page = self._download_json(
|
# New API path
|
||||||
'http://player.cnevids.com/player/video.js',
|
query = params.copy()
|
||||||
video_id, 'Downloading video info', fatal=False, query=params)
|
query['embedType'] = 'inline'
|
||||||
if info_page:
|
info_page = self._download_json(
|
||||||
video_info = info_page.get('video')
|
'http://player.cnevids.com/embed-api.json', video_id,
|
||||||
if not video_info:
|
'Downloading embed info', fatal=False, query=query)
|
||||||
info_page = self._download_webpage(
|
|
||||||
'http://player.cnevids.com/player/loader.js',
|
# Old fallbacks
|
||||||
video_id, 'Downloading loader info', query=params)
|
if not info_page:
|
||||||
else:
|
if params.get('playerId'):
|
||||||
|
info_page = self._download_json(
|
||||||
|
'http://player.cnevids.com/player/video.js', video_id,
|
||||||
|
'Downloading video info', fatal=False, query=params)
|
||||||
|
if info_page:
|
||||||
|
video_info = info_page.get('video')
|
||||||
|
if not video_info:
|
||||||
|
info_page = self._download_webpage(
|
||||||
|
'http://player.cnevids.com/player/loader.js',
|
||||||
|
video_id, 'Downloading loader info', query=params)
|
||||||
|
if not video_info:
|
||||||
info_page = self._download_webpage(
|
info_page = self._download_webpage(
|
||||||
'https://player.cnevids.com/inline/video/%s.js' % video_id,
|
'https://player.cnevids.com/inline/video/%s.js' % video_id,
|
||||||
video_id, 'Downloading inline info', query={
|
video_id, 'Downloading inline info', query={
|
||||||
@ -215,7 +225,7 @@ class CondeNastIE(InfoExtractor):
|
|||||||
if url_type == 'series':
|
if url_type == 'series':
|
||||||
return self._extract_series(url, webpage)
|
return self._extract_series(url, webpage)
|
||||||
else:
|
else:
|
||||||
params = self._extract_video_params(webpage)
|
params = self._extract_video_params(webpage, display_id)
|
||||||
info = self._search_json_ld(
|
info = self._search_json_ld(
|
||||||
webpage, display_id, fatal=False)
|
webpage, display_id, fatal=False)
|
||||||
info.update(self._extract_video(params))
|
info.update(self._extract_video(params))
|
||||||
|
@ -3,6 +3,7 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from .youtube import YoutubeIE
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
str_to_int,
|
str_to_int,
|
||||||
@ -41,11 +42,9 @@ class CrackedIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
youtube_url = self._search_regex(
|
youtube_url = YoutubeIE._extract_url(webpage)
|
||||||
r'<iframe[^>]+src="((?:https?:)?//www\.youtube\.com/embed/[^"]+)"',
|
|
||||||
webpage, 'youtube url', default=None)
|
|
||||||
if youtube_url:
|
if youtube_url:
|
||||||
return self.url_result(youtube_url, 'Youtube')
|
return self.url_result(youtube_url, ie=YoutubeIE.ie_key())
|
||||||
|
|
||||||
video_url = self._html_search_regex(
|
video_url = self._html_search_regex(
|
||||||
[r'var\s+CK_vidSrc\s*=\s*"([^"]+)"', r'<video\s+src="([^"]+)"'],
|
[r'var\s+CK_vidSrc\s*=\s*"([^"]+)"', r'<video\s+src="([^"]+)"'],
|
||||||
|
@ -1,31 +1,45 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals, division
|
from __future__ import unicode_literals, division
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import int_or_none
|
from ..compat import (
|
||||||
|
compat_str,
|
||||||
|
compat_HTTPError,
|
||||||
|
)
|
||||||
|
from ..utils import (
|
||||||
|
determine_ext,
|
||||||
|
float_or_none,
|
||||||
|
int_or_none,
|
||||||
|
parse_age_limit,
|
||||||
|
parse_duration,
|
||||||
|
ExtractorError
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class CrackleIE(InfoExtractor):
|
class CrackleIE(InfoExtractor):
|
||||||
_GEO_COUNTRIES = ['US']
|
|
||||||
_VALID_URL = r'(?:crackle:|https?://(?:(?:www|m)\.)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)'
|
_VALID_URL = r'(?:crackle:|https?://(?:(?:www|m)\.)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.crackle.com/comedians-in-cars-getting-coffee/2498934',
|
# geo restricted to CA
|
||||||
|
'url': 'https://www.crackle.com/andromeda/2502343',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '2498934',
|
'id': '2502343',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Everybody Respects A Bloody Nose',
|
'title': 'Under The Night',
|
||||||
'description': 'Jerry is kaffeeklatsching in L.A. with funnyman J.B. Smoove (Saturday Night Live, Real Husbands of Hollywood). They’re headed for brew at 10 Speed Coffee in a 1964 Studebaker Avanti.',
|
'description': 'md5:d2b8ca816579ae8a7bf28bfff8cefc8a',
|
||||||
'thumbnail': r're:^https?://.*\.jpg',
|
'duration': 2583,
|
||||||
'duration': 906,
|
'view_count': int,
|
||||||
'series': 'Comedians In Cars Getting Coffee',
|
'average_rating': 0,
|
||||||
'season_number': 8,
|
'age_limit': 14,
|
||||||
'episode_number': 4,
|
'genre': 'Action, Sci-Fi',
|
||||||
'subtitles': {
|
'creator': 'Allan Kroeker',
|
||||||
'en-US': [
|
'artist': 'Keith Hamilton Cobb, Kevin Sorbo, Lisa Ryder, Lexa Doig, Robert Hewitt Wolfe',
|
||||||
{'ext': 'vtt'},
|
'release_year': 2000,
|
||||||
{'ext': 'tt'},
|
'series': 'Andromeda',
|
||||||
]
|
'episode': 'Under The Night',
|
||||||
},
|
'season_number': 1,
|
||||||
|
'episode_number': 1,
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
# m3u8 download
|
# m3u8 download
|
||||||
@ -33,109 +47,118 @@ class CrackleIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
_THUMBNAIL_RES = [
|
|
||||||
(120, 90),
|
|
||||||
(208, 156),
|
|
||||||
(220, 124),
|
|
||||||
(220, 220),
|
|
||||||
(240, 180),
|
|
||||||
(250, 141),
|
|
||||||
(315, 236),
|
|
||||||
(320, 180),
|
|
||||||
(360, 203),
|
|
||||||
(400, 300),
|
|
||||||
(421, 316),
|
|
||||||
(460, 330),
|
|
||||||
(460, 460),
|
|
||||||
(462, 260),
|
|
||||||
(480, 270),
|
|
||||||
(587, 330),
|
|
||||||
(640, 480),
|
|
||||||
(700, 330),
|
|
||||||
(700, 394),
|
|
||||||
(854, 480),
|
|
||||||
(1024, 1024),
|
|
||||||
(1920, 1080),
|
|
||||||
]
|
|
||||||
|
|
||||||
# extracted from http://legacyweb-us.crackle.com/flash/ReferrerRedirect.ashx
|
|
||||||
_MEDIA_FILE_SLOTS = {
|
|
||||||
'c544.flv': {
|
|
||||||
'width': 544,
|
|
||||||
'height': 306,
|
|
||||||
},
|
|
||||||
'360p.mp4': {
|
|
||||||
'width': 640,
|
|
||||||
'height': 360,
|
|
||||||
},
|
|
||||||
'480p.mp4': {
|
|
||||||
'width': 852,
|
|
||||||
'height': 478,
|
|
||||||
},
|
|
||||||
'480p_1mbps.mp4': {
|
|
||||||
'width': 852,
|
|
||||||
'height': 478,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
config_doc = self._download_xml(
|
country_code = self._downloader.params.get('geo_bypass_country', None)
|
||||||
'http://legacyweb-us.crackle.com/flash/QueryReferrer.ashx?site=16',
|
countries = [country_code] if country_code else (
|
||||||
video_id, 'Downloading config')
|
'US', 'AU', 'CA', 'AS', 'FM', 'GU', 'MP', 'PR', 'PW', 'MH', 'VI')
|
||||||
|
|
||||||
item = self._download_xml(
|
last_e = None
|
||||||
'http://legacyweb-us.crackle.com/app/revamp/vidwallcache.aspx?flags=-1&fm=%s' % video_id,
|
|
||||||
video_id, headers=self.geo_verification_headers()).find('i')
|
|
||||||
title = item.attrib['t']
|
|
||||||
|
|
||||||
subtitles = {}
|
for country in countries:
|
||||||
formats = self._extract_m3u8_formats(
|
try:
|
||||||
'http://content.uplynk.com/ext/%s/%s.m3u8' % (config_doc.attrib['strUplynkOwnerId'], video_id),
|
media = self._download_json(
|
||||||
video_id, 'mp4', m3u8_id='hls', fatal=None)
|
'https://web-api-us.crackle.com/Service.svc/details/media/%s/%s'
|
||||||
thumbnails = []
|
% (video_id, country), video_id,
|
||||||
path = item.attrib.get('p')
|
'Downloading media JSON as %s' % country,
|
||||||
if path:
|
'Unable to download media JSON', query={
|
||||||
for width, height in self._THUMBNAIL_RES:
|
'disableProtocols': 'true',
|
||||||
res = '%dx%d' % (width, height)
|
'format': 'json'
|
||||||
thumbnails.append({
|
})
|
||||||
'id': res,
|
except ExtractorError as e:
|
||||||
'url': 'http://images-us-am.crackle.com/%stnl_%s.jpg' % (path, res),
|
# 401 means geo restriction, trying next country
|
||||||
'width': width,
|
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
|
||||||
'height': height,
|
last_e = e
|
||||||
'resolution': res,
|
continue
|
||||||
})
|
raise
|
||||||
http_base_url = 'http://ahttp.crackle.com/' + path
|
|
||||||
for mfs_path, mfs_info in self._MEDIA_FILE_SLOTS.items():
|
|
||||||
formats.append({
|
|
||||||
'url': http_base_url + mfs_path,
|
|
||||||
'format_id': 'http-' + mfs_path.split('.')[0],
|
|
||||||
'width': mfs_info['width'],
|
|
||||||
'height': mfs_info['height'],
|
|
||||||
})
|
|
||||||
for cc in item.findall('cc'):
|
|
||||||
locale = cc.attrib.get('l')
|
|
||||||
v = cc.attrib.get('v')
|
|
||||||
if locale and v:
|
|
||||||
if locale not in subtitles:
|
|
||||||
subtitles[locale] = []
|
|
||||||
for url_ext, ext in (('vtt', 'vtt'), ('xml', 'tt')):
|
|
||||||
subtitles.setdefault(locale, []).append({
|
|
||||||
'url': '%s/%s%s_%s.%s' % (config_doc.attrib['strSubtitleServer'], path, locale, v, url_ext),
|
|
||||||
'ext': ext,
|
|
||||||
})
|
|
||||||
self._sort_formats(formats, ('width', 'height', 'tbr', 'format_id'))
|
|
||||||
|
|
||||||
return {
|
media_urls = media.get('MediaURLs')
|
||||||
'id': video_id,
|
if not media_urls or not isinstance(media_urls, list):
|
||||||
'title': title,
|
continue
|
||||||
'description': item.attrib.get('d'),
|
|
||||||
'duration': int(item.attrib.get('r'), 16) / 1000 if item.attrib.get('r') else None,
|
title = media['Title']
|
||||||
'series': item.attrib.get('sn'),
|
|
||||||
'season_number': int_or_none(item.attrib.get('se')),
|
formats = []
|
||||||
'episode_number': int_or_none(item.attrib.get('ep')),
|
for e in media['MediaURLs']:
|
||||||
'thumbnails': thumbnails,
|
if e.get('UseDRM') is True:
|
||||||
'subtitles': subtitles,
|
continue
|
||||||
'formats': formats,
|
format_url = e.get('Path')
|
||||||
}
|
if not format_url or not isinstance(format_url, compat_str):
|
||||||
|
continue
|
||||||
|
ext = determine_ext(format_url)
|
||||||
|
if ext == 'm3u8':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||||
|
m3u8_id='hls', fatal=False))
|
||||||
|
elif ext == 'mpd':
|
||||||
|
formats.extend(self._extract_mpd_formats(
|
||||||
|
format_url, video_id, mpd_id='dash', fatal=False))
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
description = media.get('Description')
|
||||||
|
duration = int_or_none(media.get(
|
||||||
|
'DurationInSeconds')) or parse_duration(media.get('Duration'))
|
||||||
|
view_count = int_or_none(media.get('CountViews'))
|
||||||
|
average_rating = float_or_none(media.get('UserRating'))
|
||||||
|
age_limit = parse_age_limit(media.get('Rating'))
|
||||||
|
genre = media.get('Genre')
|
||||||
|
release_year = int_or_none(media.get('ReleaseYear'))
|
||||||
|
creator = media.get('Directors')
|
||||||
|
artist = media.get('Cast')
|
||||||
|
|
||||||
|
if media.get('MediaTypeDisplayValue') == 'Full Episode':
|
||||||
|
series = media.get('ShowName')
|
||||||
|
episode = title
|
||||||
|
season_number = int_or_none(media.get('Season'))
|
||||||
|
episode_number = int_or_none(media.get('Episode'))
|
||||||
|
else:
|
||||||
|
series = episode = season_number = episode_number = None
|
||||||
|
|
||||||
|
subtitles = {}
|
||||||
|
cc_files = media.get('ClosedCaptionFiles')
|
||||||
|
if isinstance(cc_files, list):
|
||||||
|
for cc_file in cc_files:
|
||||||
|
if not isinstance(cc_file, dict):
|
||||||
|
continue
|
||||||
|
cc_url = cc_file.get('Path')
|
||||||
|
if not cc_url or not isinstance(cc_url, compat_str):
|
||||||
|
continue
|
||||||
|
lang = cc_file.get('Locale') or 'en'
|
||||||
|
subtitles.setdefault(lang, []).append({'url': cc_url})
|
||||||
|
|
||||||
|
thumbnails = []
|
||||||
|
images = media.get('Images')
|
||||||
|
if isinstance(images, list):
|
||||||
|
for image_key, image_url in images.items():
|
||||||
|
mobj = re.search(r'Img_(\d+)[xX](\d+)', image_key)
|
||||||
|
if not mobj:
|
||||||
|
continue
|
||||||
|
thumbnails.append({
|
||||||
|
'url': image_url,
|
||||||
|
'width': int(mobj.group(1)),
|
||||||
|
'height': int(mobj.group(2)),
|
||||||
|
})
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'duration': duration,
|
||||||
|
'view_count': view_count,
|
||||||
|
'average_rating': average_rating,
|
||||||
|
'age_limit': age_limit,
|
||||||
|
'genre': genre,
|
||||||
|
'creator': creator,
|
||||||
|
'artist': artist,
|
||||||
|
'release_year': release_year,
|
||||||
|
'series': series,
|
||||||
|
'episode': episode,
|
||||||
|
'season_number': season_number,
|
||||||
|
'episode_number': episode_number,
|
||||||
|
'thumbnails': thumbnails,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
'formats': formats,
|
||||||
|
}
|
||||||
|
|
||||||
|
raise last_e
|
||||||
|
@ -3,13 +3,13 @@ from __future__ import unicode_literals
|
|||||||
|
|
||||||
import re
|
import re
|
||||||
import json
|
import json
|
||||||
import base64
|
|
||||||
import zlib
|
import zlib
|
||||||
|
|
||||||
from hashlib import sha1
|
from hashlib import sha1
|
||||||
from math import pow, sqrt, floor
|
from math import pow, sqrt, floor
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
|
compat_b64decode,
|
||||||
compat_etree_fromstring,
|
compat_etree_fromstring,
|
||||||
compat_urllib_parse_urlencode,
|
compat_urllib_parse_urlencode,
|
||||||
compat_urllib_request,
|
compat_urllib_request,
|
||||||
@ -38,11 +38,32 @@ class CrunchyrollBaseIE(InfoExtractor):
|
|||||||
_LOGIN_FORM = 'login_form'
|
_LOGIN_FORM = 'login_form'
|
||||||
_NETRC_MACHINE = 'crunchyroll'
|
_NETRC_MACHINE = 'crunchyroll'
|
||||||
|
|
||||||
|
def _call_rpc_api(self, method, video_id, note=None, data=None):
|
||||||
|
data = data or {}
|
||||||
|
data['req'] = 'RpcApi' + method
|
||||||
|
data = compat_urllib_parse_urlencode(data).encode('utf-8')
|
||||||
|
return self._download_xml(
|
||||||
|
'http://www.crunchyroll.com/xml/',
|
||||||
|
video_id, note, fatal=False, data=data, headers={
|
||||||
|
'Content-Type': 'application/x-www-form-urlencoded',
|
||||||
|
})
|
||||||
|
|
||||||
def _login(self):
|
def _login(self):
|
||||||
(username, password) = self._get_login_info()
|
(username, password) = self._get_login_info()
|
||||||
if username is None:
|
if username is None:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
self._download_webpage(
|
||||||
|
'https://www.crunchyroll.com/?a=formhandler',
|
||||||
|
None, 'Logging in', 'Wrong login info',
|
||||||
|
data=urlencode_postdata({
|
||||||
|
'formname': 'RpcApiUser_Login',
|
||||||
|
'next_url': 'https://www.crunchyroll.com/acct/membership',
|
||||||
|
'name': username,
|
||||||
|
'password': password,
|
||||||
|
}))
|
||||||
|
|
||||||
|
'''
|
||||||
login_page = self._download_webpage(
|
login_page = self._download_webpage(
|
||||||
self._LOGIN_URL, None, 'Downloading login page')
|
self._LOGIN_URL, None, 'Downloading login page')
|
||||||
|
|
||||||
@ -86,6 +107,7 @@ class CrunchyrollBaseIE(InfoExtractor):
|
|||||||
raise ExtractorError('Unable to login: %s' % error, expected=True)
|
raise ExtractorError('Unable to login: %s' % error, expected=True)
|
||||||
|
|
||||||
raise ExtractorError('Unable to log in')
|
raise ExtractorError('Unable to log in')
|
||||||
|
'''
|
||||||
|
|
||||||
def _real_initialize(self):
|
def _real_initialize(self):
|
||||||
self._login()
|
self._login()
|
||||||
@ -250,8 +272,8 @@ class CrunchyrollIE(CrunchyrollBaseIE):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _decrypt_subtitles(self, data, iv, id):
|
def _decrypt_subtitles(self, data, iv, id):
|
||||||
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
|
data = bytes_to_intlist(compat_b64decode(data))
|
||||||
iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
|
iv = bytes_to_intlist(compat_b64decode(iv))
|
||||||
id = int(id)
|
id = int(id)
|
||||||
|
|
||||||
def obfuscate_key_aux(count, modulo, start):
|
def obfuscate_key_aux(count, modulo, start):
|
||||||
@ -365,15 +387,19 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
|||||||
def _get_subtitles(self, video_id, webpage):
|
def _get_subtitles(self, video_id, webpage):
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage):
|
for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage):
|
||||||
sub_page = self._download_webpage(
|
sub_doc = self._call_rpc_api(
|
||||||
'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
|
'Subtitle_GetXml', video_id,
|
||||||
video_id, note='Downloading subtitles for ' + sub_name)
|
'Downloading subtitles for ' + sub_name, data={
|
||||||
id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
|
'subtitle_script_id': sub_id,
|
||||||
iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
|
})
|
||||||
data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
|
if sub_doc is None:
|
||||||
if not id or not iv or not data:
|
|
||||||
continue
|
continue
|
||||||
subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
|
sid = sub_doc.get('id')
|
||||||
|
iv = xpath_text(sub_doc, 'iv', 'subtitle iv')
|
||||||
|
data = xpath_text(sub_doc, 'data', 'subtitle data')
|
||||||
|
if not sid or not iv or not data:
|
||||||
|
continue
|
||||||
|
subtitle = self._decrypt_subtitles(data, iv, sid).decode('utf-8')
|
||||||
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
|
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
|
||||||
if not lang_code:
|
if not lang_code:
|
||||||
continue
|
continue
|
||||||
@ -444,65 +470,79 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
|||||||
for fmt in available_fmts:
|
for fmt in available_fmts:
|
||||||
stream_quality, stream_format = self._FORMAT_IDS[fmt]
|
stream_quality, stream_format = self._FORMAT_IDS[fmt]
|
||||||
video_format = fmt + 'p'
|
video_format = fmt + 'p'
|
||||||
streamdata_req = sanitized_Request(
|
stream_infos = []
|
||||||
'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
|
streamdata = self._call_rpc_api(
|
||||||
% (video_id, stream_format, stream_quality),
|
'VideoPlayer_GetStandardConfig', video_id,
|
||||||
compat_urllib_parse_urlencode({'current_page': url}).encode('utf-8'))
|
'Downloading media info for %s' % video_format, data={
|
||||||
streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
'media_id': video_id,
|
||||||
streamdata = self._download_xml(
|
'video_format': stream_format,
|
||||||
streamdata_req, video_id,
|
'video_quality': stream_quality,
|
||||||
note='Downloading media info for %s' % video_format)
|
'current_page': url,
|
||||||
stream_info = streamdata.find('./{default}preload/stream_info')
|
})
|
||||||
video_encode_id = xpath_text(stream_info, './video_encode_id')
|
if streamdata is not None:
|
||||||
if video_encode_id in video_encode_ids:
|
stream_info = streamdata.find('./{default}preload/stream_info')
|
||||||
continue
|
if stream_info is not None:
|
||||||
video_encode_ids.append(video_encode_id)
|
stream_infos.append(stream_info)
|
||||||
|
stream_info = self._call_rpc_api(
|
||||||
|
'VideoEncode_GetStreamInfo', video_id,
|
||||||
|
'Downloading stream info for %s' % video_format, data={
|
||||||
|
'media_id': video_id,
|
||||||
|
'video_format': stream_format,
|
||||||
|
'video_encode_quality': stream_quality,
|
||||||
|
})
|
||||||
|
if stream_info is not None:
|
||||||
|
stream_infos.append(stream_info)
|
||||||
|
for stream_info in stream_infos:
|
||||||
|
video_encode_id = xpath_text(stream_info, './video_encode_id')
|
||||||
|
if video_encode_id in video_encode_ids:
|
||||||
|
continue
|
||||||
|
video_encode_ids.append(video_encode_id)
|
||||||
|
|
||||||
video_file = xpath_text(stream_info, './file')
|
video_file = xpath_text(stream_info, './file')
|
||||||
if not video_file:
|
if not video_file:
|
||||||
continue
|
continue
|
||||||
if video_file.startswith('http'):
|
if video_file.startswith('http'):
|
||||||
formats.extend(self._extract_m3u8_formats(
|
formats.extend(self._extract_m3u8_formats(
|
||||||
video_file, video_id, 'mp4', entry_protocol='m3u8_native',
|
video_file, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||||
m3u8_id='hls', fatal=False))
|
m3u8_id='hls', fatal=False))
|
||||||
continue
|
|
||||||
|
|
||||||
video_url = xpath_text(stream_info, './host')
|
|
||||||
if not video_url:
|
|
||||||
continue
|
|
||||||
metadata = stream_info.find('./metadata')
|
|
||||||
format_info = {
|
|
||||||
'format': video_format,
|
|
||||||
'format_id': video_format,
|
|
||||||
'height': int_or_none(xpath_text(metadata, './height')),
|
|
||||||
'width': int_or_none(xpath_text(metadata, './width')),
|
|
||||||
}
|
|
||||||
|
|
||||||
if '.fplive.net/' in video_url:
|
|
||||||
video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
|
|
||||||
parsed_video_url = compat_urlparse.urlparse(video_url)
|
|
||||||
direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
|
|
||||||
netloc='v.lvlt.crcdn.net',
|
|
||||||
path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_file.split(':')[-1])))
|
|
||||||
if self._is_valid_url(direct_video_url, video_id, video_format):
|
|
||||||
format_info.update({
|
|
||||||
'url': direct_video_url,
|
|
||||||
})
|
|
||||||
formats.append(format_info)
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
format_info.update({
|
video_url = xpath_text(stream_info, './host')
|
||||||
'url': video_url,
|
if not video_url:
|
||||||
'play_path': video_file,
|
continue
|
||||||
'ext': 'flv',
|
metadata = stream_info.find('./metadata')
|
||||||
})
|
format_info = {
|
||||||
formats.append(format_info)
|
'format': video_format,
|
||||||
self._sort_formats(formats)
|
'height': int_or_none(xpath_text(metadata, './height')),
|
||||||
|
'width': int_or_none(xpath_text(metadata, './width')),
|
||||||
|
}
|
||||||
|
|
||||||
metadata = self._download_xml(
|
if '.fplive.net/' in video_url:
|
||||||
'http://www.crunchyroll.com/xml', video_id,
|
video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
|
||||||
note='Downloading media info', query={
|
parsed_video_url = compat_urlparse.urlparse(video_url)
|
||||||
'req': 'RpcApiVideoPlayer_GetMediaMetadata',
|
direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
|
||||||
|
netloc='v.lvlt.crcdn.net',
|
||||||
|
path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_file.split(':')[-1])))
|
||||||
|
if self._is_valid_url(direct_video_url, video_id, video_format):
|
||||||
|
format_info.update({
|
||||||
|
'format_id': 'http-' + video_format,
|
||||||
|
'url': direct_video_url,
|
||||||
|
})
|
||||||
|
formats.append(format_info)
|
||||||
|
continue
|
||||||
|
|
||||||
|
format_info.update({
|
||||||
|
'format_id': 'rtmp-' + video_format,
|
||||||
|
'url': video_url,
|
||||||
|
'play_path': video_file,
|
||||||
|
'ext': 'flv',
|
||||||
|
})
|
||||||
|
formats.append(format_info)
|
||||||
|
self._sort_formats(formats, ('height', 'width', 'tbr', 'fps'))
|
||||||
|
|
||||||
|
metadata = self._call_rpc_api(
|
||||||
|
'VideoPlayer_GetMediaMetadata', video_id,
|
||||||
|
note='Downloading media info', data={
|
||||||
'media_id': video_id,
|
'media_id': video_id,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -4,13 +4,14 @@ import re
|
|||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
|
||||||
unescapeHTML,
|
|
||||||
find_xpath_attr,
|
|
||||||
smuggle_url,
|
|
||||||
determine_ext,
|
determine_ext,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
extract_attributes,
|
extract_attributes,
|
||||||
|
find_xpath_attr,
|
||||||
|
get_element_by_class,
|
||||||
|
int_or_none,
|
||||||
|
smuggle_url,
|
||||||
|
unescapeHTML,
|
||||||
)
|
)
|
||||||
from .senateisvp import SenateISVPIE
|
from .senateisvp import SenateISVPIE
|
||||||
from .ustream import UstreamIE
|
from .ustream import UstreamIE
|
||||||
@ -68,6 +69,10 @@ class CSpanIE(InfoExtractor):
|
|||||||
'uploader': 'HouseCommittee',
|
'uploader': 'HouseCommittee',
|
||||||
'uploader_id': '12987475',
|
'uploader_id': '12987475',
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
# Audio Only
|
||||||
|
'url': 'https://www.c-span.org/video/?437336-1/judiciary-antitrust-competition-policy-consumer-rights',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
|
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
|
||||||
|
|
||||||
@ -111,7 +116,15 @@ class CSpanIE(InfoExtractor):
|
|||||||
title = self._og_search_title(webpage)
|
title = self._og_search_title(webpage)
|
||||||
surl = smuggle_url(senate_isvp_url, {'force_title': title})
|
surl = smuggle_url(senate_isvp_url, {'force_title': title})
|
||||||
return self.url_result(surl, 'SenateISVP', video_id, title)
|
return self.url_result(surl, 'SenateISVP', video_id, title)
|
||||||
|
video_id = self._search_regex(
|
||||||
|
r'jwsetup\.clipprog\s*=\s*(\d+);',
|
||||||
|
webpage, 'jwsetup program id', default=None)
|
||||||
|
if video_id:
|
||||||
|
video_type = 'program'
|
||||||
if video_type is None or video_id is None:
|
if video_type is None or video_id is None:
|
||||||
|
error_message = get_element_by_class('VLplayer-error-message', webpage)
|
||||||
|
if error_message:
|
||||||
|
raise ExtractorError(error_message)
|
||||||
raise ExtractorError('unable to find video id and type')
|
raise ExtractorError('unable to find video id and type')
|
||||||
|
|
||||||
def get_text_attr(d, attr):
|
def get_text_attr(d, attr):
|
||||||
@ -138,7 +151,7 @@ class CSpanIE(InfoExtractor):
|
|||||||
entries = []
|
entries = []
|
||||||
for partnum, f in enumerate(files):
|
for partnum, f in enumerate(files):
|
||||||
formats = []
|
formats = []
|
||||||
for quality in f['qualities']:
|
for quality in f.get('qualities', []):
|
||||||
formats.append({
|
formats.append({
|
||||||
'format_id': '%s-%sp' % (get_text_attr(quality, 'bitrate'), get_text_attr(quality, 'height')),
|
'format_id': '%s-%sp' % (get_text_attr(quality, 'bitrate'), get_text_attr(quality, 'height')),
|
||||||
'url': unescapeHTML(get_text_attr(quality, 'file')),
|
'url': unescapeHTML(get_text_attr(quality, 'file')),
|
||||||
|
@ -235,7 +235,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
|||||||
|
|
||||||
# vevo embed
|
# vevo embed
|
||||||
vevo_id = self._search_regex(
|
vevo_id = self._search_regex(
|
||||||
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
|
r'<link rel="video_src" href="[^"]*?vevo\.com[^"]*?video=(?P<id>[\w]*)',
|
||||||
webpage, 'vevo embed', default=None)
|
webpage, 'vevo embed', default=None)
|
||||||
if vevo_id:
|
if vevo_id:
|
||||||
return self.url_result('vevo:%s' % vevo_id, 'Vevo')
|
return self.url_result('vevo:%s' % vevo_id, 'Vevo')
|
||||||
@ -325,7 +325,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
|||||||
|
|
||||||
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
|
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
|
||||||
IE_NAME = 'dailymotion:playlist'
|
IE_NAME = 'dailymotion:playlist'
|
||||||
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
|
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>[^/?#&]+)'
|
||||||
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
|
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
|
||||||
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
|
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
@ -413,52 +413,3 @@ class DailymotionUserIE(DailymotionPlaylistIE):
|
|||||||
'title': full_user,
|
'title': full_user,
|
||||||
'entries': self._extract_entries(user),
|
'entries': self._extract_entries(user),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class DailymotionCloudIE(DailymotionBaseInfoExtractor):
|
|
||||||
_VALID_URL_PREFIX = r'https?://api\.dmcloud\.net/(?:player/)?embed/'
|
|
||||||
_VALID_URL = r'%s[^/]+/(?P<id>[^/?]+)' % _VALID_URL_PREFIX
|
|
||||||
_VALID_EMBED_URL = r'%s[^/]+/[^\'"]+' % _VALID_URL_PREFIX
|
|
||||||
|
|
||||||
_TESTS = [{
|
|
||||||
# From http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html
|
|
||||||
# Tested at FranceTvInfo_2
|
|
||||||
'url': 'http://api.dmcloud.net/embed/4e7343f894a6f677b10006b4/556e03339473995ee145930c?auth=1464865870-0-jyhsm84b-ead4c701fb750cf9367bf4447167a3db&autoplay=1',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
# http://www.francetvinfo.fr/societe/larguez-les-amarres-le-cobaturage-se-developpe_980101.html
|
|
||||||
'url': 'http://api.dmcloud.net/player/embed/4e7343f894a6f677b10006b4/559545469473996d31429f06?auth=1467430263-0-90tglw2l-a3a4b64ed41efe48d7fccad85b8b8fda&autoplay=1',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def _extract_dmcloud_url(cls, webpage):
|
|
||||||
mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL, webpage)
|
|
||||||
if mobj:
|
|
||||||
return mobj.group(1)
|
|
||||||
|
|
||||||
mobj = re.search(
|
|
||||||
r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL,
|
|
||||||
webpage)
|
|
||||||
if mobj:
|
|
||||||
return mobj.group(1)
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
|
|
||||||
webpage = self._download_webpage_no_ff(url, video_id)
|
|
||||||
|
|
||||||
title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')
|
|
||||||
|
|
||||||
video_info = self._parse_json(self._search_regex(
|
|
||||||
r'var\s+info\s*=\s*([^;]+);', webpage, 'video info'), video_id)
|
|
||||||
|
|
||||||
# TODO: parse ios_url, which is in fact a manifest
|
|
||||||
video_url = video_info['mp4_url']
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': video_id,
|
|
||||||
'url': video_url,
|
|
||||||
'title': title,
|
|
||||||
'thumbnail': video_info.get('thumbnail_url'),
|
|
||||||
}
|
|
||||||
|
@ -10,36 +10,34 @@ from ..aes import (
|
|||||||
aes_cbc_decrypt,
|
aes_cbc_decrypt,
|
||||||
aes_cbc_encrypt,
|
aes_cbc_encrypt,
|
||||||
)
|
)
|
||||||
|
from ..compat import compat_b64decode
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
bytes_to_intlist,
|
bytes_to_intlist,
|
||||||
bytes_to_long,
|
bytes_to_long,
|
||||||
clean_html,
|
extract_attributes,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
intlist_to_bytes,
|
intlist_to_bytes,
|
||||||
get_element_by_id,
|
|
||||||
js_to_json,
|
js_to_json,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
long_to_bytes,
|
long_to_bytes,
|
||||||
pkcs1pad,
|
pkcs1pad,
|
||||||
remove_end,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class DaisukiIE(InfoExtractor):
|
class DaisukiMottoIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?daisuki\.net/[^/]+/[^/]+/[^/]+/watch\.[^.]+\.(?P<id>\d+)\.html'
|
_VALID_URL = r'https?://motto\.daisuki\.net/framewatch/embed/[^/]+/(?P<id>[0-9a-zA-Z]{3})'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.daisuki.net/tw/en/anime/watch.TheIdolMasterCG.11213.html',
|
'url': 'http://motto.daisuki.net/framewatch/embed/embedDRAGONBALLSUPERUniverseSurvivalsaga/V2e/760/428',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '11213',
|
'id': 'V2e',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': '#01 Who is in the pumpkin carriage? - THE IDOLM@STER CINDERELLA GIRLS',
|
'title': '#117 SHOWDOWN OF LOVE! ANDROIDS VS UNIVERSE 2!!',
|
||||||
'subtitles': {
|
'subtitles': {
|
||||||
'mul': [{
|
'mul': [{
|
||||||
'ext': 'ttml',
|
'ext': 'ttml',
|
||||||
}],
|
}],
|
||||||
},
|
},
|
||||||
'creator': 'BANDAI NAMCO Entertainment',
|
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True, # AES-encrypted HLS stream
|
'skip_download': True, # AES-encrypted HLS stream
|
||||||
@ -73,15 +71,17 @@ class DaisukiIE(InfoExtractor):
|
|||||||
|
|
||||||
n, e = self._RSA_KEY
|
n, e = self._RSA_KEY
|
||||||
encrypted_aeskey = long_to_bytes(pow(bytes_to_long(padded_aeskey), e, n))
|
encrypted_aeskey = long_to_bytes(pow(bytes_to_long(padded_aeskey), e, n))
|
||||||
init_data = self._download_json('http://www.daisuki.net/bin/bgn/init', video_id, query={
|
init_data = self._download_json(
|
||||||
's': flashvars.get('s', ''),
|
'http://motto.daisuki.net/fastAPI/bgn/init/',
|
||||||
'c': flashvars.get('ss3_prm', ''),
|
video_id, query={
|
||||||
'e': url,
|
's': flashvars.get('s', ''),
|
||||||
'd': base64.b64encode(intlist_to_bytes(aes_cbc_encrypt(
|
'c': flashvars.get('ss3_prm', ''),
|
||||||
bytes_to_intlist(json.dumps(data)),
|
'e': url,
|
||||||
aes_key, iv))).decode('ascii'),
|
'd': base64.b64encode(intlist_to_bytes(aes_cbc_encrypt(
|
||||||
'a': base64.b64encode(encrypted_aeskey).decode('ascii'),
|
bytes_to_intlist(json.dumps(data)),
|
||||||
}, note='Downloading JSON metadata' + (' (try #%d)' % (idx + 1) if idx > 0 else ''))
|
aes_key, iv))).decode('ascii'),
|
||||||
|
'a': base64.b64encode(encrypted_aeskey).decode('ascii'),
|
||||||
|
}, note='Downloading JSON metadata' + (' (try #%d)' % (idx + 1) if idx > 0 else ''))
|
||||||
|
|
||||||
if 'rtn' in init_data:
|
if 'rtn' in init_data:
|
||||||
encrypted_rtn = init_data['rtn']
|
encrypted_rtn = init_data['rtn']
|
||||||
@ -94,18 +94,15 @@ class DaisukiIE(InfoExtractor):
|
|||||||
|
|
||||||
rtn = self._parse_json(
|
rtn = self._parse_json(
|
||||||
intlist_to_bytes(aes_cbc_decrypt(bytes_to_intlist(
|
intlist_to_bytes(aes_cbc_decrypt(bytes_to_intlist(
|
||||||
base64.b64decode(encrypted_rtn)),
|
compat_b64decode(encrypted_rtn)),
|
||||||
aes_key, iv)).decode('utf-8').rstrip('\0'),
|
aes_key, iv)).decode('utf-8').rstrip('\0'),
|
||||||
video_id)
|
video_id)
|
||||||
|
|
||||||
|
title = rtn['title_str']
|
||||||
|
|
||||||
formats = self._extract_m3u8_formats(
|
formats = self._extract_m3u8_formats(
|
||||||
rtn['play_url'], video_id, ext='mp4', entry_protocol='m3u8_native')
|
rtn['play_url'], video_id, ext='mp4', entry_protocol='m3u8_native')
|
||||||
|
|
||||||
title = remove_end(self._og_search_title(webpage), ' - DAISUKI')
|
|
||||||
|
|
||||||
creator = self._html_search_regex(
|
|
||||||
r'Creator\s*:\s*([^<]+)', webpage, 'creator', fatal=False)
|
|
||||||
|
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
caption_url = rtn.get('caption_url')
|
caption_url = rtn.get('caption_url')
|
||||||
if caption_url:
|
if caption_url:
|
||||||
@ -120,21 +117,18 @@ class DaisukiIE(InfoExtractor):
|
|||||||
'title': title,
|
'title': title,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
'creator': creator,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class DaisukiPlaylistIE(InfoExtractor):
|
class DaisukiMottoPlaylistIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)daisuki\.net/[^/]+/[^/]+/[^/]+/detail\.(?P<id>[a-zA-Z0-9]+)\.html'
|
_VALID_URL = r'https?://motto\.daisuki\.net/(?P<id>information)/'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.daisuki.net/tw/en/anime/detail.TheIdolMasterCG.html',
|
'url': 'http://motto.daisuki.net/information/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'TheIdolMasterCG',
|
'title': 'DRAGON BALL SUPER',
|
||||||
'title': 'THE IDOLM@STER CINDERELLA GIRLS',
|
|
||||||
'description': 'md5:0f2c028a9339f7a2c7fbf839edc5c5d8',
|
|
||||||
},
|
},
|
||||||
'playlist_count': 26,
|
'playlist_mincount': 117,
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -142,18 +136,19 @@ class DaisukiPlaylistIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(url, playlist_id)
|
webpage = self._download_webpage(url, playlist_id)
|
||||||
|
|
||||||
episode_pattern = r'''(?sx)
|
entries = []
|
||||||
<img[^>]+delay="[^"]+/(\d+)/movie\.jpg".+?
|
for li in re.findall(r'(<li[^>]+?data-product_id="[a-zA-Z0-9]{3}"[^>]+>)', webpage):
|
||||||
<p[^>]+class=".*?\bepisodeNumber\b.*?">(?:<a[^>]+>)?([^<]+)'''
|
attr = extract_attributes(li)
|
||||||
entries = [{
|
ad_id = attr.get('data-ad_id')
|
||||||
'_type': 'url_transparent',
|
product_id = attr.get('data-product_id')
|
||||||
'url': url.replace('detail', 'watch').replace('.html', '.' + movie_id + '.html'),
|
if ad_id and product_id:
|
||||||
'episode_id': episode_id,
|
episode_id = attr.get('data-chapter')
|
||||||
'episode_number': int_or_none(episode_id),
|
entries.append({
|
||||||
} for movie_id, episode_id in re.findall(episode_pattern, webpage)]
|
'_type': 'url_transparent',
|
||||||
|
'url': 'http://motto.daisuki.net/framewatch/embed/%s/%s/760/428' % (ad_id, product_id),
|
||||||
|
'episode_id': episode_id,
|
||||||
|
'episode_number': int_or_none(episode_id),
|
||||||
|
'ie_key': 'DaisukiMotto',
|
||||||
|
})
|
||||||
|
|
||||||
playlist_title = remove_end(
|
return self.playlist_result(entries, playlist_title='DRAGON BALL SUPER')
|
||||||
self._og_search_title(webpage, fatal=False), ' - Anime - DAISUKI')
|
|
||||||
playlist_description = clean_html(get_element_by_id('synopsisTxt', webpage))
|
|
||||||
|
|
||||||
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
|
|
||||||
|
@ -2,53 +2,85 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import unified_strdate
|
from ..compat import compat_str
|
||||||
|
from ..utils import (
|
||||||
|
float_or_none,
|
||||||
|
unified_strdate,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class DctpTvIE(InfoExtractor):
|
class DctpTvIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?dctp\.tv/(#/)?filme/(?P<id>.+?)/$'
|
_VALID_URL = r'https?://(?:www\.)?dctp\.tv/(?:#/)?filme/(?P<id>[^/?#&]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.dctp.tv/filme/videoinstallation-fuer-eine-kaufhausfassade/',
|
'url': 'http://www.dctp.tv/filme/videoinstallation-fuer-eine-kaufhausfassade/',
|
||||||
'md5': '174dd4a8a6225cf5655952f969cfbe24',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '95eaa4f33dad413aa17b4ee613cccc6c',
|
'id': '95eaa4f33dad413aa17b4ee613cccc6c',
|
||||||
'display_id': 'videoinstallation-fuer-eine-kaufhausfassade',
|
'display_id': 'videoinstallation-fuer-eine-kaufhausfassade',
|
||||||
'ext': 'mp4',
|
'ext': 'flv',
|
||||||
'title': 'Videoinstallation für eine Kaufhausfassade',
|
'title': 'Videoinstallation für eine Kaufhausfassade',
|
||||||
'description': 'Kurzfilm',
|
'description': 'Kurzfilm',
|
||||||
'upload_date': '20110407',
|
'upload_date': '20110407',
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
'duration': 71.24,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
display_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, video_id)
|
|
||||||
|
|
||||||
object_id = self._html_search_meta('DC.identifier', webpage)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
servers_json = self._download_json(
|
video_id = self._html_search_meta(
|
||||||
'http://www.dctp.tv/elastic_streaming_client/get_streaming_server/',
|
'DC.identifier', webpage, 'video id',
|
||||||
video_id, note='Downloading server list')
|
default=None) or self._search_regex(
|
||||||
server = servers_json[0]['server']
|
r'id=["\']uuid[^>]+>([^<]+)<', webpage, 'video id')
|
||||||
m3u8_path = self._search_regex(
|
|
||||||
r'\'([^\'"]+/playlist\.m3u8)"', webpage, 'm3u8 path')
|
|
||||||
formats = self._extract_m3u8_formats(
|
|
||||||
'http://%s%s' % (server, m3u8_path), video_id, ext='mp4',
|
|
||||||
entry_protocol='m3u8_native')
|
|
||||||
|
|
||||||
title = self._og_search_title(webpage)
|
title = self._og_search_title(webpage)
|
||||||
|
|
||||||
|
servers = self._download_json(
|
||||||
|
'http://www.dctp.tv/streaming_servers/', display_id,
|
||||||
|
note='Downloading server list', fatal=False)
|
||||||
|
|
||||||
|
if servers:
|
||||||
|
endpoint = next(
|
||||||
|
server['endpoint']
|
||||||
|
for server in servers
|
||||||
|
if isinstance(server.get('endpoint'), compat_str) and
|
||||||
|
'cloudfront' in server['endpoint'])
|
||||||
|
else:
|
||||||
|
endpoint = 'rtmpe://s2pqqn4u96e4j8.cloudfront.net/cfx/st/'
|
||||||
|
|
||||||
|
app = self._search_regex(
|
||||||
|
r'^rtmpe?://[^/]+/(?P<app>.*)$', endpoint, 'app')
|
||||||
|
|
||||||
|
formats = [{
|
||||||
|
'url': endpoint,
|
||||||
|
'app': app,
|
||||||
|
'play_path': 'mp4:%s_dctp_0500_4x3.m4v' % video_id,
|
||||||
|
'page_url': url,
|
||||||
|
'player_url': 'http://svm-prod-dctptv-static.s3.amazonaws.com/dctptv-relaunch2012-109.swf',
|
||||||
|
'ext': 'flv',
|
||||||
|
}]
|
||||||
|
|
||||||
description = self._html_search_meta('DC.description', webpage)
|
description = self._html_search_meta('DC.description', webpage)
|
||||||
upload_date = unified_strdate(
|
upload_date = unified_strdate(
|
||||||
self._html_search_meta('DC.date.created', webpage))
|
self._html_search_meta('DC.date.created', webpage))
|
||||||
thumbnail = self._og_search_thumbnail(webpage)
|
thumbnail = self._og_search_thumbnail(webpage)
|
||||||
|
duration = float_or_none(self._search_regex(
|
||||||
|
r'id=["\']duration_in_ms[^+]>(\d+)', webpage, 'duration',
|
||||||
|
default=None), scale=1000)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': object_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'display_id': video_id,
|
'display_id': display_id,
|
||||||
'description': description,
|
'description': description,
|
||||||
'upload_date': upload_date,
|
'upload_date': upload_date,
|
||||||
'thumbnail': thumbnail,
|
'thumbnail': thumbnail,
|
||||||
|
'duration': duration,
|
||||||
}
|
}
|
||||||
|
@ -19,7 +19,7 @@ class DeezerPlaylistIE(InfoExtractor):
|
|||||||
'id': '176747451',
|
'id': '176747451',
|
||||||
'title': 'Best!',
|
'title': 'Best!',
|
||||||
'uploader': 'Anonymous',
|
'uploader': 'Anonymous',
|
||||||
'thumbnail': r're:^https?://cdn-images.deezer.com/images/cover/.*\.jpg$',
|
'thumbnail': r're:^https?://cdn-images\.deezer\.com/images/cover/.*\.jpg$',
|
||||||
},
|
},
|
||||||
'playlist_count': 30,
|
'playlist_count': 30,
|
||||||
'skip': 'Only available in .de',
|
'skip': 'Only available in .de',
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user