from __future__ import unicode_literals

import io
import os
import subprocess
import time
import re


from .common import AudioConversionError, PostProcessor

from ..utils import (
    encodeArgument,
    encodeFilename,
    get_exe_version,
    is_outdated_version,
    PostProcessingError,
    prepend_extension,
    shell_quote,
    subtitles_filename,
    dfxp2srt,
    ISO639Utils,
    replace_extension,
)


EXT_TO_OUT_FORMATS = {
    'aac': 'adts',
    'flac': 'flac',
    'm4a': 'ipod',
    'mka': 'matroska',
    'mkv': 'matroska',
    'mpg': 'mpeg',
    'ogv': 'ogg',
    'ts': 'mpegts',
    'wma': 'asf',
    'wmv': 'asf',
}
ACODECS = {
    'mp3': 'libmp3lame',
    'aac': 'aac',
    'flac': 'flac',
    'm4a': 'aac',
    'opus': 'libopus',
    'vorbis': 'libvorbis',
    'wav': None,
}
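
# For illustration: ACODECS maps a requested audio format to the ffmpeg
# encoder used to produce it, so extracting 'opus' audio at 128 kbit/s
# builds a command roughly along the lines of
#   ffmpeg -y -i file:input.mp4 -vn -acodec libopus -b:a 128k file:input.opus
# EXT_TO_OUT_FORMATS maps container extensions to ffmpeg muxer names
# ('ts' -> 'mpegts', 'm4a' -> 'ipod', ...) for callers that need to force
# the output format with '-f'.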


class FFmpegPostProcessorError(PostProcessingError):
    pass


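# Base class for all ffmpeg-backed post processors: it locates the
# ffmpeg/avconv and ffprobe/avprobe executables (honouring the
# 'ffmpeg_location' and 'prefer_ffmpeg' options), records their versions
# and builds/executes the actual command lines. Input and output paths are
# passed as 'file:<name>' so that names containing ':' or starting with
# '-' are not misparsed by ffmpeg.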
class FFmpegPostProcessor(PostProcessor):
    def __init__(self, downloader=None):
        PostProcessor.__init__(self, downloader)
        self._determine_executables()

    def check_version(self):
        if not self.available:
            raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')

        required_version = '10-0' if self.basename == 'avconv' else '1.0'
        if is_outdated_version(
                self._versions[self.basename], required_version):
            warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
                self.basename, self.basename, required_version)
            if self._downloader:
                self._downloader.report_warning(warning)

    @staticmethod
    def get_versions(downloader=None):
        return FFmpegPostProcessor(downloader)._versions

    def _determine_executables(self):
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
        prefer_ffmpeg = True

        def get_ffmpeg_version(path):
            ver = get_exe_version(path, args=['-version'])
            if ver:
                regexs = [
                    r'(?:\d+:)?([0-9.]+)-[0-9]+ubuntu[0-9.]+$',  # Ubuntu, see [1]
                    r'n([0-9.]+)$',  # Arch Linux
                    # 1. http://www.ducea.com/2006/06/17/ubuntu-package-version-naming-explanation/
                ]
                for regex in regexs:
                    mobj = re.match(regex, ver)
                    if mobj:
                        ver = mobj.group(1)
            return ver

        self.basename = None
        self.probe_basename = None

        self._paths = None
        self._versions = None
        if self._downloader:
            prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', True)
            location = self._downloader.params.get('ffmpeg_location')
            if location is not None:
                if not os.path.exists(location):
                    self._downloader.report_warning(
                        'ffmpeg-location %s does not exist! '
                        'Continuing without avconv/ffmpeg.' % (location))
                    self._versions = {}
                    return
                elif not os.path.isdir(location):
                    basename = os.path.splitext(os.path.basename(location))[0]
                    if basename not in programs:
                        self._downloader.report_warning(
                            'Cannot identify executable %s, its basename should be one of %s. '
                            'Continuing without avconv/ffmpeg.' %
                            (location, ', '.join(programs)))
                        self._versions = {}
                        return None
                    location = os.path.dirname(os.path.abspath(location))
                    if basename in ('ffmpeg', 'ffprobe'):
                        prefer_ffmpeg = True

                self._paths = dict(
                    (p, os.path.join(location, p)) for p in programs)
                self._versions = dict(
                    (p, get_ffmpeg_version(self._paths[p])) for p in programs)
        if self._versions is None:
            self._versions = dict(
                (p, get_ffmpeg_version(p)) for p in programs)
            self._paths = dict((p, p) for p in programs)

        if prefer_ffmpeg is False:
            prefs = ('avconv', 'ffmpeg')
        else:
            prefs = ('ffmpeg', 'avconv')
        for p in prefs:
            if self._versions[p]:
                self.basename = p
                break

        if prefer_ffmpeg is False:
            prefs = ('avprobe', 'ffprobe')
        else:
            prefs = ('ffprobe', 'avprobe')
        for p in prefs:
            if self._versions[p]:
                self.probe_basename = p
                break

    @property
    def available(self):
        return self.basename is not None

    @property
    def executable(self):
        return self._paths[self.basename]

    @property
    def probe_available(self):
        return self.probe_basename is not None

    @property
    def probe_executable(self):
        return self._paths[self.probe_basename]

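    # Probes the audio codec of a file. With ffprobe/avprobe available the
    # '-show_streams' output is scanned for a 'codec_name=...' line followed
    # by 'codec_type=audio'; otherwise the 'Stream #0:1(...): Audio: <codec>'
    # line that ffmpeg prints to stderr for a bare '-i' invocation is parsed
    # instead (ffmpeg exits with status 1 in that case, hence the expected
    # return code below).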
    def get_audio_codec(self, path):
        if not self.probe_available and not self.available:
            raise PostProcessingError('ffprobe/avprobe and ffmpeg/avconv not found. Please install one.')
        try:
            if self.probe_available:
                cmd = [
                    encodeFilename(self.probe_executable, True),
                    encodeArgument('-show_streams')]
            else:
                cmd = [
                    encodeFilename(self.executable, True),
                    encodeArgument('-i')]
            cmd.append(encodeFilename(self._ffmpeg_filename_argument(path), True))
            if self._downloader.params.get('verbose', False):
                self._downloader.to_screen(
                    '[debug] %s command line: %s' % (self.basename, shell_quote(cmd)))
            handle = subprocess.Popen(
                cmd, stderr=subprocess.PIPE,
                stdout=subprocess.PIPE, stdin=subprocess.PIPE)
            stdout_data, stderr_data = handle.communicate()
            expected_ret = 0 if self.probe_available else 1
            if handle.wait() != expected_ret:
                return None
        except (IOError, OSError):
            return None
        output = (stdout_data if self.probe_available else stderr_data).decode('ascii', 'ignore')
        if self.probe_available:
            audio_codec = None
            for line in output.split('\n'):
                if line.startswith('codec_name='):
                    audio_codec = line.split('=')[1].strip()
                elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                    return audio_codec
        else:
            # Stream #FILE_INDEX:STREAM_INDEX[STREAM_ID](LANGUAGE): CODEC_TYPE: CODEC_NAME
            mobj = re.search(
                r'Stream\s*#\d+:\d+(?:\[0x[0-9a-f]+\])?(?:\([a-z]{3}\))?:\s*Audio:\s*([0-9a-z]+)',
                output)
            if mobj:
                return mobj.group(1)
        return None

    def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
        self.check_version()

        oldest_mtime = min(
            os.stat(encodeFilename(path)).st_mtime for path in input_paths)

        opts += self._configuration_args()

        files_cmd = []
        for path in input_paths:
            files_cmd.extend([
                encodeArgument('-i'),
                encodeFilename(self._ffmpeg_filename_argument(path), True)
            ])
        cmd = [encodeFilename(self.executable, True), encodeArgument('-y')]
        # avconv does not have repeat option
        if self.basename == 'ffmpeg':
            cmd += [encodeArgument('-loglevel'), encodeArgument('repeat+info')]
        cmd += (files_cmd
                + [encodeArgument(o) for o in opts]
                + [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])

        if self._downloader.params.get('verbose', False):
            self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            stderr = stderr.decode('utf-8', 'replace')
            msg = stderr.strip().split('\n')[-1]
            raise FFmpegPostProcessorError(msg)
        self.try_utime(out_path, oldest_mtime, oldest_mtime)

    def run_ffmpeg(self, path, out_path, opts):
        self.run_ffmpeg_multiple_files([path], out_path, opts)

    def _ffmpeg_filename_argument(self, fn):
        # Always use 'file:' because the filename may contain ':' (ffmpeg
        # interprets that as a protocol) or can start with '-' (-- is broken in
        # ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
        # Also leave '-' intact in order not to break streaming to stdout.
        return 'file:' + fn if fn != '-' else fn


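# Extracts the audio track into a standalone audio file. When the source
# codec already matches the requested format (or 'best' is requested for a
# common codec) the stream is copied with '-acodec copy'; otherwise it is
# re-encoded with the encoder from ACODECS. An --audio-quality value below
# 10 is passed as a VBR quality ('-q:a'), anything else as a bitrate
# ('-b:a <value>k'). A rough example for an AAC source kept as m4a:
#   ffmpeg -y -i file:clip.mp4 -vn -acodec copy -bsf:a aac_adtstoasc file:clip.m4a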
class FFmpegExtractAudioPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
        FFmpegPostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._nopostoverwrites = nopostoverwrites

    def run_ffmpeg(self, path, out_path, codec, more_opts):
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        opts = ['-vn'] + acodec_opts + more_opts
        try:
            FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
        except FFmpegPostProcessorError as err:
            raise AudioConversionError(err.msg)

    def run(self, information):
        path = information['filepath']

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')

        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
            if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
                # Lossless, but in another container
                acodec = 'copy'
                extension = 'm4a'
                more_opts = ['-bsf:a', 'aac_adtstoasc']
            elif filecodec in ['aac', 'flac', 'mp3', 'vorbis', 'opus']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
                if filecodec == 'aac':
                    more_opts = ['-f', 'adts']
                if filecodec == 'vorbis':
                    extension = 'ogg'
            else:
                # MP3 otherwise.
                acodec = 'libmp3lame'
                extension = 'mp3'
                more_opts = []
                if self._preferredquality is not None:
                    if int(self._preferredquality) < 10:
                        more_opts += ['-q:a', self._preferredquality]
                    else:
                        more_opts += ['-b:a', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy if codec is lossy)
            acodec = ACODECS[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
                # The opus codec doesn't support the -aq option
                if int(self._preferredquality) < 10 and extension != 'opus':
                    more_opts += ['-q:a', self._preferredquality]
                else:
                    more_opts += ['-b:a', self._preferredquality + 'k']
            if self._preferredcodec == 'aac':
                more_opts += ['-f', 'adts']
            if self._preferredcodec == 'm4a':
                more_opts += ['-bsf:a', 'aac_adtstoasc']
            if self._preferredcodec == 'vorbis':
                extension = 'ogg'
            if self._preferredcodec == 'wav':
                extension = 'wav'
                more_opts += ['-f', 'wav']

        prefix, sep, ext = path.rpartition('.')  # not os.path.splitext, since the latter does not work on unicode in all setups
        new_path = prefix + sep + extension

        information['filepath'] = new_path
        information['ext'] = extension

        # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
        if (new_path == path
                or (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
            self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
            return [], information

        try:
            self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
            self.run_ffmpeg(path, new_path, acodec, more_opts)
        except AudioConversionError as e:
            raise PostProcessingError(
                'audio conversion failed: ' + e.msg)
        except Exception:
            raise PostProcessingError('error running ' + self.basename)

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            self.try_utime(
                new_path, time.time(), information['filetime'],
                errnote='Cannot update utime of audio file')

        return [path], information


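# Remuxes/recodes the downloaded file into the format requested with
# --recode-video, e.g. a target of 'avi' adds '-c:v libxvid -vtag XVID';
# other targets rely on ffmpeg picking suitable codecs for the new
# container.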
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, preferedformat=None):
        super(FFmpegVideoConvertorPP, self).__init__(downloader)
        self._preferedformat = preferedformat

    def run(self, information):
        path = information['filepath']
        if information['ext'] == self._preferedformat:
            self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
            return [], information
        options = []
        if self._preferedformat == 'avi':
            options.extend(['-c:v', 'libxvid', '-vtag', 'XVID'])
        prefix, sep, ext = path.rpartition('.')
        outpath = prefix + sep + self._preferedformat
        self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
        self.run_ffmpeg(path, outpath, options)
        information['filepath'] = outpath
        information['format'] = self._preferedformat
        information['ext'] = self._preferedformat
        return [path], information


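# Muxes previously downloaded subtitle files into the media file (mp4, webm
# or mkv only; webm additionally requires WebVTT subtitles). For an mp4
# with 'en' and 'de' subtitles the generated options look roughly like:
#   -map 0 -c copy -map -0:s -map -0:d -c:s mov_text
#   -map 1:0 -metadata:s:s:0 language=eng -map 2:0 -metadata:s:s:1 language=ger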
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
    def run(self, information):
        if information['ext'] not in ('mp4', 'webm', 'mkv'):
            self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4, webm or mkv files')
            return [], information
        subtitles = information.get('requested_subtitles')
        if not subtitles:
            self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
            return [], information

        filename = information['filepath']

        ext = information['ext']
        sub_langs = []
        sub_filenames = []
        webm_vtt_warn = False

        for lang, sub_info in subtitles.items():
            sub_ext = sub_info['ext']
            if ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
                sub_langs.append(lang)
                sub_filenames.append(subtitles_filename(filename, lang, sub_ext, ext))
            else:
                if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
                    webm_vtt_warn = True
                    self._downloader.to_screen('[ffmpeg] Only WebVTT subtitles can be embedded in webm files')

        if not sub_langs:
            return [], information

        input_files = [filename] + sub_filenames

        opts = [
            '-map', '0',
            '-c', 'copy',
            # Don't copy the existing subtitles, we may be running the
            # postprocessor a second time
            '-map', '-0:s',
            # Don't copy Apple TV chapters track, bin_data (see #19042, #19024,
            # https://trac.ffmpeg.org/ticket/6016)
            '-map', '-0:d',
        ]
        if information['ext'] == 'mp4':
            opts += ['-c:s', 'mov_text']
        for (i, lang) in enumerate(sub_langs):
            opts.extend(['-map', '%d:0' % (i + 1)])
            lang_code = ISO639Utils.short2long(lang) or lang
            opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])

        temp_filename = prepend_extension(filename, 'temp')
        self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
        self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        return sub_filenames, information


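# Writes tags (title, artist, album, chapters, ...) into the file via
# '-metadata KEY=VALUE' options. Chapters are passed through a temporary
# ffmetadata file referenced with '-map_metadata 1', roughly of the form:
#   ;FFMETADATA1
#   [CHAPTER]
#   TIMEBASE=1/1000
#   START=0
#   END=60000
#   title=Intro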
class FFmpegMetadataPP(FFmpegPostProcessor):
    def run(self, info):
        metadata = {}

        def add(meta_list, info_list=None):
            if not info_list:
                info_list = meta_list
            if not isinstance(meta_list, (list, tuple)):
                meta_list = (meta_list,)
            if not isinstance(info_list, (list, tuple)):
                info_list = (info_list,)
            for info_f in info_list:
                if info.get(info_f) is not None:
                    for meta_f in meta_list:
                        metadata[meta_f] = info[info_f]
                    break

        # See [1-4] for some info on media metadata/metadata supported
        # by ffmpeg.
        # 1. https://kdenlive.org/en/project/adding-meta-data-to-mp4-video/
        # 2. https://wiki.multimedia.cx/index.php/FFmpeg_Metadata
        # 3. https://kodi.wiki/view/Video_file_tagging
        # 4. http://atomicparsley.sourceforge.net/mpeg-4files.html

        add('title', ('track', 'title'))
        add('date', 'upload_date')
        add(('description', 'comment'), 'description')
        add('purl', 'webpage_url')
        add('track', 'track_number')
        add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
        add('genre')
        add('album')
        add('album_artist')
        add('disc', 'disc_number')
        add('show', 'series')
        add('season_number')
        add('episode_id', ('episode', 'episode_id'))
        add('episode_sort', 'episode_number')

        if not metadata:
            self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
            return [], info

        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        in_filenames = [filename]
        options = []

        if info['ext'] == 'm4a':
            options.extend(['-vn', '-acodec', 'copy'])
        else:
            options.extend(['-c', 'copy'])

        for (name, value) in metadata.items():
            options.extend(['-metadata', '%s=%s' % (name, value)])

        chapters = info.get('chapters', [])
        if chapters:
            metadata_filename = replace_extension(filename, 'meta')
            with io.open(metadata_filename, 'wt', encoding='utf-8') as f:
                def ffmpeg_escape(text):
                    return re.sub(r'(=|;|#|\\|\n)', r'\\\1', text)

                metadata_file_content = ';FFMETADATA1\n'
                for chapter in chapters:
                    metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n'
                    metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000)
                    metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000)
                    chapter_title = chapter.get('title')
                    if chapter_title:
                        metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title)
                f.write(metadata_file_content)
            in_filenames.append(metadata_filename)
            options.extend(['-map_metadata', '1'])

        self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
        self.run_ffmpeg_multiple_files(in_filenames, temp_filename, options)
        if chapters:
            os.remove(metadata_filename)
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return [], info


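# Merges separately downloaded video and audio into one container by
# stream-copying the first video stream of the first input and the first
# audio stream of the second input ('-map 0:v:0 -map 1:a:0 -c copy').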
class FFmpegMergerPP(FFmpegPostProcessor):
    def run(self, info):
        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0']
        self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename)
        self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return info['__files_to_merge'], info

    def can_merge(self):
        # TODO: figure out merge-capable ffmpeg version
        if self.basename != 'avconv':
            return True

        required_version = '10-0'
        if is_outdated_version(
                self._versions[self.basename], required_version):
            warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
                       'youtube-dl will download single file media. '
                       'Update %s to version %s or newer to fix this.') % (
                self.basename, self.basename, required_version)
            if self._downloader:
                self._downloader.report_warning(warning)
            return False
        return True


class FFmpegFixupStretchedPP(FFmpegPostProcessor):
    def run(self, info):
        stretched_ratio = info.get('stretched_ratio')
        if stretched_ratio is None or stretched_ratio == 1:
            return [], info

        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')

        options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio]
        self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename)
        self.run_ffmpeg(filename, temp_filename, options)

        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        return [], info


class FFmpegFixupM4aPP(FFmpegPostProcessor):
    def run(self, info):
        if info.get('container') != 'm4a_dash':
            return [], info

        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')

        options = ['-c', 'copy', '-f', 'mp4']
        self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename)
        self.run_ffmpeg(filename, temp_filename, options)

        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        return [], info


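# HLS downloads can leave AAC audio in ADTS framing inside an MP4 file;
# remuxing with the 'aac_adtstoasc' bitstream filter converts it to the
# MP4-native ASC form without re-encoding.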
class FFmpegFixupM3u8PP(FFmpegPostProcessor):
    def run(self, info):
        filename = info['filepath']
        if self.get_audio_codec(filename) == 'aac':
            temp_filename = prepend_extension(filename, 'temp')

            options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
            self._downloader.to_screen('[ffmpeg] Fixing malformed AAC bitstream in "%s"' % filename)
            self.run_ffmpeg(filename, temp_filename, options)

            os.remove(encodeFilename(filename))
            os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return [], info


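# Converts downloaded subtitle files to the format requested with
# --convert-subs. dfxp/ttml/tt subtitles apparently cannot be read by
# ffmpeg directly, so they are first converted to SRT with dfxp2srt() and,
# if another target format was requested, converted onward from that SRT.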
class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, format=None):
        super(FFmpegSubtitlesConvertorPP, self).__init__(downloader)
        self.format = format

    def run(self, info):
        subs = info.get('requested_subtitles')
        filename = info['filepath']
        new_ext = self.format
        new_format = new_ext
        if new_format == 'vtt':
            new_format = 'webvtt'
        if subs is None:
            self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert')
            return [], info
        self._downloader.to_screen('[ffmpeg] Converting subtitles')
        sub_filenames = []
        for lang, sub in subs.items():
            ext = sub['ext']
            if ext == new_ext:
                self._downloader.to_screen(
                    '[ffmpeg] Subtitle file for %s is already in the requested format' % new_ext)
                continue
            old_file = subtitles_filename(filename, lang, ext, info.get('ext'))
            sub_filenames.append(old_file)
            new_file = subtitles_filename(filename, lang, new_ext, info.get('ext'))

            if ext in ('dfxp', 'ttml', 'tt'):
                self._downloader.report_warning(
                    'You have requested to convert dfxp (TTML) subtitles into another format, '
                    'which results in style information loss')

                dfxp_file = old_file
                srt_file = subtitles_filename(filename, lang, 'srt', info.get('ext'))

                with open(dfxp_file, 'rb') as f:
                    srt_data = dfxp2srt(f.read())

                with io.open(srt_file, 'wt', encoding='utf-8') as f:
                    f.write(srt_data)
                old_file = srt_file

                subs[lang] = {
                    'ext': 'srt',
                    'data': srt_data
                }

                if new_ext == 'srt':
                    continue
                else:
                    sub_filenames.append(srt_file)

            self.run_ffmpeg(old_file, new_file, ['-f', new_format])

            with io.open(new_file, 'rt', encoding='utf-8') as f:
                subs[lang] = {
                    'ext': new_ext,
                    'data': f.read(),
                }

        return sub_filenames, info