2013-06-18 22:14:21 +02:00
#!/usr/bin/env python
2016-10-02 13:39:18 +02:00
# coding: utf-8
2013-06-18 22:14:21 +02:00
2014-01-05 01:52:03 +01:00
from __future__ import absolute_import , unicode_literals
2013-06-18 22:14:21 +02:00
2013-12-09 22:00:42 +01:00
import collections
2015-03-01 11:46:57 +01:00
import contextlib
2016-07-15 19:55:43 +02:00
import copy
2014-03-13 15:30:25 +01:00
import datetime
2013-10-06 04:27:09 +02:00
import errno
2015-03-01 11:46:57 +01:00
import fileinput
2013-06-18 22:14:21 +02:00
import io
2014-12-06 14:02:19 +01:00
import itertools
2013-11-20 06:18:24 +01:00
import json
2014-03-30 06:02:41 +02:00
import locale
2015-01-23 00:04:05 +01:00
import operator
2013-06-18 22:14:21 +02:00
import os
2013-11-22 19:57:52 +01:00
import platform
2013-06-18 22:14:21 +02:00
import re
import shutil
2013-11-22 19:57:52 +01:00
import subprocess
2013-06-18 22:14:21 +02:00
import socket
import sys
import time
2015-06-28 22:08:29 +02:00
import tokenize
2013-06-18 22:14:21 +02:00
import traceback
2017-01-31 10:03:31 +01:00
import random
2013-06-18 22:14:21 +02:00
2017-07-15 02:02:14 +02:00
from string import ascii_letters
2014-11-02 11:23:40 +01:00
from . compat import (
2015-11-19 22:08:34 +01:00
compat_basestring ,
2013-11-22 19:57:52 +01:00
compat_cookiejar ,
2015-02-28 21:42:16 +01:00
compat_get_terminal_size ,
2013-11-17 16:47:52 +01:00
compat_http_client ,
2014-12-15 01:06:25 +01:00
compat_kwargs ,
2016-03-05 22:52:42 +01:00
compat_numeric_types ,
2016-03-03 12:24:24 +01:00
compat_os_name ,
2013-11-17 16:47:52 +01:00
compat_str ,
2015-06-28 22:08:29 +02:00
compat_tokenize_tokenize ,
2013-11-17 16:47:52 +01:00
compat_urllib_error ,
compat_urllib_request ,
2015-10-17 17:16:40 +02:00
compat_urllib_request_DataHandler ,
2014-11-02 11:23:40 +01:00
)
from . utils import (
2016-03-26 14:40:33 +01:00
age_restricted ,
args_to_str ,
2013-11-17 16:47:52 +01:00
ContentTooShortError ,
date_from_str ,
DateRange ,
2014-04-30 10:02:03 +02:00
DEFAULT_OUTTMPL ,
2013-11-17 16:47:52 +01:00
determine_ext ,
2016-01-16 05:10:28 +01:00
determine_protocol ,
2013-11-17 16:47:52 +01:00
DownloadError ,
2015-12-20 01:29:36 +01:00
encode_compat_str ,
2013-11-17 16:47:52 +01:00
encodeFilename ,
2015-12-20 02:00:39 +01:00
error_to_compat_str ,
2017-03-25 20:31:16 +01:00
expand_path ,
2013-11-17 16:47:52 +01:00
ExtractorError ,
2013-11-25 03:12:26 +01:00
format_bytes ,
2013-12-16 04:15:10 +01:00
formatSeconds ,
2017-02-04 12:49:58 +01:00
GeoRestrictedError ,
2017-06-08 17:53:14 +02:00
int_or_none ,
2017-02-04 12:49:58 +01:00
ISO3166Utils ,
2013-11-17 16:47:52 +01:00
locked_file ,
2013-11-22 19:57:52 +01:00
make_HTTPS_handler ,
2013-11-17 16:47:52 +01:00
MaxDownloadsReached ,
# [YoutubeDL] Ignore duplicates in --playlist-items
# E.g. '--playlist-items 2-4,3-4,3' should result in '[2,3,4]', not '[2,3,4,3,4,3]'
2017-10-06 18:46:57 +02:00
orderedSet ,
2014-01-20 11:36:47 +01:00
PagedList ,
2015-01-23 00:04:05 +01:00
parse_filesize ,
2015-03-03 00:03:06 +01:00
PerRequestProxyHandler ,
2013-11-22 19:57:52 +01:00
platform_name ,
2016-03-26 14:40:33 +01:00
PostProcessingError ,
2013-11-17 16:47:52 +01:00
preferredencoding ,
2016-03-26 14:40:33 +01:00
prepend_extension ,
2016-05-03 09:15:32 +02:00
register_socks_protocols ,
2015-01-25 02:38:47 +01:00
render_table ,
2016-03-26 14:40:33 +01:00
replace_extension ,
2013-11-17 16:47:52 +01:00
SameFileError ,
sanitize_filename ,
2015-03-08 15:57:30 +01:00
sanitize_path ,
2016-03-26 14:37:41 +01:00
sanitize_url ,
2015-11-20 15:33:49 +01:00
sanitized_Request ,
2015-01-24 18:52:26 +01:00
std_headers ,
2019-02-07 19:08:48 +01:00
str_or_none ,
2013-11-17 16:47:52 +01:00
subtitles_filename ,
UnavailableVideoError ,
2013-12-17 04:13:36 +01:00
url_basename ,
2015-01-10 21:02:27 +01:00
version_tuple ,
2013-11-17 16:47:52 +01:00
write_json_file ,
write_string ,
2018-12-09 00:00:32 +01:00
YoutubeDLCookieJar ,
2015-09-06 02:21:33 +02:00
YoutubeDLCookieProcessor ,
2013-11-22 19:57:52 +01:00
YoutubeDLHandler ,
2020-02-29 13:08:44 +01:00
YoutubeDLRedirectHandler ,
2013-11-17 16:47:52 +01:00
)
2014-09-03 12:41:05 +02:00
from . cache import Cache
2016-02-21 12:28:58 +01:00
from . extractor import get_info_extractor , gen_extractor_classes , _LAZY_LOADER
2017-09-23 19:08:27 +02:00
from . extractor . openload import PhantomJSwrapper
2013-09-23 17:59:27 +02:00
from . downloader import get_suitable_downloader
2014-11-02 10:55:36 +01:00
from . downloader . rtmp import rtmpdump_version
2014-12-15 01:06:25 +01:00
from . postprocessor import (
2016-03-01 21:08:50 +01:00
FFmpegFixupM3u8PP ,
2015-01-23 18:39:12 +01:00
FFmpegFixupM4aPP ,
2015-01-10 05:45:51 +01:00
FFmpegFixupStretchedPP ,
2014-12-15 01:06:25 +01:00
FFmpegMergerPP ,
FFmpegPostProcessor ,
get_postprocessor ,
)
2013-11-22 19:57:52 +01:00
from . version import __version__
2013-06-18 22:14:21 +02:00
2016-03-03 12:24:24 +01:00
# ctypes is only needed on Windows, where it is used to talk to the Win32
# console API (see to_console_title below).
if compat_os_name == 'nt':
    import ctypes
2013-06-18 22:14:21 +02:00
class YoutubeDL ( object ) :
""" YoutubeDL class.
YoutubeDL objects are the ones responsible of downloading the
actual video file and writing it to disk if the user has requested
it , among some other tasks . In most cases there should be one per
program . As , given a video URL , the downloader doesn ' t know how to
extract all the needed information , task that InfoExtractors do , it
has to pass the URL to one of them .
For this , YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order . When it is passed
a URL , the YoutubeDL object handles it to the first InfoExtractor it
finds that reports being able to handle it . The InfoExtractor extracts
all the information about the video or videos the URL refers to , and
YoutubeDL process the extracted information , possibly using a File
Downloader to download the video .
YoutubeDL objects accept a lot of parameters . In order not to saturate
the object constructor with arguments , it receives a dictionary of
options instead . These options are available through the params
attribute for the InfoExtractors to use . The YoutubeDL also
registers itself as the downloader in charge for the InfoExtractors
that are added to it , so this is a " mutual registration " .
Available options :
username : Username for authentication purposes .
password : Password for authentication purposes .
2015-06-14 07:49:42 +02:00
videopassword : Password for accessing a video .
2016-09-15 17:24:55 +02:00
ap_mso : Adobe Pass multiple - system operator identifier .
ap_username : Multiple - system operator account username .
ap_password : Multiple - system operator account password .
2013-06-18 22:14:21 +02:00
usenetrc : Use netrc for authentication instead .
verbose : Print additional info to stdout .
quiet : Do not print messages to stdout .
2014-03-26 00:43:46 +01:00
no_warnings : Do not print out anything for warnings .
2013-06-18 22:14:21 +02:00
forceurl : Force printing final URL .
forcetitle : Force printing title .
forceid : Force printing ID .
forcethumbnail : Force printing thumbnail URL .
forcedescription : Force printing description .
forcefilename : Force printing final filename .
2013-12-16 04:15:10 +01:00
forceduration : Force printing duration .
2013-11-20 06:18:24 +01:00
forcejson : Force printing info_dict as JSON .
2014-10-25 00:30:57 +02:00
dump_single_json : Force printing the info_dict of the whole playlist
( or video ) as a single JSON line .
2013-06-18 22:14:21 +02:00
simulate : Do not download the video files .
2014-12-16 00:22:23 +01:00
format : Video format code . See options . py for more information .
2013-06-18 22:14:21 +02:00
outtmpl : Template for output names .
restrictfilenames : Do not allow " & " and spaces in file names
ignoreerrors : Do not stop on download errors .
2015-06-12 15:20:12 +02:00
force_generic_extractor : Force downloader to use the generic extractor
2019-10-13 18:00:48 +02:00
overwrites : Overwrite all video and metadata files .
2013-06-18 22:14:21 +02:00
nooverwrites : Prevent overwriting files .
playliststart : Playlist item to start at .
playlistend : Playlist item to end at .
2015-01-25 04:24:55 +01:00
playlist_items : Specific indices of playlist to download .
2014-07-11 05:11:11 +02:00
playlistreverse : Download playlist items in reverse order .
2017-01-31 10:03:31 +01:00
playlistrandom : Download playlist items in random order .
2013-06-18 22:14:21 +02:00
matchtitle : Download only matching titles .
rejecttitle : Reject downloads for matching titles .
2013-11-24 06:08:11 +01:00
logger : Log messages to a logging . Logger instance .
2013-06-18 22:14:21 +02:00
logtostderr : Log messages to stderr instead of stdout .
writedescription : Write the video description to a . description file
writeinfojson : Write the video description to a . info . json file
2013-10-14 07:18:58 +02:00
writeannotations : Write the video annotations to a . annotations . xml file
2013-06-18 22:14:21 +02:00
writethumbnail : Write the thumbnail image to a file
2015-01-25 03:11:12 +01:00
write_all_thumbnails : Write all thumbnail formats to files
2013-06-18 22:14:21 +02:00
writesubtitles : Write the video subtitles to a file
2015-11-16 15:15:25 +01:00
writeautomaticsub : Write the automatically generated subtitles to a file
2013-06-18 22:14:21 +02:00
allsubtitles : Downloads all the subtitles of the video
2013-09-14 11:14:40 +02:00
( requires writesubtitles or writeautomaticsub )
2013-06-18 22:14:21 +02:00
listsubtitles : Lists all available subtitles for the video
2015-02-15 18:03:41 +01:00
subtitlesformat : The format code for subtitles
2013-08-23 18:34:57 +02:00
subtitleslangs : List of languages of the subtitles to download
2013-06-18 22:14:21 +02:00
keepvideo : Keep the video file after post - processing
daterange : A DateRange object , download only if the upload_date is in the range .
skip_download : Skip the actual download of the video file
2013-09-22 11:09:25 +02:00
cachedir : Location of the cache files in the filesystem .
2014-09-03 12:41:05 +02:00
False to disable filesystem cache .
2013-09-30 22:26:25 +02:00
noplaylist : Download single video instead of a playlist if in doubt .
2013-10-06 06:06:30 +02:00
age_limit : An integer representing the user ' s age in years.
Unsuitable videos for the given age are skipped .
2013-12-16 03:09:49 +01:00
min_views : An integer representing the minimum view count the video
must have in order to not be skipped .
Videos without view count information are always
downloaded . None for no limit .
max_views : An integer representing the maximum view count .
Videos that are more popular than that are not
downloaded .
Videos without view count information are always
downloaded . None for no limit .
download_archive : File name of a file where all downloads are recorded .
2013-10-06 04:27:09 +02:00
Videos already present in the file are not downloaded
again .
2013-11-22 19:57:52 +01:00
cookiefile : File name where cookies should be read from and dumped to .
2013-11-24 15:03:25 +01:00
nocheckcertificate : Do not verify SSL certificates
2014-03-21 00:33:53 +01:00
prefer_insecure : Use HTTP instead of HTTPS to retrieve information .
At the moment , this is only supported by YouTube .
2013-11-24 15:03:25 +01:00
proxy : URL of the proxy server to use
2016-07-03 17:23:48 +02:00
geo_verification_proxy : URL of the proxy to use for IP address verification
2018-05-19 18:53:24 +02:00
on geo - restricted sites .
2013-12-01 11:42:02 +01:00
socket_timeout : Time to wait for unresponsive hosts , in seconds
2013-12-09 04:08:51 +01:00
bidi_workaround : Work around buggy terminals without bidirectional text
support , using fridibi
2013-12-29 15:28:32 +01:00
debug_printtraffic : Print out sent and received HTTP traffic
2014-01-21 02:09:49 +01:00
include_ads : Download ads as well
2014-01-22 14:16:43 +01:00
default_search : Prepend this string if an input url is not valid .
' auto ' for elaborate guessing
2014-03-30 06:02:41 +02:00
encoding : Use this encoding instead of the system - specified .
2014-08-21 11:52:07 +02:00
extract_flat : Do not resolve URLs , return the immediate result .
2014-10-24 14:48:12 +02:00
Pass in ' in_playlist ' to only show this behavior for
playlist items .
2014-12-15 01:06:25 +01:00
postprocessors : A list of dictionaries , each with an entry
2014-12-15 01:26:18 +01:00
* key : The name of the postprocessor . See
youtube_dl / postprocessor / __init__ . py for a list .
2014-12-15 01:06:25 +01:00
as well as any further keyword arguments for the
postprocessor .
2014-12-15 01:26:18 +01:00
progress_hooks : A list of functions that get called on download
progress , with a dictionary with the entries
2015-02-17 21:37:48 +01:00
* status : One of " downloading " , " error " , or " finished " .
2015-01-25 06:15:51 +01:00
Check this first and ignore unknown values .
2014-12-15 01:26:18 +01:00
2015-02-17 21:37:48 +01:00
If status is one of " downloading " , or " finished " , the
2015-01-25 06:15:51 +01:00
following properties may also be present :
* filename : The final filename ( always present )
2015-02-17 21:37:48 +01:00
* tmpfilename : The filename we ' re currently writing to
2014-12-15 01:26:18 +01:00
* downloaded_bytes : Bytes on disk
* total_bytes : Size of the whole file , None if unknown
2015-02-17 21:37:48 +01:00
* total_bytes_estimate : Guess of the eventual file size ,
None if unavailable .
* elapsed : The number of seconds since download started .
2014-12-15 01:26:18 +01:00
* eta : The estimated time in seconds , None if unknown
* speed : The download speed in bytes / second , None if
unknown
2015-02-17 21:37:48 +01:00
* fragment_index : The counter of the currently
downloaded video fragment .
* fragment_count : The number of fragments ( = individual
files that will be merged )
2014-12-15 01:26:18 +01:00
Progress hooks are guaranteed to be called at least once
( with status " finished " ) if the download is successful .
2015-01-10 01:59:14 +01:00
merge_output_format : Extension to use when merging formats .
2015-01-10 05:45:51 +01:00
fixup : Automatically correct known faults of the file .
One of :
- " never " : do nothing
- " warn " : only emit a warning
- " detect_or_warn " : check whether we can do anything
2015-01-23 18:39:12 +01:00
about it , warn otherwise ( default )
2018-05-19 18:53:24 +02:00
source_address : Client - side IP address to bind to .
2016-01-10 19:27:22 +01:00
call_home : Boolean , true iff we are allowed to contact the
2015-01-10 21:09:15 +01:00
youtube - dl servers for debugging .
2016-08-08 22:46:52 +02:00
sleep_interval : Number of seconds to sleep before each download when
used alone or a lower bound of a range for randomized
sleep before each download ( minimum possible number
of seconds to sleep ) when used along with
max_sleep_interval .
max_sleep_interval : Upper bound of a range for randomized sleep before each
download ( maximum possible number of seconds to sleep ) .
Must only be used along with sleep_interval .
Actual sleep time will be a random float from range
[ sleep_interval ; max_sleep_interval ] .
2015-01-25 02:38:47 +01:00
listformats : Print an overview of available video formats and exit .
list_thumbnails : Print a table of all thumbnails and exit .
2015-02-10 03:32:21 +01:00
match_filter : A function that gets called with the info_dict of
every video .
If it returns a message , the video is ignored .
If it returns None , the video is downloaded .
match_filter_func in utils . py is one example for this .
2015-02-10 04:22:10 +01:00
no_color : Do not emit color codes in output .
2017-02-18 19:53:41 +01:00
geo_bypass : Bypass geographic restriction via faking X - Forwarded - For
2018-05-19 18:53:24 +02:00
HTTP header
2017-02-18 19:53:41 +01:00
geo_bypass_country :
2017-02-04 12:49:58 +01:00
Two - letter ISO 3166 - 2 country code that will be used for
explicit geographic restriction bypassing via faking
2018-05-19 18:53:24 +02:00
X - Forwarded - For HTTP header
2018-05-02 02:18:01 +02:00
geo_bypass_ip_block :
IP range in CIDR notation that will be used similarly to
2018-05-19 18:53:24 +02:00
geo_bypass_country
2014-12-15 01:26:18 +01:00
2015-02-17 12:09:12 +01:00
The following options determine which downloader is picked :
external_downloader : Executable of the external downloader to call .
None or unset for standard ( built - in ) downloader .
2016-04-21 19:02:17 +02:00
hls_prefer_native : Use the native HLS downloader instead of ffmpeg / avconv
if True , otherwise use ffmpeg / avconv if False , otherwise
use downloader suggested by extractor if None .
2013-10-22 14:49:34 +02:00
2013-06-18 22:14:21 +02:00
The following parameters are not used by YoutubeDL itself , they are used by
2015-03-02 15:06:09 +01:00
the downloader ( see youtube_dl / downloader / common . py ) :
2013-06-18 22:14:21 +02:00
nopart , updatetime , buffersize , ratelimit , min_filesize , max_filesize , test ,
2015-01-25 04:49:44 +01:00
noresizebuffer , retries , continuedl , noprogress , consoletitle ,
2018-02-03 20:53:50 +01:00
xattr_set_filesize , external_downloader_args , hls_use_mpegts ,
http_chunk_size .
2014-01-08 17:53:34 +01:00
The following options are used by the post processors :
2018-06-28 20:09:14 +02:00
prefer_ffmpeg : If False , use avconv instead of ffmpeg if both are available ,
otherwise prefer ffmpeg .
2019-04-01 20:29:44 +02:00
ffmpeg_location : Location of the ffmpeg / avconv binary ; either the path
to the binary or its containing directory .
2015-07-11 18:15:16 +02:00
postprocessor_args : A list of additional command - line arguments for the
postprocessor .
2017-09-30 17:56:40 +02:00
2017-09-27 19:46:48 +02:00
The following options are used by the Youtube extractor :
youtube_include_dash_manifest : If True ( default ) , DASH manifests and related
data will be downloaded and processed by extractor .
You can reduce network I / O by disabling it if you don ' t
care about DASH .
2013-06-18 22:14:21 +02:00
"""
2017-06-08 17:53:14 +02:00
# info_dict fields that are expected to hold numeric values; presumably
# consulted when filling the output template so string values for these
# keys get coerced -- TODO confirm against prepare_filename.
# Idiom fix: a set literal instead of set((...)) (flake8-comprehensions C405).
_NUMERIC_FIELDS = {
    'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
    'timestamp', 'upload_year', 'upload_month', 'upload_day',
    'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
    'average_rating', 'comment_count', 'age_limit',
    'start_time', 'end_time',
    'chapter_number', 'season_number', 'episode_number',
    'track_number', 'disc_number', 'release_year',
    'playlist_index',
}

# Class-level placeholders; the real values are assigned per instance
# in __init__.
params = None  # option dictionary
_ies = []  # registered InfoExtractors
_pps = []  # registered PostProcessors
_download_retcode = None
_num_downloads = None
_screen_file = None
2014-10-28 12:54:29 +01:00
def __init__(self, params=None, auto_init=True):
    """Create a FileDownloader object with the given options."""
    # Mutable-default pitfall avoided: params defaults to a fresh dict.
    if params is None:
        params = {}
    self._ies = []
    self._ies_instances = {}
    self._pps = []
    self._progress_hooks = []
    self._download_retcode = 0
    self._num_downloads = 0
    # Screen output goes to stderr instead of stdout when 'logtostderr' is set.
    self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
    self._err_file = sys.stderr
    self.params = {
        # Default parameters
        'nocheckcertificate': False,
    }
    self.params.update(params)
    self.cache = Cache(self)

    def check_deprecated(param, option, suggestion):
        # Warn when a deprecated option is present; returns True if it
        # was set so the caller can apply a fallback.
        if self.params.get(param) is not None:
            self.report_warning(
                '%s is deprecated. Use %s instead.' % (option, suggestion))
            return True
        return False

    if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
        # Honour the legacy option when the replacement is unset.
        if self.params.get('geo_verification_proxy') is None:
            self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

    check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
    check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s - %(title)s.%(ext)s"')
    check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')

    if params.get('bidi_workaround', False):
        # Pipe all screen output through an external bidi reordering tool
        # (bidiv, falling back to fribidi) over a pty so right-to-left
        # text renders correctly on terminals that lack bidi support.
        try:
            import pty
            master, slave = pty.openpty()
            width = compat_get_terminal_size().columns
            if width is None:
                width_args = []
            else:
                width_args = ['-w', str(width)]
            sp_kwargs = dict(
                stdin=subprocess.PIPE,
                stdout=slave,
                stderr=self._err_file)
            try:
                self._output_process = subprocess.Popen(
                    ['bidiv'] + width_args, **sp_kwargs
                )
            except OSError:
                # bidiv is missing -- try fribidi instead.
                self._output_process = subprocess.Popen(
                    ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
            self._output_channel = os.fdopen(master, 'rb')
        except OSError as ose:
            if ose.errno == errno.ENOENT:
                self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.')
            else:
                # Any other OS error is unexpected -- propagate it.
                raise

    if (sys.platform != 'win32'
            and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
            and not params.get('restrictfilenames', False)):
        # Unicode filesystem API will throw errors (#1474, #13027)
        self.report_warning(
            'Assuming --restrict-filenames since file system encoding '
            'cannot encode all characters. '
            'Set the LC_ALL environment variable to fix this.')
        self.params['restrictfilenames'] = True

    if isinstance(params.get('outtmpl'), bytes):
        self.report_warning(
            'Parameter outtmpl is bytes, but should be a unicode string. '
            'Put  from __future__ import unicode_literals  at the top of your code file or consider switching to Python 3.x.')

    self._setup_opener()

    if auto_init:
        self.print_debug_header()
        self.add_default_info_extractors()

    # Instantiate the postprocessors declared in params: each entry is a
    # dict whose 'key' selects the PP class; the remaining items are
    # passed through as keyword arguments.
    for pp_def_raw in self.params.get('postprocessors', []):
        pp_class = get_postprocessor(pp_def_raw['key'])
        pp_def = dict(pp_def_raw)
        del pp_def['key']
        pp = pp_class(self, **compat_kwargs(pp_def))
        self.add_post_processor(pp)

    for ph in self.params.get('progress_hooks', []):
        self.add_progress_hook(ph)

    register_socks_protocols()
2014-11-23 10:49:19 +01:00
def warn_if_short_id(self, argv):
    """Warn when an argument looks like a bare YouTube ID starting with '-'.

    Such IDs are indistinguishable from options; suggest a corrected
    command line that separates them with '--'.
    """
    id_pattern = re.compile(r'^-[0-9A-Za-z_-]{10}$')
    id_positions = [pos for pos, arg in enumerate(argv) if id_pattern.match(arg)]
    if not id_positions:
        return
    other_args = [arg for pos, arg in enumerate(argv) if pos not in id_positions]
    suggested = (
        ['youtube-dl']
        + other_args
        + ['--'] + [argv[pos] for pos in id_positions]
    )
    self.report_warning(
        'Long argument string detected. '
        'Use -- to separate parameters and URLs, like this:\n%s\n' %
        args_to_str(suggested))
2013-06-18 22:14:21 +02:00
def add_info_extractor(self, ie):
    """Append an InfoExtractor (class or instance) to the extractor list."""
    self._ies.append(ie)
    # Classes are registered lazily; only live instances are cached by
    # key and wired back to this downloader right away.
    is_instance = not isinstance(ie, type)
    if is_instance:
        self._ies_instances[ie.ie_key()] = ie
        ie.set_downloader(self)
2013-06-18 22:14:21 +02:00
2013-07-08 15:14:27 +02:00
def get_info_extractor(self, ie_key):
    """Return the IE instance registered under ie_key.

    On a cache miss a new instance is created via the module-level
    factory and added to the extractor list.
    """
    cached = self._ies_instances.get(ie_key)
    if cached is not None:
        return cached
    # Not instantiated yet: build one and register it for next time.
    new_ie = get_info_extractor(ie_key)()
    self.add_info_extractor(new_ie)
    return new_ie
2013-06-27 23:51:06 +02:00
def add_default_info_extractors(self):
    """Register every extractor class produced by gen_extractor_classes()."""
    for extractor_class in gen_extractor_classes():
        self.add_info_extractor(extractor_class)
2013-06-18 22:14:21 +02:00
def add_post_processor(self, pp):
    """Register a PostProcessor at the end of the chain and point it back here."""
    pp.set_downloader(self)
    self._pps.append(pp)
2013-12-23 10:37:27 +01:00
def add_progress_hook(self, ph):
    """Register a download progress hook (currently only used by the file downloader)."""
    self._progress_hooks.append(ph)
2013-09-23 18:09:28 +02:00
2013-12-09 18:29:07 +01:00
def _bidi_workaround(self, message):
    """Reorder bidirectional text in *message* through the external
    bidiv/fribidi process started in __init__ (no-op when disabled)."""
    # The channel only exists when __init__ successfully spawned the
    # helper process; otherwise pass the message through untouched.
    if not hasattr(self, '_output_channel'):
        return message

    assert hasattr(self, '_output_process')
    assert isinstance(message, compat_str)
    # Feed the message (plus a trailing newline) to the helper, then
    # read back exactly as many lines as we sent.
    line_count = message.count('\n') + 1
    self._output_process.stdin.write((message + '\n').encode('utf-8'))
    self._output_process.stdin.flush()
    res = ''.join(self._output_channel.readline().decode('utf-8')
                  for _ in range(line_count))
    # Strip the trailing newline that was added above.
    return res[:-len('\n')]
2013-12-09 18:29:07 +01:00
2013-06-18 22:14:21 +02:00
def to_screen(self, message, skip_eol=False):
    """Print message to the screen, suppressed when quiet mode is on."""
    # Delegates to to_stdout with quiet-checking enabled.
    return self.to_stdout(message, skip_eol, check_quiet=True)
2014-04-07 19:57:42 +02:00
def _write_string(self, s, out=None):
    """Write *s* to *out* using the output encoding configured in params."""
    encoding = self.params.get('encoding')
    write_string(s, out=out, encoding=encoding)
2014-04-07 19:57:42 +02:00
2013-12-09 04:08:51 +01:00
def to_stdout(self, message, skip_eol=False, check_quiet=False):
    """Send message to the configured logger if any, otherwise to the
    screen file (suppressed when check_quiet is set and quiet mode is on)."""
    logger = self.params.get('logger')
    if logger:
        logger.debug(message)
        return
    if check_quiet and self.params.get('quiet', False):
        return
    # Apply the bidi workaround (a no-op unless enabled) and terminate
    # the line unless the caller asked to skip the EOL.
    terminator = '' if skip_eol else '\n'
    output = self._bidi_workaround(message) + terminator
    self._write_string(output, self._screen_file)
2013-06-18 22:14:21 +02:00
def to_stderr(self, message):
    """Route message to the configured logger's error channel, or print
    it (bidi-reordered if enabled) to the error file."""
    assert isinstance(message, compat_str)
    logger = self.params.get('logger')
    if logger:
        logger.error(message)
        return
    output = self._bidi_workaround(message) + '\n'
    self._write_string(output, self._err_file)
2013-06-18 22:14:21 +02:00
2013-11-17 11:39:52 +01:00
def to_console_title(self, message):
    """Set the terminal/console window title to *message* when the
    'consoletitle' option is enabled."""
    if not self.params.get('consoletitle', False):
        return
    if compat_os_name == 'nt':
        # Only touch the Win32 console API when a console window exists.
        # c_wchar_p() might not be necessary if `message` is
        # already of type unicode()
        if ctypes.windll.kernel32.GetConsoleWindow():
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
    elif 'TERM' in os.environ:
        # xterm OSC 0 escape sequence sets both icon name and title.
        self._write_string('\033]0;%s\007' % message, self._screen_file)
2013-11-17 11:39:52 +01:00
2013-11-17 21:05:14 +01:00
def save_console_title(self):
    """Ask an xterm-compatible terminal to push the current window title
    onto its title stack (skipped when disabled or simulating)."""
    if not self.params.get('consoletitle', False) or self.params.get('simulate', False):
        return
    if compat_os_name != 'nt' and 'TERM' in os.environ:
        # Save the title on stack
        self._write_string('\033[22;0t', self._screen_file)
2013-11-17 21:05:14 +01:00
def restore_console_title(self):
    """Ask an xterm-compatible terminal to pop the previously saved
    window title from its title stack (skipped when disabled or simulating)."""
    if not self.params.get('consoletitle', False) or self.params.get('simulate', False):
        return
    if compat_os_name != 'nt' and 'TERM' in os.environ:
        # Restore the title from stack
        self._write_string('\033[23;0t', self._screen_file)
2013-11-17 21:05:14 +01:00
def __enter__(self):
    """Context-manager entry: save the console title, return self."""
    self.save_console_title()
    return self
def __exit__(self, *args):
    """Context-manager exit: restore the console title and, when a
    cookie file is configured, persist the cookie jar to disk."""
    self.restore_console_title()

    if self.params.get('cookiefile') is not None:
        # Keep session cookies and expired entries so the file round-trips.
        self.cookiejar.save(ignore_discard=True, ignore_expires=True)
2013-11-17 21:05:14 +01:00
2013-06-18 22:14:21 +02:00
def trouble(self, message=None, tb=None):
    """Determine action to take when a download problem appears.

    Depending on if the downloader has been configured to ignore
    download errors or not, this method may throw an exception or
    not when errors are found, after printing the message.

    tb, if given, is additional traceback information.
    """
    if message is not None:
        self.to_stderr(message)
    if self.params.get('verbose'):
        if tb is None:
            if sys.exc_info()[0]:  # if .trouble has been called from an except block
                tb = ''
                # ExtractorError carries the original exception in
                # .exc_info; include its traceback first.
                if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                    tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                tb += encode_compat_str(traceback.format_exc())
            else:
                # Not inside an except block: show the current call stack.
                tb_data = traceback.format_list(traceback.extract_stack())
                tb = ''.join(tb_data)
        self.to_stderr(tb)
    if not self.params.get('ignoreerrors', False):
        # Prefer the wrapped original exc_info when present so the
        # DownloadError points at the root cause.
        if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
            exc_info = sys.exc_info()[1].exc_info
        else:
            exc_info = sys.exc_info()
        raise DownloadError(message, exc_info)
    self._download_retcode = 1
def report_warning(self, message):
    """Emit *message* prefixed with 'WARNING:' to stderr, or hand it to
    the configured logger. The prefix is colored on a color-capable tty."""
    logger = self.params.get('logger')
    if logger is not None:
        logger.warning(message)
        return
    if self.params.get('no_warnings'):
        return
    use_color = (not self.params.get('no_color')
                 and self._err_file.isatty()
                 and compat_os_name != 'nt')
    header = '\033[0;33mWARNING:\033[0m' if use_color else 'WARNING:'
    self.to_stderr('%s %s' % (header, message))
2013-06-18 22:14:21 +02:00
def report_error(self, message, tb=None):
    '''
    Do the same as trouble, but prefixes the message with 'ERROR:', colored
    in red if stderr is a tty file.
    '''
    # Same color policy as report_warning: only on an interactive,
    # non-Windows stderr, and only unless --no-color was given.
    use_color = (not self.params.get('no_color')
                 and self._err_file.isatty()
                 and compat_os_name != 'nt')
    _msg_header = '\033[0;31mERROR:\033[0m' if use_color else 'ERROR:'
    # Delegate the actual reporting (and possible DownloadError raise)
    # to trouble().
    self.trouble('%s %s' % (_msg_header, message), tb)
def report_file_already_downloaded(self, file_name):
    """Report file has already been fully downloaded."""
    # The filename may contain characters the console encoding cannot
    # represent; fall back to a generic message in that case.
    try:
        self.to_screen(
            '[download] %s has already been downloaded' % file_name)
    except UnicodeEncodeError:
        self.to_screen('[download] The file has already been downloaded')
2013-06-18 22:14:21 +02:00
2019-10-13 18:00:48 +02:00
def report_file_delete(self, file_name):
    """Report that existing file will be deleted."""
    # As in report_file_already_downloaded, the name may not be
    # representable in the console encoding; degrade gracefully.
    try:
        self.to_screen(
            'Deleting already existent file %s' % file_name)
    except UnicodeEncodeError:
        self.to_screen('Deleting already existent file')
2013-06-18 22:14:21 +02:00
def prepare_filename(self, info_dict):
    """Generate the output filename.

    Builds a template dict from info_dict, patches the configured
    outtmpl for backward compatibility and missing numeric fields,
    expands environment variables/user dirs, and finally substitutes
    the template.  Returns the sanitized path, or None on a template
    error (after reporting it).
    """
    try:
        template_dict = dict(info_dict)

        # Synthetic fields available in the template.
        template_dict['epoch'] = int(time.time())
        autonumber_size = self.params.get('autonumber_size')
        if autonumber_size is None:
            autonumber_size = 5
        # autonumber counts downloads in this session, offset by
        # --autonumber-start (default 1).
        template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
        # Derive a human-readable resolution when the extractor did not
        # provide one.
        if template_dict.get('resolution') is None:
            if template_dict.get('width') and template_dict.get('height'):
                template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
            elif template_dict.get('height'):
                template_dict['resolution'] = '%sp' % template_dict['height']
            elif template_dict.get('width'):
                template_dict['resolution'] = '%dx?' % template_dict['width']

        # Sanitize every non-numeric value for filesystem use; id-like
        # fields get the laxer is_id treatment.
        sanitize = lambda k, v: sanitize_filename(
            compat_str(v),
            restricted=self.params.get('restrictfilenames'),
            is_id=(k == 'id' or k.endswith('_id')))
        # Drop None values and containers; they cannot appear in a filename.
        template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
                             for k, v in template_dict.items()
                             if v is not None and not isinstance(v, (list, tuple, dict)))
        # Unknown fields substitute as the literal string 'NA'.
        template_dict = collections.defaultdict(lambda: 'NA', template_dict)

        outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)

        # For fields playlist_index and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': len(str(template_dict['n_entries'])),
            'autonumber': autonumber_size,
        }
        FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
        mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
        if mobj:
            outtmpl = re.sub(
                FIELD_SIZE_COMPAT_RE,
                r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
                outtmpl)

        # Missing numeric fields used together with integer presentation types
        # in format specification will break the argument substitution since
        # string 'NA' is returned for missing fields. We will patch output
        # template for missing fields to meet string presentation type.
        for numeric_field in self._NUMERIC_FIELDS:
            if numeric_field not in template_dict:
                # As of [1] format syntax is:
                #  %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
                # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
                FORMAT_RE = r'''(?x)
                    (?<!%)
                    %
                    \({0}\)  # mapping key
                    (?:[#0\-+ ]+)?  # conversion flags (optional)
                    (?:\d+)?  # minimum field width (optional)
                    (?:\.\d+)?  # precision (optional)
                    [hlL]?  # length modifier (optional)
                    [diouxXeEfFgGcrs%]  # conversion type
                '''
                outtmpl = re.sub(
                    FORMAT_RE.format(numeric_field),
                    r'%({0})s'.format(numeric_field), outtmpl)

        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        filename = expand_path(outtmpl).replace(sep, '') % template_dict

        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to workaround encoding issues with subprocess on python2 @ Windows
        if sys.version_info < (3, 0) and sys.platform == 'win32':
            filename = encodeFilename(filename, True).decode(preferredencoding())
        return sanitize_path(filename)
    except ValueError as err:
        self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
        return None
2015-02-10 11:28:28 +01:00
def _match_entry(self, info_dict, incomplete):
    """ Returns None iff the file should be downloaded """
    # Otherwise the return value is a human-readable string explaining
    # why the entry was skipped.  `incomplete` is True when info_dict is
    # a not-yet-extracted playlist entry, in which case the user-supplied
    # match_filter is not applied (it may need full metadata).

    video_title = info_dict.get('title', info_dict.get('id', 'video'))
    if 'title' in info_dict:
        # This can happen when we're just evaluating the playlist
        title = info_dict['title']
        # --match-title: skip unless the regex matches.
        matchtitle = self.params.get('matchtitle', False)
        if matchtitle:
            if not re.search(matchtitle, title, re.IGNORECASE):
                return '"' + title + '" title did not match pattern "' + matchtitle + '"'
        # --reject-title: skip when the regex matches.
        rejecttitle = self.params.get('rejecttitle', False)
        if rejecttitle:
            if re.search(rejecttitle, title, re.IGNORECASE):
                return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
    # --date/--datebefore/--dateafter range check.
    date = info_dict.get('upload_date')
    if date is not None:
        dateRange = self.params.get('daterange', DateRange())
        if date not in dateRange:
            return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
    # --min-views / --max-views check (only when the extractor reported
    # a view count at all).
    view_count = info_dict.get('view_count')
    if view_count is not None:
        min_views = self.params.get('min_views')
        if min_views is not None and view_count < min_views:
            return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
        max_views = self.params.get('max_views')
        if max_views is not None and view_count > max_views:
            return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
    # --age-limit check.
    if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
        return 'Skipping "%s" because it is age restricted' % video_title
    # --download-archive check.
    if self.in_download_archive(info_dict):
        return '%s has already been recorded in archive' % video_title

    if not incomplete:
        # User-supplied --match-filter callable; returns a skip reason
        # or None.
        match_filter = self.params.get('match_filter')
        if match_filter is not None:
            ret = match_filter(info_dict)
            if ret is not None:
                return ret

    return None
2013-10-22 14:49:34 +02:00
2013-11-03 11:56:45 +01:00
@staticmethod
def add_extra_info(info_dict, extra_info):
    '''Set the keys from extra_info in info dict if they are missing'''
    # Existing keys in info_dict always win; only absent keys are filled.
    for key in extra_info:
        if key not in info_dict:
            info_dict[key] = extra_info[key]
2013-12-05 14:29:08 +01:00
def extract_info(self, url, download=True, ie_key=None, extra_info={},
                 process=True, force_generic_extractor=False):
    '''
    Returns a list with a dictionary for each video we find.
    If 'download', also downloads the videos.
    extra_info is a dict containing the extra values to add to each result
    '''

    # --force-generic-extractor bypasses extractor selection entirely.
    if not ie_key and force_generic_extractor:
        ie_key = 'Generic'

    if ie_key:
        ies = [self.get_info_extractor(ie_key)]
    else:
        # Try every registered extractor in order; first suitable one wins.
        ies = self._ies

    for ie in ies:
        if not ie.suitable(url):
            continue

        ie = self.get_info_extractor(ie.ie_key())
        if not ie.working():
            self.report_warning('The program functionality for this site has been marked as broken, '
                                'and will probably not work.')

        try:
            ie_result = ie.extract(url)
            if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                break
            if isinstance(ie_result, list):
                # Backwards compatibility: old IE result format
                ie_result = {
                    '_type': 'compat_list',
                    'entries': ie_result,
                }
            self.add_default_extra_info(ie_result, ie, url)
            if process:
                return self.process_ie_result(ie_result, download, extra_info)
            else:
                return ie_result
        except GeoRestrictedError as e:
            # Tell the user which countries the video is available in,
            # when known, and suggest a workaround.
            msg = e.msg
            if e.countries:
                msg += '\nThis video is available in %s.' % ', '.join(
                    map(ISO3166Utils.short2full, e.countries))
            msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
            self.report_error(msg)
            break
        except ExtractorError as e:  # An error we somewhat expected
            self.report_error(compat_str(e), e.format_traceback())
            break
        except MaxDownloadsReached:
            # --max-downloads limit reached: propagate to the caller.
            raise
        except Exception as e:
            if self.params.get('ignoreerrors', False):
                # With -i, report and move on instead of aborting.
                self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                break
            else:
                raise
    else:
        # for/else: no extractor accepted the URL.
        self.report_error('no suitable InfoExtractor for URL %s' % url)
2013-10-22 14:49:34 +02:00
2014-03-23 16:06:03 +01:00
def add_default_extra_info(self, ie_result, ie, url):
    """Fill missing extractor/URL bookkeeping fields into ie_result."""
    # add_extra_info only sets keys that are not already present, so an
    # extractor's own values are never overwritten.
    defaults = {
        'extractor': ie.IE_NAME,
        'webpage_url': url,
        'webpage_url_basename': url_basename(url),
        'extractor_key': ie.ie_key(),
    }
    self.add_extra_info(ie_result, defaults)
2013-06-18 22:14:21 +02:00
def process_ie_result(self, ie_result, download=True, extra_info={}):
    """
    Take the result of the ie (may be modified) and resolve all unresolved
    references (URLs, playlist items).
    It will also download the videos if 'download'.
    Returns the resolved ie_result.
    """
    # Dispatch on the result type: 'video' (final), 'url'/'url_transparent'
    # (needs another extraction pass), 'playlist'/'multi_video' (recurse
    # per entry) or legacy 'compat_list'.
    result_type = ie_result.get('_type', 'video')

    if result_type in ('url', 'url_transparent'):
        ie_result['url'] = sanitize_url(ie_result['url'])
        extract_flat = self.params.get('extract_flat', False)
        # With --flat-playlist (or extract_flat=True) do not resolve the
        # URL further; just print the requested fields and return as-is.
        if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                or extract_flat is True):
            self.__forced_printings(
                ie_result, self.prepare_filename(ie_result),
                incomplete=True)
            return ie_result

    if result_type == 'video':
        self.add_extra_info(ie_result, extra_info)
        return self.process_video_result(ie_result, download=download)
    elif result_type == 'url':
        # We have to add extra_info to the results because it may be
        # contained in a playlist
        return self.extract_info(ie_result['url'],
                                 download,
                                 ie_key=ie_result.get('ie_key'),
                                 extra_info=extra_info)
    elif result_type == 'url_transparent':
        # Use the information from the embedding page
        info = self.extract_info(
            ie_result['url'], ie_key=ie_result.get('ie_key'),
            extra_info=extra_info, download=False, process=False)

        # extract_info may return None when ignoreerrors is enabled and
        # extraction failed with an error, don't crash and return early
        # in this case
        if not info:
            return info

        # Non-None outer metadata overrides the inner result, except for
        # identity fields that must come from the inner extraction.
        force_properties = dict(
            (k, v) for k, v in ie_result.items() if v is not None)
        for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
            if f in force_properties:
                del force_properties[f]
        new_result = info.copy()
        new_result.update(force_properties)

        # Extracted info may not be a video result (i.e.
        # info.get('_type', 'video') != video) but rather an url or
        # url_transparent. In such cases outer metadata (from ie_result)
        # should be propagated to inner one (info). For this to happen
        # _type of info should be overridden with url_transparent. This
        # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
        if new_result.get('_type') == 'url':
            new_result['_type'] = 'url_transparent'

        return self.process_ie_result(
            new_result, download=download, extra_info=extra_info)
    elif result_type in ('playlist', 'multi_video'):
        # We process each entry in the playlist
        playlist = ie_result.get('title') or ie_result.get('id')
        self.to_screen('[download] Downloading playlist: %s' % playlist)

        playlist_results = []

        # --playlist-start is 1-based on the command line; 0-based here.
        playliststart = self.params.get('playliststart', 1) - 1
        playlistend = self.params.get('playlistend')
        # For backwards compatibility, interpret -1 as whole list
        if playlistend == -1:
            playlistend = None

        # --playlist-items: explicit 1-based indices / ranges, e.g. "1,3-5".
        playlistitems_str = self.params.get('playlist_items')
        playlistitems = None
        if playlistitems_str is not None:
            def iter_playlistitems(format):
                for string_segment in format.split(','):
                    if '-' in string_segment:
                        start, end = string_segment.split('-')
                        for item in range(int(start), int(end) + 1):
                            yield int(item)
                    else:
                        yield int(string_segment)
            # orderedSet drops duplicates while keeping first-seen order,
            # so '2-4,3' yields [2, 3, 4].
            playlistitems = orderedSet(iter_playlistitems(playlistitems_str))

        ie_entries = ie_result['entries']

        def make_playlistitems_entries(list_ie_entries):
            # Select the requested 1-based indices, silently dropping
            # out-of-range ones (negative indices count from the end).
            num_entries = len(list_ie_entries)
            return [
                list_ie_entries[i - 1] for i in playlistitems
                if -num_entries <= i - 1 < num_entries]

        def report_download(num_entries):
            self.to_screen(
                '[%s] playlist %s: Downloading %d videos' %
                (ie_result['extractor'], playlist, num_entries))

        # Materialize the selected slice of entries; the strategy depends
        # on whether the extractor gave us a list, a PagedList or a
        # generic (lazy) iterable.
        if isinstance(ie_entries, list):
            n_all_entries = len(ie_entries)
            if playlistitems:
                entries = make_playlistitems_entries(ie_entries)
            else:
                entries = ie_entries[playliststart:playlistend]
            n_entries = len(entries)
            self.to_screen(
                '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
                (ie_result['extractor'], playlist, n_all_entries, n_entries))
        elif isinstance(ie_entries, PagedList):
            if playlistitems:
                entries = []
                for item in playlistitems:
                    entries.extend(ie_entries.getslice(
                        item - 1, item
                    ))
            else:
                entries = ie_entries.getslice(
                    playliststart, playlistend)
            n_entries = len(entries)
            report_download(n_entries)
        else:  # iterable
            if playlistitems:
                # Consume just enough of the iterable to cover the
                # largest requested index.
                entries = make_playlistitems_entries(list(itertools.islice(
                    ie_entries, 0, max(playlistitems))))
            else:
                entries = list(itertools.islice(
                    ie_entries, playliststart, playlistend))
            n_entries = len(entries)
            report_download(n_entries)

        if self.params.get('playlistreverse', False):
            entries = entries[::-1]

        if self.params.get('playlistrandom', False):
            random.shuffle(entries)

        x_forwarded_for = ie_result.get('__x_forwarded_for_ip')

        for i, entry in enumerate(entries, 1):
            self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
            # This __x_forwarded_for_ip thing is a bit ugly but requires
            # minimal changes
            if x_forwarded_for:
                entry['__x_forwarded_for_ip'] = x_forwarded_for
            # Per-entry metadata inherited from the playlist result.
            extra = {
                'n_entries': n_entries,
                'playlist': playlist,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'playlist_index': playlistitems[i - 1] if playlistitems else i + playliststart,
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            }

            # Apply --match-title/--daterange/archive/... filters before
            # spending time on extraction/download.
            reason = self._match_entry(entry, incomplete=True)
            if reason is not None:
                self.to_screen('[download] ' + reason)
                continue

            entry_result = self.process_ie_result(entry,
                                                  download=download,
                                                  extra_info=extra)
            playlist_results.append(entry_result)
        ie_result['entries'] = playlist_results
        self.to_screen('[download] Finished downloading playlist: %s' % playlist)
        return ie_result
    elif result_type == 'compat_list':
        self.report_warning(
            'Extractor %s returned a compat_list result. '
            'It needs to be updated.' % ie_result.get('extractor'))

        def _fixup(r):
            self.add_extra_info(
                r,
                {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                }
            )
            return r

        ie_result['entries'] = [
            self.process_ie_result(_fixup(r), download, extra_info)
            for r in ie_result['entries']
        ]
        return ie_result
    else:
        raise Exception('Invalid result type: %s' % result_type)
2015-06-28 22:08:29 +02:00
def _build_format_filter ( self , filter_spec ) :
" Returns a function to filter the formats according to the filter_spec "
2015-01-23 00:04:05 +01:00
OPERATORS = {
' < ' : operator . lt ,
' <= ' : operator . le ,
' > ' : operator . gt ,
' >= ' : operator . ge ,
' = ' : operator . eq ,
' != ' : operator . ne ,
}
2015-06-28 22:08:29 +02:00
operator_rex = re . compile ( r ''' (?x) \ s*
2018-02-10 10:42:45 +01:00
( ? P < key > width | height | tbr | abr | vbr | asr | filesize | filesize_approx | fps )
2015-01-23 00:04:05 +01:00
\s * ( ? P < op > % s ) ( ? P < none_inclusive > \s * \? ) ? \s *
( ? P < value > [ 0 - 9. ] + ( ? : [ kKmMgGtTpPeEzZyY ] i ? [ Bb ] ? ) ? )
2015-06-28 22:08:29 +02:00
$
2015-01-23 00:04:05 +01:00
''' % ' | ' .join(map(re.escape, OPERATORS.keys())))
2015-06-28 22:08:29 +02:00
m = operator_rex . search ( filter_spec )
2015-02-08 20:07:43 +01:00
if m :
try :
comparison_value = int ( m . group ( ' value ' ) )
except ValueError :
comparison_value = parse_filesize ( m . group ( ' value ' ) )
if comparison_value is None :
comparison_value = parse_filesize ( m . group ( ' value ' ) + ' B ' )
if comparison_value is None :
raise ValueError (
' Invalid value %r in format specification %r ' % (
2015-06-28 22:08:29 +02:00
m . group ( ' value ' ) , filter_spec ) )
2015-02-08 20:07:43 +01:00
op = OPERATORS [ m . group ( ' op ' ) ]
2015-01-23 00:04:05 +01:00
if not m :
2015-02-08 20:07:43 +01:00
STR_OPERATORS = {
' = ' : operator . eq ,
2016-01-13 09:24:48 +01:00
' ^= ' : lambda attr , value : attr . startswith ( value ) ,
' $= ' : lambda attr , value : attr . endswith ( value ) ,
' *= ' : lambda attr , value : value in attr ,
2015-02-08 20:07:43 +01:00
}
2015-06-28 22:08:29 +02:00
str_operator_rex = re . compile ( r ''' (?x)
2016-03-18 08:33:03 +01:00
\s * ( ? P < key > ext | acodec | vcodec | container | protocol | format_id )
2019-01-20 07:48:09 +01:00
\s * ( ? P < negation > ! \s * ) ? ( ? P < op > % s ) ( ? P < none_inclusive > \s * \? ) ?
2016-01-28 12:07:15 +01:00
\s * ( ? P < value > [ a - zA - Z0 - 9. _ - ] + )
2015-06-28 22:08:29 +02:00
\s * $
2015-02-08 20:07:43 +01:00
''' % ' | ' .join(map(re.escape, STR_OPERATORS.keys())))
2015-06-28 22:08:29 +02:00
m = str_operator_rex . search ( filter_spec )
2015-02-08 20:07:43 +01:00
if m :
comparison_value = m . group ( ' value ' )
2019-01-20 07:48:09 +01:00
str_op = STR_OPERATORS [ m . group ( ' op ' ) ]
if m . group ( ' negation ' ) :
2019-01-23 19:34:41 +01:00
op = lambda attr , value : not str_op ( attr , value )
2019-01-20 07:48:09 +01:00
else :
op = str_op
2015-01-23 00:04:05 +01:00
2015-02-08 20:07:43 +01:00
if not m :
2015-06-28 22:08:29 +02:00
raise ValueError ( ' Invalid filter specification %r ' % filter_spec )
2015-01-23 00:04:05 +01:00
def _filter ( f ) :
actual_value = f . get ( m . group ( ' key ' ) )
if actual_value is None :
return m . group ( ' none_inclusive ' )
return op ( actual_value , comparison_value )
2015-06-28 22:08:29 +02:00
return _filter
2017-07-22 19:12:01 +02:00
def _default_format_spec(self, info_dict, download=True):
    """Choose the default format selection string for info_dict."""

    def _merger_usable():
        # Merging bestvideo+bestaudio requires a working ffmpeg/avconv.
        merger = FFmpegMergerPP(self)
        return merger.available and merger.can_merge()

    def _single_best_preferred():
        # When simulating or not downloading, merging never happens, so
        # there is no reason to prefer the pre-merged single file.
        if self.params.get('simulate', False) or not download:
            return False
        # Streaming to stdout, live streams, and a missing merger all
        # rule out the merge path.
        return (self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-'
                or bool(info_dict.get('is_live'))
                or not _merger_usable())

    spec_parts = ['bestvideo+bestaudio', 'best']
    if _single_best_preferred():
        spec_parts.reverse()
    return '/'.join(spec_parts)
2015-06-28 22:08:29 +02:00
def build_format_selector ( self , format_spec ) :
def syntax_error ( note , start ) :
message = (
' Invalid format specification: '
' {0} \n \t {1} \n \t {2} ^ ' . format ( note , format_spec , ' ' * start [ 1 ] ) )
return SyntaxError ( message )
PICKFIRST = ' PICKFIRST '
MERGE = ' MERGE '
SINGLE = ' SINGLE '
2015-06-29 12:42:02 +02:00
GROUP = ' GROUP '
2015-06-28 22:08:29 +02:00
FormatSelector = collections . namedtuple ( ' FormatSelector ' , [ ' type ' , ' selector ' , ' filters ' ] )
def _parse_filter ( tokens ) :
filter_parts = [ ]
for type , string , start , _ , _ in tokens :
if type == tokenize . OP and string == ' ] ' :
return ' ' . join ( filter_parts )
else :
filter_parts . append ( string )
2015-08-04 22:29:23 +02:00
def _remove_unused_ops ( tokens ) :
2015-11-20 18:21:46 +01:00
# Remove operators that we don't use and join them with the surrounding strings
2015-08-04 22:29:23 +02:00
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ( ' / ' , ' + ' , ' , ' , ' ( ' , ' ) ' )
last_string , last_start , last_end , last_line = None , None , None , None
for type , string , start , end , line in tokens :
if type == tokenize . OP and string == ' [ ' :
if last_string :
yield tokenize . NAME , last_string , last_start , last_end , last_line
last_string = None
yield type , string , start , end , line
# everything inside brackets will be handled by _parse_filter
for type , string , start , end , line in tokens :
yield type , string , start , end , line
if type == tokenize . OP and string == ' ] ' :
break
elif type == tokenize . OP and string in ALLOWED_OPS :
if last_string :
yield tokenize . NAME , last_string , last_start , last_end , last_line
last_string = None
yield type , string , start , end , line
elif type in [ tokenize . NAME , tokenize . NUMBER , tokenize . OP ] :
if not last_string :
last_string = string
last_start = start
last_end = end
else :
last_string + = string
if last_string :
yield tokenize . NAME , last_string , last_start , last_end , last_line
2015-06-30 19:45:42 +02:00
def _parse_format_selection ( tokens , inside_merge = False , inside_choice = False , inside_group = False ) :
2015-06-28 22:08:29 +02:00
selectors = [ ]
current_selector = None
for type , string , start , _ , _ in tokens :
# ENCODING is only defined in python 3.x
if type == getattr ( tokenize , ' ENCODING ' , None ) :
continue
elif type in [ tokenize . NAME , tokenize . NUMBER ] :
current_selector = FormatSelector ( SINGLE , string , [ ] )
elif type == tokenize . OP :
2015-06-30 19:45:42 +02:00
if string == ' ) ' :
if not inside_group :
# ')' will be handled by the parentheses group
tokens . restore_last_token ( )
2015-06-28 22:08:29 +02:00
break
2015-06-30 19:45:42 +02:00
elif inside_merge and string in [ ' / ' , ' , ' ] :
2015-06-29 12:42:02 +02:00
tokens . restore_last_token ( )
break
2015-06-30 19:45:42 +02:00
elif inside_choice and string == ' , ' :
tokens . restore_last_token ( )
break
elif string == ' , ' :
2015-07-10 22:46:25 +02:00
if not current_selector :
raise syntax_error ( ' " , " must follow a format selector ' , start )
2015-06-28 22:08:29 +02:00
selectors . append ( current_selector )
current_selector = None
elif string == ' / ' :
2015-08-03 23:04:11 +02:00
if not current_selector :
raise syntax_error ( ' " / " must follow a format selector ' , start )
2015-06-28 22:08:29 +02:00
first_choice = current_selector
2015-06-30 19:45:42 +02:00
second_choice = _parse_format_selection ( tokens , inside_choice = True )
2015-07-04 21:30:26 +02:00
current_selector = FormatSelector ( PICKFIRST , ( first_choice , second_choice ) , [ ] )
2015-06-28 22:08:29 +02:00
elif string == ' [ ' :
if not current_selector :
current_selector = FormatSelector ( SINGLE , ' best ' , [ ] )
format_filter = _parse_filter ( tokens )
current_selector . filters . append ( format_filter )
2015-06-29 12:42:02 +02:00
elif string == ' ( ' :
if current_selector :
raise syntax_error ( ' Unexpected " ( " ' , start )
2015-06-30 19:45:42 +02:00
group = _parse_format_selection ( tokens , inside_group = True )
current_selector = FormatSelector ( GROUP , group , [ ] )
2015-06-28 22:08:29 +02:00
elif string == ' + ' :
video_selector = current_selector
2015-06-30 19:45:42 +02:00
audio_selector = _parse_format_selection ( tokens , inside_merge = True )
2015-07-10 22:46:25 +02:00
if not video_selector or not audio_selector :
raise syntax_error ( ' " + " must be between two format selectors ' , start )
2015-06-30 19:45:42 +02:00
current_selector = FormatSelector ( MERGE , ( video_selector , audio_selector ) , [ ] )
2015-06-28 22:08:29 +02:00
else :
raise syntax_error ( ' Operator not recognized: " {0} " ' . format ( string ) , start )
elif type == tokenize . ENDMARKER :
break
if current_selector :
selectors . append ( current_selector )
return selectors
def _build_selector_function(selector):
    # Recursively compile a parsed format-selector tree into a callable.
    # The returned function takes a context dict ({'formats': [...],
    # 'incomplete_formats': bool}) and yields/returns the matching formats.
    if isinstance(selector, list):
        # A bare list means "yield everything each sub-selector yields,
        # in order" (selectors separated by ',').
        fs = [_build_selector_function(s) for s in selector]

        def selector_function(ctx):
            for f in fs:
                for format in f(ctx):
                    yield format
        return selector_function
    elif selector.type == GROUP:
        # Parenthesized group: delegate to the inner selector.
        selector_function = _build_selector_function(selector.selector)
    elif selector.type == PICKFIRST:
        # 'a/b/c': return the results of the first sub-selector that
        # produces any format at all.
        fs = [_build_selector_function(s) for s in selector.selector]

        def selector_function(ctx):
            for f in fs:
                picked_formats = list(f(ctx))
                if picked_formats:
                    return picked_formats
            return []
    elif selector.type == SINGLE:
        # A single keyword ('best', 'bestaudio', ...), an extension,
        # or a literal format_id.
        format_spec = selector.selector

        def selector_function(ctx):
            formats = list(ctx['formats'])
            if not formats:
                return
            if format_spec == 'all':
                for f in formats:
                    yield f
            elif format_spec in ['best', 'worst', None]:
                # Formats are assumed sorted worst-to-best, so 'worst' is
                # index 0 and 'best' is index -1.
                format_idx = 0 if format_spec == 'worst' else -1
                audiovideo_formats = [
                    f for f in formats
                    if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
                if audiovideo_formats:
                    yield audiovideo_formats[format_idx]
                # for extractors with incomplete formats (audio only (soundcloud)
                # or video only (imgur)) we will fallback to best/worst
                # {video,audio}-only format
                elif ctx['incomplete_formats']:
                    yield formats[format_idx]
            elif format_spec == 'bestaudio':
                audio_formats = [
                    f for f in formats
                    if f.get('vcodec') == 'none']
                if audio_formats:
                    yield audio_formats[-1]
            elif format_spec == 'worstaudio':
                audio_formats = [
                    f for f in formats
                    if f.get('vcodec') == 'none']
                if audio_formats:
                    yield audio_formats[0]
            elif format_spec == 'bestvideo':
                video_formats = [
                    f for f in formats
                    if f.get('acodec') == 'none']
                if video_formats:
                    yield video_formats[-1]
            elif format_spec == 'worstvideo':
                video_formats = [
                    f for f in formats
                    if f.get('acodec') == 'none']
                if video_formats:
                    yield video_formats[0]
            else:
                # Known extension -> match by 'ext'; anything else is
                # treated as a literal format_id.
                extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
                if format_spec in extensions:
                    filter_f = lambda f: f['ext'] == format_spec
                else:
                    filter_f = lambda f: f['format_id'] == format_spec
                matches = list(filter(filter_f, formats))
                if matches:
                    yield matches[-1]
    elif selector.type == MERGE:
        # 'video+audio': merge each (video, audio) pair into a synthetic
        # format dict that carries both requested formats.
        def _merge(formats_info):
            format_1, format_2 = [f['format_id'] for f in formats_info]
            # The first format must contain the video and the
            # second the audio
            if formats_info[0].get('vcodec') == 'none':
                self.report_error('The first format must '
                                  'contain the video, try using '
                                  '"-f %s+%s"' % (format_2, format_1))
                return
            # Formats must be opposite (video+audio)
            if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
                self.report_error(
                    'Both formats %s and %s are video-only, you must specify "-f video+audio"'
                    % (format_1, format_2))
                return
            output_ext = (
                formats_info[0]['ext']
                if self.params.get('merge_output_format') is None
                else self.params['merge_output_format'])
            # Video-related fields come from the first format, audio-related
            # fields from the second.
            return {
                'requested_formats': formats_info,
                'format': '%s+%s' % (formats_info[0].get('format'),
                                     formats_info[1].get('format')),
                'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                        formats_info[1].get('format_id')),
                'width': formats_info[0].get('width'),
                'height': formats_info[0].get('height'),
                'resolution': formats_info[0].get('resolution'),
                'fps': formats_info[0].get('fps'),
                'vcodec': formats_info[0].get('vcodec'),
                'vbr': formats_info[0].get('vbr'),
                'stretched_ratio': formats_info[0].get('stretched_ratio'),
                'acodec': formats_info[1].get('acodec'),
                'abr': formats_info[1].get('abr'),
                'ext': output_ext,
            }
        video_selector, audio_selector = map(_build_selector_function, selector.selector)

        def selector_function(ctx):
            # deepcopy so the two sides (and their filters) cannot mutate
            # each other's view of the context.
            for pair in itertools.product(
                    video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
                yield _merge(pair)

    # Apply the selector's attached filters ([height<=480] etc.) before
    # running the selector itself.
    filters = [self._build_format_filter(f) for f in selector.filters]

    def final_selector(ctx):
        ctx_copy = copy.deepcopy(ctx)
        for _filter in filters:
            ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
        return selector_function(ctx_copy)
    return final_selector
2015-01-23 00:04:05 +01:00
2015-06-28 22:08:29 +02:00
stream = io . BytesIO ( format_spec . encode ( ' utf-8 ' ) )
2015-06-29 12:42:02 +02:00
try :
2015-08-04 22:29:23 +02:00
tokens = list ( _remove_unused_ops ( compat_tokenize_tokenize ( stream . readline ) ) )
2015-06-29 12:42:02 +02:00
except tokenize . TokenError :
raise syntax_error ( ' Missing closing/opening brackets or parenthesis ' , ( 0 , len ( format_spec ) ) )
class TokenIterator(object):
    """Iterator over a pre-tokenized format spec with one-token pushback.

    Wraps a token list and exposes restore_last_token(), which rewinds
    the cursor so the previously returned token is produced again.
    """

    def __init__(self, tokens):
        self.tokens = tokens
        self.counter = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.counter >= len(self.tokens):
            raise StopIteration()
        self.counter += 1
        return self.tokens[self.counter - 1]

    # Python 2 iterator protocol spells this next()
    next = __next__

    def restore_last_token(self):
        # Step the cursor back one token so it is re-emitted by next()
        self.counter -= 1
parsed_selector = _parse_format_selection ( iter ( TokenIterator ( tokens ) ) )
2015-06-28 22:08:29 +02:00
return _build_selector_function ( parsed_selector )
2013-10-21 13:19:58 +02:00
2015-01-24 18:52:26 +01:00
def _calc_headers(self, info_dict):
    """Build the HTTP headers to use when downloading this info_dict.

    Starts from the global std_headers, layers the extractor-provided
    'http_headers' on top, then adds Cookie and X-Forwarded-For headers
    when applicable.
    """
    headers = std_headers.copy()

    extra_headers = info_dict.get('http_headers')
    if extra_headers:
        headers.update(extra_headers)

    cookie_header = self._calc_cookies(info_dict)
    if cookie_header:
        headers['Cookie'] = cookie_header

    # Propagate the geo-bypass fake source IP unless the extractor
    # already set its own X-Forwarded-For.
    if 'X-Forwarded-For' not in headers:
        fake_ip = info_dict.get('__x_forwarded_for_ip')
        if fake_ip:
            headers['X-Forwarded-For'] = fake_ip

    return headers
def _calc_cookies(self, info_dict):
    """Return the Cookie header value the cookiejar would send for this URL."""
    request = sanitized_Request(info_dict['url'])
    self.cookiejar.add_cookie_header(request)
    return request.get_header('Cookie')
2015-01-24 18:52:26 +01:00
2013-07-02 10:08:58 +02:00
def process_video_result(self, info_dict, download=True):
    """Sanitize a resolved video result, select formats and (optionally) download.

    Mutates info_dict in place (field coercion, thumbnails, subtitles,
    format normalization), runs the format selector over the available
    formats and, when download is True, hands each selected format to
    process_info(). Returns info_dict updated with the best selected
    format. Raises ExtractorError on missing mandatory fields, empty
    format lists or when no format matches the requested spec.
    """
    assert info_dict.get('_type', 'video') == 'video'

    if 'id' not in info_dict:
        raise ExtractorError('Missing "id" field in extractor result')
    if 'title' not in info_dict:
        raise ExtractorError('Missing "title" field in extractor result')

    def report_force_conversion(field, field_not, conversion):
        # Warn that the extractor produced a wrongly-typed field.
        self.report_warning(
            '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
            % (field, field_not, conversion))

    def sanitize_string_field(info, string_field):
        # Coerce a non-string field to compat_str, warning about it.
        field = info.get(string_field)
        if field is None or isinstance(field, compat_str):
            return
        report_force_conversion(string_field, 'a string', 'string')
        info[string_field] = compat_str(field)

    def sanitize_numeric_fields(info):
        # Coerce every known numeric field to int, warning about it.
        for numeric_field in self._NUMERIC_FIELDS:
            field = info.get(numeric_field)
            if field is None or isinstance(field, compat_numeric_types):
                continue
            report_force_conversion(numeric_field, 'numeric', 'int')
            info[numeric_field] = int_or_none(field)

    sanitize_string_field(info_dict, 'id')
    sanitize_numeric_fields(info_dict)

    if 'playlist' not in info_dict:
        # It isn't part of a playlist
        info_dict['playlist'] = None
        info_dict['playlist_index'] = None

    thumbnails = info_dict.get('thumbnails')
    if thumbnails is None:
        # Promote a single 'thumbnail' URL into the 'thumbnails' list form.
        thumbnail = info_dict.get('thumbnail')
        if thumbnail:
            info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
    if thumbnails:
        # Sort worst-to-best; missing keys sort first (-1 / '').
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '', t.get('url')))
        for i, t in enumerate(thumbnails):
            t['url'] = sanitize_url(t['url'])
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            if t.get('id') is None:
                t['id'] = '%d' % i

    if self.params.get('list_thumbnails'):
        self.list_thumbnails(info_dict)
        return

    thumbnail = info_dict.get('thumbnail')
    if thumbnail:
        info_dict['thumbnail'] = sanitize_url(thumbnail)
    elif thumbnails:
        # Thumbnails are sorted worst-to-best, so [-1] is the best one.
        info_dict['thumbnail'] = thumbnails[-1]['url']

    if 'display_id' not in info_dict and 'id' in info_dict:
        info_dict['display_id'] = info_dict['id']

    if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
        # Working around out-of-range timestamp values (e.g. negative ones on Windows,
        # see http://bugs.python.org/issue1646728)
        try:
            upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
        except (ValueError, OverflowError, OSError):
            pass

    # Auto generate title fields corresponding to the *_number fields when missing
    # in order to always have clean titles. This is very common for TV series.
    for field in ('chapter', 'season', 'episode'):
        if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
            info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

    # Normalize subtitle/caption entries: sanitize URLs and fill in
    # missing 'ext' from the URL.
    for cc_kind in ('subtitles', 'automatic_captions'):
        cc = info_dict.get(cc_kind)
        if cc:
            for _, subtitle in cc.items():
                for subtitle_format in subtitle:
                    if subtitle_format.get('url'):
                        subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                    if subtitle_format.get('ext') is None:
                        subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

    automatic_captions = info_dict.get('automatic_captions')
    subtitles = info_dict.get('subtitles')

    if self.params.get('listsubtitles', False):
        if 'automatic_captions' in info_dict:
            self.list_subtitles(
                info_dict['id'], automatic_captions, 'automatic captions')
        self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        return

    info_dict['requested_subtitles'] = self.process_subtitles(
        info_dict['id'], subtitles, automatic_captions)

    # We now pick which formats have to be downloaded
    if info_dict.get('formats') is None:
        # There's only one format available
        formats = [info_dict]
    else:
        formats = info_dict['formats']

    if not formats:
        raise ExtractorError('No video formats found!')

    def is_wellformed(f):
        url = f.get('url')
        if not url:
            self.report_warning(
                '"url" field is missing or empty - skipping format, '
                'there is an error in extractor')
            return False
        if isinstance(url, bytes):
            sanitize_string_field(f, 'url')
        return True

    # Filter out malformed formats for better extraction robustness
    formats = list(filter(is_wellformed, formats))

    # format_id -> list of formats sharing it, used to detect duplicates.
    formats_dict = {}

    # We check that all the formats have the format and format_id fields
    for i, format in enumerate(formats):
        sanitize_string_field(format, 'format_id')
        sanitize_numeric_fields(format)
        format['url'] = sanitize_url(format['url'])
        if not format.get('format_id'):
            # Fall back to the list index as a format_id.
            format['format_id'] = compat_str(i)
        else:
            # Sanitize format_id from characters used in format selector expression
            format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
        format_id = format['format_id']
        if format_id not in formats_dict:
            formats_dict[format_id] = []
        formats_dict[format_id].append(format)

    # Make sure all formats have unique format_id
    for format_id, ambiguous_formats in formats_dict.items():
        if len(ambiguous_formats) > 1:
            for i, format in enumerate(ambiguous_formats):
                format['format_id'] = '%s-%d' % (format_id, i)

    for i, format in enumerate(formats):
        if format.get('format') is None:
            format['format'] = '{id} - {res}{note}'.format(
                id=format['format_id'],
                res=self.format_resolution(format),
                note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
            )
        # Automatically determine file extension if missing
        if format.get('ext') is None:
            format['ext'] = determine_ext(format['url']).lower()
        # Automatically determine protocol if missing (useful for format
        # selection purposes)
        if format.get('protocol') is None:
            format['protocol'] = determine_protocol(format)
        # Add HTTP headers, so that external programs can use them from the
        # json output
        full_format_info = info_dict.copy()
        full_format_info.update(format)
        format['http_headers'] = self._calc_headers(full_format_info)

    # Remove private housekeeping stuff
    if '__x_forwarded_for_ip' in info_dict:
        del info_dict['__x_forwarded_for_ip']

    # TODO Central sorting goes here

    if formats[0] is not info_dict:
        # only set the 'formats' fields if the original info_dict list them
        # otherwise we end up with a circular reference, the first (and unique)
        # element in the 'formats' field in info_dict is info_dict itself,
        # which can't be exported to json
        info_dict['formats'] = formats
    if self.params.get('listformats'):
        self.list_formats(info_dict)
        return

    req_format = self.params.get('format')
    if req_format is None:
        req_format = self._default_format_spec(info_dict, download=download)
        if self.params.get('verbose'):
            self.to_stdout('[debug] Default format spec: %s' % req_format)

    format_selector = self.build_format_selector(req_format)

    # While in format selection we may need to have an access to the original
    # format set in order to calculate some metrics or do some processing.
    # For now we need to be able to guess whether original formats provided
    # by extractor are incomplete or not (i.e. whether extractor provides only
    # video-only or audio-only formats) for proper formats selection for
    # extractors with such incomplete formats (see
    # https://github.com/ytdl-org/youtube-dl/pull/5556).
    # Since formats may be filtered during format selection and may not match
    # the original formats the results may be incorrect. Thus original formats
    # or pre-calculated metrics should be passed to format selection routines
    # as well.
    # We will pass a context object containing all necessary additional data
    # instead of just formats.
    # This fixes incorrect format selection issue (see
    # https://github.com/ytdl-org/youtube-dl/issues/10083).
    incomplete_formats = (
        # All formats are video-only or
        all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
        # all formats are audio-only
        or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))

    ctx = {
        'formats': formats,
        'incomplete_formats': incomplete_formats,
    }

    formats_to_download = list(format_selector(ctx))
    if not formats_to_download:
        raise ExtractorError('requested format not available',
                             expected=True)

    if download:
        if len(formats_to_download) > 1:
            self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
        for format in formats_to_download:
            new_info = dict(info_dict)
            new_info.update(format)
            self.process_info(new_info)
    # We update the info dict with the best quality format (backwards compatibility)
    info_dict.update(formats_to_download[-1])
    return info_dict
2015-02-22 11:37:27 +01:00
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
    """Select the requested subtitles and their format"""
    # Pool the candidate subtitles; manually written ones take
    # precedence over automatic captions for the same language.
    available_subs = {}
    if normal_subtitles and self.params.get('writesubtitles'):
        available_subs.update(normal_subtitles)
    if automatic_captions and self.params.get('writeautomaticsub'):
        for lang, cap_info in automatic_captions.items():
            available_subs.setdefault(lang, cap_info)

    subtitles_enabled = (self.params.get('writesubtitles')
                         or self.params.get('writeautomaticsub'))
    if not subtitles_enabled or not available_subs:
        return None

    # Determine which languages were requested.
    if self.params.get('allsubtitles', False):
        requested_langs = available_subs.keys()
    elif self.params.get('subtitleslangs', False):
        requested_langs = self.params.get('subtitleslangs')
    elif 'en' in available_subs:
        requested_langs = ['en']
    else:
        requested_langs = [list(available_subs.keys())[0]]

    # 'subtitlesformat' is a '/'-separated preference list; 'best'
    # means the last (assumed best) listed format.
    formats_query = self.params.get('subtitlesformat', 'best')
    formats_preference = formats_query.split('/') if formats_query else []

    subs = {}
    for lang in requested_langs:
        formats = available_subs.get(lang)
        if formats is None:
            self.report_warning('%s subtitles not available for %s' % (lang, video_id))
            continue
        for ext in formats_preference:
            if ext == 'best':
                chosen = formats[-1]
                break
            matches = [f for f in formats if f['ext'] == ext]
            if matches:
                chosen = matches[-1]
                break
        else:
            # Nothing in the preference list matched: fall back to the
            # last available format and tell the user.
            chosen = formats[-1]
            self.report_warning(
                'No subtitle format found matching "%s" for language %s, '
                'using %s' % (formats_query, lang, chosen['ext']))
        subs[lang] = chosen
    return subs
2019-09-24 21:08:46 +02:00
def __forced_printings(self, info_dict, filename, incomplete):
    """Emit fields to stdout as requested by the --print/--force-* options.

    'incomplete' marks partially-resolved results: mandatory fields are
    then only printed when actually present.
    """
    def forced(name):
        return self.params.get('force%s' % name, False)

    def emit(field, mandatory):
        present = info_dict.get(field) is not None
        # Mandatory fields are printed unconditionally for complete
        # results (a missing one would raise); optional fields only
        # when they exist.
        if forced(field) and ((not incomplete or present) if mandatory else present):
            self.to_stdout(info_dict[field])

    emit('title', True)
    emit('id', True)

    if forced('url') and not incomplete:
        requested = info_dict.get('requested_formats')
        if requested is not None:
            for f in requested:
                self.to_stdout(f['url'] + f.get('play_path', ''))
        else:
            # For RTMP URLs, also include the playpath
            self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))

    emit('thumbnail', False)
    emit('description', False)

    if forced('filename') and filename is not None:
        self.to_stdout(filename)
    if forced('duration') and info_dict.get('duration') is not None:
        self.to_stdout(formatSeconds(info_dict['duration']))

    emit('format', True)

    if forced('json'):
        self.to_stdout(json.dumps(info_dict))
2013-06-18 22:14:21 +02:00
def process_info ( self , info_dict ) :
""" Process a single resolved IE result. """
assert info_dict . get ( ' _type ' , ' video ' ) == ' video '
2014-01-23 18:56:36 +01:00
max_downloads = self . params . get ( ' max_downloads ' )
if max_downloads is not None :
if self . _num_downloads > = int ( max_downloads ) :
raise MaxDownloadsReached ( )
2013-06-18 22:14:21 +02:00
2019-09-24 21:08:46 +02:00
# TODO: backward compatibility, to be removed
2013-06-18 22:14:21 +02:00
info_dict [ ' fulltitle ' ] = info_dict [ ' title ' ]
2014-07-25 23:37:32 +02:00
if ' format ' not in info_dict :
2013-06-18 22:14:21 +02:00
info_dict [ ' format ' ] = info_dict [ ' ext ' ]
2015-02-10 11:28:28 +01:00
reason = self . _match_entry ( info_dict , incomplete = False )
2013-06-18 22:14:21 +02:00
if reason is not None :
2014-01-05 01:52:03 +01:00
self . to_screen ( ' [download] ' + reason )
2013-06-18 22:14:21 +02:00
return
2014-01-23 18:56:36 +01:00
self . _num_downloads + = 1
2013-06-18 22:14:21 +02:00
2015-01-26 12:01:43 +01:00
info_dict [ ' _filename ' ] = filename = self . prepare_filename ( info_dict )
2013-06-18 22:14:21 +02:00
# Forced printings
2019-09-24 21:08:46 +02:00
self . __forced_printings ( info_dict , filename , incomplete = False )
2013-06-18 22:14:21 +02:00
# Do nothing else if in simulate mode
if self . params . get ( ' simulate ' , False ) :
return
if filename is None :
return
2017-09-05 18:31:34 +02:00
def ensure_dir_exists ( path ) :
try :
dn = os . path . dirname ( path )
if dn and not os . path . exists ( dn ) :
os . makedirs ( dn )
return True
except ( OSError , IOError ) as err :
self . report_error ( ' unable to create directory ' + error_to_compat_str ( err ) )
return False
if not ensure_dir_exists ( sanitize_path ( encodeFilename ( filename ) ) ) :
2013-06-18 22:14:21 +02:00
return
if self . params . get ( ' writedescription ' , False ) :
2015-05-02 19:36:55 +02:00
descfn = replace_extension ( filename , ' description ' , info_dict . get ( ' ext ' ) )
2013-12-16 04:39:04 +01:00
if self . params . get ( ' nooverwrites ' , False ) and os . path . exists ( encodeFilename ( descfn ) ) :
2014-01-05 01:52:03 +01:00
self . to_screen ( ' [info] Video description is already present ' )
2014-12-21 20:49:14 +01:00
elif info_dict . get ( ' description ' ) is None :
self . report_warning ( ' There \' s no description to write. ' )
2013-12-16 04:39:04 +01:00
else :
try :
2014-01-05 01:52:03 +01:00
self . to_screen ( ' [info] Writing video description to: ' + descfn )
2013-12-16 04:39:04 +01:00
with io . open ( encodeFilename ( descfn ) , ' w ' , encoding = ' utf-8 ' ) as descfile :
descfile . write ( info_dict [ ' description ' ] )
except ( OSError , IOError ) :
2014-01-05 01:52:03 +01:00
self . report_error ( ' Cannot write description file ' + descfn )
2013-12-16 04:39:04 +01:00
return
2013-06-18 22:14:21 +02:00
2013-10-14 07:18:58 +02:00
if self . params . get ( ' writeannotations ' , False ) :
2015-05-02 19:35:18 +02:00
annofn = replace_extension ( filename , ' annotations.xml ' , info_dict . get ( ' ext ' ) )
2013-12-16 04:39:04 +01:00
if self . params . get ( ' nooverwrites ' , False ) and os . path . exists ( encodeFilename ( annofn ) ) :
2014-01-05 01:52:03 +01:00
self . to_screen ( ' [info] Video annotations are already present ' )
2019-08-09 09:19:41 +02:00
elif not info_dict . get ( ' annotations ' ) :
self . report_warning ( ' There are no annotations to write. ' )
2013-12-16 04:39:04 +01:00
else :
try :
2014-01-05 01:52:03 +01:00
self . to_screen ( ' [info] Writing video annotations to: ' + annofn )
2013-12-16 04:39:04 +01:00
with io . open ( encodeFilename ( annofn ) , ' w ' , encoding = ' utf-8 ' ) as annofile :
annofile . write ( info_dict [ ' annotations ' ] )
except ( KeyError , TypeError ) :
2014-01-05 01:52:03 +01:00
self . report_warning ( ' There are no annotations to write. ' )
2013-12-16 04:39:04 +01:00
except ( OSError , IOError ) :
2014-01-05 01:52:03 +01:00
self . report_error ( ' Cannot write annotations file: ' + annofn )
2013-12-16 04:39:04 +01:00
return
2013-10-14 07:18:58 +02:00
2013-06-26 00:02:15 +02:00
subtitles_are_requested = any ( [ self . params . get ( ' writesubtitles ' , False ) ,
2013-09-14 11:14:40 +02:00
self . params . get ( ' writeautomaticsub ' ) ] )
2013-06-26 00:02:15 +02:00
2015-02-16 21:12:31 +01:00
if subtitles_are_requested and info_dict . get ( ' requested_subtitles ' ) :
2013-06-18 22:14:21 +02:00
# subtitles download errors are already managed as troubles in relevant IE
# that way it will silently go on when used with unsupporting IE
2015-02-16 21:12:31 +01:00
subtitles = info_dict [ ' requested_subtitles ' ]
2015-02-28 14:03:27 +01:00
ie = self . get_info_extractor ( info_dict [ ' extractor_key ' ] )
2015-02-15 18:03:41 +01:00
for sub_lang , sub_info in subtitles . items ( ) :
sub_format = sub_info [ ' ext ' ]
2019-10-17 23:03:53 +02:00
sub_filename = subtitles_filename ( filename , sub_lang , sub_format , info_dict . get ( ' ext ' ) )
2017-04-28 23:25:20 +02:00
if self . params . get ( ' nooverwrites ' , False ) and os . path . exists ( encodeFilename ( sub_filename ) ) :
self . to_screen ( ' [info] Video subtitle %s . %s is already present ' % ( sub_lang , sub_format ) )
2015-02-15 18:03:41 +01:00
else :
2017-04-28 23:25:20 +02:00
self . to_screen ( ' [info] Writing video subtitles to: ' + sub_filename )
if sub_info . get ( ' data ' ) is not None :
try :
# Use newline='' to prevent conversion of newline characters
2019-03-09 13:14:41 +01:00
# See https://github.com/ytdl-org/youtube-dl/issues/10268
2017-04-28 23:25:20 +02:00
with io . open ( encodeFilename ( sub_filename ) , ' w ' , encoding = ' utf-8 ' , newline = ' ' ) as subfile :
subfile . write ( sub_info [ ' data ' ] )
except ( OSError , IOError ) :
self . report_error ( ' Cannot write subtitles file ' + sub_filename )
return
2013-12-16 04:39:04 +01:00
else :
2017-04-28 23:25:20 +02:00
try :
sub_data = ie . _request_webpage (
sub_info [ ' url ' ] , info_dict [ ' id ' ] , note = False ) . read ( )
with io . open ( encodeFilename ( sub_filename ) , ' wb ' ) as subfile :
subfile . write ( sub_data )
except ( ExtractorError , IOError , OSError , ValueError ) as err :
self . report_warning ( ' Unable to download subtitle for " %s " : %s ' %
( sub_lang , error_to_compat_str ( err ) ) )
continue
2013-06-18 22:14:21 +02:00
if self . params . get ( ' writeinfojson ' , False ) :
2015-05-02 19:23:44 +02:00
infofn = replace_extension ( filename , ' info.json ' , info_dict . get ( ' ext ' ) )
2013-12-16 04:39:04 +01:00
if self . params . get ( ' nooverwrites ' , False ) and os . path . exists ( encodeFilename ( infofn ) ) :
2014-01-05 01:52:03 +01:00
self . to_screen ( ' [info] Video description metadata is already present ' )
2013-12-16 04:39:04 +01:00
else :
2014-01-05 01:52:03 +01:00
self . to_screen ( ' [info] Writing video description metadata as JSON to: ' + infofn )
2013-12-16 04:39:04 +01:00
try :
2015-04-30 20:44:34 +02:00
write_json_file ( self . filter_requested_info ( info_dict ) , infofn )
2013-12-16 04:39:04 +01:00
except ( OSError , IOError ) :
2014-01-05 01:52:03 +01:00
self . report_error ( ' Cannot write metadata to JSON file ' + infofn )
2013-12-16 04:39:04 +01:00
return
2013-06-18 22:14:21 +02:00
2015-01-25 03:11:12 +01:00
self . _write_thumbnails ( info_dict , filename )
2013-06-18 22:14:21 +02:00
if not self . params . get ( ' skip_download ' , False ) :
2014-09-25 18:37:20 +02:00
try :
def dl ( name , info ) :
2015-01-23 23:50:31 +01:00
fd = get_suitable_downloader ( info , self . params ) ( self , self . params )
2014-09-25 18:37:20 +02:00
for ph in self . _progress_hooks :
fd . add_progress_hook ( ph )
if self . params . get ( ' verbose ' ) :
self . to_stdout ( ' [debug] Invoking downloader on %r ' % info . get ( ' url ' ) )
return fd . download ( name , info )
2015-01-25 06:15:51 +01:00
2014-09-25 18:37:20 +02:00
if info_dict . get ( ' requested_formats ' ) is not None :
downloaded = [ ]
success = True
2015-04-18 11:52:36 +02:00
merger = FFmpegMergerPP ( self )
2015-02-17 17:26:41 +01:00
if not merger . available :
2014-09-25 18:37:20 +02:00
postprocessors = [ ]
self . report_warning ( ' You have requested multiple '
' formats but ffmpeg or avconv are not installed. '
2015-05-17 16:56:03 +02:00
' The formats won \' t be merged. ' )
2014-01-04 13:13:51 +01:00
else :
2014-09-25 18:37:20 +02:00
postprocessors = [ merger ]
2015-04-17 23:00:35 +02:00
def compatible_formats ( formats ) :
video , audio = formats
# Check extension
2018-04-19 17:21:50 +02:00
video_ext , audio_ext = video . get ( ' ext ' ) , audio . get ( ' ext ' )
2015-04-17 23:00:35 +02:00
if video_ext and audio_ext :
COMPATIBLE_EXTS = (
2016-10-19 17:22:40 +02:00
( ' mp3 ' , ' mp4 ' , ' m4a ' , ' m4p ' , ' m4b ' , ' m4r ' , ' m4v ' , ' ismv ' , ' isma ' ) ,
2015-04-17 23:00:35 +02:00
( ' webm ' )
)
for exts in COMPATIBLE_EXTS :
if video_ext in exts and audio_ext in exts :
return True
# TODO: Check acodec/vcodec
return False
2015-05-02 18:52:21 +02:00
filename_real_ext = os . path . splitext ( filename ) [ 1 ] [ 1 : ]
filename_wo_ext = (
os . path . splitext ( filename ) [ 0 ]
if filename_real_ext == info_dict [ ' ext ' ]
else filename )
2015-04-17 23:00:35 +02:00
requested_formats = info_dict [ ' requested_formats ' ]
2015-04-19 18:33:52 +02:00
if self . params . get ( ' merge_output_format ' ) is None and not compatible_formats ( requested_formats ) :
2015-05-02 18:52:21 +02:00
info_dict [ ' ext ' ] = ' mkv '
2015-05-17 16:56:03 +02:00
self . report_warning (
' Requested formats are incompatible for merge and will be merged into mkv. ' )
2015-05-02 18:52:21 +02:00
# Ensure filename always has a correct extension for successful merge
filename = ' %s . %s ' % ( filename_wo_ext , info_dict [ ' ext ' ] )
2019-10-13 18:00:48 +02:00
file_exists = os . path . exists ( encodeFilename ( filename ) )
if not self . params . get ( ' overwrites ' , False ) and file_exists :
2015-04-19 16:56:22 +02:00
self . to_screen (
' [download] %s has already been downloaded and '
' merged ' % filename )
else :
2019-10-13 18:00:48 +02:00
if file_exists :
self . report_file_delete ( filename )
os . remove ( encodeFilename ( filename ) )
2015-04-17 23:00:35 +02:00
for f in requested_formats :
2015-04-19 16:56:22 +02:00
new_info = dict ( info_dict )
new_info . update ( f )
2017-09-05 18:31:34 +02:00
fname = prepend_extension (
self . prepare_filename ( new_info ) ,
' f %s ' % f [ ' format_id ' ] , new_info [ ' ext ' ] )
if not ensure_dir_exists ( fname ) :
return
2015-04-19 16:56:22 +02:00
downloaded . append ( fname )
partial_success = dl ( fname , new_info )
success = success and partial_success
info_dict [ ' __postprocessors ' ] = postprocessors
info_dict [ ' __files_to_merge ' ] = downloaded
2014-09-25 18:37:20 +02:00
else :
2019-10-13 18:00:48 +02:00
# Delete existing file with --yes-overwrites
if self . params . get ( ' overwrites ' , False ) :
if os . path . exists ( encodeFilename ( filename ) ) :
self . report_file_delete ( filename )
os . remove ( encodeFilename ( filename ) )
2014-09-25 18:37:20 +02:00
# Just a single file
success = dl ( filename , info_dict )
except ( compat_urllib_error . URLError , compat_http_client . HTTPException , socket . error ) as err :
2016-05-02 14:35:50 +02:00
self . report_error ( ' unable to download video data: %s ' % error_to_compat_str ( err ) )
2014-09-25 18:37:20 +02:00
return
except ( OSError , IOError ) as err :
raise UnavailableVideoError ( err )
except ( ContentTooShortError , ) as err :
self . report_error ( ' content too short (expected %s bytes and served %s ) ' % ( err . expected , err . downloaded ) )
return
2013-06-18 22:14:21 +02:00
2016-03-02 16:11:18 +01:00
if success and filename != ' - ' :
2015-01-10 05:45:51 +01:00
# Fixup content
2015-01-23 18:39:12 +01:00
fixup_policy = self . params . get ( ' fixup ' )
if fixup_policy is None :
fixup_policy = ' detect_or_warn '
2016-03-05 23:32:18 +01:00
INSTALL_FFMPEG_MESSAGE = ' Install ffmpeg or avconv to fix this automatically. '
2015-01-10 05:45:51 +01:00
stretched_ratio = info_dict . get ( ' stretched_ratio ' )
if stretched_ratio is not None and stretched_ratio != 1 :
if fixup_policy == ' warn ' :
self . report_warning ( ' %s : Non-uniform pixel ratio ( %s ) ' % (
info_dict [ ' id ' ] , stretched_ratio ) )
elif fixup_policy == ' detect_or_warn ' :
stretched_pp = FFmpegFixupStretchedPP ( self )
if stretched_pp . available :
info_dict . setdefault ( ' __postprocessors ' , [ ] )
info_dict [ ' __postprocessors ' ] . append ( stretched_pp )
else :
self . report_warning (
2016-03-05 23:32:18 +01:00
' %s : Non-uniform pixel ratio ( %s ). %s '
% ( info_dict [ ' id ' ] , stretched_ratio , INSTALL_FFMPEG_MESSAGE ) )
2015-01-10 05:45:51 +01:00
else :
2015-01-23 18:39:12 +01:00
assert fixup_policy in ( ' ignore ' , ' never ' )
2019-05-10 22:56:22 +02:00
if ( info_dict . get ( ' requested_formats ' ) is None
and info_dict . get ( ' container ' ) == ' m4a_dash ' ) :
2015-01-23 18:39:12 +01:00
if fixup_policy == ' warn ' :
2016-03-05 23:32:18 +01:00
self . report_warning (
' %s : writing DASH m4a. '
' Only some players support this container. '
% info_dict [ ' id ' ] )
2015-01-23 18:39:12 +01:00
elif fixup_policy == ' detect_or_warn ' :
fixup_pp = FFmpegFixupM4aPP ( self )
if fixup_pp . available :
info_dict . setdefault ( ' __postprocessors ' , [ ] )
info_dict [ ' __postprocessors ' ] . append ( fixup_pp )
else :
self . report_warning (
2016-03-05 23:32:18 +01:00
' %s : writing DASH m4a. '
' Only some players support this container. %s '
% ( info_dict [ ' id ' ] , INSTALL_FFMPEG_MESSAGE ) )
2015-01-23 18:39:12 +01:00
else :
assert fixup_policy in ( ' ignore ' , ' never ' )
2015-01-10 05:45:51 +01:00
2019-05-10 22:56:22 +02:00
if ( info_dict . get ( ' protocol ' ) == ' m3u8_native '
or info_dict . get ( ' protocol ' ) == ' m3u8 '
and self . params . get ( ' hls_prefer_native ' ) ) :
2016-03-01 21:08:50 +01:00
if fixup_policy == ' warn ' :
2017-07-09 12:09:44 +02:00
self . report_warning ( ' %s : malformed AAC bitstream detected. ' % (
2016-03-01 21:08:50 +01:00
info_dict [ ' id ' ] ) )
elif fixup_policy == ' detect_or_warn ' :
fixup_pp = FFmpegFixupM3u8PP ( self )
if fixup_pp . available :
info_dict . setdefault ( ' __postprocessors ' , [ ] )
info_dict [ ' __postprocessors ' ] . append ( fixup_pp )
else :
self . report_warning (
2017-07-09 12:09:44 +02:00
' %s : malformed AAC bitstream detected. %s '
2016-03-05 23:32:18 +01:00
% ( info_dict [ ' id ' ] , INSTALL_FFMPEG_MESSAGE ) )
2016-03-01 21:08:50 +01:00
else :
assert fixup_policy in ( ' ignore ' , ' never ' )
2013-06-18 22:14:21 +02:00
try :
self . post_process ( filename , info_dict )
except ( PostProcessingError ) as err :
2014-01-05 01:52:03 +01:00
self . report_error ( ' postprocessing: %s ' % str ( err ) )
2013-06-18 22:14:21 +02:00
return
2014-12-17 13:21:22 +01:00
self . record_download_archive ( info_dict )
2013-06-18 22:14:21 +02:00
def download(self, url_list):
    """Download every URL in url_list and return the accumulated retcode.

    Raises SameFileError when several URLs would all be written to one
    fixed output file (template has no placeholder and is not stdout).
    """
    outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
    fixed_target = outtmpl != '-' and '%' not in outtmpl
    if len(url_list) > 1 and fixed_target and self.params.get('max_downloads') != 1:
        # Multiple downloads would clobber the same output file.
        raise SameFileError(outtmpl)

    for url in url_list:
        try:
            # extract_info also performs the actual download.
            result = self.extract_info(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))
        except UnavailableVideoError:
            self.report_error('unable to download video')
        except MaxDownloadsReached:
            self.to_screen('[info] Maximum number of downloaded files reached.')
            raise
        else:
            if self.params.get('dump_single_json', False):
                self.to_stdout(json.dumps(result))

    return self._download_retcode
2013-11-22 14:57:53 +01:00
def download_with_info_file(self, info_filename):
    """Re-run processing/download from a previously dumped info JSON file."""
    # FileInput transparently decodes the file as UTF-8 but has no read()
    # method, so the JSON text is rebuilt by joining its lines.
    with contextlib.closing(fileinput.FileInput(
            [info_filename], mode='r',
            openhook=fileinput.hook_encoded('utf-8'))) as jsonf:
        info = self.filter_requested_info(json.loads('\n'.join(jsonf)))
    try:
        self.process_ie_result(info, download=True)
    except DownloadError:
        webpage_url = info.get('webpage_url')
        if webpage_url is None:
            # No fallback URL available; propagate the failure.
            raise
        self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
        return self.download([webpage_url])
    return self._download_retcode
2013-11-22 14:57:53 +01:00
2015-04-30 20:44:34 +02:00
@staticmethod
def filter_requested_info(info_dict):
    """Return a copy of info_dict without the transient 'requested_*' keys."""
    return {
        k: v for k, v in info_dict.items()
        if k not in ('requested_formats', 'requested_subtitles')}
2013-06-18 22:14:21 +02:00
def post_process(self, filename, ie_info):
    """Run all the postprocessors on the given file."""
    info = dict(ie_info)
    info['filepath'] = filename
    # Info-dict-specific postprocessors (e.g. the format merger) run
    # before the user-configured ones.
    chain = []
    extra_pps = ie_info.get('__postprocessors')
    if extra_pps is not None:
        chain.extend(extra_pps)
    chain.extend(self._pps)
    for pp in chain:
        files_to_delete = []
        try:
            files_to_delete, info = pp.run(info)
        except PostProcessingError as e:
            self.report_error(e.msg)
        if not files_to_delete or self.params.get('keepvideo', False):
            continue
        for old_filename in files_to_delete:
            self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
            try:
                os.remove(encodeFilename(old_filename))
            except (IOError, OSError):
                # Best effort: a failed cleanup should not abort processing.
                self.report_warning('Unable to remove downloaded original file')
2013-10-06 04:27:09 +02:00
2013-11-25 15:46:54 +01:00
def _make_archive_id ( self , info_dict ) :
2019-02-01 23:44:31 +01:00
video_id = info_dict . get ( ' id ' )
if not video_id :
return
2013-11-25 15:46:54 +01:00
# Future-proof against any change in case
# and backwards compatibility with prior versions
2019-02-01 23:44:31 +01:00
extractor = info_dict . get ( ' extractor_key ' ) or info_dict . get ( ' ie_key ' ) # key in a playlist
2013-11-22 22:46:46 +01:00
if extractor is None :
2019-02-07 19:08:48 +01:00
url = str_or_none ( info_dict . get ( ' url ' ) )
if not url :
return
2019-02-01 23:44:31 +01:00
# Try to find matching extractor for the URL and take its ie_key
for ie in self . _ies :
2019-02-07 19:08:48 +01:00
if ie . suitable ( url ) :
2019-02-01 23:44:31 +01:00
extractor = ie . ie_key ( )
break
else :
return
return extractor . lower ( ) + ' ' + video_id
2013-11-25 15:46:54 +01:00
def in_download_archive(self, info_dict):
    """Return True if this video is already recorded in --download-archive."""
    archive_fn = self.params.get('download_archive')
    if archive_fn is None:
        return False
    vid_id = self._make_archive_id(info_dict)
    if not vid_id:
        return False  # Incomplete video information
    try:
        with locked_file(archive_fn, 'r', encoding='utf-8') as archive_file:
            return any(line.strip() == vid_id for line in archive_file)
    except IOError as ioe:
        # A missing archive file just means nothing was recorded yet.
        if ioe.errno != errno.ENOENT:
            raise
    return False
def record_download_archive(self, info_dict):
    """Append this video's archive id to the --download-archive file."""
    archive_fn = self.params.get('download_archive')
    if archive_fn is None:
        return
    vid_id = self._make_archive_id(info_dict)
    assert vid_id
    # locked_file guards against concurrent youtube-dl instances
    # appending to the same archive.
    with locked_file(archive_fn, 'a', encoding='utf-8') as archive_file:
        archive_file.write(vid_id + '\n')
2013-07-02 10:08:58 +02:00
2013-10-21 14:09:38 +02:00
@staticmethod
def format_resolution(format, default='unknown'):
    """Return a human-readable resolution string for a format dict."""
    if format.get('vcodec') == 'none':
        return 'audio only'
    # An explicit resolution string wins over width/height.
    resolution = format.get('resolution')
    if resolution is not None:
        return resolution
    width, height = format.get('width'), format.get('height')
    if height is not None and width is not None:
        return '%sx%s' % (width, height)
    if height is not None:
        return '%sp' % height
    if width is not None:
        return '%dx?' % width
    return default
2014-04-30 02:02:41 +02:00
def _format_note(self, fdict):
    """Build the free-form 'note' column for one format row.

    Pieces are appended in a fixed order (container, video codec/bitrate,
    fps, audio codec/bitrate, sample rate, file size); most pieces prepend
    ', ' only when something was already written, so the order of the
    if-blocks below is part of the output format.
    """
    res = ''
    if fdict.get('ext') in ['f4f', 'f4m']:
        res += '(unsupported) '
    if fdict.get('language'):
        if res:
            res += ' '
        res += '[%s] ' % fdict['language']
    if fdict.get('format_note') is not None:
        res += fdict['format_note'] + ' '
    if fdict.get('tbr') is not None:
        res += '%4dk ' % fdict['tbr']
    if fdict.get('container') is not None:
        if res:
            res += ', '
        res += '%s container' % fdict['container']
    if (fdict.get('vcodec') is not None
            and fdict.get('vcodec') != 'none'):
        if res:
            res += ', '
        res += fdict['vcodec']
        # '@' joins the codec to the vbr value printed just below.
        if fdict.get('vbr') is not None:
            res += '@'
    elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
        # Unknown video codec but separate video/audio bitrates.
        res += 'video@'
    if fdict.get('vbr') is not None:
        res += '%4dk' % fdict['vbr']
    if fdict.get('fps') is not None:
        if res:
            res += ', '
        res += '%sfps' % fdict['fps']
    if fdict.get('acodec') is not None:
        if res:
            res += ', '
        if fdict['acodec'] == 'none':
            res += 'video only'
        else:
            res += '%-5s' % fdict['acodec']
    elif fdict.get('abr') is not None:
        if res:
            res += ', '
        res += 'audio'
    if fdict.get('abr') is not None:
        res += '@%3dk' % fdict['abr']
    if fdict.get('asr') is not None:
        res += ' (%5dHz)' % fdict['asr']
    if fdict.get('filesize') is not None:
        if res:
            res += ', '
        res += format_bytes(fdict['filesize'])
    elif fdict.get('filesize_approx') is not None:
        if res:
            res += ', '
        res += '~' + format_bytes(fdict['filesize_approx'])
    return res
2013-11-16 01:08:43 +01:00
2014-04-30 02:02:41 +02:00
def list_formats(self, info_dict):
    """Print a table of the available formats for this video."""
    formats = info_dict.get('formats', [info_dict])
    rows = []
    for f in formats:
        # Strongly down-ranked formats are hidden from the listing.
        if f.get('preference') is not None and f['preference'] < -1000:
            continue
        rows.append([f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)])
    if len(formats) > 1:
        # Formats are ordered worst-to-best, so tag the last row.
        note = rows[-1][-1]
        rows[-1][-1] = note + (' ' if note else '') + '(best)'

    header_line = ['format code', 'extension', 'resolution', 'note']
    self.to_screen(
        '[info] Available formats for %s:\n%s' %
        (info_dict['id'], render_table(header_line, rows)))
2015-01-25 02:38:47 +01:00
def list_thumbnails(self, info_dict):
    """Print a table of the thumbnails available for this video."""
    thumbnails = info_dict.get('thumbnails')
    if not thumbnails:
        self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
        return

    self.to_screen(
        '[info] Thumbnails for %s:' % info_dict['id'])
    rows = [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']]
            for t in thumbnails]
    self.to_screen(render_table(['ID', 'width', 'height', 'URL'], rows))
2013-11-22 19:57:52 +01:00
2015-02-16 21:44:17 +01:00
def list_subtitles(self, video_id, subtitles, name='subtitles'):
    """Print a table of subtitle languages/formats (or captions via *name*)."""
    if not subtitles:
        self.to_screen('%s has no %s' % (video_id, name))
        return
    self.to_screen(
        'Available %s for %s:' % (name, video_id))
    rows = []
    for lang, formats in subtitles.items():
        # Formats are listed best-first, hence the reversal.
        exts = ', '.join(f['ext'] for f in reversed(formats))
        rows.append([lang, exts])
    self.to_screen(render_table(['Language', 'formats'], rows))
2015-02-15 18:03:41 +01:00
2013-11-22 19:57:52 +01:00
def urlopen(self, req):
    """Start an HTTP download"""
    # Bare URL strings are wrapped into a sanitized Request object first;
    # prepared Request objects pass through unchanged.
    if isinstance(req, compat_basestring):
        req = sanitized_Request(req)
    # _opener and _socket_timeout are configured in _setup_opener().
    return self._opener.open(req, timeout=self._socket_timeout)
2013-11-22 19:57:52 +01:00
def print_debug_header(self):
    """Write the '[debug] ...' diagnostic block shown with --verbose.

    Covers encodings, youtube-dl/Python versions, git revision, external
    program versions, the proxy map and (with --call-home) the public IP
    plus an update check.
    """
    if not self.params.get('verbose'):
        return

    if type('') is not compat_str:
        # Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
        self.report_warning(
            'Your Python is broken! Update to a newer and supported version')

    # sys.stdout may lack an encoding attribute (e.g. when replaced).
    stdout_encoding = getattr(
        sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
    encoding_str = (
        '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            stdout_encoding,
            self.get_encoding()))
    write_string(encoding_str, encoding=None)

    self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
    if _LAZY_LOADER:
        self._write_string('[debug] Lazy loading extractors enabled' + '\n')
    try:
        # Report the git revision when running from a checkout.
        sp = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)))
        out, err = sp.communicate()
        out = out.decode().strip()
        if re.match('[0-9a-f]+', out):
            self._write_string('[debug] Git HEAD: ' + out + '\n')
    except Exception:
        try:
            # sys.exc_clear only exists on Python 2; ignore elsewhere.
            sys.exc_clear()
        except Exception:
            pass

    def python_implementation():
        # Append the PyPy version triple when available.
        impl_name = platform.python_implementation()
        if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
            return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
        return impl_name

    self._write_string('[debug] Python version %s (%s) - %s\n' % (
        platform.python_version(), python_implementation(),
        platform_name()))

    exe_versions = FFmpegPostProcessor.get_versions(self)
    exe_versions['rtmpdump'] = rtmpdump_version()
    exe_versions['phantomjs'] = PhantomJSwrapper._version()
    exe_str = ', '.join(
        '%s %s' % (exe, v)
        for exe, v in sorted(exe_versions.items())
        if v
    )
    if not exe_str:
        exe_str = 'none'
    self._write_string('[debug] exe versions: %s\n' % exe_str)

    # Collect the effective proxies from every opener handler that has any.
    proxy_map = {}
    for handler in self._opener.handlers:
        if hasattr(handler, 'proxies'):
            proxy_map.update(handler.proxies)
    self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

    if self.params.get('call_home', False):
        ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
        self._write_string('[debug] Public IP address: %s\n' % ipaddr)
        latest_version = self.urlopen(
            'https://yt-dl.org/latest/version').read().decode('utf-8')
        if version_tuple(latest_version) > version_tuple(__version__):
            self.report_warning(
                'You are using an outdated version (newest version: %s)! '
                'See https://yt-dl.org/update if you need help updating.' %
                latest_version)
2013-12-01 11:42:02 +01:00
def _setup_opener(self):
    """Build self._opener (urllib opener) and self.cookiejar from params.

    Also sets self._socket_timeout (600s default). Handlers installed:
    per-request proxies, HTTPS, cookies, the youtube-dl HTTP handler,
    redirects, data: URLs, and a file: handler that refuses to open.
    """
    timeout_val = self.params.get('socket_timeout')
    self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

    opts_cookiefile = self.params.get('cookiefile')
    opts_proxy = self.params.get('proxy')

    if opts_cookiefile is None:
        # No --cookies file: keep cookies in memory only.
        self.cookiejar = compat_cookiejar.CookieJar()
    else:
        opts_cookiefile = expand_path(opts_cookiefile)
        self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
        # Load existing cookies only when the file is readable.
        if os.access(opts_cookiefile, os.R_OK):
            self.cookiejar.load(ignore_discard=True, ignore_expires=True)

    cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
    if opts_proxy is not None:
        # --proxy '' explicitly disables all proxies.
        if opts_proxy == '':
            proxies = {}
        else:
            proxies = {'http': opts_proxy, 'https': opts_proxy}
    else:
        proxies = compat_urllib_request.getproxies()
        # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
        if 'http' in proxies and 'https' not in proxies:
            proxies['https'] = proxies['http']
    proxy_handler = PerRequestProxyHandler(proxies)

    debuglevel = 1 if self.params.get('debug_printtraffic') else 0
    https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
    ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
    redirect_handler = YoutubeDLRedirectHandler()
    data_handler = compat_urllib_request_DataHandler()

    # When passing our own FileHandler instance, build_opener won't add the
    # default FileHandler and allows us to disable the file protocol, which
    # can be used for malicious purposes (see
    # https://github.com/ytdl-org/youtube-dl/issues/8227)
    file_handler = compat_urllib_request.FileHandler()

    def file_open(*args, **kwargs):
        raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
    file_handler.file_open = file_open

    opener = compat_urllib_request.build_opener(
        proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

    # Delete the default user-agent header, which would otherwise apply in
    # cases where our custom HTTP handler doesn't come into play
    # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
    opener.addheaders = []
    self._opener = opener
2014-03-30 06:02:41 +02:00
def encode(self, s):
    """Encode text s with the configured encoding; bytes pass through."""
    if isinstance(s, bytes):
        return s  # already encoded
    encoding = self.get_encoding()
    try:
        return s.encode(encoding)
    except UnicodeEncodeError as err:
        # Enrich the error with a user-facing hint before re-raising.
        err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
        raise
def get_encoding(self):
    """Return the user-selected encoding, falling back to the system one."""
    configured = self.params.get('encoding')
    return preferredencoding() if configured is None else configured
2015-01-25 03:11:12 +01:00
def _write_thumbnails(self, info_dict, filename):
    """Download thumbnail(s) next to *filename* per the thumbnail options.

    --write-thumbnail saves only the last (best) thumbnail;
    --write-all-thumbnails saves every one; otherwise nothing is done.
    Each saved path is stored back into the thumbnail dict as 'filename'.
    """
    if self.params.get('writethumbnail', False):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails:
            # Thumbnails are ordered worst-to-best; keep the best one.
            thumbnails = [thumbnails[-1]]
    elif self.params.get('write_all_thumbnails', False):
        thumbnails = info_dict.get('thumbnails')
    else:
        return
    if not thumbnails:
        # No thumbnails present, so return immediately
        return

    for t in thumbnails:
        thumb_ext = determine_ext(t['url'], 'jpg')
        # Suffix/display id only matter when several thumbnails are saved.
        suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
        thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
        t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext

        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
            self.to_screen('[%s] %s: Thumbnail %sis already present' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
        else:
            self.to_screen('[%s] %s: Downloading thumbnail %s...' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
            try:
                uf = self.urlopen(t['url'])
                with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                    shutil.copyfileobj(uf, thumbf)
                self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                # Thumbnail failures are non-fatal; warn and continue.
                self.report_warning('Unable to download thumbnail "%s": %s' %
                                    (t['url'], error_to_compat_str(err)))