1
0
mirror of https://codeberg.org/polarisfm/youtube-dl synced 2024-11-09 03:34:32 +01:00
youtube-dl/youtube_dl/extractor/redtube.py

116 lines
4.2 KiB
Python
Raw Normal View History

2014-01-21 14:16:44 +01:00
from __future__ import unicode_literals
import re
2013-06-23 22:27:16 +02:00
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
str_to_int,
unified_strdate,
)
2013-06-23 22:27:16 +02:00
class RedTubeIE(InfoExtractor):
    """Extractor for redtube.com videos.

    Matches both regular watch URLs (www.redtube.com/<id>) and embed
    URLs (embed.redtube.com/?...id=<id>); both resolve to the same
    numeric video id and are scraped via the canonical watch page.
    """
    _VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.redtube.com/66418',
        'md5': 'fc08071233725f26b8f014dba9590005',
        'info_dict': {
            'id': '66418',
            'ext': 'mp4',
            'title': 'Sucked on a toilet',
            'upload_date': '20110811',
            'duration': 596,
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        """Return RedTube embed iframe URLs found in a third-party page.

        Used by the generic extractor to delegate embedded players here.
        """
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Always fetch the canonical watch page, even when the input was
        # an embed URL — the embed page lacks the metadata scraped below.
        webpage = self._download_webpage(
            'http://www.redtube.com/%s' % video_id, video_id)

        if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
            raise ExtractorError('Video %s has been removed' % video_id, expected=True)

        # Try the on-page heading / inline JS title first; fall back to
        # the OpenGraph title if neither pattern matches.
        title = self._html_search_regex(
            (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
             r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
            webpage, 'title', group='title',
            default=None) or self._og_search_title(webpage)

        formats = []
        # Older pages expose a "sources" JS object mapping quality -> URL.
        sources = self._parse_json(
            self._search_regex(
                r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'),
            video_id, fatal=False)
        if sources and isinstance(sources, dict):
            for format_id, format_url in sources.items():
                if format_url:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                        # Quality keys are pixel heights (e.g. '720').
                        'height': int_or_none(format_id),
                    })
        # Newer pages expose a "mediaDefinition" JSON *list*; the fallback
        # default must therefore be '[]' — the previous '{}' parsed to a
        # dict and was silently discarded by the isinstance check below.
        medias = self._parse_json(
            self._search_regex(
                r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
                'media definitions', default='[]'),
            video_id, fatal=False)
        if medias and isinstance(medias, list):
            for media in medias:
                format_url = media.get('videoUrl')
                if not format_url or not isinstance(format_url, compat_str):
                    continue
                format_id = media.get('quality')
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'height': int_or_none(format_id),
                })
        # Last resort: a plain <source> tag in the page markup.
        if not formats:
            video_url = self._html_search_regex(
                r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
            formats.append({'url': video_url})
        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)
        upload_date = unified_strdate(self._search_regex(
            r'<span[^>]+>ADDED ([^<]+)<',
            webpage, 'upload date', fatal=False))
        duration = int_or_none(self._og_search_property(
            'video:duration', webpage, default=None) or self._search_regex(
            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
        view_count = str_to_int(self._search_regex(
            (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)'),
            webpage, 'view count', fatal=False))

        # No self-labeling, but they describe themselves as
        # "Home of Videos Porno"
        age_limit = 18

        return {
            'id': video_id,
            'ext': 'mp4',
            'title': title,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        }