1
0
mirror of https://codeberg.org/polarisfm/youtube-dl synced 2024-11-16 06:34:31 +01:00
youtube-dl/youtube_dl/extractor/facebook.py

153 lines
6.2 KiB
Python
Raw Normal View History

2014-03-04 03:36:54 +01:00
from __future__ import unicode_literals
2013-06-23 20:59:45 +02:00
import json
import re
import socket
from .common import InfoExtractor
from ..utils import (
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_request,
urlencode_postdata,
2013-06-23 20:59:45 +02:00
ExtractorError,
limit_length,
2013-06-23 20:59:45 +02:00
)
class FacebookIE(InfoExtractor):
    """Information extractor for Facebook videos.

    Matches video.php / photo.php / video/embed URLs. When credentials are
    configured (command-line options or .netrc machine 'facebook'), logs in
    during initialization so login-walled videos can be fetched; login
    failures are reported as warnings so public videos still work.
    """
    _VALID_URL = r'''(?x)
        https?://(?:\w+\.)?facebook\.com/
        (?:[^#]*?\#!/)?
        (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
        (?:v|video_id)=(?P<id>[0-9]+)
        (?:.*)'''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'

    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'duration': 38,
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
        }
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }]

    def _login(self):
        """Log in with the configured credentials; a no-op when none are set.

        All failure modes (bad credentials, rate limit, network error,
        unconfirmed checkpoint) emit a downloader warning and return, so
        extraction proceeds unauthenticated rather than aborting.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            return

        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
        # Force an English-locale page so the regexes below match reliably.
        login_page_req.add_header('Cookie', 'locale=en_US')
        login_page = self._download_webpage(login_page_req, None,
            note='Downloading login page',
            errnote='Unable to download login page')
        # 'lsd' and 'lgnrnd' are anti-CSRF tokens embedded in the login form.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                note='Logging in', errnote='unable to fetch login page')
            # If the response still contains the login form, the credentials
            # were rejected (or we hit the login rate limit).
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # Facebook may interpose a security checkpoint after login;
            # acknowledge it without remembering this "browser".
            check_form = {
                'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
                'h': self._search_regex(
                    r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
                'name_action_selected': 'dont_save',
            }
            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning('unable to log in: %s' % compat_str(err))
            return

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        """Extract id, title, URL, duration and thumbnail for a video page."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # Normalize to the canonical video page URL before downloading.
        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        # The player parameters are JSON embedded in inline JS between these
        # two literal snippets.
        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if not m:
            # No player data: surface Facebook's interstitial message if any.
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')
        data = dict(json.loads(m.group(1)))
        params_raw = compat_urllib_parse.unquote(data['params'])
        params = json.loads(params_raw)
        video_data = params['video_data'][0]

        # Prefer the HD source, fall back to SD. Use .get() for both so a
        # missing key reaches the explicit error below instead of raising a
        # bare KeyError (the old video_data['sd_src'] made that raise
        # unreachable for absent keys).
        video_url = video_data.get('hd_src')
        if not video_url:
            video_url = video_data.get('sd_src')
        if not video_url:
            raise ExtractorError('Cannot find video URL')

        video_title = self._html_search_regex(
            r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
            fatal=False)
        if not video_title:
            # Photo pages carry the caption instead of a header title.
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id

        # Duration/thumbnail are not guaranteed in the player params; fall
        # back to None instead of raising KeyError.
        duration = video_data.get('video_duration')
        return {
            'id': video_id,
            'title': video_title,
            'url': video_url,
            'duration': int(duration) if duration is not None else None,
            'thumbnail': video_data.get('thumbnail_src'),
        }