import itertools
import json
import re

from .common import InfoExtractor, SearchInfoExtractor
from ..utils import (
    compat_urllib_parse,
    compat_urlparse,
    clean_html,
    int_or_none,
)


class YahooIE(InfoExtractor):
    """Extractor for videos hosted on Yahoo screen (screen.yahoo.com)."""
    IE_DESC = u'Yahoo screen'
    _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html'
    _TESTS = [
        {
            u'url': u'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
            u'file': u'214727115.mp4',
            u'md5': u'4962b075c08be8690a922ee026d05e69',
            u'info_dict': {
                u'title': u'Julian Smith & Travis Legg Watch Julian Smith',
                u'description': u'Julian and Travis watch Julian Smith',
            },
        },
        {
            u'url': u'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
            u'file': u'103000935.mp4',
            u'md5': u'd6e6fc6e1313c608f316ddad7b82b306',
            u'info_dict': {
                u'title': u'Codefellas - The Cougar Lies with Spanish Moss',
                u'description': u'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
            },
        },
    ]

    def _real_extract(self, url):
        """Resolve the page to Yahoo's internal long id, then fetch the info."""
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)

        # The page embeds a JSON blob describing the media items.
        media_json = self._search_regex(r'mediaItems: ({.*?})$',
            webpage, u'items', flags=re.MULTILINE)
        first_item = json.loads(media_json)['mediaItems']['query']['results']['mediaObj'][0]
        # The 'meta' field is not always in the video webpage, we request it
        # from another page
        return self._get_info(first_item['id'], video_id)

    def _get_info(self, long_id, video_id):
        """Query Yahoo's YQL endpoint for stream data and build the info dict."""
        # YQL query selecting the HTTP streams for this long id.
        query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
                 ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="US"'
                 ' AND protocol="http"' % long_id)
        params = compat_urllib_parse.urlencode({
            'q': query,
            'env': 'prod',
            'format': 'json',
        })
        query_result = json.loads(self._download_webpage(
            'http://video.query.yahoo.com/v1/public/yql?' + params,
            video_id, u'Downloading video info'))
        info = query_result['query']['results']['mediaObj'][0]
        meta = info['meta']

        formats = []
        for stream in info['streams']:
            fmt = {
                'width': int_or_none(stream.get('width')),
                'height': int_or_none(stream.get('height')),
                'tbr': int_or_none(stream.get('bitrate')),
            }
            host = stream['host']
            path = stream['path']
            if host.startswith('rtmp'):
                # RTMP: the host is the connection url, the path is the play path.
                fmt['url'] = host
                fmt['play_path'] = path
                fmt['ext'] = 'flv'
            else:
                fmt['url'] = compat_urlparse.urljoin(host, path)
            formats.append(fmt)
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': meta['title'],
            'formats': formats,
            'description': clean_html(meta['description']),
            'thumbnail': meta['thumbnail'],
        }


class YahooNewsIE(YahooIE):
    """Extractor for videos on news.yahoo.com; reuses YahooIE._get_info."""
    IE_NAME = 'yahoo:news'
    _VALID_URL = r'http://news\.yahoo\.com/video/.*?-(?P<id>\d*?)\.html'
    _TEST = {
        u'url': u'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
        u'md5': u'67010fdf3a08d290e060a4dd96baa07b',
        u'info_dict': {
            u'id': u'104538833',
            u'ext': u'mp4',
            u'title': u'China Moses Is Crazy About the Blues',
            u'description': u'md5:9900ab8cd5808175c7b3fe55b979bed0',
        },
    }
    # Overwrite YahooIE properties we don't want
    _TESTS = []

    def _real_extract(self, url):
        """Pull the long content id out of the news page, then defer to _get_info."""
        video_id = re.match(self._VALID_URL, url).group('id')
        page = self._download_webpage(url, video_id)
        long_id = self._search_regex(
            r'contentId: \'(.+?)\',', page, u'long id')
        return self._get_info(long_id, video_id)


class YahooSearchIE(SearchInfoExtractor):
    """Search extractor for Yahoo screen (yvsearchN:query)."""
    IE_DESC = u'Yahoo screen search'
    _MAX_RESULTS = 1000
    IE_NAME = u'screen.yahoo:search'
    _SEARCH_KEY = 'yvsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query

        Pages through the JSON search endpoint (30 results per page) and
        returns a playlist dict with up to *n* url_result entries.
        """
        res = {
            '_type': 'playlist',
            'id': query,
            'entries': [],
        }
        for pagenum in itertools.count(0):
            result_url = (u'http://video.search.yahoo.com/search/'
                          u'?p=%s&fr=screen&o=js&gs=0&b=%d'
                          % (compat_urllib_parse.quote_plus(query), pagenum * 30))
            webpage = self._download_webpage(
                result_url, query,
                note='Downloading results page ' + str(pagenum + 1))
            info = json.loads(webpage)
            m = info[u'm']
            results = info[u'results']

            for i, r in enumerate(results):
                if (pagenum * 30) + i >= n:
                    break
                mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
                if mobj is None:
                    # Some hits carry no recognizable video link; previously
                    # this crashed with AttributeError on mobj.group.
                    continue
                e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
                res['entries'].append(e)

            # Terminate on the collected-entry count rather than the loop
            # index `i`, which is unbound when a page returns no results.
            if len(res['entries']) >= n or (m[u'last'] >= (m[u'total'] - 1)):
                break
        return res