2013-06-27 18:32:02 +00:00
|
|
|
# coding: utf-8
|
2014-09-25 07:58:09 +00:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2013-06-23 20:01:02 +00:00
|
|
|
import re
|
2015-05-28 09:00:09 +00:00
|
|
|
import base64
|
2013-06-23 20:01:02 +00:00
|
|
|
|
|
|
|
from .common import InfoExtractor
|
2015-05-28 09:00:09 +00:00
|
|
|
from ..utils import ExtractorError
|
2013-06-23 20:01:02 +00:00
|
|
|
|
2015-05-29 02:13:09 +00:00
|
|
|
from ..compat import compat_urllib_parse
|
|
|
|
|
|
|
|
bytes_is_str = (bytes == str) # for compatible
|
|
|
|
|
2013-06-23 20:01:02 +00:00
|
|
|
class YoukuIE(InfoExtractor):
    """Extractor for videos hosted on v.youku.com.

    Youku serves each video as one or more segments per stream type
    (format); segment URLs must be signed with an RC4-derived `ep`
    token computed from values returned by the getPlayList API.
    """
    IE_NAME = 'youku'
    _VALID_URL = r'''(?x)
        (?:
            http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
            youku:)
        (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
    '''

    _TEST = {
        'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html',
        'md5': '5f3af4192eabacc4501508d54a8cabd7',
        'info_dict': {
            'id': 'XMTc1ODE5Njcy',
            'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.',
            'ext': 'flv'
        }
    }

    def construct_video_urls(self, data1, data2):
        """Build a dict mapping each stream type (format) in
        data1['streamtypes'] to the list of signed segment URLs.

        data1/data2 are the parsed 'data'[0] objects of the two
        getPlayList API responses.
        """
        def yk_t(s1, s2):
            # RC4-style cipher: key-scheduling over key s1, then a
            # keystream XOR over payload s2.  Returns str on both
            # Python versions (s2 iterates as ints on py3 bytes).
            ls = list(range(256))
            t = 0
            for i in range(256):
                t = (t + ls[i] + ord(s1[i % len(s1)])) % 256
                ls[i], ls[t] = ls[t], ls[i]
            s = '' if not bytes_is_str else b''
            x, y = 0, 0
            for i in range(len(s2)):
                y = (y + 1) % 256
                x = (x + ls[y]) % 256
                ls[x], ls[y] = ls[y], ls[x]
                if isinstance(s2[i], int):
                    # py3: indexing bytes yields an int
                    s += chr(s2[i] ^ ls[(ls[x] + ls[y]) % 256])
                else:
                    s += chr(ord(s2[i]) ^ ls[(ls[x] + ls[y]) % 256])
            return s

        # Decrypt data2['ep'] to recover the session id and token.
        sid, token = yk_t(
            'becaf9be',
            base64.b64decode(bytes(data2['ep'], 'ascii'))
            if not bytes_is_str
            else base64.b64decode(data2['ep'])
        ).split('_')

        # Client IP as seen by the server; echoed back in each URL.
        oip = data2['ip']

        # Shuffle the fileid alphabet with a linear-congruential PRNG
        # seeded by data1['seed']; each draw removes the picked char.
        # NOTE: '\\:' keeps a literal backslash and colon in the set
        # (the original '\:' was an invalid escape sequence).
        string_ls = list(
            'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\\:._-1234567890')
        shuffled_string_ls = []
        seed = data1['seed']
        N = len(string_ls)
        for ii in range(N):
            seed = (seed * 0xd3 + 0x754f) % 0x10000
            idx = seed * len(string_ls) // 0x10000
            shuffled_string_ls.append(string_ls[idx])
            del string_ls[idx]

        # Per-format fileid template; chars 8-9 are replaced with the
        # two-hex-digit segment number via '%s'.
        fileid_dict = {}
        for fmt in data1['streamtypes']:
            streamfileid = [
                int(i) for i in data1['streamfileids'][fmt].strip('*').split('*')]
            fileid = ''.join(
                shuffled_string_ls[i] for i in streamfileid)
            fileid_dict[fmt] = fileid[:8] + '%s' + fileid[10:]

        def get_fileid(fmt, n):
            # n is a decimal string; encode as uppercase hex, 2 digits.
            return fileid_dict[fmt] % hex(int(n))[2:].upper().zfill(2)

        def generate_ep(fmt, n):
            # Sign 'sid_fileid_token' with the second RC4 key and
            # base64-encode the result for the 'ep' query parameter.
            fileid = get_fileid(fmt, n)
            ep_t = yk_t(
                'bf7e5f01',
                bytes('%s_%s_%s' % (sid, fileid, token), 'ascii')
                if not bytes_is_str
                else ('%s_%s_%s' % (sid, fileid, token))
            )
            return base64.b64encode(
                bytes(ep_t, 'latin')
                if not bytes_is_str
                else ep_t
            ).decode()

        # Assemble one signed URL per segment, per format.
        video_urls_dict = {}
        for fmt in data1['streamtypes']:
            video_urls = []
            for dt in data1['segs'][fmt]:
                n = str(int(dt['no']))
                param = {
                    'K': dt['k'],
                    'hd': self.get_hd(fmt),
                    'myp': 0,
                    'ts': dt['seconds'],
                    'ypp': 0,
                    'ctype': 12,
                    'ev': 1,
                    'token': token,
                    'oip': oip,
                    'ep': generate_ep(fmt, n)
                }
                video_url = (
                    'http://k.youku.com/player/getFlvPath/'
                    'sid/' + sid +
                    '_' + str(int(n) + 1).zfill(2) +
                    '/st/' + self.parse_ext_l(fmt) +
                    '/fileid/' + get_fileid(fmt, n) + '?' +
                    compat_urllib_parse.urlencode(param))
                video_urls.append(video_url)
            video_urls_dict[fmt] = video_urls

        return video_urls_dict

    def get_hd(self, fm):
        """Map a stream type to the 'hd' quality id query value."""
        hd_id_dict = {
            'flv': '0',
            'mp4': '1',
            'hd2': '2',
            'hd3': '3',
            '3gp': '0',
            '3gphd': '1',
        }
        return hd_id_dict[fm]

    def parse_ext_l(self, fm):
        """Map a stream type to the container extension ('st' value)."""
        ext_dict = {
            'flv': 'flv',
            'mp4': 'mp4',
            'hd2': 'flv',
            'hd3': 'flv',
            '3gp': 'flv',
            '3gphd': 'mp4',
        }
        return ext_dict[fm]

    def get_format_name(self, fm):
        """Map a stream type to its format_id (h1 best ... h6 worst)."""
        _dict = {
            '3gp': 'h6',
            '3gphd': 'h5',
            'flv': 'h4',
            'mp4': 'h3',
            'hd2': 'h2',
            'hd3': 'h1',
        }
        return _dict[fm]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # Request basic data: data1 describes streams/segments,
        # data2 carries the encrypted ep and client IP.
        data1_url = 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id
        data2_url = 'http://v.youku.com/player/getPlayList/VideoIDS/%s/Pf/4/ctype/12/ev/1' % video_id

        raw_data1 = self._download_json(data1_url, video_id)
        raw_data2 = self._download_json(data2_url, video_id)
        data1 = raw_data1['data'][0]
        data2 = raw_data2['data'][0]

        error_code = data1.get('error_code')
        if error_code:
            # -8 means blocked outside China.
            # Error message is Chinese and English, separated by newline.
            error = data1.get('error')
            raise ExtractorError(
                error or 'Server reported error %i' % error_code,
                expected=True)

        title = data1['title']

        video_urls_dict = self.construct_video_urls(data1, data2)

        # One entry per segment (part); each entry collects one format
        # record per stream type.
        entries = []
        for fm in data1['streamtypes']:
            video_urls = video_urls_dict[fm]
            for i in range(len(video_urls)):
                if len(entries) < i + 1:
                    entries.append({'formats': []})
                entries[i]['formats'].append({
                    'url': video_urls[i],
                    'format_id': self.get_format_name(fm),
                    'ext': self.parse_ext_l(fm),
                    'filesize': int(data1['segs'][fm][i]['size']),
                })

        for i in range(len(entries)):
            entries[i].update({
                'id': '_part%d' % (i + 1),
                'title': title,
            })

        if len(entries) > 1:
            # Multi-part video: wrap parts in a multi_video container.
            info = {
                '_type': 'multi_video',
                'id': video_id,
                'title': title,
                'entries': entries,
            }
        else:
            info = entries[0]
            info['id'] = video_id

        return info
|