[vlive] Updated to new V App/VLive api.

More robust extraction of keys and ids from the website.
Erwin de Haan 2016-02-06 23:37:55 +01:00 committed by Sergey M․
parent 59b35c6745
commit b8b465af3e


@@ -9,17 +9,18 @@
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
-    determine_ext
+    determine_ext,
+    int_or_none
 )
 from ..compat import compat_urllib_parse
 
 
 class VLiveIE(InfoExtractor):
     IE_NAME = 'vlive'
-    # www.vlive.tv/video/ links redirect to m.vlive.tv/video/ for mobile devices
+    # vlive.tv/video/ links redirect to www.vlive.tv/video/
     _VALID_URL = r'https?://(?:(www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
     _TEST = {
-        'url': 'http://m.vlive.tv/video/1326',
+        'url': 'http://www.vlive.tv/video/1326',
         'md5': 'cc7314812855ce56de70a06a27314983',
         'info_dict': {
             'id': '1326',
@@ -28,50 +29,45 @@ class VLiveIE(InfoExtractor):
             'creator': 'Girl\'s Day',
         },
     }
-    _SECRET = 'rFkwZet6pqk1vQt6SxxUkAHX7YL3lmqzUMrU4IDusTo4jEBdtOhNfT4BYYAdArwH'
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
         webpage = self._download_webpage(
-            'http://m.vlive.tv/video/%s' % video_id,
+            'http://www.vlive.tv/video/%s' % video_id,
             video_id, note='Download video page')
 
+        long_video_id = self._search_regex(
+            r'vlive\.tv\.video\.ajax\.request\.handler\.init\("[0-9]+",\s?"[^"]*",\s?"([^"]+)",\s?"[^"]+",\s?"[^"]*",\s?"[^"]*"\)', webpage, 'long_video_id')
+
+        key = self._search_regex(
+            r'vlive\.tv\.video\.ajax\.request\.handler\.init\("[0-9]+",\s?"[^"]*",\s?"[^"]+",\s?"([^"]+)",\s?"[^"]*",\s?"[^"]*"\)', webpage, 'key')
+
         title = self._og_search_title(webpage)
         thumbnail = self._og_search_thumbnail(webpage)
         creator = self._html_search_regex(
-            r'<span[^>]+class="name">([^<>]+)</span>', webpage, 'creator')
+            r'<div class="info_area">\s*<strong[^>]+class="name">([^<>]+)</strong>', webpage, 'creator',fatal=False)
 
-        url = 'http://global.apis.naver.com/globalV/globalV/vod/%s/playinfo?' % video_id
-        msgpad = '%.0f' % (time() * 1000)
-        md = b64encode(
-            hmac.new(self._SECRET.encode('ascii'),
-                     (url[:255] + msgpad).encode('ascii'), sha1).digest()
-        )
-        url += '&' + compat_urllib_parse.urlencode({'msgpad': msgpad, 'md': md})
+        # doct = document type (xml or json), cpt = caption type (vtt or ttml)
+        url = "http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?videoId=%s&key=%s&ptc=http&doct=json&cpt=vtt" % (long_video_id, key)
 
         playinfo = self._download_json(url, video_id, 'Downloading video json')
 
-        if playinfo.get('message', '') != 'success':
-            raise ExtractorError(playinfo.get('message', 'JSON request unsuccessful'))
-
-        if not playinfo.get('result'):
-            raise ExtractorError('No videos found.')
-
         formats = []
-        for vid in playinfo['result'].get('videos', {}).get('list', []):
+        for vid in playinfo.get('videos', {}).get('list', []):
             formats.append({
                 'url': vid['source'],
                 'ext': 'mp4',
                 'abr': vid.get('bitrate', {}).get('audio'),
                 'vbr': vid.get('bitrate', {}).get('video'),
-                'format_id': vid['encodingOption']['name'],
-                'height': vid.get('height'),
-                'width': vid.get('width'),
+                'format_id': vid.get('encodingOption', {}).get('name'),
+                'height': int_or_none(vid.get('encodingOption', {}).get('height')),
+                'width': int_or_none(vid.get('encodingOption', {}).get('width')),
             })
         self._sort_formats(formats)
 
         subtitles = {}
-        for caption in playinfo['result'].get('captions', {}).get('list', []):
+        for caption in playinfo.get('captions', {}).get('list', []):
             subtitles[caption['language']] = [
                 {'ext': determine_ext(caption['source'], default_ext='vtt'),
                  'url': caption['source']}]
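
For context, a minimal standalone sketch (not part of the commit) of the new flow: the long video id and playback key are scraped from the vlive.tv.video.ajax.request.handler.init(...) call on the video page and plugged into the vod_play_videoInfo.json endpoint, using the same regex pattern and URL that appear in the diff above. The fetch_playinfo helper and the single merged regex are illustrative assumptions, not youtube-dl code, and it only works for as long as the page keeps serving that markup.

# Illustrative sketch only; reuses the regex and endpoint from the diff above.
# fetch_playinfo and the merged regex are assumptions, not youtube-dl code.
import json
import re
from urllib.request import urlopen


def fetch_playinfo(video_id):
    page = urlopen('http://www.vlive.tv/video/%s' % video_id).read().decode('utf-8')

    # The init(...) call embeds the long video id (3rd argument) and the
    # playback key (4th argument); the diff captures them with two separate
    # regexes, merged into one here for brevity.
    match = re.search(
        r'vlive\.tv\.video\.ajax\.request\.handler\.init\('
        r'"[0-9]+",\s?"[^"]*",\s?"([^"]+)",\s?"([^"]+)",\s?"[^"]*",\s?"[^"]*"\)',
        page)
    long_video_id, key = match.group(1), match.group(2)

    # The new playinfo endpoint needs no HMAC-signed msgpad/md parameters;
    # doct selects the response format, cpt the caption format.
    playinfo_url = (
        'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json'
        '?videoId=%s&key=%s&ptc=http&doct=json&cpt=vtt' % (long_video_id, key))
    return json.loads(urlopen(playinfo_url).read().decode('utf-8'))


# Example: list the available MP4 renditions for the test video 1326.
if __name__ == '__main__':
    info = fetch_playinfo('1326')
    for vid in info.get('videos', {}).get('list', []):
        print(vid.get('encodingOption', {}).get('name'), vid.get('source'))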