Mirror of https://github.com/yt-dlp/yt-dlp.git, synced 2024-12-02 11:26:41 +00:00
[compat] Add compat_urllib_parse_urlencode and eliminate encode_dict
encode_dict functionality has been improved and moved directly into compat_urllib_parse_urlencode. All occurrences of compat_urllib_parse.urlencode throughout the codebase have been replaced by compat_urllib_parse_urlencode.

Closes #8974
This commit is contained in:
parent 2156f16ca7
commit 15707c7e02
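The call-site change is mechanical throughout: the module-qualified `compat_urllib_parse.urlencode(...)` becomes `compat_urllib_parse_urlencode(...)`, and explicit `encode_dict(...)` wrapping is dropped because the new shim encodes recursively on Python 2. A minimal before/after sketch (hypothetical form values):

```python
from youtube_dl.compat import compat_urllib_parse_urlencode

login_form = {'username': 'user', 'password': 'secret'}  # hypothetical values

# Before this commit (Python 2 needed manual byte-encoding):
#   data = compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8')

# After this commit (the shim handles unicode itself):
data = compat_urllib_parse_urlencode(login_form).encode('utf-8')
```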
@@ -169,6 +169,31 @@ def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace')
     string = string.replace('+', ' ')
     return compat_urllib_parse_unquote(string, encoding, errors)

+try:
+    from urllib.parse import urlencode as compat_urllib_parse_urlencode
+except ImportError:  # Python 2
+    # Python 2 will choke in urlencode on mixture of byte and unicode strings.
+    # Possible solutions are to either port it from python 3 with all
+    # the friends or manually ensure input query contains only byte strings.
+    # We will stick with latter thus recursively encoding the whole query.
+    def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
+        def encode_elem(e):
+            if isinstance(e, dict):
+                e = encode_dict(e)
+            elif isinstance(e, (list, tuple,)):
+                e = encode_list(e)
+            elif isinstance(e, compat_str):
+                e = e.encode(encoding)
+            return e
+
+        def encode_dict(d):
+            return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
+
+        def encode_list(l):
+            return [encode_elem(e) for e in l]
+
+        return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
+
 try:
     from urllib.request import DataHandler as compat_urllib_request_DataHandler
 except ImportError:  # Python < 3.4

@@ -588,6 +613,7 @@ def compat_itertools_count(start=0, step=1):
     'compat_urllib_parse_unquote',
     'compat_urllib_parse_unquote_plus',
     'compat_urllib_parse_unquote_to_bytes',
+    'compat_urllib_parse_urlencode',
     'compat_urllib_parse_urlparse',
     'compat_urllib_request',
     'compat_urllib_request_DataHandler',
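The fallback above exists because Python 2's `urllib.urlencode` effectively calls `str()` on each value, so unicode values containing non-ASCII characters raise `UnicodeEncodeError`; the shim sidesteps this by recursively encoding the whole query to UTF-8 bytes first. A quick sketch of the resulting behaviour (illustrative values only):

```python
# -*- coding: utf-8 -*-
from youtube_dl.compat import compat_urllib_parse_urlencode

# Works identically on Python 2 and 3; plain urllib.urlencode would
# raise UnicodeEncodeError on Python 2 for the non-ASCII value.
print(compat_urllib_parse_urlencode({'q': u'Füße'}))
# q=F%C3%BC%C3%9Fe
```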
@@ -6,7 +6,7 @@
 from ..compat import (
     compat_HTTPError,
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (

@@ -60,7 +60,7 @@ def _real_extract(self, url):
         confirm_url = (
             parsed_url.scheme + '://' + parsed_url.netloc +
             action + '?' +
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
         self._download_webpage(
             confirm_url, video_id,
@@ -9,7 +9,6 @@
 )
 from ..utils import (
     determine_ext,
-    encode_dict,
     extract_attributes,
     ExtractorError,
     sanitized_Request,

@@ -71,7 +70,7 @@ def _login(self):
         post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

         request = sanitized_Request(
-            post_url, urlencode_postdata(encode_dict(login_form)))
+            post_url, urlencode_postdata(login_form))
         request.add_header('Referer', self._LOGIN_URL)

         response = self._download_webpage(
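Call sites like the one above can drop `encode_dict` because `urlencode_postdata` in utils is itself built on the new shim, which already performs the recursive encoding. Roughly (a sketch of the helper as of this change; the utils hunk is not shown in this diff):

```python
# youtube_dl/utils.py (sketch, not part of the hunks shown here)
from youtube_dl.compat import compat_urllib_parse_urlencode


def urlencode_postdata(*args, **kargs):
    # The shim byte-encodes unicode recursively, and percent-encoded
    # output is pure ASCII, so the final encode cannot fail.
    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
```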
@@ -8,7 +8,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     int_or_none,

@@ -86,7 +86,7 @@ def _login(self):
         }

         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)
@@ -5,7 +5,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_str,
 )
 from ..utils import (

@@ -58,7 +58,7 @@ def _login(self):
         }

         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Referer', self._LOGIN_URL)
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)
@@ -6,7 +6,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -139,7 +139,7 @@ def _real_extract(self, url):
         parsed_url = list(compat_urlparse.urlparse(url))
         query = dict(compat_urlparse.parse_qsl(parsed_url[4]))
         query.update({'displayMode': 'list'})
-        parsed_url[4] = compat_urllib_parse.urlencode(query)
+        parsed_url[4] = compat_urllib_parse_urlencode(query)
         final_url = compat_urlparse.urlunparse(parsed_url)

         page = self._download_webpage(final_url, folder_id)
@@ -5,8 +5,8 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (

@@ -102,7 +102,7 @@ def _real_extract(self, url):

         req = sanitized_Request(
             'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
-            data=compat_urllib_parse.urlencode(data))
+            data=compat_urllib_parse_urlencode(data))

         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         req.add_header('x-addr', '127.0.0.1')
@@ -6,7 +6,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_HTTPError,
 )
 from ..utils import (

@@ -64,7 +64,7 @@ def _extract_video(self, video_host, video_id, file_key, error_url=None, try_num
             'errorUrl': error_url,
         })

-        data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
+        data_url = self._API_URL % (video_host, compat_urllib_parse_urlencode(form))
         player_data = self._download_webpage(
             data_url, video_id, 'Downloading player data')
         data = compat_parse_qs(player_data)
@@ -5,7 +5,7 @@
 from .mtv import MTVServicesInfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,

@@ -201,7 +201,7 @@ def _real_extract(self, url):
         # Correct cc.com in uri
         uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.com', uri)

-        index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
+        index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse_urlencode({'uri': uri}))
         idoc = self._download_xml(
             index_url, epTitle,
             'Downloading show index', 'Unable to download episode index')
@@ -21,7 +21,7 @@
     compat_os_name,
     compat_str,
     compat_urllib_error,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -1300,7 +1300,7 @@ def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_para
                     'plugin': 'flowplayer-3.2.0.1',
                 }
                 f4m_url += '&' if '?' in f4m_url else '?'
-                f4m_url += compat_urllib_parse.urlencode(f4m_params)
+                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                 formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
                 continue
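The `f4m_url` lines above show the recurring pattern for appending parameters to a URL that may or may not already carry a query string. As a standalone sketch (`append_query` is a hypothetical helper; the URL and parameter are made up for illustration):

```python
from youtube_dl.compat import compat_urllib_parse_urlencode


def append_query(url, params):
    # Pick the separator depending on whether a query string exists yet.
    url += '&' if '?' in url else '?'
    return url + compat_urllib_parse_urlencode(params)


print(append_query('http://example.com/manifest.f4m', {'hdcore': '3.2.0'}))
# http://example.com/manifest.f4m?hdcore=3.2.0
```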
@@ -5,7 +5,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
 )

@@ -97,7 +97,7 @@ def _extract_video(self, webpage, url_type):
         video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
         player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id')
         target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target')
-        data = compat_urllib_parse.urlencode({'videoId': video_id,
+        data = compat_urllib_parse_urlencode({'videoId': video_id,
                                               'playerId': player_id,
                                               'target': target,
                                               })
@@ -11,8 +11,8 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_etree_fromstring,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
     compat_urllib_request,
     compat_urlparse,
 )

@@ -78,7 +78,7 @@ def _add_skip_wall(url):
         # See https://github.com/rg3/youtube-dl/issues/7202.
         qs['skip_wall'] = ['1']
         return compat_urlparse.urlunparse(
-            parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+            parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))


 class CrunchyrollIE(CrunchyrollBaseIE):

@@ -308,7 +308,7 @@ def _real_extract(self, url):

         playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
         playerdata_req = sanitized_Request(playerdata_url)
-        playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
+        playerdata_req.data = compat_urllib_parse_urlencode({'current_page': webpage_url})
         playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
         playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')

@@ -322,7 +322,7 @@ def _real_extract(self, url):
         streamdata_req = sanitized_Request(
             'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
             % (stream_id, stream_format, stream_quality),
-            compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8'))
+            compat_urllib_parse_urlencode({'current_page': url}).encode('utf-8'))
         streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
         streamdata = self._download_xml(
             streamdata_req, video_id,
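Note the second argument in `compat_urllib_parse_urlencode(qs, True)` above: it is `doseq`, which expands sequence values, such as the lists produced by `compat_parse_qs`, into repeated parameters instead of quoting the Python list literal. For example (made-up query values; parameter order depends on dict ordering):

```python
from youtube_dl.compat import compat_urllib_parse_urlencode

qs = {'skip_wall': ['1']}  # parse_qs-style dict of lists
print(compat_urllib_parse_urlencode(qs, True))
# skip_wall=1
print(compat_urllib_parse_urlencode({'ids': ['a', 'b']}, True))
# ids=a&ids=b
```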
@@ -8,8 +8,8 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -70,7 +70,7 @@ class DaumIE(InfoExtractor):

     def _real_extract(self, url):
         video_id = compat_urllib_parse_unquote(self._match_id(url))
-        query = compat_urllib_parse.urlencode({'vid': video_id})
+        query = compat_urllib_parse_urlencode({'vid': video_id})
         movie_data = self._download_json(
             'http://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?' + query,
             video_id, 'Downloading video formats info')

@@ -86,7 +86,7 @@ def _real_extract(self, url):
         formats = []
         for format_el in movie_data['output_list']['output_list']:
             profile = format_el['profile']
-            format_query = compat_urllib_parse.urlencode({
+            format_query = compat_urllib_parse_urlencode({
                 'vid': video_id,
                 'profile': profile,
             })
@@ -6,7 +6,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_str,
 )
 from ..utils import (

@@ -106,7 +106,7 @@ def _real_extract(self, url):

         webpage = self._download_webpage(
             'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' +
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'id': video_data['id'],
                 'user_id': video_data['user_id'],
                 'signature': video_data['signature'],

@@ -133,7 +133,7 @@ def _real_extract(self, url):

         webpage = self._download_webpage(
             'http://admin.mangomolo.com/analytics/index.php/customers/embed/index?' +
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'id': base64.b64encode(channel_data['user_id'].encode()).decode(),
                 'channelid': base64.b64encode(channel_data['id'].encode()).decode(),
                 'signature': channel_data['signature'],

@@ -174,7 +174,7 @@ def _real_extract(self, url):
         data['show_id'] = show_id
         request = sanitized_Request(
             'http://admin.mangomolo.com/analytics/index.php/plus/show',
-            compat_urllib_parse.urlencode(data),
+            compat_urllib_parse_urlencode(data),
             {
                 'Origin': 'http://www.dcndigital.ae',
                 'Content-Type': 'application/x-www-form-urlencoded'
@@ -6,7 +6,7 @@
 from .amp import AMPIE
 from ..compat import (
     compat_HTTPError,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -50,7 +50,7 @@ def _login(self):
         }

         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)
@@ -3,7 +3,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     unescapeHTML

@@ -43,7 +43,7 @@ def _login(self):
         if username is None:
             return

-        query = compat_urllib_parse.urlencode({
+        query = compat_urllib_parse_urlencode({
             'username': username,
             'password': password,
             'url': 'http://www.eroprofile.com/',
@@ -5,12 +5,11 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
-    encode_dict,
     ExtractorError,
     sanitized_Request,
 )

@@ -57,7 +56,7 @@ def _login(self):
             'Submit': ' Login ',
         }

-        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
+        login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8')
         request = sanitized_Request(
             'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
@@ -4,8 +4,8 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
     compat_parse_qs,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
 )

@@ -109,7 +109,7 @@ def _real_extract(self, url):

         response = self._download_json(
             'https://syn.5min.com/handlers/SenseHandler.ashx?' +
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'func': 'GetResults',
                 'playlist': video_id,
                 'sid': sid,
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,

@@ -42,7 +42,7 @@ def _call_api(self, method, video_id, api_key, note, secret=None):
         }
         if secret:
             query['secret'] = secret
-        data = self._download_json(self._API_BASE_URL + compat_urllib_parse.urlencode(query), video_id, note)
+        data = self._download_json(self._API_BASE_URL + compat_urllib_parse_urlencode(query), video_id, note)
         if data['stat'] != 'ok':
             raise ExtractorError(data['message'])
         return data
@@ -5,7 +5,6 @@
 from ..utils import (
     clean_html,
     determine_ext,
-    encode_dict,
     int_or_none,
     sanitized_Request,
     ExtractorError,

@@ -54,10 +53,10 @@ def _login(self):
         (username, password) = self._get_login_info()
         if username is None:
             return
-        data = urlencode_postdata(encode_dict({
+        data = urlencode_postdata({
             'email_field': username,
             'password_field': password,
-        }))
+        })
         login_request = sanitized_Request('http://www.funimation.com/login', data, headers={
             'User-Agent': 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0',
             'Content-Type': 'application/x-www-form-urlencoded'
@@ -3,7 +3,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     remove_end,
     HEADRequest,

@@ -123,7 +123,7 @@ def _login(self, webpage_url, display_id):
             'password': password,
         }

-        request = sanitized_Request(login_url, compat_urllib_parse.urlencode(login_form))
+        request = sanitized_Request(login_url, compat_urllib_parse_urlencode(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(request, display_id, 'Logging in')
         start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
@@ -3,7 +3,7 @@
 import base64

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     HEADRequest,

@@ -35,7 +35,7 @@ def _real_extract(self, url):
             r'"contentUrl" content="(.*?)"', webpage, 'content URL')
         return self.url_result(video_url, ie='Youtube')

-        reqdata = compat_urllib_parse.urlencode([
+        reqdata = compat_urllib_parse_urlencode([
             ('mediaType', 's'),
             ('mediaId', video_id),
         ])
@@ -4,7 +4,7 @@
 import time

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,

@@ -28,7 +28,7 @@ def _real_extract(self, url):
         track_id = self._match_id(url)

         data = {'ax': 1, 'ts': time.time()}
-        request = sanitized_Request(url + '?' + compat_urllib_parse.urlencode(data))
+        request = sanitized_Request(url + '?' + compat_urllib_parse_urlencode(data))
         response, urlh = self._download_webpage_handle(
             request, track_id, 'Downloading webpage with the url')
@@ -5,7 +5,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_urlparse,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     xpath_with_ns,

@@ -38,7 +38,7 @@ def _clean_query(query):
         # Other player ids return m3u8 urls
         cleaned_dic['playerid'] = '247'
         cleaned_dic['videokbrate'] = '100000'
-        return compat_urllib_parse.urlencode(cleaned_dic)
+        return compat_urllib_parse_urlencode(cleaned_dic)

     def _real_extract(self, url):
         query = compat_urlparse.urlparse(url).query
@@ -14,7 +14,7 @@
 from ..compat import (
     compat_parse_qs,
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (

@@ -322,7 +322,7 @@ def _login(self):
             'bird_t': timestamp,
         }
         validation_result = self._download_json(
-            'http://kylin.iqiyi.com/validate?' + compat_urllib_parse.urlencode(validation_params), None,
+            'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params), None,
             note='Validate credentials', errnote='Unable to validate credentials')

         MSG_MAP = {

@@ -456,7 +456,7 @@ def get_path_key(x, format_id, segment_index):
                 'QY00001': auth_result['data']['u'],
             })
             api_video_url += '?' if '?' not in api_video_url else '&'
-            api_video_url += compat_urllib_parse.urlencode(param)
+            api_video_url += compat_urllib_parse_urlencode(param)
             js = self._download_json(
                 api_video_url, video_id,
                 note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))

@@ -494,7 +494,7 @@ def get_raw_data(self, tvid, video_id, enc_key, _uuid):
         }

         api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
-            compat_urllib_parse.urlencode(param)
+            compat_urllib_parse_urlencode(param)
         raw_data = self._download_json(api_url, video_id)
         return raw_data
@@ -5,7 +5,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import qualities

@@ -62,7 +62,7 @@ def _real_extract(self, url):
         quality = qualities(self._QUALITIES)

         formats = [{
-            'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse.urlencode({
+            'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse_urlencode({
                 'server': server_id,
                 'camera': camera_id,
                 'sessionId': 'demo',
@@ -6,7 +6,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
     compat_parse_qs,
 )

@@ -71,7 +71,7 @@ def _kaltura_api_call(self, video_id, actions, *args, **kwargs):
             for k, v in a.items():
                 params['%d:%s' % (i, k)] = v

-        query = compat_urllib_parse.urlencode(params)
+        query = compat_urllib_parse_urlencode(params)
         url = self._API_BASE + query
         data = self._download_json(url, video_id, *args, **kwargs)
@@ -5,7 +5,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -90,7 +90,7 @@ def _real_extract(self, url):

         hd_doc = self._download_xml(
             'http://www.laola1.tv/server/hd_video.php?%s'
-            % compat_urllib_parse.urlencode({
+            % compat_urllib_parse_urlencode({
                 'play': video_id,
                 'partner': partner_id,
                 'portal': portal,

@@ -108,7 +108,7 @@ def _real_extract(self, url):

         req = sanitized_Request(
             'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access?%s' %
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'videoId': video_id,
                 'target': VS_TARGETS.get(kind, '2'),
                 'label': _v('label'),
@@ -11,7 +11,7 @@
 from ..compat import (
     compat_ord,
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     determine_ext,

@@ -122,7 +122,7 @@ def _real_extract(self, url):
             'domain': 'www.le.com'
         }
         play_json_req = sanitized_Request(
-            'http://api.le.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
+            'http://api.le.com/mms/out/video/playJson?' + compat_urllib_parse_urlencode(params)
         )
         cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
         if cn_verification_proxy:

@@ -151,7 +151,7 @@ def _real_extract(self, url):
         for format_id in formats:
             if format_id in dispatch:
                 media_url = playurl['domain'][0] + dispatch[format_id][0]
-                media_url += '&' + compat_urllib_parse.urlencode({
+                media_url += '&' + compat_urllib_parse_urlencode({
                     'm3v': 1,
                     'format': 1,
                     'expect': 3,

@@ -305,7 +305,7 @@ def get_play_json(cf, timestamp):
             }
             self.sign_data(data)
             return self._download_json(
-                'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse.urlencode(data),
+                'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse_urlencode(data),
                 media_id, 'Downloading playJson data for type %s' % cf)

         play_json = get_play_json(cf, time.time())
@@ -6,7 +6,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,

@@ -36,7 +36,7 @@ def _login(self):
             'stayPut': 'false'
         }
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         login_page = self._download_webpage(
             request, None, 'Logging in as %s' % username)

@@ -65,7 +65,7 @@ def _login(self):
             'stayPut': 'false',
         }
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(confirm_form).encode('utf-8'))
         login_page = self._download_webpage(
             request, None,
             'Confirming log in and log out from another device')
@@ -4,7 +4,7 @@
 import random

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     sanitized_Request,
     xpath_text,

@@ -29,7 +29,7 @@ class MatchTVIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = 'matchtv-live'
         request = sanitized_Request(
-            'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse.urlencode({
+            'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse_urlencode({
                 'ts': '',
                 'quality': 'SD',
                 'contentId': '561d2c0df7159b37178b4567',
@@ -5,8 +5,8 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     determine_ext,

@@ -117,7 +117,7 @@ def _real_initialize(self):
             'filters': '0',
             'submit': "Continue - I'm over 18",
         }
-        request = sanitized_Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+        request = sanitized_Request(self._FILTER_POST, compat_urllib_parse_urlencode(disclaimer_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self.report_age_confirmation()
         self._download_webpage(request, None, False, 'Unable to confirm age')
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     int_or_none,
     parse_duration,

@@ -39,7 +39,7 @@ def _real_extract(self, url):
         ]
         req = sanitized_Request(
             'http://minhateca.com.br/action/License/Download',
-            data=compat_urllib_parse.urlencode(token_data))
+            data=compat_urllib_parse_urlencode(token_data))
         req.add_header('Content-Type', 'application/x-www-form-urlencoded')
         data = self._download_json(
             req, video_id, note='Downloading metadata')
@@ -2,11 +2,10 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
-    encode_dict,
     get_element_by_attribute,
     int_or_none,
 )

@@ -60,7 +59,7 @@ def _real_extract(self, url):
             'sta': '0',
         }
         media = self._download_json(
-            '%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data))),
+            '%s/?%s' % (gat, compat_urllib_parse_urlencode(token_data)),
             display_id, 'Downloading %s JSON' % location['loc'])
         file_ = media.get('file')
         if not file_:
@@ -5,7 +5,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,

@@ -77,7 +77,7 @@ def _real_extract(self, url):
             ],
         ]
         r_json = json.dumps(r)
-        post = compat_urllib_parse.urlencode({'r': r_json})
+        post = compat_urllib_parse_urlencode({'r': r_json})
         req = sanitized_Request(self._API_URL, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
@@ -5,7 +5,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     remove_start,

@@ -88,7 +88,7 @@ def _real_extract(self, url):
         fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
         data = dict(fields)

-        post = compat_urllib_parse.urlencode(data)
+        post = compat_urllib_parse_urlencode(data)
         headers = {
             b'Content-Type': b'application/x-www-form-urlencoded',
         }
@@ -3,7 +3,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,

@@ -58,7 +58,7 @@ def _real_extract(self, url):
         }

         request = sanitized_Request(
-            'http://mooshare.biz/%s' % video_id, compat_urllib_parse.urlencode(download_form))
+            'http://mooshare.biz/%s' % video_id, compat_urllib_parse_urlencode(download_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')

         self._sleep(5, video_id)
@@ -4,7 +4,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_str,
 )
 from ..utils import (

@@ -171,7 +171,7 @@ def _get_feed_query(self, uri):
         data = {'uri': uri}
         if self._LANG:
             data['lang'] = self._LANG
-        return compat_urllib_parse.urlencode(data)
+        return compat_urllib_parse_urlencode(data)

     def _get_videos_info(self, uri):
         video_id = self._id_from_uri(uri)
@@ -1,9 +1,7 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse_urlencode


 class MuzuTVIE(InfoExtractor):

@@ -25,7 +23,7 @@ class MuzuTVIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)

-        info_data = compat_urllib_parse.urlencode({
+        info_data = compat_urllib_parse_urlencode({
            'format': 'json',
            'url': url,
        })

@@ -41,7 +39,7 @@ def _real_extract(self, url):
            if video_info.get('v%s' % quality):
                break

-        data = compat_urllib_parse.urlencode({
+        data = compat_urllib_parse_urlencode({
            'ai': video_id,
            # Even if each time you watch a video the hash changes,
            # it seems to work for different videos, and it will work
@@ -9,8 +9,8 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_ord,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,

@@ -112,7 +112,7 @@ def _real_extract(self, url):
             encxml = compat_urllib_parse_unquote(b)
             if not params.get('domain'):
                 params['domain'] = 'www.myvideo.de'
-            xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
+            xmldata_url = '%s?%s' % (encxml, compat_urllib_parse_urlencode(params))
             if 'flash_playertype=MTV' in xmldata_url:
                 self._downloader.report_warning('avoiding MTV player')
                 xmldata_url = (
@@ -5,7 +5,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -53,8 +53,8 @@ def _real_extract(self, url):
             raise ExtractorError('couldn\'t extract vid and key')
         vid = m_id.group(1)
         key = m_id.group(2)
-        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
-        query_urls = compat_urllib_parse.urlencode({
+        query = compat_urllib_parse_urlencode({'vid': vid, 'inKey': key, })
+        query_urls = compat_urllib_parse_urlencode({
             'masterVid': vid,
             'protocol': 'p2p',
             'inKey': key,
@@ -6,7 +6,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -97,7 +97,7 @@ class NBAIE(InfoExtractor):
     _PAGE_SIZE = 30

     def _fetch_page(self, team, video_id, page):
-        search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse.urlencode({
+        search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse_urlencode({
             'type': 'teamvideo',
             'start': page * self._PAGE_SIZE + 1,
             'npp': (page + 1) * self._PAGE_SIZE + 1,
@@ -8,7 +8,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_str,
     compat_itertools_count,
 )

@@ -153,7 +153,7 @@ def _real_extract(self, url):
             'ids': '[%s]' % song_id
         }
         info = self.query_api(
-            'song/detail?' + compat_urllib_parse.urlencode(params),
+            'song/detail?' + compat_urllib_parse_urlencode(params),
             song_id, 'Downloading song info')['songs'][0]

         formats = self.extract_formats(info)
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals

 from .mtv import MTVServicesInfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode


 class NextMovieIE(MTVServicesInfoExtractor):

@@ -20,7 +20,7 @@ class NextMovieIE(MTVServicesInfoExtractor):
     }]

     def _get_feed_query(self, uri):
-        return compat_urllib_parse.urlencode({
+        return compat_urllib_parse_urlencode({
             'feed': '1505',
             'mgid': uri,
         })
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import sanitized_Request


@@ -40,7 +40,7 @@ def _real_extract(self, url):

         request = sanitized_Request(
             'https://www.nfb.ca/film/%s/player_config' % video_id,
-            compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
+            compat_urllib_parse_urlencode({'getConfig': 'true'}).encode('ascii'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
@@ -7,7 +7,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_urlparse,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse
 )
 from ..utils import (

@@ -38,7 +38,7 @@ def _extract_video(self, info):
         parsed_url = compat_urllib_parse_urlparse(initial_video_url)
         filename, ext = os.path.splitext(parsed_url.path)
         path = '%s_sd%s' % (filename, ext)
-        data = compat_urllib_parse.urlencode({
+        data = compat_urllib_parse_urlencode({
             'type': 'fvod',
             'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
         })

@@ -211,7 +211,7 @@ def _real_extract(self, url):
             r'tab0"[^>]*?>(.*?)</td>',
             webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()

-        data = compat_urllib_parse.urlencode({
+        data = compat_urllib_parse_urlencode({
             'cid': cat_id,
             # This is the default value
             'count': 12,
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals

 from .mtv import MTVServicesInfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode


 class NickIE(MTVServicesInfoExtractor):

@@ -54,7 +54,7 @@ class NickIE(MTVServicesInfoExtractor):
     }]

     def _get_feed_query(self, uri):
-        return compat_urllib_parse.urlencode({
+        return compat_urllib_parse_urlencode({
             'feed': 'nick_arc_player_prime',
             'mgid': uri,
         })
@@ -7,11 +7,10 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
-    encode_dict,
     ExtractorError,
     int_or_none,
     parse_duration,

@@ -101,7 +100,7 @@ def _login(self):
             'mail': username,
             'password': password,
         }
-        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
+        login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8')
         request = sanitized_Request(
             'https://secure.nicovideo.jp/secure/login', login_data)
         login_results = self._download_webpage(

@@ -141,7 +140,7 @@ def _real_extract(self, url):
             r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')

         # Get flv info
-        flv_info_data = compat_urllib_parse.urlencode({
+        flv_info_data = compat_urllib_parse_urlencode({
             'k': thumb_play_key,
             'v': video_id
         })
@@ -8,7 +8,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -75,7 +75,7 @@ def _login(self):
             'username': username,
             'password': password,
         }
-        request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse_urlencode(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')

         login = self._download_json(request, None, 'Logging in as %s' % username)
@@ -7,7 +7,6 @@
 from ..utils import (
     ExtractorError,
     NO_DEFAULT,
-    encode_dict,
     sanitized_Request,
     urlencode_postdata,
 )

@@ -73,7 +72,7 @@ def extract_filekey(default=NO_DEFAULT):
             if not post_url.startswith('http'):
                 post_url = compat_urlparse.urljoin(url, post_url)
             request = sanitized_Request(
-                post_url, urlencode_postdata(encode_dict(fields)))
+                post_url, urlencode_postdata(fields))
             request.add_header('Content-Type', 'application/x-www-form-urlencoded')
             request.add_header('Referer', post_url)
             webpage = self._download_webpage(
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     int_or_none,
     qualities,

@@ -38,7 +38,7 @@ def _real_extract(self, url):
         playlist_id = self._match_id(url)

         config = self._download_json(
-            'http://api.npr.org/query?%s' % compat_urllib_parse.urlencode({
+            'http://api.npr.org/query?%s' % compat_urllib_parse_urlencode({
                 'id': playlist_id,
                 'fields': 'titles,audio,show',
                 'format': 'json',
@@ -9,7 +9,7 @@
     ExtractorError,
     unsmuggle_url,
 )
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode


 class OoyalaBaseIE(InfoExtractor):

@@ -35,7 +35,7 @@ def _extract(self, content_tree_url, video_id, domain='example.org'):
         for supported_format in ('mp4', 'm3u8', 'hds', 'rtmp'):
             auth_data = self._download_json(
                 self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'domain': domain,
                     'supportedFormats': supported_format
                 }),
@@ -65,7 +65,7 @@ def _login(self):

         request = sanitized_Request(
             'https://www.patreon.com/processLogin',
-            compat_urllib_parse.urlencode(login_form).encode('utf-8')
+            compat_urllib_parse_urlencode(login_form).encode('utf-8')
         )
         login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
@@ -5,7 +5,7 @@
 import os.path

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,

@@ -40,7 +40,7 @@ def _real_extract(self, url):

         self._sleep(2, video_id)

-        post = compat_urllib_parse.urlencode(data)
+        post = compat_urllib_parse_urlencode(data)
         headers = {
             b'Content-Type': b'application/x-www-form-urlencoded',
         }
@@ -4,7 +4,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_urlparse,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,

@@ -106,7 +106,7 @@ def _real_extract(self, url):
         })

         info_url = compat_urlparse.urlunparse(
-            parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+            parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))

         json_info = self._download_json(
             info_url, video_id,
@@ -8,7 +8,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -76,7 +76,7 @@ def _login(self):
         post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

         request = sanitized_Request(
-            post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')

         response = self._download_webpage(
@@ -2,8 +2,8 @@
 from __future__ import unicode_literals

 from ..compat import (
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
 )
 from .common import InfoExtractor
 from ..utils import (

@@ -50,7 +50,7 @@ def _real_extract(self, url):
             r'so.addVariable\(\'seccode\',\'([^\']+)\'', webpage, 'sec code')
         max_vid = self._search_regex(
             r'so.addVariable\(\'max_vid\',\'(\d+)\'', webpage, 'max vid')
-        url_params = compat_urllib_parse.urlencode({
+        url_params = compat_urllib_parse_urlencode({
             'VID': file_id,
             'mp4': '1',
             'seccode': sec_code,
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,

@@ -42,7 +42,7 @@ def _real_extract(self, url):
         self._sleep(wait_time, video_id)

         req = sanitized_Request(
-            url, compat_urllib_parse.urlencode(fields), headers)
+            url, compat_urllib_parse_urlencode(fields), headers)
         video_page = self._download_webpage(
             req, video_id, 'Downloading video page')
@@ -4,7 +4,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     determine_ext,
     ExtractorError,

@@ -34,7 +34,7 @@ def _real_extract(self, url):
             expected=True)

         fields = self._hidden_inputs(webpage)
-        post = compat_urllib_parse.urlencode(fields)
+        post = compat_urllib_parse_urlencode(fields)
         req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         webpage = self._download_webpage(
@@ -5,9 +5,7 @@

 from hashlib import sha1
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     determine_ext,

@@ -235,7 +233,7 @@ def _extract_clip(self, url, webpage):
         client_name = 'kolibri-2.0.19-splec4'
         client_location = url

-        videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse.urlencode({
+        videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse_urlencode({
             'access_token': access_token,
             'client_location': client_location,
             'client_name': client_name,

@@ -256,7 +254,7 @@ def _extract_clip(self, url, webpage):
         client_id = g[:2] + sha1(''.join([clip_id, g, access_token, client_location, g, client_name])
                                  .encode('utf-8')).hexdigest()

-        sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse.urlencode({
+        sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse_urlencode({
             'access_token': access_token,
             'client_id': client_id,
             'client_location': client_location,

@@ -270,7 +268,7 @@ def _extract_clip(self, url, webpage):
                                        client_location, source_ids_str, g, client_name])
                               .encode('utf-8')).hexdigest()

-        url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse.urlencode({
+        url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse_urlencode({
             'access_token': access_token,
             'client_id': client_id,
             'client_location': client_location,
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,

@@ -81,7 +81,7 @@ def _real_extract(self, url):
         video = self._download_json(
             '%s/%s/%s?%s' % (
                 api_vars['url'], api_vars['playerType'], api_vars['id'],
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'apiKey': 'sh@hid0nlin3',
                     'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
                 })),
@@ -3,7 +3,7 @@
 import base64

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,

@@ -45,7 +45,7 @@ def _real_extract(self, url):

         download_form = self._hidden_inputs(webpage)
         request = sanitized_Request(
-            url, compat_urllib_parse.urlencode(download_form))
+            url, compat_urllib_parse_urlencode(download_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')

         video_page = self._download_webpage(
@@ -4,7 +4,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     parse_duration,
     sanitized_Request,

@@ -47,7 +47,7 @@ def _real_extract(self, url):
         fields = {
             'method_free': 'Free'
         }
-        post = compat_urllib_parse.urlencode(fields)
+        post = compat_urllib_parse_urlencode(fields)
         req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
@@ -4,7 +4,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import sanitized_Request


@@ -39,7 +39,7 @@ class SinaIE(InfoExtractor):
     ]

     def _extract_video(self, video_id):
-        data = compat_urllib_parse.urlencode({'vid': video_id})
+        data = compat_urllib_parse_urlencode({'vid': video_id})
         url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data,
                                      video_id, 'Downloading video url')
         image_page = self._download_webpage(
@@ -7,7 +7,7 @@
 import uuid

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,

@@ -175,7 +175,7 @@ def _real_extract(self, url):
             video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest()

         request = sanitized_Request(
-            'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form))
+            'http://smotri.com/video/view/url/bot/', compat_urllib_parse_urlencode(video_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')

         video = self._download_json(request, video_id, 'Downloading video JSON')

@@ -338,7 +338,7 @@ def _real_extract(self, url):
             }

             request = sanitized_Request(
-                broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
+                broadcast_url + '/?no_redirect=1', compat_urllib_parse_urlencode(login_form))
             request.add_header('Content-Type', 'application/x-www-form-urlencoded')
             broadcast_page = self._download_webpage(
                 request, broadcast_id, 'Logging in and confirming age')
@@ -6,7 +6,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,

@@ -170,7 +170,7 @@ def _fetch_data(vid_id, mytv=False):
             if retries > 0:
                 download_note += ' (retry #%d)' % retries
             part_info = self._parse_json(self._download_webpage(
-                'http://%s/?%s' % (allot, compat_urllib_parse.urlencode(params)),
+                'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)),
                 video_id, download_note), video_id)

             video_url = part_info['url']
@@ -11,10 +11,9 @@
 from ..compat import (
     compat_str,
     compat_urlparse,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
-    encode_dict,
     ExtractorError,
     int_or_none,
     unified_strdate,

@@ -393,7 +392,7 @@ def _real_extract(self, url):
         query = COMMON_QUERY.copy()
         query['offset'] = 0

-        next_href = base_url + '?' + compat_urllib_parse.urlencode(query)
+        next_href = base_url + '?' + compat_urllib_parse_urlencode(query)

         entries = []
         for i in itertools.count():

@@ -424,7 +423,7 @@ def resolve_permalink_url(candidates):
                 qs = compat_urlparse.parse_qs(parsed_next_href.query)
                 qs.update(COMMON_QUERY)
                 next_href = compat_urlparse.urlunparse(
-                    parsed_next_href._replace(query=compat_urllib_parse.urlencode(qs, True)))
+                    parsed_next_href._replace(query=compat_urllib_parse_urlencode(qs, True)))

         return {
             '_type': 'playlist',

@@ -460,7 +459,7 @@ def _real_extract(self, url):
         if token:
             data_dict['secret_token'] = token

-        data = compat_urllib_parse.urlencode(data_dict)
+        data = compat_urllib_parse_urlencode(data_dict)
         data = self._download_json(
             base_url + data, playlist_id, 'Downloading playlist')

@@ -500,7 +499,8 @@ def _get_collection(self, endpoint, collection_id, **query):
         query['client_id'] = self._CLIENT_ID
         query['linked_partitioning'] = '1'
         query['offset'] = 0
-        data = compat_urllib_parse.urlencode(encode_dict(query))
+        data = compat_urllib_parse_urlencode(query)
+        data = compat_urllib_parse_urlencode(query)
         next_url = '{0}{1}?{2}'.format(self._API_V2_BASE, endpoint, data)

         collected_results = 0
@@ -4,7 +4,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import sanitized_Request


@@ -35,7 +35,7 @@ def _real_extract(self, url):
             (?:id="[^"]+"\s+)?
             value="([^"]*)"
             ''', orig_webpage)
-        post = compat_urllib_parse.urlencode(fields)
+        post = compat_urllib_parse_urlencode(fields)

         self._sleep(12, video_id)
         headers = {
@@ -5,8 +5,8 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -74,7 +74,7 @@ def _real_extract(self, url):
         info_el = self._download_xml(info_url, episode).find('./video/info')

         video_link = info_el.find('videoUrl/link').text
-        token_query = compat_urllib_parse.urlencode({'id': video_link})
+        token_query = compat_urllib_parse_urlencode({'id': video_link})
         token_info = self._download_json(
             embed_data['flashvars']['ov_tk'] + '?' + token_query,
             episode,
@@ -5,7 +5,7 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,

@@ -41,7 +41,7 @@ def _login(self):
             'username': username,
             'password': password,
         }
-        payload = compat_urllib_parse.urlencode(form_data).encode('utf-8')
+        payload = compat_urllib_parse_urlencode(form_data).encode('utf-8')
         request = sanitized_Request(self._LOGIN_URL, payload)
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_page = self._download_webpage(
@@ -9,12 +9,11 @@
 from ..compat import (
     compat_parse_qs,
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
 )
 from ..utils import (
-    encode_dict,
     ExtractorError,
     int_or_none,
     orderedSet,

@@ -82,7 +81,7 @@ def _login(self):
         post_url = compat_urlparse.urljoin(redirect_url, post_url)

         request = sanitized_Request(
-            post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8'))
+            post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Referer', redirect_url)
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)

@@ -250,7 +249,7 @@ def _real_extract(self, url):
         formats = self._extract_m3u8_formats(
             '%s/vod/%s?%s' % (
                 self._USHER_BASE, item_id,
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'allow_source': 'true',
                     'allow_audio_only': 'true',
                     'allow_spectre': 'true',

@@ -442,7 +441,7 @@ def _real_extract(self, url):
         }
         formats = self._extract_m3u8_formats(
             '%s/api/channel/hls/%s.m3u8?%s'
-            % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)),
+            % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
             channel_id, 'mp4')
         self._prefer_source(formats)
@@ -3,7 +3,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_request,
     compat_urlparse,
 )

@@ -71,7 +71,7 @@ def _enroll_course(self, base_url, webpage, course_id):
     def _download_lecture(self, course_id, lecture_id):
         return self._download_json(
             'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?%s' % (
-                course_id, lecture_id, compat_urllib_parse.urlencode({
+                course_id, lecture_id, compat_urllib_parse_urlencode({
                     'video_only': '',
                     'auto_play': '',
                     'fields[lecture]': 'title,description,asset',

@@ -139,7 +139,7 @@ def is_logged(webpage):
         })

         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Referer', self._ORIGIN_URL)
         request.add_header('Origin', self._ORIGIN_URL)
@@ -3,7 +3,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -48,7 +48,7 @@ def get_session_id():
             webpage, 'title').split('/')[0].strip()

         info_url = 'http://vbox7.com/play/magare.do'
-        data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
+        data = compat_urllib_parse_urlencode({'as3': '1', 'vid': video_id})
         info_request = sanitized_Request(info_url, data)
         info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage')
@@ -2,7 +2,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -93,7 +93,7 @@ def _real_extract(self, url):
         headers = {'Referer': 'http://static.cdn-ec.viddler.com/js/arpeggio/v2/embed.html'}
         request = sanitized_Request(
             'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?%s'
-            % compat_urllib_parse.urlencode(query), None, headers)
+            % compat_urllib_parse_urlencode(query), None, headers)
         data = self._download_json(request, video_id)['video']

         formats = []
@@ -12,7 +12,6 @@
 )
 from ..utils import (
     determine_ext,
-    encode_dict,
     ExtractorError,
     InAdvancePagedList,
     int_or_none,

@@ -42,13 +41,13 @@ def _login(self):
         self.report_login()
         webpage = self._download_webpage(self._LOGIN_URL, None, False)
         token, vuid = self._extract_xsrft_and_vuid(webpage)
-        data = urlencode_postdata(encode_dict({
+        data = urlencode_postdata({
             'action': 'login',
             'email': username,
             'password': password,
             'service': 'vimeo',
             'token': token,
-        }))
+        })
         login_request = sanitized_Request(self._LOGIN_URL, data)
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_request.add_header('Referer', self._LOGIN_URL)

@@ -255,10 +254,10 @@ def _verify_video_password(self, url, video_id, webpage):
         if password is None:
             raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
         token, vuid = self._extract_xsrft_and_vuid(webpage)
-        data = urlencode_postdata(encode_dict({
+        data = urlencode_postdata({
             'password': password,
             'token': token,
-        }))
+        })
         if url.startswith('http://'):
             # vimeo only supports https now, but the user can give an http url
             url = url.replace('http://', 'https://')
@@ -274,7 +273,7 @@ def _verify_player_video_password(self, url, video_id):
         password = self._downloader.params.get('videopassword')
         if password is None:
             raise ExtractorError('This video is protected by a password, use the --video-password option')
-        data = urlencode_postdata(encode_dict({'password': password}))
+        data = urlencode_postdata({'password': password})
         pass_url = url + '/check-password'
         password_request = sanitized_Request(pass_url, data)
         password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
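The net effect of the vimeo hunks above is that urlencode_postdata now absorbs the byte-encoding work itself. A minimal sketch of how the patched helper behaves on Python 3, with a sample value that is not from this commit:

# Python 3 reading of the helper redefined in utils.py further down.
from urllib.parse import urlencode

def urlencode_postdata(*args, **kargs):
    # urlencode percent-escapes non-ASCII, so ascii-encoding is safe here
    return urlencode(*args, **kargs).encode('ascii')

print(urlencode_postdata({'password': 'hunter2'}))  # b'password=hunter2'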
@@ -575,7 +574,7 @@ def _login_list_password(self, page_url, list_id, webpage):
         token, vuid = self._extract_xsrft_and_vuid(webpage)
         fields['token'] = token
         fields['password'] = password
-        post = urlencode_postdata(encode_dict(fields))
+        post = urlencode_postdata(fields)
         password_path = self._search_regex(
             r'action="([^"]+)"', login_form, 'password URL')
         password_url = compat_urlparse.urljoin(page_url, password_path)
@@ -7,7 +7,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,

@@ -204,7 +204,7 @@ def _login(self):

         request = sanitized_Request(
             'https://login.vk.com/?act=login',
-            compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         login_page = self._download_webpage(
             request, None, note='Logging in as %s' % username)
@@ -7,7 +7,7 @@
     float_or_none,
     int_or_none,
 )
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode


 class VLiveIE(InfoExtractor):

@@ -43,7 +43,7 @@ def _real_extract(self, url):

         playinfo = self._download_json(
             'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?%s'
-            % compat_urllib_parse.urlencode({
+            % compat_urllib_parse_urlencode({
                 'videoId': long_video_id,
                 'key': key,
                 'ptc': 'http',
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     NO_DEFAULT,

@@ -38,7 +38,7 @@ def _real_extract(self, url):

         if fields['op'] == 'download1':
             self._sleep(3, video_id)  # they do detect when requests happen too fast!
-            post = compat_urllib_parse.urlencode(fields)
+            post = compat_urllib_parse_urlencode(fields)
             req = sanitized_Request(url, post)
             req.add_header('Content-type', 'application/x-www-form-urlencoded')
             webpage = self._download_webpage(
@@ -4,10 +4,9 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
-    encode_dict,
     int_or_none,
     sanitized_Request,
 )

@@ -109,7 +108,7 @@ def _real_extract(self, url):
         if countdown:
             self._sleep(countdown, video_id)

-        post = compat_urllib_parse.urlencode(encode_dict(fields))
+        post = compat_urllib_parse_urlencode(fields)

         req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
@@ -8,6 +8,7 @@
 from .common import InfoExtractor, SearchInfoExtractor
 from ..compat import (
     compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (

@@ -303,7 +304,7 @@ def _get_info(self, video_id, display_id, webpage):
         region = self._search_regex(
             r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
             webpage, 'region', fatal=False, default='US')
-        data = compat_urllib_parse.urlencode({
+        data = compat_urllib_parse_urlencode({
             'protocol': 'http',
             'region': region,
         })
@@ -7,7 +7,7 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,

@@ -170,7 +170,7 @@ def _real_extract(self, url):
             missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids)
             request = sanitized_Request(
                 'https://music.yandex.ru/handlers/track-entries.jsx',
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'entries': ','.join(missing_track_ids),
                     'lang': mu.get('settings', {}).get('lang', 'en'),
                     'external-domain': 'music.yandex.ru',
@@ -8,7 +8,7 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_ord,
 )
 from ..utils import (

@@ -138,7 +138,7 @@ def generate_ep(format, n):
                 '_00' + \
                 '/st/' + self.parse_ext_l(format) + \
                 '/fileid/' + get_fileid(format, n) + '?' + \
-                compat_urllib_parse.urlencode(param)
+                compat_urllib_parse_urlencode(param)
             video_urls.append(video_url)
         video_urls_dict[format] = video_urls
@@ -17,16 +17,15 @@
 from ..compat import (
     compat_chr,
     compat_parse_qs,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
     compat_str,
 )
 from ..utils import (
     clean_html,
-    encode_dict,
     error_to_compat_str,
     ExtractorError,
     float_or_none,

@@ -116,7 +115,7 @@ def _login(self):
             'hl': 'en_US',
         }

-        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
+        login_data = compat_urllib_parse_urlencode(login_form_strs).encode('ascii')

         req = sanitized_Request(self._LOGIN_URL, login_data)
         login_results = self._download_webpage(

@@ -149,7 +148,7 @@ def _login(self):
             'TrustDevice': 'on',
         })

-        tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
+        tfa_data = compat_urllib_parse_urlencode(tfa_form_strs).encode('ascii')

         tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
         tfa_results = self._download_webpage(
@@ -1007,7 +1006,7 @@ def _get_subtitles(self, video_id, webpage):
                 continue
             sub_formats = []
             for ext in self._SUBTITLE_FORMATS:
-                params = compat_urllib_parse.urlencode({
+                params = compat_urllib_parse_urlencode({
                     'lang': lang,
                     'v': video_id,
                     'fmt': ext,

@@ -1056,7 +1055,7 @@ def _get_automatic_captions(self, video_id, webpage):
         if caption_url:
             timestamp = args['timestamp']
             # We get the available subtitles
-            list_params = compat_urllib_parse.urlencode({
+            list_params = compat_urllib_parse_urlencode({
                 'type': 'list',
                 'tlangs': 1,
                 'asrs': 1,

@@ -1075,7 +1074,7 @@ def _get_automatic_captions(self, video_id, webpage):
                 sub_lang = lang_node.attrib['lang_code']
                 sub_formats = []
                 for ext in self._SUBTITLE_FORMATS:
-                    params = compat_urllib_parse.urlencode({
+                    params = compat_urllib_parse_urlencode({
                         'lang': original_lang,
                         'tlang': sub_lang,
                         'fmt': ext,

@@ -1094,7 +1093,7 @@ def _get_automatic_captions(self, video_id, webpage):
             caption_tracks = args['caption_tracks']
             caption_translation_languages = args['caption_translation_languages']
             caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
-            parsed_caption_url = compat_urlparse.urlparse(caption_url)
+            parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
             caption_qs = compat_parse_qs(parsed_caption_url.query)

             sub_lang_list = {}

@@ -1110,7 +1109,7 @@ def _get_automatic_captions(self, video_id, webpage):
                     'fmt': [ext],
                 })
                 sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
                    query=compat_urllib_parse_urlencode(caption_qs, True)))
                 sub_formats.append({
                     'url': sub_url,
                     'ext': ext,

@@ -1140,7 +1139,7 @@ def _mark_watched(self, video_id, video_info):
             'cpn': [cpn],
         })
         playback_url = compat_urlparse.urlunparse(
-            parsed_playback_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+            parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))

         self._download_webpage(
             playback_url, video_id, 'Marking watched',
@@ -1225,7 +1224,7 @@ def add_dash_mpd(video_info):
                 # this can be viewed without login into Youtube
                 url = proto + '://www.youtube.com/embed/%s' % video_id
                 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
-                data = compat_urllib_parse.urlencode({
+                data = compat_urllib_parse_urlencode({
                     'video_id': video_id,
                     'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                     'sts': self._search_regex(

@@ -2085,7 +2084,7 @@ def _get_n_results(self, query, n):
                 'spf': 'navigate',
             }
             url_query.update(self._EXTRA_QUERY_ARGS)
-            result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
+            result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
             data = self._download_json(
                 result_url, video_id='query "%s"' % query,
                 note='Downloading page %s' % pagenum,
@@ -47,6 +47,7 @@
     compat_str,
     compat_urllib_error,
     compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urllib_request,
     compat_urlparse,
@@ -1315,7 +1316,7 @@ def shell_quote(args):
 def smuggle_url(url, data):
     """ Pass additional data in a URL for internal use. """

-    sdata = compat_urllib_parse.urlencode(
+    sdata = compat_urllib_parse_urlencode(
         {'__youtubedl_smuggle': json.dumps(data)})
     return url + '#' + sdata
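Since the full body of smuggle_url is visible in the hunk above, a quick usage sketch shows what the renamed call produces. The example URL and payload are hypothetical, and the compat alias is inlined to its Python 3 stdlib equivalent:

import json
from urllib.parse import urlencode

def smuggle_url(url, data):
    # same body as the patched helper: data rides along in the URL fragment
    sdata = urlencode({'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata

print(smuggle_url('http://example.com/video', {'force_videoid': '42'}))
# http://example.com/video#__youtubedl_smuggle=%7B%22force_videoid%22%3A+%2242%22%7D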
@@ -1789,22 +1790,15 @@ def fixup(url):


 def urlencode_postdata(*args, **kargs):
-    return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
+    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')


 def update_url_query(url, query):
     parsed_url = compat_urlparse.urlparse(url)
     qs = compat_parse_qs(parsed_url.query)
     qs.update(query)
-    qs = encode_dict(qs)
     return compat_urlparse.urlunparse(parsed_url._replace(
-        query=compat_urllib_parse.urlencode(qs, True)))
-
-
-def encode_dict(d, encoding='utf-8'):
-    def encode(v):
-        return v.encode(encoding) if isinstance(v, compat_basestring) else v
-    return dict((encode(k), encode(v)) for k, v in d.items())
+        query=compat_urllib_parse_urlencode(qs, True)))


 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
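With encode_dict gone, update_url_query relies on the compat wrapper to handle unicode values. A Python 3 sketch of the function as it stands after this hunk, with the compat aliases replaced by their stdlib equivalents:

from urllib.parse import parse_qs, urlencode, urlparse, urlunparse

def update_url_query(url, query):
    parsed_url = urlparse(url)
    qs = parse_qs(parsed_url.query)
    qs.update(query)
    # doseq=True because parse_qs maps each key to a list of values
    return urlunparse(parsed_url._replace(query=urlencode(qs, True)))

print(update_url_query('http://example.com/path?a=1', {'b': ['2', '3']}))
# http://example.com/path?a=1&b=2&b=3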