Use urlencode_postdata across the codebase

Sergey M․ 2016-03-26 02:19:24 +06:00
parent 15707c7e02
commit 6e6bc8dae5
36 changed files with 90 additions and 94 deletions
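The change is mechanical: call sites that built POST bodies with compat_urllib_parse_urlencode(...) and then byte-encoded the result by hand now go through the urlencode_postdata() helper from youtube_dl/utils.py, which returns bytes ready to pass as request data. A minimal sketch of the pattern, assuming the helper is a thin wrapper around compat_urllib_parse_urlencode (the exact byte encoding used inside utils.py is an assumption here):

from youtube_dl.compat import compat_urllib_parse_urlencode


def urlencode_postdata(*args, **kargs):
    # Percent-encode a mapping (or sequence of pairs) and return bytes,
    # suitable to pass directly as the data argument of a Request.
    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')


# Call sites change from:
#     data = compat_urllib_parse_urlencode(login_form).encode('utf-8')
# to:
#     data = urlencode_postdata(login_form)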

View file

@@ -6,16 +6,14 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_str,
-    compat_urllib_parse_urlencode,
-)
+from ..compat import compat_str
 from ..utils import (
-    int_or_none,
-    float_or_none,
-    sanitized_Request,
-    xpath_text,
     ExtractorError,
+    float_or_none,
+    int_or_none,
+    sanitized_Request,
+    urlencode_postdata,
+    xpath_text,
 )
@@ -86,7 +84,7 @@ def _login(self):
         }
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)

View file

@@ -4,15 +4,13 @@
 import itertools
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_urlencode,
-    compat_str,
-)
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
-    int_or_none,
     float_or_none,
+    int_or_none,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -58,7 +56,7 @@ def _login(self):
         }
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Referer', self._LOGIN_URL)
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)

View file

@@ -6,13 +6,13 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse_unquote,
-    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (
     ExtractorError,
     float_or_none,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -102,7 +102,7 @@ def _real_extract(self, url):
         req = sanitized_Request(
             'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
-            data=compat_urllib_parse_urlencode(data))
+            data=urlencode_postdata(data))
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         req.add_header('x-addr', '127.0.0.1')

View file

@@ -308,7 +308,7 @@ def _real_extract(self, url):
         playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
         playerdata_req = sanitized_Request(playerdata_url)
-        playerdata_req.data = compat_urllib_parse_urlencode({'current_page': webpage_url})
+        playerdata_req.data = urlencode_postdata({'current_page': webpage_url})
         playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
         playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')

View file

@@ -15,6 +15,7 @@
     sanitized_Request,
     smuggle_url,
     unsmuggle_url,
+    urlencode_postdata,
 )
@@ -174,7 +175,7 @@ def _real_extract(self, url):
             data['show_id'] = show_id
         request = sanitized_Request(
             'http://admin.mangomolo.com/analytics/index.php/plus/show',
-            compat_urllib_parse_urlencode(data),
+            urlencode_postdata(data),
             {
                 'Origin': 'http://www.dcndigital.ae',
                 'Content-Type': 'application/x-www-form-urlencoded'

View file

@@ -6,7 +6,6 @@
 from .amp import AMPIE
 from ..compat import (
     compat_HTTPError,
-    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -14,6 +13,7 @@
     clean_html,
     int_or_none,
     sanitized_Request,
+    urlencode_postdata
 )
@@ -50,7 +50,7 @@ def _login(self):
         }
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, urlencode_postdata(login_form))
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)

View file

@@ -5,13 +5,13 @@
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse_urlencode,
     compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
     ExtractorError,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -56,7 +56,7 @@ def _login(self):
             'Submit': ' Login ',
         }
-        login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8')
+        login_data = urlencode_postdata(login_form_strs)
         request = sanitized_Request(
             'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)

View file

@@ -3,11 +3,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     remove_end,
     HEADRequest,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -123,7 +123,7 @@ def _login(self, webpage_url, display_id):
             'password': password,
         }
-        request = sanitized_Request(login_url, compat_urllib_parse_urlencode(login_form))
+        request = sanitized_Request(login_url, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(request, display_id, 'Logging in')
         start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')

View file

@@ -3,11 +3,11 @@
 import base64
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     HEADRequest,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -35,7 +35,7 @@ def _real_extract(self, url):
                 r'"contentUrl" content="(.*?)"', webpage, 'content URL')
             return self.url_result(video_url, ie='Youtube')
-        reqdata = compat_urllib_parse_urlencode([
+        reqdata = urlencode_postdata([
             ('mediaType', 's'),
             ('mediaId', video_id),
         ])

View file

@@ -4,15 +4,13 @@
 import json
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_str,
-    compat_urllib_parse_urlencode,
-)
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     clean_html,
     int_or_none,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -36,7 +34,7 @@ def _login(self):
             'stayPut': 'false'
         }
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, urlencode_postdata(login_form))
         login_page = self._download_webpage(
             request, None, 'Logging in as %s' % username)
@@ -65,7 +63,7 @@ def _login(self):
             'stayPut': 'false',
         }
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse_urlencode(confirm_form).encode('utf-8'))
+            self._LOGIN_URL, urlencode_postdata(confirm_form))
         login_page = self._download_webpage(
             request, None,
             'Confirming log in and log out from another device')

View file

@@ -6,13 +6,13 @@
 from ..compat import (
     compat_parse_qs,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -117,7 +117,7 @@ def _real_initialize(self):
             'filters': '0',
             'submit': "Continue - I'm over 18",
         }
-        request = sanitized_Request(self._FILTER_POST, compat_urllib_parse_urlencode(disclaimer_form))
+        request = sanitized_Request(self._FILTER_POST, urlencode_postdata(disclaimer_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self.report_age_confirmation()
         self._download_webpage(request, None, False, 'Unable to confirm age')

View file

@@ -2,12 +2,12 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     int_or_none,
     parse_duration,
     parse_filesize,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -39,7 +39,7 @@ def _real_extract(self, url):
         ]
         req = sanitized_Request(
             'http://minhateca.com.br/action/License/Download',
-            data=compat_urllib_parse_urlencode(token_data))
+            data=urlencode_postdata(token_data))
         req.add_header('Content-Type', 'application/x-www-form-urlencoded')
         data = self._download_json(
             req, video_id, note='Downloading metadata')

View file

@@ -5,11 +5,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -77,7 +77,7 @@ def _real_extract(self, url):
             ],
         ]
         r_json = json.dumps(r)
-        post = compat_urllib_parse_urlencode({'r': r_json})
+        post = urlencode_postdata({'r': r_json})
         req = sanitized_Request(self._API_URL, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')

View file

@@ -5,11 +5,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     remove_start,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -88,7 +88,7 @@ def _real_extract(self, url):
         fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
         data = dict(fields)
-        post = compat_urllib_parse_urlencode(data)
+        post = urlencode_postdata(data)
         headers = {
             b'Content-Type': b'application/x-www-form-urlencoded',
         }

View file

@@ -3,10 +3,10 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -58,7 +58,7 @@ def _real_extract(self, url):
         }
         request = sanitized_Request(
-            'http://mooshare.biz/%s' % video_id, compat_urllib_parse_urlencode(download_form))
+            'http://mooshare.biz/%s' % video_id, urlencode_postdata(download_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._sleep(5, video_id)

View file

@@ -1,8 +1,10 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
-from ..utils import sanitized_Request
+from ..utils import (
+    sanitized_Request,
+    urlencode_postdata,
+)
 
 
 class NFBIE(InfoExtractor):
@@ -40,7 +42,7 @@ def _real_extract(self, url):
         request = sanitized_Request(
             'https://www.nfb.ca/film/%s/player_config' % video_id,
-            compat_urllib_parse_urlencode({'getConfig': 'true'}).encode('ascii'))
+            urlencode_postdata({'getConfig': 'true'}))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')

View file

@@ -18,6 +18,7 @@
     sanitized_Request,
     xpath_text,
     determine_ext,
+    urlencode_postdata,
 )
@@ -100,7 +101,7 @@ def _login(self):
             'mail': username,
             'password': password,
         }
-        login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8')
+        login_data = urlencode_postdata(login_form_strs)
         request = sanitized_Request(
             'https://secure.nicovideo.jp/secure/login', login_data)
         login_results = self._download_webpage(

View file

@@ -8,7 +8,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -18,6 +17,7 @@
     float_or_none,
     parse_iso8601,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -75,7 +75,7 @@ def _login(self):
             'username': username,
             'password': password,
         }
-        request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse_urlencode(login_form))
+        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
         login = self._download_json(request, None, 'Logging in as %s' % username)

View file

@@ -5,10 +5,10 @@
 import os.path
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -40,7 +40,7 @@ def _real_extract(self, url):
         self._sleep(2, video_id)
-        post = compat_urllib_parse_urlencode(data)
+        post = urlencode_postdata(data)
         headers = {
             b'Content-Type': b'application/x-www-form-urlencoded',
         }

View file

@@ -8,7 +8,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -17,6 +16,7 @@
     parse_duration,
     qualities,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -76,7 +76,7 @@ def _login(self):
             post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
         request = sanitized_Request(
-            post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
+            post_url, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         response = self._download_webpage(

View file

@@ -1,10 +1,10 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -42,7 +42,7 @@ def _real_extract(self, url):
         self._sleep(wait_time, video_id)
         req = sanitized_Request(
-            url, compat_urllib_parse_urlencode(fields), headers)
+            url, urlencode_postdata(fields), headers)
         video_page = self._download_webpage(
             req, video_id, 'Downloading video page')

View file

@@ -4,11 +4,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     determine_ext,
     ExtractorError,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -34,7 +34,7 @@ def _real_extract(self, url):
                 expected=True)
         fields = self._hidden_inputs(webpage)
-        post = compat_urllib_parse_urlencode(fields)
+        post = urlencode_postdata(fields)
         req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         webpage = self._download_webpage(

View file

@@ -3,11 +3,11 @@
 import base64
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -45,7 +45,7 @@ def _real_extract(self, url):
         download_form = self._hidden_inputs(webpage)
         request = sanitized_Request(
-            url, compat_urllib_parse_urlencode(download_form))
+            url, urlencode_postdata(download_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         video_page = self._download_webpage(

View file

@@ -4,10 +4,10 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     parse_duration,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -47,7 +47,7 @@ def _real_extract(self, url):
         fields = {
             'method_free': 'Free'
         }
-        post = compat_urllib_parse_urlencode(fields)
+        post = urlencode_postdata(fields)
         req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')

View file

@@ -7,12 +7,12 @@
 import uuid
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
     sanitized_Request,
     unified_strdate,
+    urlencode_postdata,
 )
@@ -175,7 +175,7 @@ def _real_extract(self, url):
             video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest()
         request = sanitized_Request(
-            'http://smotri.com/video/view/url/bot/', compat_urllib_parse_urlencode(video_form))
+            'http://smotri.com/video/view/url/bot/', urlencode_postdata(video_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         video = self._download_json(request, video_id, 'Downloading video JSON')
@@ -338,7 +338,7 @@ def _real_extract(self, url):
             }
            request = sanitized_Request(
-                broadcast_url + '/?no_redirect=1', compat_urllib_parse_urlencode(login_form))
+                broadcast_url + '/?no_redirect=1', urlencode_postdata(login_form))
             request.add_header('Content-Type', 'application/x-www-form-urlencoded')
             broadcast_page = self._download_webpage(
                 request, broadcast_id, 'Logging in and confirming age')

View file

@@ -500,7 +500,6 @@ def _get_collection(self, endpoint, collection_id, **query):
         query['linked_partitioning'] = '1'
         query['offset'] = 0
         data = compat_urllib_parse_urlencode(query)
-        data = compat_urllib_parse_urlencode(query)
         next_url = '{0}{1}?{2}'.format(self._API_V2_BASE, endpoint, data)
         collected_results = 0
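The SoundCloud hunk above only drops an accidentally duplicated line: the surviving compat_urllib_parse_urlencode(query) call builds a GET query string that is spliced into next_url, not a POST body, so it stays on the str-returning compat helper. A rough illustration of the distinction (the endpoint URL and query values here are made up for the example):

from youtube_dl.compat import compat_urllib_parse_urlencode
from youtube_dl.utils import urlencode_postdata

query = {'limit': 200, 'linked_partitioning': '1', 'offset': 0}

# Query string: text spliced into a URL, so the str-returning helper fits.
next_url = 'https://api-v2.soundcloud.com/some/endpoint?' + compat_urllib_parse_urlencode(query)

# POST body: urlencode_postdata returns bytes ready to use as request data.
post_body = urlencode_postdata(query)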

View file

@@ -4,8 +4,10 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
-from ..utils import sanitized_Request
+from ..utils import (
+    sanitized_Request,
+    urlencode_postdata,
+)
 
 
 class StreamcloudIE(InfoExtractor):
@@ -35,7 +37,7 @@ def _real_extract(self, url):
             (?:id="[^"]+"\s+)?
             value="([^"]*)"
             ''', orig_webpage)
-        post = compat_urllib_parse_urlencode(fields)
+        post = urlencode_postdata(fields)
         self._sleep(12, video_id)
         headers = {

View file

@@ -5,11 +5,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -41,7 +41,7 @@ def _login(self):
             'username': username,
             'password': password,
         }
-        payload = compat_urllib_parse_urlencode(form_data).encode('utf-8')
+        payload = urlencode_postdata(form_data)
         request = sanitized_Request(self._LOGIN_URL, payload)
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_page = self._download_webpage(

View file

@@ -20,6 +20,7 @@
     parse_duration,
     parse_iso8601,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -81,7 +82,7 @@ def _login(self):
             post_url = compat_urlparse.urljoin(redirect_url, post_url)
         request = sanitized_Request(
-            post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
+            post_url, urlencode_postdata(login_form))
         request.add_header('Referer', redirect_url)
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)

View file

@@ -13,6 +13,7 @@
     int_or_none,
     sanitized_Request,
     unescapeHTML,
+    urlencode_postdata,
 )
@@ -139,7 +140,7 @@ def is_logged(webpage):
         })
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Referer', self._ORIGIN_URL)
         request.add_header('Origin', self._ORIGIN_URL)

View file

@@ -2,13 +2,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_urlencode,
-    compat_urlparse,
-)
+from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -48,7 +46,7 @@ def get_session_id():
             webpage, 'title').split('/')[0].strip()
         info_url = 'http://vbox7.com/play/magare.do'
-        data = compat_urllib_parse_urlencode({'as3': '1', 'vid': video_id})
+        data = urlencode_postdata({'as3': '1', 'vid': video_id})
         info_request = sanitized_Request(info_url, data)
         info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage')

View file

@@ -5,10 +5,7 @@
 import json
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_str,
-    compat_urllib_parse_urlencode,
-)
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -17,6 +14,7 @@
     str_to_int,
     unescapeHTML,
     unified_strdate,
+    urlencode_postdata,
 )
 from .vimeo import VimeoIE
 from .pladform import PladformIE
@@ -204,7 +202,7 @@ def _login(self):
         request = sanitized_Request(
             'https://login.vk.com/?act=login',
-            compat_urllib_parse_urlencode(login_form).encode('utf-8'))
+            urlencode_postdata(login_form))
         login_page = self._download_webpage(
             request, None, note='Logging in as %s' % username)

View file

@@ -2,11 +2,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     NO_DEFAULT,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -38,7 +38,7 @@ def _real_extract(self, url):
         if fields['op'] == 'download1':
             self._sleep(3, video_id)  # they do detect when requests happen too fast!
-        post = compat_urllib_parse_urlencode(fields)
+        post = urlencode_postdata(fields)
         req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         webpage = self._download_webpage(

View file

@@ -4,11 +4,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -108,7 +108,7 @@ def _real_extract(self, url):
         if countdown:
             self._sleep(countdown, video_id)
-        post = compat_urllib_parse_urlencode(fields)
+        post = urlencode_postdata(fields)
         req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')

View file

@@ -5,15 +5,13 @@
 import hashlib
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_str,
-    compat_urllib_parse_urlencode,
-)
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     int_or_none,
     float_or_none,
     sanitized_Request,
+    urlencode_postdata,
 )
@@ -170,14 +168,14 @@ def _real_extract(self, url):
         missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids)
         request = sanitized_Request(
             'https://music.yandex.ru/handlers/track-entries.jsx',
-            compat_urllib_parse_urlencode({
+            urlencode_postdata({
                 'entries': ','.join(missing_track_ids),
                 'lang': mu.get('settings', {}).get('lang', 'en'),
                 'external-domain': 'music.yandex.ru',
                 'overembed': 'false',
                 'sign': mu.get('authData', {}).get('user', {}).get('sign'),
                 'strict': 'true',
-            }).encode('utf-8'))
+            }))
         request.add_header('Referer', url)
         request.add_header('X-Requested-With', 'XMLHttpRequest')

View file

@@ -44,6 +44,7 @@
     unified_strdate,
     unsmuggle_url,
     uppercase_escape,
+    urlencode_postdata,
     ISO3166Utils,
 )
@@ -115,7 +116,7 @@ def _login(self):
             'hl': 'en_US',
         }
-        login_data = compat_urllib_parse_urlencode(login_form_strs).encode('ascii')
+        login_data = urlencode_postdata(login_form_strs)
         req = sanitized_Request(self._LOGIN_URL, login_data)
         login_results = self._download_webpage(
@@ -148,7 +149,7 @@ def _login(self):
             'TrustDevice': 'on',
         })
-        tfa_data = compat_urllib_parse_urlencode(tfa_form_strs).encode('ascii')
+        tfa_data = urlencode_postdata(tfa_form_strs)
         tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
         tfa_results = self._download_webpage(