mirror of https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-30 12:01:28 +00:00

[utils] Add parse_qs

This commit is contained in:
parent 8fc54b1230
commit 4dfbf8696b
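In short: this commit adds a parse_qs(url) helper to utils (see the final hunk below) and migrates the extractors' long-standing compat_parse_qs(compat_urllib_parse_urlparse(url).query) incantation to it. As a rough sketch of what the helper does, written against the plain Python 3 stdlib names behind the compat_ aliases (the example URL is made up):

    from urllib.parse import parse_qs as _stdlib_parse_qs, urlparse


    def parse_qs(url):
        # Take a full URL, extract its query string, and parse it into a
        # dict mapping each parameter name to a list of string values.
        return _stdlib_parse_qs(urlparse(url).query)


    print(parse_qs('http://example.com/path?a=1&a=2&b=3'))
    # {'a': ['1', '2'], 'b': ['3']}

Each call site that previously spelled out the two-step compat version becomes a one-liner, and several extractors drop their ..compat import block entirely.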
@@ -62,6 +62,7 @@
     parse_iso8601,
     parse_resolution,
     parse_bitrate,
+    parse_qs,
     pkcs1pad,
     read_batch_urls,
     sanitize_filename,
@@ -117,8 +118,6 @@
     compat_getenv,
     compat_os_name,
     compat_setenv,
-    compat_urlparse,
-    compat_parse_qs,
 )
 
 
@@ -688,38 +687,36 @@ def test_urlencode_postdata(self):
         self.assertTrue(isinstance(data, bytes))
 
     def test_update_url_query(self):
-        def query_dict(url):
-            return compat_parse_qs(compat_urlparse.urlparse(url).query)
-        self.assertEqual(query_dict(update_url_query(
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
-            query_dict('http://example.com/path?quality=HD&format=mp4'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?quality=HD&format=mp4'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
-            query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': 'id,formats,subtitles'})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path?manifest=f4m', {'manifest': []})),
-            query_dict('http://example.com/path'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
-            query_dict('http://example.com/path?system=LINUX'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?system=LINUX'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': b'id,formats,subtitles'})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'width': 1080, 'height': 720})),
-            query_dict('http://example.com/path?width=1080&height=720'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?width=1080&height=720'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'bitrate': 5020.43})),
-            query_dict('http://example.com/path?bitrate=5020.43'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?bitrate=5020.43'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'test': '第二行тест'})),
-            query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
+            parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
 
     def test_multipart_encode(self):
         self.assertEqual(
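The deleted query_dict test helper was, up to the compat alias spelling, the same expression the new utility wraps, so these assertions keep their meaning. Roughly, the round-trip they check looks like this (standalone sketch with stdlib names):

    from urllib.parse import parse_qs, urlparse

    # parse_qs always returns lists of strings, which is why a query built
    # from non-string values compares equal to its serialized form.
    url = 'http://example.com/path?width=1080&height=720'
    assert parse_qs(urlparse(url).query) == {'width': ['1080'], 'height': ['720']}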
@@ -4,13 +4,10 @@
 import re
 
 from .yahoo import YahooIE
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_qs,
     url_or_none,
 )
 

@@ -119,7 +116,7 @@ def _real_extract(self, url):
                     'height': int(mobj.group(2)),
                 })
             else:
-                qs = compat_parse_qs(compat_urllib_parse_urlparse(video_url).query)
+                qs = parse_qs(video_url)
                 f.update({
                     'width': int_or_none(qs.get('w', [None])[0]),
                     'height': int_or_none(qs.get('h', [None])[0]),
@@ -9,8 +9,6 @@
 from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
-    compat_urlparse,
-    compat_parse_qs,
     compat_HTTPError
 )
 from ..utils import (

@@ -25,6 +23,7 @@
     merge_dicts,
     mimetype2ext,
     parse_duration,
+    parse_qs,
     RegexNotFoundError,
     str_to_int,
     str_or_none,

@@ -399,7 +398,7 @@ def _extract_title(webpage):
                     expected=True)
                 raise
         video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
-        video_file_url_qs = compat_parse_qs(compat_urlparse.urlparse(video_file_url).query)
+        video_file_url_qs = parse_qs(video_file_url)
 
         # Attempt to recover any ext & format info from playback url
         format = {'url': video_file_url}
|
@ -4,12 +4,12 @@
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_urlparse
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
|
parse_qs,
|
||||||
try_get,
|
try_get,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -69,7 +69,7 @@ def _real_extract(self, url):
|
||||||
|
|
||||||
# Handle http://video.arkena.com/play2/embed/player URL
|
# Handle http://video.arkena.com/play2/embed/player URL
|
||||||
if not video_id:
|
if not video_id:
|
||||||
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
|
qs = parse_qs(url)
|
||||||
video_id = qs.get('mediaId', [None])[0]
|
video_id = qs.get('mediaId', [None])[0]
|
||||||
account_id = qs.get('accountId', [None])[0]
|
account_id = qs.get('accountId', [None])[0]
|
||||||
if not video_id or not account_id:
|
if not video_id or not account_id:
|
||||||
|
|
|
@ -6,11 +6,11 @@
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
compat_str,
|
compat_str,
|
||||||
compat_urlparse,
|
|
||||||
)
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
parse_qs,
|
||||||
qualities,
|
qualities,
|
||||||
try_get,
|
try_get,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
|
@ -204,7 +204,7 @@ def _extract_urls(webpage):
|
||||||
webpage)]
|
webpage)]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
|
qs = parse_qs(url)
|
||||||
json_url = qs['json_url'][0]
|
json_url = qs['json_url'][0]
|
||||||
video_id = ArteTVIE._match_id(json_url)
|
video_id = ArteTVIE._match_id(json_url)
|
||||||
return self.url_result(
|
return self.url_result(
|
||||||
|
|
|
@@ -10,9 +10,7 @@
 from ..compat import (
     compat_etree_Element,
     compat_HTTPError,
-    compat_parse_qs,
     compat_str,
-    compat_urllib_parse_urlparse,
     compat_urlparse,
 )
 from ..utils import (

@@ -26,6 +24,7 @@
     js_to_json,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     strip_or_none,
     try_get,
     unescapeHTML,

@@ -1410,7 +1409,7 @@ def _fetch_page(self, programme_id, per_page, series_id, page):
 
     def _real_extract(self, url):
         pid = self._match_id(url)
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         series_id = qs.get('seriesId', [None])[0]
         page = qs.get('page', [None])[0]
         per_page = 36 if page else self._PAGE_SIZE
@@ -3,10 +3,10 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     int_or_none,
+    parse_qs,
     unified_timestamp,
 )
 

@@ -57,7 +57,7 @@ def _real_extract(self, url):
         query = {
             'v': 2,
         }
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         t = qs.get('t', [''])[0].split('-')
         if len(t) > 1:
             query.update({
@@ -11,7 +11,6 @@
     compat_etree_fromstring,
     compat_HTTPError,
     compat_parse_qs,
-    compat_urllib_parse_urlparse,
     compat_urlparse,
     compat_xml_parse_error,
 )

@@ -26,6 +25,7 @@
     js_to_json,
     mimetype2ext,
     parse_iso8601,
+    parse_qs,
     smuggle_url,
     str_or_none,
     try_get,

@@ -177,7 +177,7 @@ def _build_brightcove_url(cls, object_str):
         flashvars = {}
 
         data_url = object_doc.attrib.get('data', '')
-        data_url_params = compat_parse_qs(compat_urllib_parse_urlparse(data_url).query)
+        data_url_params = parse_qs(data_url)
 
         def find_param(name):
             if name in flashvars:
@@ -4,14 +4,11 @@
 import itertools
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     clean_html,
     float_or_none,
     int_or_none,
+    parse_qs,
     try_get,
     urlencode_postdata,
 )

@@ -145,7 +142,7 @@ def _entries(self, query, url):
             query['from'] += query['size']
 
     def _real_extract(self, url):
-        query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        query = parse_qs(url)
         query['type'] = 'session'
         return self.playlist_result(
             self._entries(query, url), playlist_title='Search query')
@@ -1,12 +1,9 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     float_or_none,
+    parse_qs,
     unified_timestamp,
 )
 

@@ -44,7 +41,7 @@ class ClypIE(InfoExtractor):
     def _real_extract(self, url):
         audio_id = self._match_id(url)
 
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         token = qs.get('token', [None])[0]
 
         query = {}
@@ -6,10 +6,9 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_urllib_parse_unquote,
-    compat_urlparse,
 )
+from ..utils import parse_qs
 
 
 class DaumBaseIE(InfoExtractor):

@@ -155,7 +154,7 @@ def _get_entries(self, list_id, list_id_type):
         return name, entries
 
     def _check_clip(self, url, list_id):
-        query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
+        query_dict = parse_qs(url)
         if 'clipid' in query_dict:
             clip_id = query_dict['clipid'][0]
             if self.get_param('noplaylist'):

@@ -256,7 +255,7 @@ def _real_extract(self, url):
         if clip_result:
             return clip_result
 
-        query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
+        query_dict = parse_qs(url)
         if 'playlistid' in query_dict:
             playlist_id = query_dict['playlistid'][0]
             return self.url_result(DaumPlaylistIE._URL_TEMPLATE % playlist_id, 'DaumPlaylist')
@@ -2,11 +2,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urlparse
 from ..utils import (
     int_or_none,
     orderedSet,
     parse_duration,
+    parse_qs,
     qualities,
     unified_strdate,
     xpath_text

@@ -53,7 +53,7 @@ def get_item(type_, preference):
             if items.get(p):
                 return items[p]
 
-        query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        query = parse_qs(url)
         preferred_lang = query.get('sitelang', ('en', ))[0]
 
         preferred_langs = orderedSet((preferred_lang, 'en', 'int'))
@@ -6,7 +6,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     clean_html,

@@ -14,6 +13,7 @@
     ExtractorError,
     int_or_none,
     parse_duration,
+    parse_qs,
     try_get,
     url_or_none,
     urljoin,

@@ -226,7 +226,7 @@ def _real_extract(self, url):
         catalog = mobj.group('catalog')
 
         if not video_id:
-            qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+            qs = parse_qs(url)
             video_id = qs.get('idDiffusion', [None])[0]
             catalog = qs.get('catalogue', [None])[0]
             if not video_id:
@@ -4,10 +4,7 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urlparse,
-)
+from ..utils import parse_qs
 
 
 class InternetVideoArchiveIE(InfoExtractor):

@@ -32,7 +29,7 @@ def _build_json_url(query):
         return 'http://video.internetvideoarchive.net/player/6/configuration.ashx?' + query
 
     def _real_extract(self, url):
-        query = compat_parse_qs(compat_urlparse.urlparse(url).query)
+        query = parse_qs(url)
         video_id = query['publishedid'][0]
         data = self._download_json(
             'https://video.internetvideoarchive.net/videojs7/videojs7.ivasettings.ashx',
@@ -6,16 +6,15 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
     mimetype2ext,
+    parse_qs,
     OnDemandPagedList,
     try_get,
     urljoin,

@@ -256,7 +255,7 @@ def _real_extract(self, url):
             result = self._resolve_url(
                 'lbry://' + display_id, display_id, 'channel')
             claim_id = result['claim_id']
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         content = qs.get('content', [None])[0]
         params = {
             'fee_amount': qs.get('fee_amount', ['>=0'])[0],
@@ -4,13 +4,10 @@
 import re
 
 from .theplatform import ThePlatformBaseIE
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_qs,
     update_url_query,
 )
 

@@ -96,7 +93,7 @@ class MediasetIE(ThePlatformBaseIE):
     @staticmethod
     def _extract_urls(ie, webpage):
         def _qs(url):
-            return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+            return parse_qs(url)
 
         def _program_guid(qs):
             return qs.get('programGuid', [None])[0]
@@ -5,10 +5,8 @@
 
 from .turner import TurnerBaseIE
 from ..compat import (
-    compat_parse_qs,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     int_or_none,

@@ -16,6 +14,7 @@
     OnDemandPagedList,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     try_get,
     update_url_query,
     urljoin,

@@ -165,7 +164,7 @@ class NBAWatchIE(NBAWatchBaseIE):
 
     def _real_extract(self, url):
         display_id = self._match_id(url)
-        collection_id = compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('collection', [None])[0]
+        collection_id = parse_qs(url).get('collection', [None])[0]
         if collection_id:
             if self.get_param('noplaylist'):
                 self.to_screen('Downloading just video %s because of --no-playlist' % display_id)

@@ -359,7 +358,7 @@ class NBAEmbedIE(NBABaseIE):
     }]
 
     def _real_extract(self, url):
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         content_id = qs['contentId'][0]
         team = qs.get('team', [None])[0]
         if not team:
@@ -8,7 +8,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     clean_html,

@@ -16,6 +15,7 @@
     int_or_none,
     float_or_none,
     parse_iso8601,
+    parse_qs,
     sanitized_Request,
     urlencode_postdata,
 )

@@ -123,7 +123,7 @@ def _real_extract(self, url):
             webpage, 'noco player', group='player',
             default='http://noco.tv/cdata/js/player/NocoPlayer-v1.2.40.swf')
 
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(player_url).query)
+        qs = parse_qs(player_url)
         ts = int_or_none(qs.get('ts', [None])[0])
         self._ts_offset = ts - self._ts() if ts else 0
         self._referer = player_url
@@ -5,12 +5,12 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     ExtractorError,
     float_or_none,
     parse_duration,
+    parse_qs,
     str_to_int,
     urlencode_postdata,
 )

@@ -75,7 +75,7 @@ def _real_extract(self, url):
         video_id = mobj.group('id')
 
         if not user_id or not video_id:
-            qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+            qs = parse_qs(url)
             video_id = qs.get('prgid', [None])[0]
             user_id = qs.get('ch_userid', [None])[0]
             if any(not f for f in (video_id, user_id,)):
@@ -4,11 +4,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urlparse
 from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
+    parse_qs,
     xpath_text,
     qualities,
 )

@@ -56,7 +56,7 @@ def _extract_url(webpage):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         pl = qs.get('pl', ['1'])[0]
 
         video = self._download_xml(
@@ -17,6 +17,7 @@
     float_or_none,
     int_or_none,
     parse_duration,
+    parse_qs,
     qualities,
     srt_subtitles_timecode,
     try_get,

@@ -273,7 +274,7 @@ def _convert_subtitles(duration, subs):
         return srt
 
     def _real_extract(self, url):
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
 
         author = qs.get('author', [None])[0]
         name = qs.get('name', [None])[0]
@@ -7,13 +7,12 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     determine_ext,
     bool_or_none,
     int_or_none,
+    parse_qs,
     try_get,
     unified_timestamp,
     url_or_none,

@@ -178,7 +177,7 @@ def _real_extract(self, url):
         embed_id = self._match_id(url)
         # Query may contain private videos token and should be passed to API
         # requests (see #19163)
-        query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        query = parse_qs(url)
         options = self._download_api_options(embed_id, query)
         video_id = options['effective_video']
         formats = self._extract_formats(options, video_id)

@@ -300,14 +299,14 @@ class RutubePlaylistIE(RutubePlaylistBaseIE):
     def suitable(cls, url):
         if not super(RutubePlaylistIE, cls).suitable(url):
             return False
-        params = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        params = parse_qs(url)
         return params.get('pl_type', [None])[0] and int_or_none(params.get('pl_id', [None])[0])
 
     def _next_page_url(self, page_num, playlist_id, item_kind):
         return self._PAGE_TEMPLATE % (item_kind, playlist_id, page_num)
 
     def _real_extract(self, url):
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         playlist_kind = qs['pl_type'][0]
         playlist_id = qs['pl_id'][0]
         return self._extract_playlist(playlist_id, item_kind=playlist_kind)
@@ -5,7 +5,6 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_str,
     compat_urllib_parse_urlparse,
 )

@@ -13,6 +12,7 @@
     urljoin,
     int_or_none,
     parse_codecs,
+    parse_qs,
     try_get,
 )
 

@@ -108,7 +108,7 @@ def get_url(format_id):
         return formats
 
     def _real_extract(self, url):
-        params = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        params = parse_qs(url)
 
         src = params['src'][0]
         title = params['title'][0]
@@ -4,13 +4,12 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_str,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     determine_ext,
     int_or_none,
+    parse_qs,
     try_get,
     qualities,
 )

@@ -78,7 +77,7 @@ def _real_extract(self, url):
                 continue
             if container == 'm3u8' or ext == 'm3u8':
                 if protocol == 'usp':
-                    if compat_parse_qs(compat_urllib_parse_urlparse(asset_url).query).get('token', [None])[0]:
+                    if parse_qs(asset_url).get('token', [None])[0]:
                         urlh = self._request_webpage(
                             asset_url, video_id, fatal=False,
                             headers=self.geo_verification_headers())
@@ -2,15 +2,12 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     clean_html,
     float_or_none,
     int_or_none,
     parse_iso8601,
+    parse_qs,
     strip_or_none,
     try_get,
 )

@@ -61,7 +58,7 @@ def _real_extract(self, url):
         }
         videos = asset.get('videos') or []
         if len(videos) > 1:
-            playlist_id = compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('playlistId', [None])[0]
+            playlist_id = parse_qs(url).get('playlistId', [None])[0]
             if playlist_id:
                 if self.get_param('noplaylist'):
                     videos = [videos[int(playlist_id)]]
@@ -6,9 +6,9 @@
 from .common import InfoExtractor
 from .jwplatform import JWPlatformIE
 from .nexx import NexxIE
-from ..compat import compat_urlparse
 from ..utils import (
     NO_DEFAULT,
+    parse_qs,
     smuggle_url,
 )
 

@@ -64,7 +64,7 @@ class Tele5IE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         video_id = (qs.get('vid') or qs.get('ve_id') or [None])[0]
 
         NEXX_ID_RE = r'\d{6,}'
@@ -10,15 +10,12 @@
 
 from .once import OnceIE
 from .adobepass import AdobePassIE
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     determine_ext,
     ExtractorError,
     float_or_none,
     int_or_none,
+    parse_qs,
     sanitized_Request,
     unsmuggle_url,
     update_url_query,

@@ -250,7 +247,7 @@ def _real_extract(self, url):
             path += mobj.group('media')
         path += video_id
 
-        qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs_dict = parse_qs(url)
         if 'guid' in qs_dict:
             webpage = self._download_webpage(url, video_id)
             scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)

@@ -359,7 +356,7 @@ def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custo
             if first_video_id is None:
                 first_video_id = cur_video_id
             duration = float_or_none(item.get('plfile$duration'))
-            file_asset_types = item.get('plfile$assetTypes') or compat_parse_qs(compat_urllib_parse_urlparse(smil_url).query)['assetTypes']
+            file_asset_types = item.get('plfile$assetTypes') or parse_qs(smil_url)['assetTypes']
             for asset_type in file_asset_types:
                 if asset_type in asset_types:
                     continue
@@ -11,7 +11,6 @@
 from ..compat import (
     compat_parse_qs,
     compat_str,
-    compat_urlparse,
     compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )

@@ -23,6 +22,7 @@
     int_or_none,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     qualities,
     try_get,
     unified_timestamp,

@@ -571,7 +571,7 @@ def _extract_entry(node):
 
     def _real_extract(self, url):
         channel_name = self._match_id(url)
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         filter = qs.get('filter', ['all'])[0]
         sort = qs.get('sort', ['time'])[0]
         broadcast = self._BROADCASTS.get(filter, self._DEFAULT_BROADCAST)

@@ -647,7 +647,7 @@ def _extract_entry(node):
 
     def _real_extract(self, url):
         channel_name = self._match_id(url)
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         range = qs.get('range', ['7d'])[0]
         clip = self._RANGE.get(range, self._DEFAULT_CLIP)
         return self.playlist_result(
@@ -2,12 +2,9 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_urlparse,
-    compat_parse_qs,
-)
 from ..utils import (
     clean_html,
+    parse_qs,
     remove_start,
 )
 

@@ -59,7 +56,7 @@ def _real_extract(self, url):
         fb_sharer_url = self._search_regex(
             r'<a[^>]+href="(https?://www\.facebook\.com/sharer/sharer\.php?[^"]+)"',
             webpage, 'facebook sharer URL', fatal=False)
-        sharer_params = compat_parse_qs(compat_urllib_parse_urlparse(fb_sharer_url).query)
+        sharer_params = parse_qs(fb_sharer_url)
         thumbnail = sharer_params.get('p[images][0]', [None])[0]
 
         video_id = self._search_regex(
@@ -6,13 +6,13 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
     compat_HTTPError,
 )
 from ..utils import (
     ExtractorError,
     int_or_none,
     parse_iso8601,
+    parse_qs,
 )
 
 

@@ -218,7 +218,7 @@ def _real_extract(self, url):
 
         webpage = self._download_webpage(url, playlist_id)
 
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         index = qs.get('index', [None])[0]
 
         if index:
@@ -11,6 +11,7 @@
     int_or_none,
     mimetype2ext,
     parse_codecs,
+    parse_qs,
     update_url_query,
     urljoin,
     xpath_element,

@@ -20,7 +21,6 @@
     compat_b64decode,
     compat_ord,
     compat_struct_pack,
-    compat_urlparse,
 )
 
 

@@ -113,7 +113,7 @@ def _real_extract(self, url):
         for i in range(0, 32):
             result += s[i - (self._STATIC_SECRET.index(l[i]) - 31)]
 
-        query = compat_urlparse.parse_qs(compat_urlparse.urlparse(player_url).query)
+        query = parse_qs(player_url)
         random_seed = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
         query['_s'] = random_seed
         query['_t'] = result[:16]
@@ -5,12 +5,11 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_str,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     int_or_none,
+    parse_qs,
 )
 
 

@@ -146,7 +145,7 @@ def _extract_url(webpage):
     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
         video_id = mobj.group('sid') or mobj.group('id')
-        partner_id = mobj.group('partner_id') or compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('partner_id', [None])[0] or '97'
+        partner_id = mobj.group('partner_id') or parse_qs(url).get('partner_id', [None])[0] or '97'
 
         item = self._download_json(
             'https://siren.more.tv/player/config', video_id, query={
@@ -25,6 +25,7 @@
     OnDemandPagedList,
     parse_filesize,
     parse_iso8601,
+    parse_qs,
     RegexNotFoundError,
     sanitized_Request,
     smuggle_url,

@@ -265,7 +266,7 @@ def _extract_original_format(self, url, video_id, unlisted_hash=None):
             download_url = download_data.get('link')
             if not download_url or download_data.get('quality') != 'source':
                 continue
-            query = compat_urlparse.parse_qs(compat_urlparse.urlparse(download_url).query)
+            query = parse_qs(download_url)
             return {
                 'url': download_url,
                 'ext': determine_ext(query.get('filename', [''])[0].lower()),
@@ -4,14 +4,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     int_or_none,
     month_by_abbreviation,
     parse_filesize,
+    parse_qs,
 )
 
 

@@ -37,7 +34,7 @@ def _real_extract(self, url):
         video_id = self._match_id(url)
 
         if '/video.php' in url:
-            qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+            qs = parse_qs(url)
             url = 'https://gameclips.io/%s/%s' % (qs['gamertag'][0], qs['vid'][0])
 
         webpage = self._download_webpage(url, video_id)
@@ -46,6 +46,7 @@
     parse_count,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     qualities,
     remove_start,
     smuggle_url,

@@ -64,10 +65,6 @@
 )
 
 
-def parse_qs(url):
-    return compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
-
-
 # any clients starting with _ cannot be explicity requested by the user
 INNERTUBE_CLIENTS = {
     'web': {

@@ -1842,7 +1839,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
     def suitable(cls, url):
         # Hack for lazy extractors until more generic solution is implemented
         # (see #28780)
-        from .youtube import parse_qs
+        from ..utils import parse_qs
+
         qs = parse_qs(url)
         if qs.get('list', [None])[0]:
             return False

@@ -4598,7 +4596,7 @@ def _make_valid_url(cls):
         return cls._VALID_URL
 
     def _real_extract(self, url):
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         query = (qs.get('search_query') or qs.get('q'))[0]
         self._SEARCH_PARAMS = qs.get('sp', ('',))[0]
         return self._get_n_results(query, self._MAX_RESULTS)
@@ -4167,6 +4167,10 @@ def escape_url(url):
     ).geturl()
 
 
+def parse_qs(url):
+    return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+
+
 def read_batch_urls(batch_fd):
     def fixup(url):
         if not isinstance(url, compat_str):
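One idiom recurs at nearly every call site above: because parse_qs maps each key to a list of values, extractors read an optional scalar parameter as qs.get(key, [None])[0]. A minimal sketch (hypothetical URL, with keys borrowed from the hunks above):

    qs = parse_qs('https://example.com/player?mediaId=123')
    media_id = qs.get('mediaId', [None])[0]      # '123'
    account_id = qs.get('accountId', [None])[0]  # None: key absent, default [None] is used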