Manually set result limit for YoutubeSearchURL

This commit is contained in:
Unknown 2020-11-10 23:20:52 +01:00
parent 9833e7a015
commit 0f8566e90b

View file

@@ -327,7 +327,7 @@ def _real_find(obj):
     return entries, try_get(c, lambda x: x["continuation"])

-def _entries(self, page, playlist_id):
+def _entries(self, page, playlist_id, n=1):
     seen = []
     yt_conf = {}
@@ -339,7 +339,8 @@ def _entries(self, page, playlist_id):
     data_json = self._parse_json(self._search_regex(self._INITIAL_DATA_RE, page, 'ytInitialData'), None)

-    for page_num in itertools.count(1):
+    # for page_num in itertools.count(1):
+    for page_num in range(n):
         entries, continuation = self._find_entries_in_json(data_json)
         processed = self._process_entries(entries, seen)
@@ -3447,8 +3448,8 @@ def _real_extract(self, url):
     mobj = re.match(self._VALID_URL, url)
     query = compat_urllib_parse_unquote_plus(mobj.group('query'))
     webpage = self._download_webpage(url, query)
-    data_json = self._process_initial_data(webpage)
-    return self.playlist_result(self._process_data(data_json), playlist_title=query)
+    # data_json = self._process_initial_data(webpage)
+    return self.playlist_result(self._entries(webpage, query, n=5), playlist_title=query)


class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):