From 2c4aaaddc99dfb57cb3a5395e4d2fff2f4b819a4 Mon Sep 17 00:00:00 2001
From: pukkandan
Date: Wed, 1 Dec 2021 23:21:19 +0530
Subject: [PATCH] [lazy_extractors] Fix for search IEs

Closes #1851
---
 devscripts/make_lazy_extractors.py | 11 ++---------
 yt_dlp/extractor/common.py         | 13 +++----------
 2 files changed, 5 insertions(+), 19 deletions(-)

diff --git a/devscripts/make_lazy_extractors.py b/devscripts/make_lazy_extractors.py
index 0411df76b..b58fb85e3 100644
--- a/devscripts/make_lazy_extractors.py
+++ b/devscripts/make_lazy_extractors.py
@@ -39,12 +39,6 @@ class {name}({bases}):
     _module = '{module}'
 '''
 
-make_valid_template = '''
-    @classmethod
-    def _make_valid_url(cls):
-        return {valid_url!r}
-'''
-
 
 def get_base_name(base):
     if base is InfoExtractor:
@@ -61,15 +55,14 @@ def build_lazy_ie(ie, name):
         bases=', '.join(map(get_base_name, ie.__bases__)),
         module=ie.__module__)
     valid_url = getattr(ie, '_VALID_URL', None)
+    if not valid_url and hasattr(ie, '_make_valid_url'):
+        valid_url = ie._make_valid_url()
     if valid_url:
         s += f'    _VALID_URL = {valid_url!r}\n'
     if not ie._WORKING:
         s += '    _WORKING = False\n'
     if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
         s += f'\n{getsource(ie.suitable)}'
-    if hasattr(ie, '_make_valid_url'):
-        # search extractors
-        s += make_valid_template.format(valid_url=ie._make_valid_url())
     return s
 
 
diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py
index 37e69d409..597db63d1 100644
--- a/yt_dlp/extractor/common.py
+++ b/yt_dlp/extractor/common.py
@@ -466,6 +466,8 @@ class InfoExtractor(object):
         # we have cached the regexp for *this* class, whereas getattr would also
         # match the superclass
         if '_VALID_URL_RE' not in cls.__dict__:
+            if '_VALID_URL' not in cls.__dict__:
+                cls._VALID_URL = cls._make_valid_url()
             cls._VALID_URL_RE = re.compile(cls._VALID_URL)
         return cls._VALID_URL_RE.match(url)
 
@@ -3658,17 +3660,8 @@ class SearchInfoExtractor(InfoExtractor):
     def _make_valid_url(cls):
         return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
 
-    @classmethod
-    def suitable(cls, url):
-        return re.match(cls._make_valid_url(), url) is not None
-
     def _real_extract(self, query):
-        mobj = re.match(self._make_valid_url(), query)
-        if mobj is None:
-            raise ExtractorError('Invalid search query "%s"' % query)
-
-        prefix = mobj.group('prefix')
-        query = mobj.group('query')
+        prefix, query = self._match_valid_url(query).group('prefix', 'query')
         if prefix == '':
             return self._get_n_results(query, 1)
         elif prefix == 'all':
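
Illustrative note (not part of the patch itself): after this change, search extractors no longer need a suitable() override, and the generated lazy extractors no longer carry a _make_valid_url classmethod; instead, _match_valid_url() builds and caches _VALID_URL on first use. The sketch below demonstrates that lazy-resolution behaviour in isolation; Extractor and DemoSearchExtractor are simplified stand-ins for the real InfoExtractor/SearchInfoExtractor, not yt-dlp classes.

import re


class Extractor:  # stand-in for InfoExtractor
    @classmethod
    def _match_valid_url(cls, url):
        # Cache the compiled regex on *this* class; if the subclass only
        # defines _make_valid_url(), derive _VALID_URL from it on demand.
        if '_VALID_URL_RE' not in cls.__dict__:
            if '_VALID_URL' not in cls.__dict__:
                cls._VALID_URL = cls._make_valid_url()
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url)


class DemoSearchExtractor(Extractor):  # hypothetical search IE
    _SEARCH_KEY = 'demosearch'

    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY


mobj = DemoSearchExtractor._match_valid_url('demosearch5:cute cats')
print(mobj.group('prefix', 'query'))  # ('5', 'cute cats')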