mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-27 10:31:29 +00:00)

parent 251ae04e6a
commit 8b7491c8d1
@@ -66,7 +66,7 @@ def build_lazy_ie(ie, name):
     if valid_url:
         s += f'    _VALID_URL = {valid_url!r}\n'
     if not ie._WORKING:
-        s += f'    _WORKING = False\n'
+        s += '    _WORKING = False\n'
     if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
         s += f'\n{getsource(ie.suitable)}'
     if hasattr(ie, '_make_valid_url'):
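This first hunk is in the lazy-extractor stub generator: the `f` prefix is dropped from a string literal that contains no placeholders. A rough standalone sketch of this kind of stub generation, using simplified stand-in names (`make_stub`, `DemoIE`) rather than the script's real helpers:

    # Rough sketch of generating a lazy class stub as source text.
    # make_stub and DemoIE are illustrative stand-ins, not yt-dlp's actual code.

    class DemoIE:
        _VALID_URL = r'https?://example\.com/(?P<id>\d+)'
        _WORKING = False

    def make_stub(ie, name):
        s = f'class {name}(LazyLoadExtractor):\n'
        valid_url = getattr(ie, '_VALID_URL', None)
        if valid_url:
            s += f'    _VALID_URL = {valid_url!r}\n'   # f-string: has a placeholder
        if not ie._WORKING:
            s += '    _WORKING = False\n'              # plain literal: no placeholder, no f needed
        return s

    print(make_stub(DemoIE, 'DemoIE'))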
@@ -461,7 +461,7 @@ class YoutubeDL(object):
     ))

     params = None
-    _ies = []
+    _ies = {}
     _pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
     _printed_messages = set()
     _first_webpage_request = True
@@ -475,7 +475,7 @@ def __init__(self, params=None, auto_init=True):
         """Create a FileDownloader object with the given options."""
         if params is None:
             params = {}
-        self._ies = []
+        self._ies = {}
         self._ies_instances = {}
         self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
         self._printed_messages = set()
@@ -631,11 +631,19 @@ def warn_if_short_id(self, argv):

     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
-        self._ies.append(ie)
+        ie_key = ie.ie_key()
+        self._ies[ie_key] = ie
         if not isinstance(ie, type):
-            self._ies_instances[ie.ie_key()] = ie
+            self._ies_instances[ie_key] = ie
         ie.set_downloader(self)

+    def _get_info_extractor_class(self, ie_key):
+        ie = self._ies.get(ie_key)
+        if ie is None:
+            ie = get_info_extractor(ie_key)
+            self.add_info_extractor(ie)
+        return ie
+
     def get_info_extractor(self, ie_key):
         """
         Get an instance of an IE with name ie_key, it will try to get one from
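The hunk above changes `_ies` from a list to a dict keyed by `ie_key`, and adds `_get_info_extractor_class` so an extractor class can be fetched, and registered on first use, by name instead of scanning a list. A minimal standalone sketch of that pattern, with stand-in names (`Registry`, `DemoIE`, `load_class`) rather than yt-dlp's real API:

    # Standalone sketch of a dict-based, lazily populated registry keyed by name.
    # All names below are illustrative stand-ins, not yt-dlp's actual classes.

    class DemoIE:
        @classmethod
        def ie_key(cls):
            # Mirrors the idea of InfoExtractor.ie_key(): class name minus the 'IE' suffix.
            return cls.__name__[:-2]

    class YoutubeIE(DemoIE):
        pass

    def load_class(ie_key):
        # Stand-in for get_info_extractor(): resolve a key to a class on demand.
        return {'Youtube': YoutubeIE}[ie_key]

    class Registry:
        def __init__(self):
            self._ies = {}  # keyed by ie_key, so lookups are O(1) instead of a list scan

        def add_info_extractor(self, ie):
            self._ies[ie.ie_key()] = ie

        def _get_info_extractor_class(self, ie_key):
            ie = self._ies.get(ie_key)
            if ie is None:                 # not registered yet: resolve and register lazily
                ie = load_class(ie_key)
                self.add_info_extractor(ie)
            return ie

    registry = Registry()
    print(registry._get_info_extractor_class('Youtube').__name__)  # -> YoutubeIE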
@@ -1179,15 +1187,14 @@ def extract_info(self, url, download=True, ie_key=None, extra_info={},
             ie_key = 'Generic'

         if ie_key:
-            ies = [get_info_extractor(ie_key)]
+            ies = {ie_key: self._get_info_extractor_class(ie_key)}
         else:
             ies = self._ies

-        for ie in ies:
+        for ie_key, ie in ies.items():
             if not ie.suitable(url):
                 continue

-            ie_key = ie.ie_key()
             if not ie.working():
                 self.report_warning('The program functionality for this site has been marked as broken, '
                                     'and will probably not work.')
@@ -1197,8 +1204,7 @@ def extract_info(self, url, download=True, ie_key=None, extra_info={},
                 self.to_screen("[%s] %s: has already been recorded in archive" % (
                     ie_key, temp_id))
                 break
-            return self.__extract_info(url, self.get_info_extractor(ie.ie_key()),
-                                       download, extra_info, process)
+            return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
         else:
             self.report_error('no suitable InfoExtractor for URL %s' % url)

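With `ies` now a dict, `extract_info` iterates `ies.items()`, so the matching key is already in hand: the old `ie_key = ie.ie_key()` line disappears and the matched key is passed straight to `get_info_extractor`. A hedged sketch of that dispatch shape, using placeholder handlers and the same for/else fallback; none of the names below are yt-dlp's:

    # Sketch of dispatching a URL over a dict of candidates; for/else gives the
    # "no suitable handler" path. All names here are placeholders, not yt-dlp code.

    HANDLERS = {
        'Youtube': lambda url: 'youtube.com' in url,
        'Generic': lambda url: True,
    }

    def dispatch(url, only_key=None, archive=frozenset()):
        candidates = {only_key: HANDLERS[only_key]} if only_key else HANDLERS
        for key, suitable in candidates.items():
            if not suitable(url):
                continue
            if url in archive:
                print(f'[{key}] {url}: has already been recorded in archive')
                break                        # stop without extracting
            return f'handled by {key}'
        else:
            return 'no suitable handler'     # only reached if the loop was never broken

    print(dispatch('https://www.youtube.com/watch?v=abc'))        # handled by Youtube
    print(dispatch('https://example.com/x', only_key='Youtube'))  # no suitable handler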
@@ -3025,9 +3031,9 @@ def _make_archive_id(self, info_dict):
             if not url:
                 return
             # Try to find matching extractor for the URL and take its ie_key
-            for ie in self._ies:
+            for ie_key, ie in self._ies.items():
                 if ie.suitable(url):
-                    extractor = ie.ie_key()
+                    extractor = ie_key
                     break
             else:
                 return