Mirror of https://github.com/yt-dlp/yt-dlp.git
some py3 fixes, both needed and recommended; we should pass 2to3 as cleanly as possible now
parent 0eaf520d77
commit 1a2c3c0f3e
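
The changes below replace three Python 2 idioms: indexing dict.keys(), treating filter() results as lists, and hand-encoding text before writing it to a binary file handle. The first two break or change behaviour under Python 3; the third is simply more natural as text-mode io.open. A quick standalone illustration of the first two, with made-up data (not code from the diff; runs under Python 3):

    # Illustrative only; invented format map.
    formats = {'22': 'mp4 720p', '18': 'mp4 360p'}

    try:
        formats.keys()[0]          # Python 2: keys() was a list, so this worked
    except TypeError:
        pass                       # Python 3: dict views are not subscriptable

    flt = filter(lambda k: k == '22', formats)
    assert list(flt) == ['22']     # the filter object yields its items once...
    assert list(flt) == []         # ...and is exhausted on the second pass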
@@ -433,11 +433,8 @@ def process_info(self, info_dict):
             try:
                 srtfn = filename.rsplit('.', 1)[0] + u'.srt'
                 self.report_writesubtitles(srtfn)
-                srtfile = open(encodeFilename(srtfn), 'wb')
-                try:
-                    srtfile.write(info_dict['subtitles'].encode('utf-8'))
-                finally:
-                    srtfile.close()
+                with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile:
+                    srtfile.write(info_dict['subtitles'])
             except (OSError, IOError):
                 self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
                 return
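
The rewritten subtitle write uses io.open in text mode with an explicit UTF-8 encoding, and the with-block closes the file even if the write fails, replacing the manual encode()/try/finally/close() sequence. A minimal standalone sketch of the same pattern, with a hypothetical filename and subtitle string:

    import io

    # Hypothetical values, for illustration only.
    srtfn = 'video.srt'
    subtitles = u'1\n00:00:01,000 --> 00:00:02,000\nHello\n'

    # io.open handles the encoding; the context manager handles the close.
    with io.open(srtfn, 'w', encoding='utf-8') as srtfile:
        srtfile.write(subtitles)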
@@ -235,7 +235,7 @@ def _extract_subtitles(self, video_id):
         elif 'en' in srt_lang_list:
             srt_lang = 'en'
         else:
-            srt_lang = srt_lang_list.keys()[0]
+            srt_lang = list(srt_lang_list.keys())[0]
         if not srt_lang in srt_lang_list:
             return (u'WARNING: no closed captions found in the specified language', None)
         request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
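
Picking an arbitrary fallback language indexes into the available keys, which no longer works on a Python 3 dict view; wrapping it in list() restores the Python 2 behaviour. A small standalone example with an invented language map:

    # Invented language map, for illustration only.
    srt_lang_list = {'fr': 'French captions', 'de': 'German captions'}

    # Python 2: srt_lang_list.keys()[0] returned a list element.
    # Python 3: keys() is a view, so materialise it before indexing.
    srt_lang = list(srt_lang_list.keys())[0]
    print(srt_lang)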
@@ -445,7 +445,7 @@ def _real_extract(self, url):
         elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
             url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
             url_data = [compat_parse_qs(uds) for uds in url_data_strs]
-            url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
+            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
             url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)

             format_limit = self._downloader.params.get('format_limit', None)
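
On Python 3, filter() returns a lazy, single-use iterator rather than a list, so code that expects a list, or that iterates the result more than once, has to materialise it; the list comprehension keeps the Python 2 list semantics. A standalone sketch with invented parsed query-string data:

    # Invented parsed query-string dicts, for illustration only.
    url_data = [
        {'itag': ['22'], 'url': ['http://example.com/v'], 'sig': ['abc']},
        {'quality': ['hd720']},   # lacks itag/url, should be dropped
    ]

    # The comprehension yields a real list that can be reused freely,
    # unlike a one-shot Python 3 filter object.
    url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
    url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
    print(url_map)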
@@ -2115,7 +2115,7 @@ def _real_extract(self, url):
         video_description = video_info.get('description', 'No description available.')

         url_map = video_info['video_urls']
-        if len(url_map.keys()) > 0:
+        if len(list(url_map.keys())) > 0:
             # Decide which formats to download
             req_format = self._downloader.params.get('format', None)
             format_limit = self._downloader.params.get('format_limit', None)
@@ -2975,7 +2975,7 @@ def _real_extract(self, url):
                 if file_url is not None:
                     break # got it!
             else:
-                if req_format not in formats.keys():
+                if req_format not in list(formats.keys()):
                     self._downloader.trouble(u'ERROR: format is not available')
                     return

@@ -3274,7 +3274,7 @@ def _real_extract(self, url):
        seed = config['data'][0]['seed']

        format = self._downloader.params.get('format', None)
-       supported_format = config['data'][0]['streamfileids'].keys()
+       supported_format = list(config['data'][0]['streamfileids'].keys())

        if format is None or format == 'best':
            if 'hd2' in supported_format:
@@ -453,8 +453,8 @@ def _real_main():
     if opts.list_extractors:
         for ie in extractors:
             print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
-            matchedUrls = filter(lambda url: ie.suitable(url), all_urls)
-            all_urls = filter(lambda url: url not in matchedUrls, all_urls)
+            matchedUrls = [url for url in all_urls if ie.suitable(url)]
+            all_urls = [url for url in all_urls if url not in matchedUrls]
             for mu in matchedUrls:
                 print(u' ' + mu)
         sys.exit(0)
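
This last hunk is one place where Python 3's lazy filter() would change behaviour rather than merely read differently: matchedUrls is used both by the membership test that rebuilds all_urls and by the printing loop, and a one-shot iterator would be exhausted by whichever use runs first. A minimal standalone demonstration with placeholder URLs and a stand-in for ie.suitable():

    # Placeholder data and a stand-in predicate, for illustration only.
    all_urls = ['http://a.example/1', 'http://b.example/2']

    def suitable(url):
        return 'a.example' in url

    # Both results are real lists, so matchedUrls can be consulted by the
    # membership test and then iterated again for printing.
    matchedUrls = [url for url in all_urls if suitable(url)]
    all_urls = [url for url in all_urls if url not in matchedUrls]
    for mu in matchedUrls:
        print(u' ' + mu)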