Revert "UP031"

The risk of making a mistake and breaking core functionality is too high with these fixes. The rule will be disabled until ruff allows being more selective.
sepro 2024-11-26 20:48:02 +01:00
parent 0a88e784c2
commit 0afbce5e25
58 changed files with 117 additions and 110 deletions
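
Disabling the rule itself is a one-line linter-config change rather than a code change. A minimal sketch of what that looks like, assuming ruff is configured under [tool.ruff.lint] in pyproject.toml (the exact file and section used by this repository may differ):

[tool.ruff.lint]
ignore = [
    "UP031",  # printf-string-formatting: auto-fixes reverted as too risky
]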

View file

@@ -75,7 +75,7 @@ def apply_patch(text, patch):
     ),
     (  # Avoid newline when a space is available b/w switch and description
         DISABLE_PATCH,  # This creates issues with prepare_manpage
-        r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim),  # noqa: UP031
+        r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim),
         r'\1 ',
     ),
     (  # Replace brackets with a Markdown link

View file

@@ -854,7 +854,7 @@ def expect_same_infodict(out):
         test('%(id+1-height+3)05d', '00158')
         test('%(width+100)05d', 'NA')
         test('%(filesize*8)d', '8192')
-        test('%(formats.0) 15s', ('% 15s' % FORMATS[0], None))  # noqa: UP031
+        test('%(formats.0) 15s', ('% 15s' % FORMATS[0], None))
         test('%(formats.0)r', (repr(FORMATS[0]), None))
         test('%(height.0)03d', '001')
         test('%(-height.0)04d', '-001')

View file

@@ -199,14 +199,14 @@ def try_rm_tcs_files(tcs=None):
                     self,
                     len(res_dict['entries']),
                     test_case['playlist_mincount'],
-                    'Expected at least {} in playlist {}, but got only {}'.format(
+                    'Expected at least %d in playlist %s, but got only %d' % (
                         test_case['playlist_mincount'], test_case['url'],
                         len(res_dict['entries'])))
             if 'playlist_count' in test_case:
                 self.assertEqual(
                     len(res_dict['entries']),
                     test_case['playlist_count'],
-                    'Expected {} entries in playlist {}, but got {}.'.format(
+                    'Expected %d entries in playlist %s, but got %d.' % (
                         test_case['playlist_count'],
                         test_case['url'],
                         len(res_dict['entries']),

View file

@@ -1504,10 +1504,10 @@ def check_filter():
         if view_count is not None:
             min_views = self.params.get('min_views')
             if min_views is not None and view_count < min_views:
-                return f'Skipping {video_title}, because it has not reached minimum view count ({view_count}/{min_views})'
+                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
             max_views = self.params.get('max_views')
             if max_views is not None and view_count > max_views:
-                return f'Skipping {video_title}, because it has exceeded the maximum view count ({view_count}/{max_views})'
+                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
         if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
             return f'Skipping "{video_title}" because it is age restricted'

@@ -1654,7 +1654,7 @@ def _wait_for_video(self, ie_result={}):
                 or ie_result.get('formats') or ie_result.get('url')):
             return

-        format_dur = lambda dur: '{:02.0f}:{:02.0f}:{:02.0f}'.format(*timetuple_from_msec(dur * 1000)[:-1])
+        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
         last_msg = ''

         def progress(msg):
@@ -2634,7 +2634,7 @@ def check_thumbnails(thumbnails):
                 if t.get('id') is None:
                     t['id'] = str(i)
                 if t.get('width') and t.get('height'):
-                    t['resolution'] = '{}x{}'.format(t['width'], t['height'])
+                    t['resolution'] = '%dx%d' % (t['width'], t['height'])
                 t['url'] = sanitize_url(t['url'])

         if self.params.get('check_formats') is True:
@@ -2694,7 +2694,7 @@ def _fill_common_fields(self, info_dict, final=True):
         # in order to always have clean titles. This is very common for TV series.
         for field in ('chapter', 'season', 'episode'):
             if final and info_dict.get(f'{field}_number') is not None and not info_dict.get(field):
-                info_dict[field] = '{} {}'.format(field.capitalize(), info_dict[f'{field}_number'])
+                info_dict[field] = '%s %d' % (field.capitalize(), info_dict[f'{field}_number'])

         for old_key, new_key in self._deprecated_multivalue_fields.items():
             if new_key in info_dict and old_key in info_dict:
@@ -3790,11 +3790,11 @@ def format_resolution(format, default='unknown'):
         if format.get('resolution') is not None:
             return format['resolution']
         if format.get('width') and format.get('height'):
-            return '{}x{}'.format(format['width'], format['height'])
+            return '%dx%d' % (format['width'], format['height'])
         elif format.get('height'):
             return '{}p'.format(format['height'])
         elif format.get('width'):
-            return '{}x?'.format(format['width'])
+            return '%dx?' % format['width']
         return default

     def _list_format_headers(self, *headers):
@@ -3817,7 +3817,7 @@ def _format_note(self, fdict):
         if fdict.get('tbr') is not None:
             if res:
                 res += ', '
-            res += '{:4.0f}k'.format(fdict['tbr'])
+            res += '%4dk' % fdict['tbr']
         if fdict.get('container') is not None:
             if res:
                 res += ', '
@@ -3832,7 +3832,7 @@ def _format_note(self, fdict):
         elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
             res += 'video@'
         if fdict.get('vbr') is not None:
-            res += '{:4.0f}k'.format(fdict['vbr'])
+            res += '%4dk' % fdict['vbr']
         if fdict.get('fps') is not None:
             if res:
                 res += ', '
@@ -3843,15 +3843,15 @@ def _format_note(self, fdict):
             if fdict['acodec'] == 'none':
                 res += 'video only'
             else:
-                res += '{:<5}'.format(fdict['acodec'])
+                res += '%-5s' % fdict['acodec']
         elif fdict.get('abr') is not None:
             if res:
                 res += ', '
             res += 'audio'
         if fdict.get('abr') is not None:
-            res += '@{:3.0f}k'.format(fdict['abr'])
+            res += '@%3dk' % fdict['abr']
         if fdict.get('asr') is not None:
-            res += ' ({:5.0f}Hz)'.format(fdict['asr'])
+            res += ' (%5dHz)' % fdict['asr']
         if fdict.get('filesize') is not None:
             if res:
                 res += ', '

View file

@@ -125,7 +125,7 @@ def format_seconds(seconds):
         time = timetuple_from_msec(seconds * 1000)
         if time.hours > 99:
             return '--:--:--'
-        return '{:02.0f}:{:02.0f}:{:02.0f}'.format(*time[:-1])
+        return '%02d:%02d:%02d' % time[:-1]

     @classmethod
     def format_eta(cls, seconds):

View file

@@ -76,7 +76,8 @@ def real_download(self, filename, info_dict):
             return True
         else:
             self.to_stderr('\n')
-            self.report_error(f'{self.get_basename()} exited with code {retval}')
+            self.report_error('%s exited with code %d' % (
+                self.get_basename(), retval))
             return False

     @classmethod

View file

@@ -25,7 +25,8 @@ def read_bytes(self, n):
         data = self.read(n)
         if len(data) < n:
             raise DataTruncatedError(
-                f'FlvReader error: need {n} bytes while only {len(data)} bytes got')
+                'FlvReader error: need %d bytes while only %d bytes got' % (
+                    n, len(data)))
         return data

 # Utility functions for reading numbers and strings
@@ -234,7 +235,7 @@ def remove_encrypted_media(media):

 def _add_ns(prop, ver=1):
-    return '{http://ns.adobe.com/f4m/%d.0}%s' % (ver, prop)  # noqa: UP031
+    return '{http://ns.adobe.com/f4m/%d.0}%s' % (ver, prop)

 def get_base_url(manifest):
@@ -376,7 +377,7 @@ def real_download(self, filename, info_dict):
                 frag_index += 1
                 if frag_index <= ctx['fragment_index']:
                     continue
-                name = f'Seg{seg_i}-Frag{frag_i}'
+                name = 'Seg%d-Frag%d' % (seg_i, frag_i)
                 query = []
                 if base_url_parsed.query:
                     query.append(base_url_parsed.query)
@@ -410,7 +411,7 @@ def real_download(self, filename, info_dict):
                     if live and (err.status == 404 or err.status == 410):
                         # We didn't keep up with the live window. Continue
                         # with the next available fragment.
-                        msg = f'Fragment {frag_i} unavailable'
+                        msg = 'Fragment %d unavailable' % frag_i
                         self.report_warning(msg)
                         fragments_list = []
                     else:
@@ -420,7 +421,7 @@ def real_download(self, filename, info_dict):
                 fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
                 total_frags += len(fragments_list)
                 if fragments_list and (fragments_list[0][1] > frag_i + 1):
-                    msg = f'Missed {fragments_list[0][1] - (frag_i + 1)} fragments'
+                    msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
                     self.report_warning(msg)

         return self._finish_frag_download(ctx, info_dict)

View file

@@ -23,7 +23,7 @@ def heartbeat():
             try:
                 heartbeat_state[1] += 1
-                ws.send('{"name":"heartbeat","arguments":{},"id":%d}' % heartbeat_state[1])  # noqa: UP031
+                ws.send('{"name":"heartbeat","arguments":{},"id":%d}' % heartbeat_state[1])
             except Exception:
                 self.to_screen('[fc2:live] Heartbeat failed')

View file

@@ -109,7 +109,7 @@ def _write_ytdl_file(self, ctx):
         frag_index_stream.close()

     def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_data=None):
-        fragment_filename = '{}-Frag{}'.format(ctx['tmpfilename'], ctx['fragment_index'])
+        fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index'])
         fragment_info_dict = {
             'url': frag_url,
             'http_headers': headers or info_dict.get('http_headers'),
@@ -156,10 +156,10 @@ def _append_fragment(self, ctx, frag_content):

     def _prepare_frag_download(self, ctx):
         if not ctx.setdefault('live', False):
-            total_frags_str = str(ctx['total_frags'])
+            total_frags_str = '%d' % ctx['total_frags']
             ad_frags = ctx.get('ad_frags', 0)
             if ad_frags:
-                total_frags_str += f' (not including {ad_frags} ad)'
+                total_frags_str += ' (not including %d ad)' % ad_frags
         else:
             total_frags_str = 'unknown (live)'
         self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')
@@ -322,10 +322,10 @@ def _prepare_external_frag_download(self, ctx):
         if 'live' not in ctx:
             ctx['live'] = False
         if not ctx['live']:
-            total_frags_str = str(ctx['total_frags'])
+            total_frags_str = '%d' % ctx['total_frags']
             ad_frags = ctx.get('ad_frags', 0)
             if ad_frags:
-                total_frags_str += f' (not including {ad_frags} ad)'
+                total_frags_str += ' (not including %d ad)' % ad_frags
         else:
             total_frags_str = 'unknown (live)'
         self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')
@@ -445,7 +445,7 @@ def download_fragment(fragment, ctx):
             headers = HTTPHeaderDict(info_dict.get('http_headers'))
             byte_range = fragment.get('byte_range')
             if byte_range:
-                headers['Range'] = 'bytes={}-{}'.format(byte_range['start'], byte_range['end'] - 1)
+                headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)

             # Never skip the first fragment
             fatal = is_fatal(fragment.get('index') or (frag_index - 1))

View file

@@ -42,7 +42,7 @@ def heartbeat():
             timer[0].start()

         heartbeat_info_dict['ping']()
-        self.to_screen(f'[{self.FD_NAME}] Heartbeat with {heartbeat_interval} second interval ...')
+        self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))
         try:
             heartbeat()
             if type(fd).__name__ == 'HlsFD':

View file

@@ -208,5 +208,5 @@ def run_rtmpdump(args):
             return True
         else:
             self.to_stderr('\n')
-            self.report_error(f'rtmpdump exited with code {retval}')
+            self.report_error('rtmpdump exited with code %d' % retval)
             return False

View file

@@ -38,5 +38,5 @@ def real_download(self, filename, info_dict):
             return True
         else:
             self.to_stderr('\n')
-            self.report_error(f'{args[0]} exited with code {retval}')
+            self.report_error('%s exited with code %d' % (args[0], retval))
             return False

View file

@@ -134,7 +134,7 @@ def _get_subtitles(self, sub_url, video_id):
             ssa += os.linesep + 'Dialogue: Marked=0,{},{},Default,,0,0,0,,{}{}'.format(
                 ass_subtitles_timecode(start),
                 ass_subtitles_timecode(end),
-                '{\\a%d}' % alignment if alignment != 2 else '',  # noqa: UP031
+                '{\\a%d}' % alignment if alignment != 2 else '',
                 text.replace('\n', '\\N').replace('<i>', '{\\i1}').replace('</i>', '{\\i0}'))

         if sub_lang == 'vostf':

View file

@@ -118,7 +118,7 @@ def _real_extract(self, url):
                     if isinstance(f['tbr'], float):
                         f['vbr'] = f['tbr'] * 1000
                         del f['tbr']
-                        f['format_id'] = 'rtmp-{:.0f}'.format(f['vbr'])
+                        f['format_id'] = 'rtmp-%d' % f['vbr']
                 formats.extend(smil_formats)
             elif stream_type in ('ts', 'hls'):
                 m3u8_formats = self._extract_m3u8_formats(
@@ -130,7 +130,7 @@ def _real_extract(self, url):
                     if not height:
                         continue
                     vbr = self._search_regex(
-                        fr'[_x]{height}[_-](\d+)', f['url'], 'vbr', default=None)
+                        r'[_x]%d[_-](\d+)' % height, f['url'], 'vbr', default=None)
                     if vbr:
                         f['vbr'] = int(vbr)
                 formats.extend(m3u8_formats)

View file

@@ -49,7 +49,8 @@ def _real_extract(self, url):
         # Request the extended version of the api for extra fields like artist and title
         api_response = self._download_json(
-            f'http://www.audiomack.com/api/music/url/song/{album_url_tag}?extended=1&_={int(time.time())}',
+            'http://www.audiomack.com/api/music/url/song/%s?extended=1&_=%d' % (
+                album_url_tag, time.time()),
             album_url_tag)

         # API is inconsistent with errors
@@ -119,8 +120,9 @@ def _real_extract(self, url):
         for track_no in itertools.count():
             # Get song's metadata
             api_response = self._download_json(
-                f'http://www.audiomack.com/api/music/url/album/{album_url_tag}/{track_no}?extended=1&_={int(time.time())}',
-                album_url_tag, note=f'Querying song information ({track_no + 1})')
+                'http://www.audiomack.com/api/music/url/album/%s/%d?extended=1&_=%d'
+                % (album_url_tag, track_no, time.time()), album_url_tag,
+                note=f'Querying song information ({track_no + 1})')

             # Total failure, only occurs when url is totally wrong
             # Won't happen in middle of valid playlist (next case)

View file

@@ -1641,7 +1641,7 @@ def _entries(self, category, subcategory, query):
                 f'The subcategory {subcategory} isn\'t supported for this category. Supported subcategories: {list(rid_map[category].keys())}')
         rid_value = rid_map[category][subcategory]
-        api_url = f'https://api.bilibili.com/x/web-interface/newlist?rid={rid_value}&type=1&ps=20&jsonp=jsonp'
+        api_url = 'https://api.bilibili.com/x/web-interface/newlist?rid=%d&type=1&ps=20&jsonp=jsonp' % rid_value
         page_json = self._download_json(api_url, query, query={'Search_key': query, 'pn': '1'})
         page_data = traverse_obj(page_json, ('data', 'page'), expected_type=dict)
         count, size = int_or_none(page_data.get('count')), int_or_none(page_data.get('size'))

View file

@@ -2230,7 +2230,7 @@ def build_stream_name():
             # format_id intact.
             if not live:
                 stream_name = build_stream_name()
-                format_id[1] = stream_name or f'{int(tbr) or len(formats)}'
+                format_id[1] = stream_name or '%d' % (tbr or len(formats))
             f = {
                 'format_id': join_nonempty(*format_id),
                 'format_index': idx,
@@ -2469,7 +2469,7 @@ def _parse_smil_formats_and_subtitles(
                     'url': streamer,
                     'play_path': src,
                     'ext': 'flv',
-                    'format_id': f'rtmp-{(rtmp_count if bitrate is None else bitrate):.0f}',
+                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                     'tbr': bitrate,
                     'filesize': filesize,
                     'width': width,
@@ -2493,7 +2493,7 @@ def _parse_smil_formats_and_subtitles(
                     if len(m3u8_formats) == 1:
                         m3u8_count += 1
                         m3u8_formats[0].update({
-                            'format_id': f'hls-{(m3u8_count if bitrate is None else bitrate):.0f}',
+                            'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                             'tbr': bitrate,
                             'width': width,
                             'height': height,
@@ -2524,7 +2524,7 @@ def _parse_smil_formats_and_subtitles(
                 formats.append({
                     'url': src_url,
                     'ext': ext or src_ext or 'flv',
-                    'format_id': f'http-{(bitrate or http_count):.0f}',
+                    'format_id': 'http-%d' % (bitrate or http_count),
                     'tbr': bitrate,
                     'filesize': filesize,
                     'width': width,
@@ -3985,7 +3985,7 @@ def _real_extract(self, query):
         if n <= 0:
             raise ExtractorError(f'invalid download number {n} for query "{query}"')
         elif n > self._MAX_RESULTS:
-            self.report_warning(f'{self._SEARCH_KEY} returns max {self._MAX_RESULTS} results (you requested {n})')
+            self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
             n = self._MAX_RESULTS
         return self._get_n_results(query, n)

View file

@@ -33,7 +33,7 @@ def _real_extract(self, url):
         # request setClientTimezone.php to get PHPSESSID cookie which is need to get valid json data in the next request
         self._request_webpage(HEADRequest(
-            'http://www.cultureunplugged.com/setClientTimezone.php?timeOffset=%d' % -(time.timezone / 3600)), display_id)  # noqa: UP031
+            'http://www.cultureunplugged.com/setClientTimezone.php?timeOffset=%d' % -(time.timezone / 3600)), display_id)

         movie_data = self._download_json(
             f'http://www.cultureunplugged.com/movie-data/cu-{video_id}.json', display_id)

View file

@@ -454,7 +454,7 @@ def _fetch_page(self, playlist_id, page):
                       url
                     }
                   }
-                }''' % ('false' if self._FAMILY_FILTER else 'true', self._PAGE_SIZE, page),  # noqa: UP031
+                }''' % ('false' if self._FAMILY_FILTER else 'true', self._PAGE_SIZE, page),
             f'Downloading page {page}')['videos']
         for edge in videos['edges']:
             node = edge['node']

View file

@@ -125,7 +125,7 @@ def _real_extract(self, url):
             try:
                 api_json = self._download_webpage(
                     next_url, playlist_id,
-                    note=f'Downloading song information {i + 1}/{track_count}',
+                    note='Downloading song information %d/%d' % (i + 1, track_count),
                     errnote='Failed to download song information')
             except ExtractorError:
                 if download_tries > 3:

View file

@@ -37,8 +37,8 @@ def _real_extract(self, url):
         if upload_date_str:
             upload_date_list = upload_date_str.split()
             upload_date_list.reverse()
-            upload_date_list[1] = f'{(month_by_name(upload_date_list[1], lang="fr") or 0):02d}'
-            upload_date_list[2] = f'{int(upload_date_list[2]):02d}'
+            upload_date_list[1] = '%02d' % (month_by_name(upload_date_list[1], lang='fr') or 0)
+            upload_date_list[2] = '%02d' % int(upload_date_list[2])
             upload_date = ''.join(upload_date_list)
         else:
             upload_date = None

View file

@@ -73,7 +73,7 @@ def _real_extract(self, url):
                 'path': 'video/' + display_id,
             })['id']
         node = self._download_json(
-            f'https://brooklyn.gaia.com/node/{node_id}', node_id)
+            'https://brooklyn.gaia.com/node/%d' % node_id, node_id)
         vdata = node[vtype]
         media_id = str(vdata['nid'])
         title = node['title']

View file

@@ -42,8 +42,9 @@ def _get_comments(self, post_num_id, post_hash_id):
         is_scrolled = sort_by in ('new', 'you')
         for page in itertools.count(1):
             comments_data = self._call_api(
-                f'comments/Fireside_Post/{post_num_id}/{sort_by}?'
-                f'{("scroll_id" if is_scrolled else "page")}={scroll_id if is_scrolled else page}',
+                'comments/Fireside_Post/%s/%s?%s=%d' % (
+                    post_num_id, sort_by,
+                    'scroll_id' if is_scrolled else 'page', scroll_id if is_scrolled else page),
                 post_hash_id, note=f'Downloading comments list page {page}')
             if not comments_data.get('comments'):
                 break

View file

@@ -114,7 +114,7 @@ def _real_extract(self, url):
                     f'{self.IE_NAME} returned error: {message}', expected=True)

         hash_code = security_hash[:2]
-        padding = f'{random.randint(1, 10000000000):010d}'
+        padding = '%010d' % random.randint(1, 10000000000)
         if hash_code in ('04', '14'):
             received_time = security_hash[3:13]
             received_md5 = security_hash[24:]

View file

@@ -105,8 +105,8 @@ def date(self, scheme):
         d = time.localtime(self.timestamp)
         strings = {
             'y': str(d.tm_year),
-            'm': f'{d.tm_mon:02d}',
-            'd': f'{d.tm_mday:02d}',
+            'm': '%02d' % d.tm_mon,
+            'd': '%02d' % d.tm_mday,
         }
         self.target += ''.join(strings[c] for c in scheme)

View file

@@ -34,7 +34,7 @@ def _parse_japanese_date(text):
         # example input: 令和5年3月34日
         # even though each era have their end, don't check here
         year += ERA_TABLE[era]
-    return f'{year:04d}{month:02d}{day:02d}'
+    return '%04d%02d%02d' % (year, month, day)

 def _parse_japanese_duration(text):

View file

@@ -113,7 +113,7 @@ def _check_errors(self, play_json):
         if flag == 1:
             self.raise_geo_restricted()
         else:
-            raise ExtractorError(f'Generic error. flag = {flag}', expected=True)
+            raise ExtractorError('Generic error. flag = %d' % flag, expected=True)

     def _real_extract(self, url):
         media_id = self._match_id(url)
@@ -318,7 +318,7 @@ def get_play_json(cf, timestamp):
         if play_json.get('message'):
             raise ExtractorError('Letv cloud said: {}'.format(play_json['message']), expected=True)
         elif play_json.get('code'):
-            raise ExtractorError('Letv cloud returned error {}'.format(play_json['code']), expected=True)
+            raise ExtractorError('Letv cloud returned error %d' % play_json['code'], expected=True)
         else:
             raise ExtractorError('Letv cloud returned an unknown error')

View file

@@ -124,7 +124,7 @@ def _extract_info(self, pc, mobile, i, referer):
             if rtmp:
                 format_id = 'rtmp'
                 if stream.get('videoBitRate'):
-                    format_id += '-{}'.format(int_or_none(stream['videoBitRate']))
+                    format_id += '-%d' % int_or_none(stream['videoBitRate'])
                 http_format_id = format_id.replace('rtmp', 'http')

                 CDN_HOSTS = (

View file

@@ -61,7 +61,7 @@ def _call_api(self, course_slug, fields, video_slug=None, resolution=None):
                 'videoSlug': video_slug,
                 'resolution': f'_{resolution}',
             })
-            sub = f' {resolution}p'
+            sub = ' %dp' % resolution
         api_url = 'https://www.linkedin.com/learning-api/detailedCourses'
         if not self._get_cookies(api_url).get('JSESSIONID'):
             self.raise_login_required()

View file

@@ -100,7 +100,7 @@ def _parse_smil_formats_and_subtitles(
                 furl += '&ssek=' + vn.attrib['clipBegin']
             formats.append({
                 'url': furl,
-                'format_id': f'smil_{tbr:.0f}',
+                'format_id': 'smil_%d' % tbr,
                 'ext': 'flv',
                 'tbr': tbr,
                 'preference': -1000,  # Strictly inferior than all other formats?

View file

@@ -99,7 +99,7 @@ def _real_extract(self, url):
         aspect_ratio = source_width / source_height if source_width and source_height else 16 / 9

         def add_item(container, item_url, height, id_key='format_id', item_id=None):
-            item_id = item_id or f'{height}p'
+            item_id = item_id or '%dp' % height
             if item_id not in item_url:
                 return
             width = int(round(aspect_ratio * height))

View file

@@ -210,7 +210,7 @@ def _real_extract(self, url):
                 video_urls = []

             stream_id = self._STREAM_TYPES.get(
-                stream_type, f'type{stream_type}')
+                stream_type, 'type%u' % stream_type)

             stream_formats = []
             for unum, video in enumerate(video_urls):

View file

@@ -48,6 +48,6 @@ def _real_extract(self, url):
             'description': description,
             'channel': channel,
             'channel_id': channel_id,
-            'release_date': ('{:04d}{:02d}{:02d}'.format(*map(int, release_date))) if release_date else None,
+            'release_date': ('%04d%02d%02d' % tuple(map(int, release_date))) if release_date else None,
         })
         return info

View file

@@ -162,7 +162,7 @@ def _extract_free_formats(self, video, video_id):
         ps = str(stream_data['originalDomain'])
         if stream_data['applyFolderHierarchy'] == 1:
-            s = f'{int(video_id):04d}'[::-1]
+            s = ('%04d' % int(video_id))[::-1]
             ps += f'/{s[0:2]}/{s[2:4]}'
         ps += f'/{video_id}/{video_hash}_'
@@ -294,7 +294,7 @@ def get_cdn_shield_base(shield_type='', static=False):
             else:
                 prefix = 'd' if static else 'p'
                 account = int(stream_data['azureAccount'].replace('nexxplayplus', '').replace('nexxplayfb', ''))
-                return f'http://nx-{prefix}{account:02d}.akamaized.net/'
+                return 'http://nx-%s%02d.akamaized.net/' % (prefix, account)

         language = video['general'].get('language_raw') or ''

View file

@@ -34,7 +34,7 @@ def _extract_nrk_formats(self, asset_url, video_id):
             asset_url, video_id, 'mp4', 'm3u8_native', fatal=False)
         if not formats and re.search(self._CDN_REPL_REGEX, asset_url):
             formats = self._extract_m3u8_formats(
-                re.sub(self._CDN_REPL_REGEX, f'://nrk-od-{random.randint(0, 99):02d}.akamaized.net/no/', asset_url),
+                re.sub(self._CDN_REPL_REGEX, '://nrk-od-%02d.akamaized.net/no/' % random.randint(0, 99), asset_url),
                 video_id, 'mp4', 'm3u8_native', fatal=False)
         return formats

View file

@@ -486,7 +486,7 @@ def _extract_webpage(self, url):
             prg_id = prg_id.split('q')[1]
             prg_id = int(prg_id, 16)
             getdir = self._download_json(
-                f'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir{prg_id}.json',
+                'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id,
                 presumptive_id, 'Downloading getdir JSON',
                 transform_source=strip_jsonp)
             return getdir['mid'], presumptive_id, upload_date, description

View file

@@ -350,7 +350,7 @@ def guess_allowed_qualities():
                     'locale': 'en',
                     'moduleName': name,
                     'mediaType': ext,
-                    'quality': '{}x{}'.format(f['width'], f['height']),
+                    'quality': '%dx%d' % (f['width'], f['height']),
                 }
                 format_id = f'{ext}-{quality}'

View file

@@ -185,7 +185,7 @@ def _real_extract(self, url):
             return videos[0]
         playlist_title = series_name
         if season_number is not None:
-            playlist_title += f' - Season {season_number}'
+            playlist_title += ' - Season %d' % season_number
         return self.playlist_result(videos,
                                     playlist_id=video_json.get('_id', info_slug),
                                     playlist_title=playlist_title)

View file

@@ -61,7 +61,8 @@ def _real_extract(self, url):
         primary_image_number = info.get('primaryImageNumber')
         thumbnail = None
         if movie_id and primary_image_number:
-            thumbnail = f'http://pic.aebn.net/dis/t/{movie_id}/{movie_id}_{primary_image_number:08d}.jpg'
+            thumbnail = 'http://pic.aebn.net/dis/t/%s/%s_%08d.jpg' % (
+                movie_id, movie_id, primary_image_number)
         start = int_or_none(info.get('startSecond'))
         end = int_or_none(info.get('endSecond'))
         duration = end - start if start and end else None

View file

@@ -52,7 +52,7 @@ def _real_extract(self, url):
         thumbnail = self._og_search_thumbnail(webpage)
         description = self._og_search_description(webpage)

-        upload_date = '{:04d}{:02d}{:02d}'.format(
+        upload_date = '%04d%02d%02d' % (
             int(mobj.group('y')),
             int(mobj.group('m')),
             int(mobj.group('d')),

View file

@@ -320,7 +320,7 @@ def _real_extract(self, url):
                     del f['protocol']
                     f.update({
                         'format_id': m3u8_f['format_id'].replace('hls-', 'http-'),
-                        'url': re.sub(height_re, f'-{height}p.', http_url),
+                        'url': re.sub(height_re, '-%dp.' % height, http_url),
                     })
                     formats.append(f)
             else:

View file

@@ -204,7 +204,7 @@ def extract_bitrate(url):
                 ext = media.get('ext') or determine_ext(media_url, 'mp4')
                 format_id = ext
                 if rate:
-                    format_id += f'-{rate:.0f}k'
+                    format_id += '-%dk' % rate
                 formats.append({
                     'format_id': format_id,
                     'url': urljoin(download_base, media_url),

View file

@@ -146,7 +146,7 @@ def _real_extract(self, url):
             self.report_drm(video_id)
         dash_url = content['videoURL']
         headers = {
-            'x-playback-session-id': f'{uuid.uuid4().hex}-{int(time.time() * 1000)}',
+            'x-playback-session-id': '%s-%d' % (uuid.uuid4().hex, time.time() * 1000),
         }
         formats = self._extract_mpd_formats(
             dash_url, video_id, mpd_id='dash', headers=headers, fatal=False)

View file

@@ -353,7 +353,7 @@ class ThePlatformFeedIE(ThePlatformBaseIE):
     def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}, account_id=None):
         real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query)
         entry = self._download_json(real_url, video_id)['entries'][0]
-        main_smil_url = f'http://link.theplatform.com/s/{provider_id}/media/guid/{account_id}/{entry["guid"]}' if account_id else entry.get('plmedia$publicUrl')
+        main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (provider_id, account_id, entry['guid']) if account_id else entry.get('plmedia$publicUrl')

         formats = []
         subtitles = {}

View file

@@ -294,7 +294,7 @@ def _entries(self, uploader_id):
                 yield self.url_result(urljoin(base_url, mobj.group('url')))

             next_url = self._search_regex(
-                r'<a href="(/%s/show/%d-\d+)[?"]' % (re.escape(uploader_id), page_num),  # noqa: UP031
+                r'<a href="(/%s/show/%d-\d+)[?"]' % (re.escape(uploader_id), page_num),
                 webpage, 'next url', default=None)
             next_url = urljoin(base_url, next_url)
             if not next_url:

View file

@@ -42,7 +42,7 @@ def _real_extract(self, url):
                 'quality': 3 if f['FileType'] == 'mp3' else 2,
             } for f in s['Files']]
             e = {
-                'id': '{}-{}'.format(s['BandId'], s['$id']),
+                'id': '%d-%s' % (s['BandId'], s['$id']),
                 'title': s['Title'],
                 'uploader_id': playlist_id,
                 'uploader': s.get('BandName', playlist_id),

View file

@@ -550,7 +550,7 @@ def _entries(self, page_id, section):
         while True:
             for video in video_list:
                 v = self._VIDEO._make(video[:2])
-                video_id = f'{v.owner_id}_{v.id}'
+                video_id = '%d_%d' % (v.owner_id, v.id)
                 yield self.url_result(
                     'http://vk.com/video' + video_id, VKIE.ie_key(), video_id)
             if count >= total:

View file

@@ -55,7 +55,7 @@ def _call_api(self, url, video_id, **kwargs):
         if response.get('Status') != 0:
             message = traverse_obj(response, ('Value', 'Error', 'Message'), expected_type=str)
             if not message:
-                message = f'There was a error in the response: {response.get("Status")}'
+                message = 'There was a error in the response: %d' % response.get('Status')
             raise ExtractorError(message, expected=False)
         return response.get('Value')
@@ -116,7 +116,7 @@ def _entries(self, channel_id):
                 break
             yield from playlist_data
             last = playlist_data[-1]
-            pager = '&pid={}&p_date={}&play_count={}'.format(last['PlaylistId'], last['Published'], last['PlayCount'])
+            pager = '&pid=%d&p_date=%s&play_count=%s' % (last['PlaylistId'], last['Published'], last['PlayCount'])

     def _real_extract(self, url):
         channel_id = self._match_id(url)
@@ -133,7 +133,7 @@ def _real_extract(self, url):
         articles = itertools.chain([first_article], articles) if first_article else articles

         playlist = (
-            self.url_result(smuggle_url('https://voicy.jp/channel/{}/{}'.format(channel_id, value['PlaylistId']), value), VoicyIE.ie_key())
+            self.url_result(smuggle_url('https://voicy.jp/channel/%s/%d' % (channel_id, value['PlaylistId']), value), VoicyIE.ie_key())
             for value in articles)
         return {
             '_type': 'playlist',

View file

@@ -51,7 +51,7 @@ def _real_extract(self, url):
             '_type': 'url',
             'id': uuid,
             'title': video.get('title'),
-            'url': f'http://mychannels.video/embed/{video["myChannelsVideo"]}',
+            'url': 'http://mychannels.video/embed/%d' % video['myChannelsVideo'],
             'description': video.get('description'),
             'timestamp': parse_iso8601(video.get('publishedAt')),
             'duration': int_or_none(video.get('duration')),

View file

@@ -43,7 +43,7 @@ def _real_extract(self, url):
             r'>Recorded: (\d{2})-(Jan|Feb|Mar|Apr|May|Ju[nl]|Aug|Sep|Oct|Nov|Dec)-(\d{4})',
             webpage)
         if mobj:
-            upload_date = f'{mobj.group(3)}{month_by_abbreviation(mobj.group(2)):02d}{mobj.group(1)}'
+            upload_date = '%s%.2d%s' % (mobj.group(3), month_by_abbreviation(mobj.group(2)), mobj.group(1))
         filesize = parse_filesize(self._html_search_regex(
             r'>Size: ([^<]+)<', webpage, 'file size', fatal=False))
         duration = int_or_none(self._html_search_regex(

View file

@@ -61,7 +61,7 @@ def _real_extract(self, url):
             'encoded data')
         h = time.time() / 3600
         a = sum(map(int, [compat_ord(c) for c in k])) + int(video_id) + h
-        video_url = f'http://x-minus.me/dl/minus?id={video_id}&tkn2={a}f{h:.0f}'
+        video_url = 'http://x-minus.me/dl/minus?id=%s&tkn2=%df%d' % (video_id, a, h)

         return {
             'id': video_id,

View file

@@ -74,7 +74,7 @@ def _extract_video_info(self, partner_id, video_id):
                     'app': mobj.group('app'),
                     'ext': 'flv',
                     'tbr': tbr,
-                    'format_id': f'rtmp-{tbr:.0f}',
+                    'format_id': 'rtmp-%d' % tbr,
                 })
             else:
                 formats.append({

View file

@@ -165,7 +165,7 @@ def _real_extract(self, url):
                 raise ExtractorError(
                     'Youku said: Sorry, this video is private', expected=True)
             else:
-                msg = f'Youku server reported error {error.get("code")}'
+                msg = 'Youku server reported error %i' % error.get('code')
                 if error_note is not None:
                     msg += ': ' + clean_html(error_note)
                 raise ExtractorError(msg)

View file

@@ -3083,8 +3083,8 @@ def _print_sig_code(self, func, example_sig):
         def gen_sig_code(idxs):
             def _genslice(start, end, step):
                 starts = '' if start == 0 else str(start)
-                ends = f':{end + step}' if end + step >= 0 else ':'
-                steps = '' if step == 1 else f':{step}'
+                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
+                steps = '' if step == 1 else (':%d' % step)
                 return f's[{starts}{ends}{steps}]'

             step = None
@@ -3102,9 +3102,9 @@ def _genslice(start, end, step):
                     start = prev
                     continue
                 else:
-                    yield f's[{prev}]'
+                    yield 's[%d]' % prev
                 if step is None:
-                    yield f's[{i}]'
+                    yield 's[%d]' % i
                 else:
                     yield _genslice(start, i, step)
@@ -3628,7 +3628,7 @@ def extract_thread(contents, entity_payloads):
                 if is_first_continuation:
                     note_prefix = 'Downloading comment section API JSON'
                 else:
-                    note_prefix = '    Downloading comment API JSON reply thread {} {}'.format(
+                    note_prefix = '    Downloading comment API JSON reply thread %d %s' % (
                         tracker['current_page_thread'], comment_prog_str)
             else:
                 note_prefix = '{}Downloading comment{} API JSON page {} {}'.format(

View file

@@ -724,8 +724,8 @@ def ffmpeg_escape(text):
         metadata_file_content = ';FFMETADATA1\n'
         for chapter in chapters:
             metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n'
-            metadata_file_content += f'START={int(chapter["start_time"] * 1000)}\n'
-            metadata_file_content += f'END={int(chapter["end_time"] * 1000)}\n'
+            metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000)
+            metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000)
             chapter_title = chapter.get('title')
             if chapter_title:
                 metadata_file_content += f'title={ffmpeg_escape(chapter_title)}\n'
@@ -1042,7 +1042,7 @@ def _ffmpeg_args_for_chapter(self, number, chapter, info):
             return

         chapter['filepath'] = destination
-        self.to_screen(f'Chapter {number:03d}; Destination: {destination}')
+        self.to_screen('Chapter %03d; Destination: %s' % (number, destination))
         return (
             destination,
             ['-ss', str(chapter['start_time']),

View file

@@ -25,7 +25,7 @@ def bytes_to_intlist(bs):

 def intlist_to_bytes(xs):
     if not xs:
         return b''
-    return struct.pack('%dB' % len(xs), *xs)  # noqa: UP031
+    return struct.pack('%dB' % len(xs), *xs)

 compiled_regex_type = type(re.compile(''))

View file

@@ -927,12 +927,12 @@ def timetuple_from_msec(msec):

 def formatSeconds(secs, delim=':', msec=False):
     time = timetuple_from_msec(secs * 1000)
     if time.hours:
-        ret = f'{time.hours:.0f}{delim}{time.minutes:02.0f}{delim}{time.seconds:02.0f}'
+        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
     elif time.minutes:
-        ret = f'{time.minutes:.0f}{delim}{time.seconds:02.0f}'
+        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
     else:
-        ret = f'{time.seconds:.0f}'
-    return f'{ret}.{time.milliseconds:03.0f}' if msec else ret
+        ret = '%d' % time.seconds
+    return '%s.%03d' % (ret, time.milliseconds) if msec else ret

 def bug_reports_message(before=';'):
@@ -1434,7 +1434,7 @@ def __eq__(self, other):
 def system_identifier():
     python_implementation = platform.python_implementation()
     if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'):
-        python_implementation += ' version {}.{}.{}'.format(*sys.pypy_version_info[:3])
+        python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3]
     libc_ver = []
     with contextlib.suppress(OSError):  # We may not have access to the executable
         libc_ver = platform.libc_ver()
@@ -3380,12 +3380,12 @@ def parse_dfxp_time_expr(time_expr):

 def srt_subtitles_timecode(seconds):
-    return '{:02.0f}:{:02.0f}:{:02.0f},{:03.0f}'.format(*timetuple_from_msec(seconds * 1000))
+    return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)

 def ass_subtitles_timecode(seconds):
     time = timetuple_from_msec(seconds * 1000)
-    return '{:01.0f}:{:02.0f}:{:02.0f}.{:02.0f}'.format(*time[:-1], time.milliseconds / 10)
+    return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)

 def dfxp2srt(dfxp_data):
@@ -3547,10 +3547,11 @@ def parse_node(node):
             if not dur:
                 continue
             end_time = begin_time + dur
-            out.append(
-                f'{index}\n'
-                f'{srt_subtitles_timecode(begin_time)} --> {srt_subtitles_timecode(end_time)}\n'
-                f'{parse_node(para)}\n\n')
+            out.append('%d\n%s --> %s\n%s\n\n' % (
+                index,
+                srt_subtitles_timecode(begin_time),
+                srt_subtitles_timecode(end_time),
+                parse_node(para)))

     return ''.join(out)
@@ -4814,7 +4815,7 @@ def remove_terminal_sequences(string):

 def number_of_digits(number):
-    return len(f'{number}')
+    return len('%d' % number)

 def join_nonempty(*values, delim='-', from_dict=None):

View file

@@ -111,7 +111,7 @@ def _format_ts(ts):
     Convert an MPEG PES timestamp into a WebVTT timestamp.
     This will lose sub-millisecond precision.
     """
-    return '{:02.0f}:{:02.0f}:{:02.0f}.{:03.0f}'.format(*timetuple_from_msec(int((ts + 45) // 90)))
+    return '%02u:%02u:%02u.%03u' % timetuple_from_msec(int((ts + 45) // 90))

 class Block: