diff --git a/devscripts/make_readme.py b/devscripts/make_readme.py index 8afd4c17f..cbb5859aa 100755 --- a/devscripts/make_readme.py +++ b/devscripts/make_readme.py @@ -75,7 +75,7 @@ def apply_patch(text, patch): ), ( # Avoid newline when a space is available b/w switch and description DISABLE_PATCH, # This creates issues with prepare_manpage - r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim), # noqa: UP031 + r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim), r'\1 ', ), ( # Replace brackets with a Markdown link diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py index e87b106f7..966d27a49 100644 --- a/test/test_YoutubeDL.py +++ b/test/test_YoutubeDL.py @@ -854,7 +854,7 @@ def expect_same_infodict(out): test('%(id+1-height+3)05d', '00158') test('%(width+100)05d', 'NA') test('%(filesize*8)d', '8192') - test('%(formats.0) 15s', ('% 15s' % FORMATS[0], None)) # noqa: UP031 + test('%(formats.0) 15s', ('% 15s' % FORMATS[0], None)) test('%(formats.0)r', (repr(FORMATS[0]), None)) test('%(height.0)03d', '001') test('%(-height.0)04d', '-001') diff --git a/test/test_download.py b/test/test_download.py index df9d1a4c3..3f36869d9 100755 --- a/test/test_download.py +++ b/test/test_download.py @@ -199,14 +199,14 @@ def try_rm_tcs_files(tcs=None): self, len(res_dict['entries']), test_case['playlist_mincount'], - 'Expected at least {} in playlist {}, but got only {}'.format( + 'Expected at least %d in playlist %s, but got only %d' % ( test_case['playlist_mincount'], test_case['url'], len(res_dict['entries']))) if 'playlist_count' in test_case: self.assertEqual( len(res_dict['entries']), test_case['playlist_count'], - 'Expected {} entries in playlist {}, but got {}.'.format( + 'Expected %d entries in playlist %s, but got %d.' 
% ( test_case['playlist_count'], test_case['url'], len(res_dict['entries']), diff --git a/yt_dlp/YoutubeDL.py b/yt_dlp/YoutubeDL.py index 880a4955d..4155c0561 100644 --- a/yt_dlp/YoutubeDL.py +++ b/yt_dlp/YoutubeDL.py @@ -1504,10 +1504,10 @@ def check_filter(): if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: - return f'Skipping {video_title}, because it has not reached minimum view count ({view_count}/{min_views})' + return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: - return f'Skipping {video_title}, because it has exceeded the maximum view count ({view_count}/{max_views})' + return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return f'Skipping "{video_title}" because it is age restricted' @@ -1654,7 +1654,7 @@ def _wait_for_video(self, ie_result={}): or ie_result.get('formats') or ie_result.get('url')): return - format_dur = lambda dur: '{:02.0f}:{:02.0f}:{:02.0f}'.format(*timetuple_from_msec(dur * 1000)[:-1]) + format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1] last_msg = '' def progress(msg): @@ -2634,7 +2634,7 @@ def check_thumbnails(thumbnails): if t.get('id') is None: t['id'] = str(i) if t.get('width') and t.get('height'): - t['resolution'] = '{}x{}'.format(t['width'], t['height']) + t['resolution'] = '%dx%d' % (t['width'], t['height']) t['url'] = sanitize_url(t['url']) if self.params.get('check_formats') is True: @@ -2694,7 +2694,7 @@ def _fill_common_fields(self, info_dict, final=True): # in order to always have clean titles. This is very common for TV series. 
for field in ('chapter', 'season', 'episode'): if final and info_dict.get(f'{field}_number') is not None and not info_dict.get(field): - info_dict[field] = '{} {}'.format(field.capitalize(), info_dict[f'{field}_number']) + info_dict[field] = '%s %d' % (field.capitalize(), info_dict[f'{field}_number']) for old_key, new_key in self._deprecated_multivalue_fields.items(): if new_key in info_dict and old_key in info_dict: @@ -3790,11 +3790,11 @@ def format_resolution(format, default='unknown'): if format.get('resolution') is not None: return format['resolution'] if format.get('width') and format.get('height'): - return '{}x{}'.format(format['width'], format['height']) + return '%dx%d' % (format['width'], format['height']) elif format.get('height'): return '{}p'.format(format['height']) elif format.get('width'): - return '{}x?'.format(format['width']) + return '%dx?' % format['width'] return default def _list_format_headers(self, *headers): @@ -3817,7 +3817,7 @@ def _format_note(self, fdict): if fdict.get('tbr') is not None: if res: res += ', ' - res += '{:4.0f}k'.format(fdict['tbr']) + res += '%4dk' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' @@ -3832,7 +3832,7 @@ def _format_note(self, fdict): elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: - res += '{:4.0f}k'.format(fdict['vbr']) + res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: if res: res += ', ' @@ -3843,15 +3843,15 @@ def _format_note(self, fdict): if fdict['acodec'] == 'none': res += 'video only' else: - res += '{:<5}'.format(fdict['acodec']) + res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: - res += '@{:3.0f}k'.format(fdict['abr']) + res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: - res += ' ({:5.0f}Hz)'.format(fdict['asr']) + res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if 
res: res += ', ' diff --git a/yt_dlp/downloader/common.py b/yt_dlp/downloader/common.py index 2aca01aec..e8dcb37cc 100644 --- a/yt_dlp/downloader/common.py +++ b/yt_dlp/downloader/common.py @@ -125,7 +125,7 @@ def format_seconds(seconds): time = timetuple_from_msec(seconds * 1000) if time.hours > 99: return '--:--:--' - return '{:02.0f}:{:02.0f}:{:02.0f}'.format(*time[:-1]) + return '%02d:%02d:%02d' % time[:-1] @classmethod def format_eta(cls, seconds): diff --git a/yt_dlp/downloader/external.py b/yt_dlp/downloader/external.py index 4b8c924ba..7f6b5b45c 100644 --- a/yt_dlp/downloader/external.py +++ b/yt_dlp/downloader/external.py @@ -76,7 +76,8 @@ def real_download(self, filename, info_dict): return True else: self.to_stderr('\n') - self.report_error(f'{self.get_basename()} exited with code {retval}') + self.report_error('%s exited with code %d' % ( + self.get_basename(), retval)) return False @classmethod diff --git a/yt_dlp/downloader/f4m.py b/yt_dlp/downloader/f4m.py index b00229942..22d0ebd26 100644 --- a/yt_dlp/downloader/f4m.py +++ b/yt_dlp/downloader/f4m.py @@ -25,7 +25,8 @@ def read_bytes(self, n): data = self.read(n) if len(data) < n: raise DataTruncatedError( - f'FlvReader error: need {n} bytes while only {len(data)} bytes got') + 'FlvReader error: need %d bytes while only %d bytes got' % ( + n, len(data))) return data # Utility functions for reading numbers and strings @@ -234,7 +235,7 @@ def remove_encrypted_media(media): def _add_ns(prop, ver=1): - return '{http://ns.adobe.com/f4m/%d.0}%s' % (ver, prop) # noqa: UP031 + return '{http://ns.adobe.com/f4m/%d.0}%s' % (ver, prop) def get_base_url(manifest): @@ -376,7 +377,7 @@ def real_download(self, filename, info_dict): frag_index += 1 if frag_index <= ctx['fragment_index']: continue - name = f'Seg{seg_i}-Frag{frag_i}' + name = 'Seg%d-Frag%d' % (seg_i, frag_i) query = [] if base_url_parsed.query: query.append(base_url_parsed.query) @@ -410,7 +411,7 @@ def real_download(self, filename, info_dict): if live 
and (err.status == 404 or err.status == 410): # We didn't keep up with the live window. Continue # with the next available fragment. - msg = f'Fragment {frag_i} unavailable' + msg = 'Fragment %d unavailable' % frag_i self.report_warning(msg) fragments_list = [] else: @@ -420,7 +421,7 @@ def real_download(self, filename, info_dict): fragments_list = self._update_live_fragments(bootstrap_url, frag_i) total_frags += len(fragments_list) if fragments_list and (fragments_list[0][1] > frag_i + 1): - msg = f'Missed {fragments_list[0][1] - (frag_i + 1)} fragments' + msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1)) self.report_warning(msg) return self._finish_frag_download(ctx, info_dict) diff --git a/yt_dlp/downloader/fc2.py b/yt_dlp/downloader/fc2.py index fe3dcc5ad..f9763debb 100644 --- a/yt_dlp/downloader/fc2.py +++ b/yt_dlp/downloader/fc2.py @@ -23,7 +23,7 @@ def heartbeat(): try: heartbeat_state[1] += 1 - ws.send('{"name":"heartbeat","arguments":{},"id":%d}' % heartbeat_state[1]) # noqa: UP031 + ws.send('{"name":"heartbeat","arguments":{},"id":%d}' % heartbeat_state[1]) except Exception: self.to_screen('[fc2:live] Heartbeat failed') diff --git a/yt_dlp/downloader/fragment.py b/yt_dlp/downloader/fragment.py index f029dfabb..98784e703 100644 --- a/yt_dlp/downloader/fragment.py +++ b/yt_dlp/downloader/fragment.py @@ -109,7 +109,7 @@ def _write_ytdl_file(self, ctx): frag_index_stream.close() def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_data=None): - fragment_filename = '{}-Frag{}'.format(ctx['tmpfilename'], ctx['fragment_index']) + fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index']) fragment_info_dict = { 'url': frag_url, 'http_headers': headers or info_dict.get('http_headers'), @@ -156,10 +156,10 @@ def _append_fragment(self, ctx, frag_content): def _prepare_frag_download(self, ctx): if not ctx.setdefault('live', False): - total_frags_str = str(ctx['total_frags']) + total_frags_str = '%d' % 
ctx['total_frags'] ad_frags = ctx.get('ad_frags', 0) if ad_frags: - total_frags_str += f' (not including {ad_frags} ad)' + total_frags_str += ' (not including %d ad)' % ad_frags else: total_frags_str = 'unknown (live)' self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}') @@ -322,10 +322,10 @@ def _prepare_external_frag_download(self, ctx): if 'live' not in ctx: ctx['live'] = False if not ctx['live']: - total_frags_str = str(ctx['total_frags']) + total_frags_str = '%d' % ctx['total_frags'] ad_frags = ctx.get('ad_frags', 0) if ad_frags: - total_frags_str += f' (not including {ad_frags} ad)' + total_frags_str += ' (not including %d ad)' % ad_frags else: total_frags_str = 'unknown (live)' self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}') @@ -445,7 +445,7 @@ def download_fragment(fragment, ctx): headers = HTTPHeaderDict(info_dict.get('http_headers')) byte_range = fragment.get('byte_range') if byte_range: - headers['Range'] = 'bytes={}-{}'.format(byte_range['start'], byte_range['end'] - 1) + headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1) # Never skip the first fragment fatal = is_fatal(fragment.get('index') or (frag_index - 1)) diff --git a/yt_dlp/downloader/niconico.py b/yt_dlp/downloader/niconico.py index 9aeefeb21..462c6e2d6 100644 --- a/yt_dlp/downloader/niconico.py +++ b/yt_dlp/downloader/niconico.py @@ -42,7 +42,7 @@ def heartbeat(): timer[0].start() heartbeat_info_dict['ping']() - self.to_screen(f'[{self.FD_NAME}] Heartbeat with {heartbeat_interval} second interval ...') + self.to_screen('[%s] Heartbeat with %d second interval ...' 
% (self.FD_NAME, heartbeat_interval)) try: heartbeat() if type(fd).__name__ == 'HlsFD': diff --git a/yt_dlp/downloader/rtmp.py b/yt_dlp/downloader/rtmp.py index c0093b26a..1b831e5f3 100644 --- a/yt_dlp/downloader/rtmp.py +++ b/yt_dlp/downloader/rtmp.py @@ -208,5 +208,5 @@ def run_rtmpdump(args): return True else: self.to_stderr('\n') - self.report_error(f'rtmpdump exited with code {retval}') + self.report_error('rtmpdump exited with code %d' % retval) return False diff --git a/yt_dlp/downloader/rtsp.py b/yt_dlp/downloader/rtsp.py index 3a66d418b..b4b0be7e6 100644 --- a/yt_dlp/downloader/rtsp.py +++ b/yt_dlp/downloader/rtsp.py @@ -38,5 +38,5 @@ def real_download(self, filename, info_dict): return True else: self.to_stderr('\n') - self.report_error(f'{args[0]} exited with code {retval}') + self.report_error('%s exited with code %d' % (args[0], retval)) return False diff --git a/yt_dlp/extractor/adn.py b/yt_dlp/extractor/adn.py index a28a5d757..919e1d6af 100644 --- a/yt_dlp/extractor/adn.py +++ b/yt_dlp/extractor/adn.py @@ -134,7 +134,7 @@ def _get_subtitles(self, sub_url, video_id): ssa += os.linesep + 'Dialogue: Marked=0,{},{},Default,,0,0,0,,{}{}'.format( ass_subtitles_timecode(start), ass_subtitles_timecode(end), - '{\\a%d}' % alignment if alignment != 2 else '', # noqa: UP031 + '{\\a%d}' % alignment if alignment != 2 else '', text.replace('\n', '\\N').replace('', '{\\i1}').replace('', '{\\i0}')) if sub_lang == 'vostf': diff --git a/yt_dlp/extractor/arcpublishing.py b/yt_dlp/extractor/arcpublishing.py index e2d034a90..8da9bc4cc 100644 --- a/yt_dlp/extractor/arcpublishing.py +++ b/yt_dlp/extractor/arcpublishing.py @@ -118,7 +118,7 @@ def _real_extract(self, url): if isinstance(f['tbr'], float): f['vbr'] = f['tbr'] * 1000 del f['tbr'] - f['format_id'] = 'rtmp-{:.0f}'.format(f['vbr']) + f['format_id'] = 'rtmp-%d' % f['vbr'] formats.extend(smil_formats) elif stream_type in ('ts', 'hls'): m3u8_formats = self._extract_m3u8_formats( @@ -130,7 +130,7 @@ def 
_real_extract(self, url): if not height: continue vbr = self._search_regex( - fr'[_x]{height}[_-](\d+)', f['url'], 'vbr', default=None) + r'[_x]%d[_-](\d+)' % height, f['url'], 'vbr', default=None) if vbr: f['vbr'] = int(vbr) formats.extend(m3u8_formats) diff --git a/yt_dlp/extractor/audiomack.py b/yt_dlp/extractor/audiomack.py index 7f79148bc..1d4460c9f 100644 --- a/yt_dlp/extractor/audiomack.py +++ b/yt_dlp/extractor/audiomack.py @@ -49,7 +49,8 @@ def _real_extract(self, url): # Request the extended version of the api for extra fields like artist and title api_response = self._download_json( - f'http://www.audiomack.com/api/music/url/song/{album_url_tag}?extended=1&_={int(time.time())}', + 'http://www.audiomack.com/api/music/url/song/%s?extended=1&_=%d' % ( + album_url_tag, time.time()), album_url_tag) # API is inconsistent with errors @@ -119,8 +120,9 @@ def _real_extract(self, url): for track_no in itertools.count(): # Get song's metadata api_response = self._download_json( - f'http://www.audiomack.com/api/music/url/album/{album_url_tag}/{track_no}?extended=1&_={int(time.time())}', - album_url_tag, note=f'Querying song information ({track_no + 1})') + 'http://www.audiomack.com/api/music/url/album/%s/%d?extended=1&_=%d' + % (album_url_tag, track_no, time.time()), album_url_tag, + note=f'Querying song information ({track_no + 1})') # Total failure, only occurs when url is totally wrong # Won't happen in middle of valid playlist (next case) diff --git a/yt_dlp/extractor/bilibili.py b/yt_dlp/extractor/bilibili.py index d6de89ee5..528336a92 100644 --- a/yt_dlp/extractor/bilibili.py +++ b/yt_dlp/extractor/bilibili.py @@ -1641,7 +1641,7 @@ def _entries(self, category, subcategory, query): f'The subcategory {subcategory} isn\'t supported for this category. 
Supported subcategories: {list(rid_map[category].keys())}') rid_value = rid_map[category][subcategory] - api_url = f'https://api.bilibili.com/x/web-interface/newlist?rid={rid_value}&type=1&ps=20&jsonp=jsonp' + api_url = 'https://api.bilibili.com/x/web-interface/newlist?rid=%d&type=1&ps=20&jsonp=jsonp' % rid_value page_json = self._download_json(api_url, query, query={'Search_key': query, 'pn': '1'}) page_data = traverse_obj(page_json, ('data', 'page'), expected_type=dict) count, size = int_or_none(page_data.get('count')), int_or_none(page_data.get('size')) diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py index d800f70be..946827dc4 100644 --- a/yt_dlp/extractor/common.py +++ b/yt_dlp/extractor/common.py @@ -2230,7 +2230,7 @@ def build_stream_name(): # format_id intact. if not live: stream_name = build_stream_name() - format_id[1] = stream_name or f'{int(tbr) or len(formats)}' + format_id[1] = stream_name or '%d' % (tbr or len(formats)) f = { 'format_id': join_nonempty(*format_id), 'format_index': idx, @@ -2469,7 +2469,7 @@ def _parse_smil_formats_and_subtitles( 'url': streamer, 'play_path': src, 'ext': 'flv', - 'format_id': f'rtmp-{(rtmp_count if bitrate is None else bitrate):.0f}', + 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate), 'tbr': bitrate, 'filesize': filesize, 'width': width, @@ -2493,7 +2493,7 @@ def _parse_smil_formats_and_subtitles( if len(m3u8_formats) == 1: m3u8_count += 1 m3u8_formats[0].update({ - 'format_id': f'hls-{(m3u8_count if bitrate is None else bitrate):.0f}', + 'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate), 'tbr': bitrate, 'width': width, 'height': height, @@ -2524,7 +2524,7 @@ def _parse_smil_formats_and_subtitles( formats.append({ 'url': src_url, 'ext': ext or src_ext or 'flv', - 'format_id': f'http-{(bitrate or http_count):.0f}', + 'format_id': 'http-%d' % (bitrate or http_count), 'tbr': bitrate, 'filesize': filesize, 'width': width, @@ -3985,7 +3985,7 @@ def 
_real_extract(self, query): if n <= 0: raise ExtractorError(f'invalid download number {n} for query "{query}"') elif n > self._MAX_RESULTS: - self.report_warning(f'{self._SEARCH_KEY} returns max {self._MAX_RESULTS} results (you requested {n})') + self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n)) n = self._MAX_RESULTS return self._get_n_results(query, n) diff --git a/yt_dlp/extractor/cultureunplugged.py b/yt_dlp/extractor/cultureunplugged.py index b9464b399..8e6579c35 100644 --- a/yt_dlp/extractor/cultureunplugged.py +++ b/yt_dlp/extractor/cultureunplugged.py @@ -33,7 +33,7 @@ def _real_extract(self, url): # request setClientTimezone.php to get PHPSESSID cookie which is need to get valid json data in the next request self._request_webpage(HEADRequest( - 'http://www.cultureunplugged.com/setClientTimezone.php?timeOffset=%d' % -(time.timezone / 3600)), display_id) # noqa: UP031 + 'http://www.cultureunplugged.com/setClientTimezone.php?timeOffset=%d' % -(time.timezone / 3600)), display_id) movie_data = self._download_json( f'http://www.cultureunplugged.com/movie-data/cu-{video_id}.json', display_id) diff --git a/yt_dlp/extractor/dailymotion.py b/yt_dlp/extractor/dailymotion.py index 51f468829..cb1453d3f 100644 --- a/yt_dlp/extractor/dailymotion.py +++ b/yt_dlp/extractor/dailymotion.py @@ -454,7 +454,7 @@ def _fetch_page(self, playlist_id, page): url } } - }''' % ('false' if self._FAMILY_FILTER else 'true', self._PAGE_SIZE, page), # noqa: UP031 + }''' % ('false' if self._FAMILY_FILTER else 'true', self._PAGE_SIZE, page), f'Downloading page {page}')['videos'] for edge in videos['edges']: node = edge['node'] diff --git a/yt_dlp/extractor/eighttracks.py b/yt_dlp/extractor/eighttracks.py index 2f42ae75a..3ac4c56ae 100644 --- a/yt_dlp/extractor/eighttracks.py +++ b/yt_dlp/extractor/eighttracks.py @@ -125,7 +125,7 @@ def _real_extract(self, url): try: api_json = self._download_webpage( next_url, playlist_id, - 
note=f'Downloading song information {i + 1}/{track_count}', + note='Downloading song information %d/%d' % (i + 1, track_count), errnote='Failed to download song information') except ExtractorError: if download_tries > 3: diff --git a/yt_dlp/extractor/franceinter.py b/yt_dlp/extractor/franceinter.py index c9e9ddbab..779249b84 100644 --- a/yt_dlp/extractor/franceinter.py +++ b/yt_dlp/extractor/franceinter.py @@ -37,8 +37,8 @@ def _real_extract(self, url): if upload_date_str: upload_date_list = upload_date_str.split() upload_date_list.reverse() - upload_date_list[1] = f'{(month_by_name(upload_date_list[1], lang="fr") or 0):02d}' - upload_date_list[2] = f'{int(upload_date_list[2]):02d}' + upload_date_list[1] = '%02d' % (month_by_name(upload_date_list[1], lang='fr') or 0) + upload_date_list[2] = '%02d' % int(upload_date_list[2]) upload_date = ''.join(upload_date_list) else: upload_date = None diff --git a/yt_dlp/extractor/gaia.py b/yt_dlp/extractor/gaia.py index 9176c9b00..048ea517b 100644 --- a/yt_dlp/extractor/gaia.py +++ b/yt_dlp/extractor/gaia.py @@ -73,7 +73,7 @@ def _real_extract(self, url): 'path': 'video/' + display_id, })['id'] node = self._download_json( - f'https://brooklyn.gaia.com/node/{node_id}', node_id) + 'https://brooklyn.gaia.com/node/%d' % node_id, node_id) vdata = node[vtype] media_id = str(vdata['nid']) title = node['title'] diff --git a/yt_dlp/extractor/gamejolt.py b/yt_dlp/extractor/gamejolt.py index da945f26e..01386c142 100644 --- a/yt_dlp/extractor/gamejolt.py +++ b/yt_dlp/extractor/gamejolt.py @@ -42,8 +42,9 @@ def _get_comments(self, post_num_id, post_hash_id): is_scrolled = sort_by in ('new', 'you') for page in itertools.count(1): comments_data = self._call_api( - f'comments/Fireside_Post/{post_num_id}/{sort_by}?' 
- f'{("scroll_id" if is_scrolled else "page")}={scroll_id if is_scrolled else page}', + 'comments/Fireside_Post/%s/%s?%s=%d' % ( + post_num_id, sort_by, + 'scroll_id' if is_scrolled else 'page', scroll_id if is_scrolled else page), post_hash_id, note=f'Downloading comments list page {page}') if not comments_data.get('comments'): break diff --git a/yt_dlp/extractor/globo.py b/yt_dlp/extractor/globo.py index 083de1920..d72296be6 100644 --- a/yt_dlp/extractor/globo.py +++ b/yt_dlp/extractor/globo.py @@ -114,7 +114,7 @@ def _real_extract(self, url): f'{self.IE_NAME} returned error: {message}', expected=True) hash_code = security_hash[:2] - padding = f'{random.randint(1, 10000000000):010d}' + padding = '%010d' % random.randint(1, 10000000000) if hash_code in ('04', '14'): received_time = security_hash[3:13] received_md5 = security_hash[24:] diff --git a/yt_dlp/extractor/iqiyi.py b/yt_dlp/extractor/iqiyi.py index 3cf293ca7..735b44637 100644 --- a/yt_dlp/extractor/iqiyi.py +++ b/yt_dlp/extractor/iqiyi.py @@ -105,8 +105,8 @@ def date(self, scheme): d = time.localtime(self.timestamp) strings = { 'y': str(d.tm_year), - 'm': f'{d.tm_mon:02d}', - 'd': f'{d.tm_mday:02d}', + 'm': '%02d' % d.tm_mon, + 'd': '%02d' % d.tm_mday, } self.target += ''.join(strings[c] for c in scheme) diff --git a/yt_dlp/extractor/japandiet.py b/yt_dlp/extractor/japandiet.py index 4971a1375..994da22ae 100644 --- a/yt_dlp/extractor/japandiet.py +++ b/yt_dlp/extractor/japandiet.py @@ -34,7 +34,7 @@ def _parse_japanese_date(text): # example input: 令和5年3月34日 # even though each era have their end, don't check here year += ERA_TABLE[era] - return f'{year:04d}{month:02d}{day:02d}' + return '%04d%02d%02d' % (year, month, day) def _parse_japanese_duration(text): diff --git a/yt_dlp/extractor/leeco.py b/yt_dlp/extractor/leeco.py index 9d664c1b7..58baa3fea 100644 --- a/yt_dlp/extractor/leeco.py +++ b/yt_dlp/extractor/leeco.py @@ -113,7 +113,7 @@ def _check_errors(self, play_json): if flag == 1: 
self.raise_geo_restricted() else: - raise ExtractorError(f'Generic error. flag = {flag}', expected=True) + raise ExtractorError('Generic error. flag = %d' % flag, expected=True) def _real_extract(self, url): media_id = self._match_id(url) @@ -318,7 +318,7 @@ def get_play_json(cf, timestamp): if play_json.get('message'): raise ExtractorError('Letv cloud said: {}'.format(play_json['message']), expected=True) elif play_json.get('code'): - raise ExtractorError('Letv cloud returned error {}'.format(play_json['code']), expected=True) + raise ExtractorError('Letv cloud returned error %d' % play_json['code'], expected=True) else: raise ExtractorError('Letv cloud returned an unknown error') diff --git a/yt_dlp/extractor/limelight.py b/yt_dlp/extractor/limelight.py index 0b5cc28ac..763a01448 100644 --- a/yt_dlp/extractor/limelight.py +++ b/yt_dlp/extractor/limelight.py @@ -124,7 +124,7 @@ def _extract_info(self, pc, mobile, i, referer): if rtmp: format_id = 'rtmp' if stream.get('videoBitRate'): - format_id += '-{}'.format(int_or_none(stream['videoBitRate'])) + format_id += '-%d' % int_or_none(stream['videoBitRate']) http_format_id = format_id.replace('rtmp', 'http') CDN_HOSTS = ( diff --git a/yt_dlp/extractor/linkedin.py b/yt_dlp/extractor/linkedin.py index c1f776d64..c8c8ae52a 100644 --- a/yt_dlp/extractor/linkedin.py +++ b/yt_dlp/extractor/linkedin.py @@ -61,7 +61,7 @@ def _call_api(self, course_slug, fields, video_slug=None, resolution=None): 'videoSlug': video_slug, 'resolution': f'_{resolution}', }) - sub = f' {resolution}p' + sub = ' %dp' % resolution api_url = 'https://www.linkedin.com/learning-api/detailedCourses' if not self._get_cookies(api_url).get('JSESSIONID'): self.raise_login_required() diff --git a/yt_dlp/extractor/livestream.py b/yt_dlp/extractor/livestream.py index 3bdadf960..7f7947ee7 100644 --- a/yt_dlp/extractor/livestream.py +++ b/yt_dlp/extractor/livestream.py @@ -100,7 +100,7 @@ def _parse_smil_formats_and_subtitles( furl += '&ssek=' + 
vn.attrib['clipBegin'] formats.append({ 'url': furl, - 'format_id': f'smil_{tbr:.0f}', + 'format_id': 'smil_%d' % tbr, 'ext': 'flv', 'tbr': tbr, 'preference': -1000, # Strictly inferior than all other formats? diff --git a/yt_dlp/extractor/medaltv.py b/yt_dlp/extractor/medaltv.py index 02575f1e9..d64dbfe63 100644 --- a/yt_dlp/extractor/medaltv.py +++ b/yt_dlp/extractor/medaltv.py @@ -99,7 +99,7 @@ def _real_extract(self, url): aspect_ratio = source_width / source_height if source_width and source_height else 16 / 9 def add_item(container, item_url, height, id_key='format_id', item_id=None): - item_id = item_id or f'{height}p' + item_id = item_id or '%dp' % height if item_id not in item_url: return width = int(round(aspect_ratio * height)) diff --git a/yt_dlp/extractor/mediasite.py b/yt_dlp/extractor/mediasite.py index 35b8db2df..ad7ab27e2 100644 --- a/yt_dlp/extractor/mediasite.py +++ b/yt_dlp/extractor/mediasite.py @@ -210,7 +210,7 @@ def _real_extract(self, url): video_urls = [] stream_id = self._STREAM_TYPES.get( - stream_type, f'type{stream_type}') + stream_type, 'type%u' % stream_type) stream_formats = [] for unum, video in enumerate(video_urls): diff --git a/yt_dlp/extractor/newspicks.py b/yt_dlp/extractor/newspicks.py index bb7e50bfb..4a1cb0a73 100644 --- a/yt_dlp/extractor/newspicks.py +++ b/yt_dlp/extractor/newspicks.py @@ -48,6 +48,6 @@ def _real_extract(self, url): 'description': description, 'channel': channel, 'channel_id': channel_id, - 'release_date': ('{:04d}{:02d}{:02d}'.format(*map(int, release_date))) if release_date else None, + 'release_date': ('%04d%02d%02d' % tuple(map(int, release_date))) if release_date else None, }) return info diff --git a/yt_dlp/extractor/nexx.py b/yt_dlp/extractor/nexx.py index 140dee765..ee1bc281c 100644 --- a/yt_dlp/extractor/nexx.py +++ b/yt_dlp/extractor/nexx.py @@ -162,7 +162,7 @@ def _extract_free_formats(self, video, video_id): ps = str(stream_data['originalDomain']) if stream_data['applyFolderHierarchy'] == 1: - 
s = f'{int(video_id):04d}'[::-1] + s = ('%04d' % int(video_id))[::-1] ps += f'/{s[0:2]}/{s[2:4]}' ps += f'/{video_id}/{video_hash}_' @@ -294,7 +294,7 @@ def get_cdn_shield_base(shield_type='', static=False): else: prefix = 'd' if static else 'p' account = int(stream_data['azureAccount'].replace('nexxplayplus', '').replace('nexxplayfb', '')) - return f'http://nx-{prefix}{account:02d}.akamaized.net/' + return 'http://nx-%s%02d.akamaized.net/' % (prefix, account) language = video['general'].get('language_raw') or '' diff --git a/yt_dlp/extractor/nrk.py b/yt_dlp/extractor/nrk.py index 2e9973047..658ae5f91 100644 --- a/yt_dlp/extractor/nrk.py +++ b/yt_dlp/extractor/nrk.py @@ -34,7 +34,7 @@ def _extract_nrk_formats(self, asset_url, video_id): asset_url, video_id, 'mp4', 'm3u8_native', fatal=False) if not formats and re.search(self._CDN_REPL_REGEX, asset_url): formats = self._extract_m3u8_formats( - re.sub(self._CDN_REPL_REGEX, f'://nrk-od-{random.randint(0, 99):02d}.akamaized.net/no/', asset_url), + re.sub(self._CDN_REPL_REGEX, '://nrk-od-%02d.akamaized.net/no/' % random.randint(0, 99), asset_url), video_id, 'mp4', 'm3u8_native', fatal=False) return formats diff --git a/yt_dlp/extractor/pbs.py b/yt_dlp/extractor/pbs.py index 9cdd18614..686796491 100644 --- a/yt_dlp/extractor/pbs.py +++ b/yt_dlp/extractor/pbs.py @@ -486,7 +486,7 @@ def _extract_webpage(self, url): prg_id = prg_id.split('q')[1] prg_id = int(prg_id, 16) getdir = self._download_json( - f'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir{prg_id}.json', + 'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id, presumptive_id, 'Downloading getdir JSON', transform_source=strip_jsonp) return getdir['mid'], presumptive_id, upload_date, description diff --git a/yt_dlp/extractor/pluralsight.py b/yt_dlp/extractor/pluralsight.py index 90c0c57d8..d3f03f7ee 100644 --- a/yt_dlp/extractor/pluralsight.py +++ b/yt_dlp/extractor/pluralsight.py @@ -350,7 +350,7 @@ def 
guess_allowed_qualities(): 'locale': 'en', 'moduleName': name, 'mediaType': ext, - 'quality': '{}x{}'.format(f['width'], f['height']), + 'quality': '%dx%d' % (f['width'], f['height']), } format_id = f'{ext}-{quality}' diff --git a/yt_dlp/extractor/plutotv.py b/yt_dlp/extractor/plutotv.py index 972295148..234ee987b 100644 --- a/yt_dlp/extractor/plutotv.py +++ b/yt_dlp/extractor/plutotv.py @@ -185,7 +185,7 @@ def _real_extract(self, url): return videos[0] playlist_title = series_name if season_number is not None: - playlist_title += f' - Season {season_number}' + playlist_title += ' - Season %d' % season_number return self.playlist_result(videos, playlist_id=video_json.get('_id', info_slug), playlist_title=playlist_title) diff --git a/yt_dlp/extractor/pornotube.py b/yt_dlp/extractor/pornotube.py index 85c8ca81c..80c9b278d 100644 --- a/yt_dlp/extractor/pornotube.py +++ b/yt_dlp/extractor/pornotube.py @@ -61,7 +61,8 @@ def _real_extract(self, url): primary_image_number = info.get('primaryImageNumber') thumbnail = None if movie_id and primary_image_number: - thumbnail = f'http://pic.aebn.net/dis/t/{movie_id}/{movie_id}_{primary_image_number:08d}.jpg' + thumbnail = 'http://pic.aebn.net/dis/t/%s/%s_%08d.jpg' % ( + movie_id, movie_id, primary_image_number) start = int_or_none(info.get('startSecond')) end = int_or_none(info.get('endSecond')) duration = end - start if start and end else None diff --git a/yt_dlp/extractor/presstv.py b/yt_dlp/extractor/presstv.py index 2808330b6..30eb64b90 100644 --- a/yt_dlp/extractor/presstv.py +++ b/yt_dlp/extractor/presstv.py @@ -52,7 +52,7 @@ def _real_extract(self, url): thumbnail = self._og_search_thumbnail(webpage) description = self._og_search_description(webpage) - upload_date = '{:04d}{:02d}{:02d}'.format( + upload_date = '%04d%02d%02d' % ( int(mobj.group('y')), int(mobj.group('m')), int(mobj.group('d')), diff --git a/yt_dlp/extractor/redbee.py b/yt_dlp/extractor/redbee.py index 457c99bf4..d43bb0bab 100644 --- 
a/yt_dlp/extractor/redbee.py +++ b/yt_dlp/extractor/redbee.py @@ -320,7 +320,7 @@ def _real_extract(self, url): del f['protocol'] f.update({ 'format_id': m3u8_f['format_id'].replace('hls-', 'http-'), - 'url': re.sub(height_re, f'-{height}p.', http_url), + 'url': re.sub(height_re, '-%dp.' % height, http_url), }) formats.append(f) else: diff --git a/yt_dlp/extractor/rts.py b/yt_dlp/extractor/rts.py index 27cfe70d8..dc1e2d3b4 100644 --- a/yt_dlp/extractor/rts.py +++ b/yt_dlp/extractor/rts.py @@ -204,7 +204,7 @@ def extract_bitrate(url): ext = media.get('ext') or determine_ext(media_url, 'mp4') format_id = ext if rate: - format_id += f'-{rate:.0f}k' + format_id += '-%dk' % rate formats.append({ 'format_id': format_id, 'url': urljoin(download_base, media_url), diff --git a/yt_dlp/extractor/sonyliv.py b/yt_dlp/extractor/sonyliv.py index ea9bf7979..0cd914cbb 100644 --- a/yt_dlp/extractor/sonyliv.py +++ b/yt_dlp/extractor/sonyliv.py @@ -146,7 +146,7 @@ def _real_extract(self, url): self.report_drm(video_id) dash_url = content['videoURL'] headers = { - 'x-playback-session-id': f'{uuid.uuid4().hex}-{int(time.time() * 1000)}', + 'x-playback-session-id': '%s-%d' % (uuid.uuid4().hex, time.time() * 1000), } formats = self._extract_mpd_formats( dash_url, video_id, mpd_id='dash', headers=headers, fatal=False) diff --git a/yt_dlp/extractor/theplatform.py b/yt_dlp/extractor/theplatform.py index 0d810686b..7c1769c2d 100644 --- a/yt_dlp/extractor/theplatform.py +++ b/yt_dlp/extractor/theplatform.py @@ -353,7 +353,7 @@ class ThePlatformFeedIE(ThePlatformBaseIE): def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}, account_id=None): real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query) entry = self._download_json(real_url, video_id)['entries'][0] - main_smil_url = f'http://link.theplatform.com/s/{provider_id}/media/guid/{account_id}/{entry["guid"]}' if account_id else 
entry.get('plmedia$publicUrl') + main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%s/%s' % (provider_id, account_id, entry['guid']) if account_id else entry.get('plmedia$publicUrl') formats = [] subtitles = {} diff --git a/yt_dlp/extractor/twitcasting.py b/yt_dlp/extractor/twitcasting.py index 5ac26a9e2..bf9c6348c 100644 --- a/yt_dlp/extractor/twitcasting.py +++ b/yt_dlp/extractor/twitcasting.py @@ -294,7 +294,7 @@ def _entries(self, uploader_id): yield self.url_result(urljoin(base_url, mobj.group('url'))) next_url = self._search_regex( - r'= total: diff --git a/yt_dlp/extractor/voicy.py b/yt_dlp/extractor/voicy.py index f296b422a..f83c3f941 100644 --- a/yt_dlp/extractor/voicy.py +++ b/yt_dlp/extractor/voicy.py @@ -55,7 +55,7 @@ def _call_api(self, url, video_id, **kwargs): if response.get('Status') != 0: message = traverse_obj(response, ('Value', 'Error', 'Message'), expected_type=str) if not message: - message = f'There was a error in the response: {response.get("Status")}' + message = 'There was a error in the response: %s' % response.get('Status') raise ExtractorError(message, expected=False) return response.get('Value') @@ -116,7 +116,7 @@ def _entries(self, channel_id): break yield from playlist_data last = playlist_data[-1] - pager = '&pid={}&p_date={}&play_count={}'.format(last['PlaylistId'], last['Published'], last['PlayCount']) + pager = '&pid=%s&p_date=%s&play_count=%s' % (last['PlaylistId'], last['Published'], last['PlayCount']) def _real_extract(self, url): channel_id = self._match_id(url) @@ -133,7 +133,7 @@ def _real_extract(self, url): articles = itertools.chain([first_article], articles) if first_article else articles playlist = ( - self.url_result(smuggle_url('https://voicy.jp/channel/{}/{}'.format(channel_id, value['PlaylistId']), value), VoicyIE.ie_key()) + self.url_result(smuggle_url('https://voicy.jp/channel/%s/%s' % (channel_id, value['PlaylistId']), value), VoicyIE.ie_key()) for value in articles) return { '_type': 'playlist',
diff --git a/yt_dlp/extractor/vtm.py b/yt_dlp/extractor/vtm.py index c9a3686e5..41b41ec17 100644 --- a/yt_dlp/extractor/vtm.py +++ b/yt_dlp/extractor/vtm.py @@ -51,7 +51,7 @@ def _real_extract(self, url): '_type': 'url', 'id': uuid, 'title': video.get('title'), - 'url': f'http://mychannels.video/embed/{video["myChannelsVideo"]}', + 'url': 'http://mychannels.video/embed/%s' % video['myChannelsVideo'], 'description': video.get('description'), 'timestamp': parse_iso8601(video.get('publishedAt')), 'duration': int_or_none(video.get('duration')), diff --git a/yt_dlp/extractor/xboxclips.py b/yt_dlp/extractor/xboxclips.py index 7f3c61509..d726e62af 100644 --- a/yt_dlp/extractor/xboxclips.py +++ b/yt_dlp/extractor/xboxclips.py @@ -43,7 +43,7 @@ def _real_extract(self, url): r'>Recorded: (\d{2})-(Jan|Feb|Mar|Apr|May|Ju[nl]|Aug|Sep|Oct|Nov|Dec)-(\d{4})', webpage) if mobj: - upload_date = f'{mobj.group(3)}{month_by_abbreviation(mobj.group(2)):02d}{mobj.group(1)}' + upload_date = '%s%02d%s' % (mobj.group(3), month_by_abbreviation(mobj.group(2)), mobj.group(1)) filesize = parse_filesize(self._html_search_regex( r'>Size: ([^<]+)<', webpage, 'file size', fatal=False)) duration = int_or_none(self._html_search_regex( diff --git a/yt_dlp/extractor/xminus.py b/yt_dlp/extractor/xminus.py index e202a7255..af9cf40ab 100644 --- a/yt_dlp/extractor/xminus.py +++ b/yt_dlp/extractor/xminus.py @@ -61,7 +61,7 @@ def _real_extract(self, url): 'encoded data') h = time.time() / 3600 a = sum(map(int, [compat_ord(c) for c in k])) + int(video_id) + h - video_url = f'http://x-minus.me/dl/minus?id={video_id}&tkn2={a}f{h:.0f}' + video_url = 'http://x-minus.me/dl/minus?id=%s&tkn2=%sf%.0f' % (video_id, a, h) return { 'id': video_id, diff --git a/yt_dlp/extractor/xstream.py b/yt_dlp/extractor/xstream.py index 281212ca5..f7b48322c 100644 --- a/yt_dlp/extractor/xstream.py +++ b/yt_dlp/extractor/xstream.py @@ -74,7 +74,7 @@ def _extract_video_info(self, partner_id, video_id): 'app': mobj.group('app'), 'ext':
'flv', 'tbr': tbr, - 'format_id': f'rtmp-{tbr:.0f}', + 'format_id': 'rtmp-%.0f' % tbr, }) else: formats.append({ diff --git a/yt_dlp/extractor/youku.py b/yt_dlp/extractor/youku.py index f47469a43..3bdfa6c93 100644 --- a/yt_dlp/extractor/youku.py +++ b/yt_dlp/extractor/youku.py @@ -165,7 +165,7 @@ def _real_extract(self, url): raise ExtractorError( 'Youku said: Sorry, this video is private', expected=True) else: - msg = f'Youku server reported error {error.get("code")}' + msg = 'Youku server reported error %s' % error.get('code') if error_note is not None: msg += ': ' + clean_html(error_note) raise ExtractorError(msg) diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py index fed218b94..bf1d9e557 100644 --- a/yt_dlp/extractor/youtube.py +++ b/yt_dlp/extractor/youtube.py @@ -3083,8 +3083,8 @@ def _print_sig_code(self, func, example_sig): def gen_sig_code(idxs): def _genslice(start, end, step): starts = '' if start == 0 else str(start) - ends = f':{end + step}' if end + step >= 0 else ':' - steps = '' if step == 1 else f':{step}' + ends = (':%d' % (end + step)) if end + step >= 0 else ':' + steps = '' if step == 1 else (':%d' % step) return f's[{starts}{ends}{steps}]' step = None @@ -3102,9 +3102,9 @@ def _genslice(start, end, step): start = prev continue else: - yield f's[{prev}]' + yield 's[%d]' % prev if step is None: - yield f's[{i}]' + yield 's[%d]' % i else: yield _genslice(start, i, step) @@ -3628,7 +3628,7 @@ def extract_thread(contents, entity_payloads): if is_first_continuation: note_prefix = 'Downloading comment section API JSON' else: - note_prefix = ' Downloading comment API JSON reply thread {} {}'.format( + note_prefix = ' Downloading comment API JSON reply thread %d %s' % ( tracker['current_page_thread'], comment_prog_str) else: note_prefix = '{}Downloading comment{} API JSON page {} {}'.format( diff --git a/yt_dlp/postprocessor/ffmpeg.py b/yt_dlp/postprocessor/ffmpeg.py index 9b9eb7c1c..8965806ae 100644 ---
a/yt_dlp/postprocessor/ffmpeg.py +++ b/yt_dlp/postprocessor/ffmpeg.py @@ -724,8 +724,8 @@ def ffmpeg_escape(text): metadata_file_content = ';FFMETADATA1\n' for chapter in chapters: metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n' - metadata_file_content += f'START={int(chapter["start_time"] * 1000)}\n' - metadata_file_content += f'END={int(chapter["end_time"] * 1000)}\n' + metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000) + metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000) chapter_title = chapter.get('title') if chapter_title: metadata_file_content += f'title={ffmpeg_escape(chapter_title)}\n' @@ -1042,7 +1042,7 @@ def _ffmpeg_args_for_chapter(self, number, chapter, info): return chapter['filepath'] = destination - self.to_screen(f'Chapter {number:03d}; Destination: {destination}') + self.to_screen('Chapter %03d; Destination: %s' % (number, destination)) return ( destination, ['-ss', str(chapter['start_time']), diff --git a/yt_dlp/utils/_deprecated.py b/yt_dlp/utils/_deprecated.py index b7e093a1b..e4762699b 100644 --- a/yt_dlp/utils/_deprecated.py +++ b/yt_dlp/utils/_deprecated.py @@ -25,7 +25,7 @@ def bytes_to_intlist(bs): def intlist_to_bytes(xs): if not xs: return b'' - return struct.pack('%dB' % len(xs), *xs) # noqa: UP031 + return struct.pack('%dB' % len(xs), *xs) compiled_regex_type = type(re.compile('')) diff --git a/yt_dlp/utils/_utils.py b/yt_dlp/utils/_utils.py index ec0ebb59e..699bf1e7f 100644 --- a/yt_dlp/utils/_utils.py +++ b/yt_dlp/utils/_utils.py @@ -927,12 +927,12 @@ def timetuple_from_msec(msec): def formatSeconds(secs, delim=':', msec=False): time = timetuple_from_msec(secs * 1000) if time.hours: - ret = f'{time.hours:.0f}{delim}{time.minutes:02.0f}{delim}{time.seconds:02.0f}' + ret = '%.0f%s%02.0f%s%02.0f' % (time.hours, delim, time.minutes, delim, time.seconds) elif time.minutes: - ret = f'{time.minutes:.0f}{delim}{time.seconds:02.0f}' + ret = '%.0f%s%02.0f' % (time.minutes, delim, time.seconds) else: - ret =
f'{time.seconds:.0f}' - return f'{ret}.{time.milliseconds:03.0f}' if msec else ret + ret = '%.0f' % time.seconds + return '%s.%03.0f' % (ret, time.milliseconds) if msec else ret def bug_reports_message(before=';'): @@ -1434,7 +1434,7 @@ def __eq__(self, other): def system_identifier(): python_implementation = platform.python_implementation() if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'): - python_implementation += ' version {}.{}.{}'.format(*sys.pypy_version_info[:3]) + python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3] libc_ver = [] with contextlib.suppress(OSError): # We may not have access to the executable libc_ver = platform.libc_ver() @@ -3380,12 +3380,12 @@ def parse_dfxp_time_expr(time_expr): def srt_subtitles_timecode(seconds): - return '{:02.0f}:{:02.0f}:{:02.0f},{:03.0f}'.format(*timetuple_from_msec(seconds * 1000)) + return '%02.0f:%02.0f:%02.0f,%03.0f' % timetuple_from_msec(seconds * 1000) def ass_subtitles_timecode(seconds): time = timetuple_from_msec(seconds * 1000) - return '{:01.0f}:{:02.0f}:{:02.0f}.{:02.0f}'.format(*time[:-1], time.milliseconds / 10) + return '%01.0f:%02.0f:%02.0f.%02.0f' % (*time[:-1], time.milliseconds / 10) def dfxp2srt(dfxp_data): @@ -3547,10 +3547,11 @@ def parse_node(node): if not dur: continue end_time = begin_time + dur - out.append( - f'{index}\n' - f'{srt_subtitles_timecode(begin_time)} --> {srt_subtitles_timecode(end_time)}\n' - f'{parse_node(para)}\n\n') + out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) @@ -4814,7 +4815,7 @@ def remove_terminal_sequences(string): def number_of_digits(number): - return len(f'{number}') + return len('%s' % number) def join_nonempty(*values, delim='-', from_dict=None): diff --git a/yt_dlp/webvtt.py b/yt_dlp/webvtt.py index a89489eea..9f1a5086b 100644 --- a/yt_dlp/webvtt.py +++ b/yt_dlp/webvtt.py @@ -111,7 +111,7 @@ def _format_ts(ts): Convert an
MPEG PES timestamp into a WebVTT timestamp. This will lose sub-millisecond precision. """ - return '{:02.0f}:{:02.0f}:{:02.0f}.{:03.0f}'.format(*timetuple_from_msec(int((ts + 45) // 90))) + return '%02d:%02d:%02d.%03d' % timetuple_from_msec(int((ts + 45) // 90)) class Block: