[cleanup] Upgrade syntax

Using https://github.com/asottile/pyupgrade

1. `__future__` imports and `coding: utf-8` were removed
2. Files were rewritten with `pyupgrade --py36-plus --keep-percent-format`
3. f-strings were cherry-picked from `pyupgrade --py36-plus`

Extractors are left untouched (except for removing the headers) to avoid unnecessary merge conflicts. An illustrative sketch of these rewrites follows below.
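For illustration, here is a minimal before/after sketch of the kinds of rewrites this commit applies. The snippet is hypothetical (not taken from the diff below) and assumes a `README.md` exists in the working directory:

```python
# Hypothetical module in the old py2/3-compatible style:
# coding: utf-8
from __future__ import unicode_literals
import io

name = 'yt-dlp'
print('Hello, {0}!'.format(name))              # numbered .format() field
langs = set(['en', 'fr'])                      # set() over a list literal
subs = dict((l, l + '.vtt') for l in langs)    # dict() over a generator
with io.open('README.md', encoding='utf-8') as f:  # io.open is an alias of open on py3
    header = f.readline()

# The same code after `pyupgrade --py36-plus` (f-string hunks cherry-picked):
name = 'yt-dlp'
print(f'Hello, {name}!')                       # f-string
langs = {'en', 'fr'}                           # set literal
subs = {l: l + '.vtt' for l in langs}          # dict comprehension
with open('README.md', encoding='utf-8') as f:     # builtin open
    header = f.readline()
```

Note that `%`-style formatting is deliberately preserved by `--keep-percent-format`, which is why `'%s' % x` expressions still appear unchanged in the diff below.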
pukkandan 2022-04-11 20:40:28 +05:30
parent f9934b9614
commit 86e5f3ed2e
No known key found for this signature in database
GPG key ID: 7EEE9E1E817D0A39
1009 changed files with 375 additions and 3224 deletions

View file

@@ -178,7 +178,6 @@ ## Adding support for a new site
 1. Start with this simple template and save it to `yt_dlp/extractor/yourextractor.py`:
     ```python
-    # coding: utf-8
     from .common import InfoExtractor

View file

@@ -1,11 +1,9 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 import os
 from os.path import dirname as dirn
 import sys
-sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
 import yt_dlp
 BASH_COMPLETION_FILE = "completions/bash/yt-dlp"

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 """
 This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
 if we are not 'age_limit' tagging some porn site
@@ -29,7 +27,7 @@
     try:
         webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
     except Exception:
-        print('\nFail: {0}'.format(test['name']))
+        print('\nFail: {}'.format(test['name']))
         continue
     webpage = webpage.decode('utf8', 'replace')
@@ -39,7 +37,7 @@
     elif METHOD == 'LIST':
         domain = compat_urllib_parse_urlparse(test['url']).netloc
         if not domain:
-            print('\nFail: {0}'.format(test['name']))
+            print('\nFail: {}'.format(test['name']))
             continue
         domain = '.'.join(domain.split('.')[-2:])
@@ -47,11 +45,11 @@
     if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict']
                    or test['info_dict']['age_limit'] != 18):
-        print('\nPotential missing age_limit check: {0}'.format(test['name']))
+        print('\nPotential missing age_limit check: {}'.format(test['name']))
     elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict']
                          and test['info_dict']['age_limit'] == 18):
-        print('\nPotential false negative: {0}'.format(test['name']))
+        print('\nPotential false negative: {}'.format(test['name']))
     else:
         sys.stdout.write('.')

View file

@@ -1,12 +1,10 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 import optparse
 import os
 from os.path import dirname as dirn
 import sys
-sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
 import yt_dlp
 from yt_dlp.utils import shell_quote

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 import codecs
 import subprocess

View file

@@ -1,4 +1,3 @@
-# coding: utf-8
 import re
 from ..utils import bug_reports_message, write_string

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
-import io
 import optparse
 import re
@@ -16,7 +13,7 @@ def main():
     infile, outfile = args
-    with io.open(infile, encoding='utf-8') as inf:
+    with open(infile, encoding='utf-8') as inf:
         readme = inf.read()
     bug_text = re.search(
@@ -26,7 +23,7 @@ def main():
     out = bug_text + dev_text
-    with io.open(outfile, 'w', encoding='utf-8') as outf:
+    with open(outfile, 'w', encoding='utf-8') as outf:
         outf.write(out)

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 import io
 import optparse
@@ -13,7 +11,7 @@ def main():
     infile, outfile = args
-    with io.open(infile, encoding='utf-8') as inf:
+    with open(infile, encoding='utf-8') as inf:
         issue_template_tmpl = inf.read()
     # Get the version from yt_dlp/version.py without importing the package
@@ -22,8 +20,9 @@ def main():
     out = issue_template_tmpl % {'version': locals()['__version__']}
-    with io.open(outfile, 'w', encoding='utf-8') as outf:
+    with open(outfile, 'w', encoding='utf-8') as outf:
         outf.write(out)
 if __name__ == '__main__':
     main()

View file

@@ -1,13 +1,10 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals, print_function
 from inspect import getsource
-import io
 import os
 from os.path import dirname as dirn
 import sys
-sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
 lazy_extractors_filename = sys.argv[1] if len(sys.argv) > 1 else 'yt_dlp/extractor/lazy_extractors.py'
 if os.path.exists(lazy_extractors_filename):
@@ -25,7 +22,7 @@
 if os.path.exists(plugins_blocked_dirname):
     os.rename(plugins_blocked_dirname, plugins_dirname)
-with open('devscripts/lazy_load_template.py', 'rt') as f:
+with open('devscripts/lazy_load_template.py') as f:
     module_template = f.read()
 CLASS_PROPERTIES = ['ie_key', 'working', '_match_valid_url', 'suitable', '_match_id', 'get_temp_id']
@@ -72,7 +69,7 @@ def build_lazy_ie(ie, name):
 ordered_cls = []
 while classes:
     for c in classes[:]:
-        bases = set(c.__bases__) - set((object, InfoExtractor, SearchInfoExtractor))
+        bases = set(c.__bases__) - {object, InfoExtractor, SearchInfoExtractor}
         stop = False
         for b in bases:
             if b not in classes and b not in ordered_cls:
@@ -97,9 +94,9 @@ def build_lazy_ie(ie, name):
     names.append(name)
 module_contents.append(
-    '\n_ALL_CLASSES = [{0}]'.format(', '.join(names)))
+    '\n_ALL_CLASSES = [{}]'.format(', '.join(names)))
 module_src = '\n'.join(module_contents) + '\n'
-with io.open(lazy_extractors_filename, 'wt', encoding='utf-8') as f:
+with open(lazy_extractors_filename, 'wt', encoding='utf-8') as f:
     f.write(module_src)

View file

@@ -2,10 +2,6 @@
 # yt-dlp --help | make_readme.py
 # This must be run in a console of correct width
-from __future__ import unicode_literals
-import io
 import sys
 import re
@@ -15,7 +11,7 @@
 if isinstance(helptext, bytes):
     helptext = helptext.decode('utf-8')
-with io.open(README_FILE, encoding='utf-8') as f:
+with open(README_FILE, encoding='utf-8') as f:
     oldreadme = f.read()
 header = oldreadme[:oldreadme.index('## General Options:')]
@@ -25,7 +21,7 @@
 options = re.sub(r'(?m)^ (\w.+)$', r'## \1', options)
 options = options + '\n'
-with io.open(README_FILE, 'w', encoding='utf-8') as f:
+with open(README_FILE, 'w', encoding='utf-8') as f:
     f.write(header)
     f.write(options)
     f.write(footer)

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
-import io
 import optparse
 import os
 import sys
@@ -23,11 +20,11 @@ def main():
     def gen_ies_md(ies):
         for ie in ies:
-            ie_md = '**{0}**'.format(ie.IE_NAME)
+            ie_md = f'**{ie.IE_NAME}**'
             if ie.IE_DESC is False:
                 continue
             if ie.IE_DESC is not None:
-                ie_md += ': {0}'.format(ie.IE_DESC)
+                ie_md += f': {ie.IE_DESC}'
             search_key = getattr(ie, 'SEARCH_KEY', None)
             if search_key is not None:
                 ie_md += f'; "{ie.SEARCH_KEY}:" prefix'
@@ -40,7 +37,7 @@ def gen_ies_md(ies):
         ' - ' + md + '\n'
         for md in gen_ies_md(ies))
-    with io.open(outfile, 'w', encoding='utf-8') as outf:
+    with open(outfile, 'w', encoding='utf-8') as outf:
         outf.write(out)

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
-import io
 import optparse
 import os.path
 import re
@@ -32,14 +29,14 @@ def main():
     outfile, = args
-    with io.open(README_FILE, encoding='utf-8') as f:
+    with open(README_FILE, encoding='utf-8') as f:
         readme = f.read()
     readme = filter_excluded_sections(readme)
     readme = move_sections(readme)
     readme = filter_options(readme)
-    with io.open(outfile, 'w', encoding='utf-8') as outf:
+    with open(outfile, 'w', encoding='utf-8') as outf:
         outf.write(PREFIX + readme)

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 import json
 import os
 import re
@@ -27,7 +25,7 @@
 sha256sum = tarball_file['digests']['sha256']
 url = tarball_file['url']
-with open(filename, 'r') as r:
+with open(filename) as r:
     formulae_text = r.read()
 formulae_text = re.sub(r'sha256 "[0-9a-f]*?"', 'sha256 "%s"' % sha256sum, formulae_text)

View file

@@ -4,7 +4,7 @@
 import subprocess
-with open('yt_dlp/version.py', 'rt') as f:
+with open('yt_dlp/version.py') as f:
     exec(compile(f.read(), 'yt_dlp/version.py', 'exec'))
 old_version = locals()['__version__']

View file

@@ -1,11 +1,9 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 import os
 from os.path import dirname as dirn
 import sys
-sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
 import yt_dlp
 ZSH_COMPLETION_FILE = "completions/zsh/_yt-dlp"

View file

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
 import os
 import platform
 import sys

View file

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
 import os.path
 import warnings
 import sys

View file

@@ -1,7 +1,4 @@
-from __future__ import unicode_literals
 import errno
-import io
 import hashlib
 import json
 import os.path
@@ -35,10 +32,10 @@ def get_params(override=None):
                                    'parameters.json')
     LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                          'local_parameters.json')
-    with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
+    with open(PARAMETERS_FILE, encoding='utf-8') as pf:
         parameters = json.load(pf)
     if os.path.exists(LOCAL_PARAMETERS_FILE):
-        with io.open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
+        with open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
             parameters.update(json.load(pf))
     if override:
         parameters.update(override)
@@ -63,7 +60,7 @@ def report_warning(message):
         _msg_header = '\033[0;33mWARNING:\033[0m'
     else:
         _msg_header = 'WARNING:'
-    output = '%s %s\n' % (_msg_header, message)
+    output = f'{_msg_header} {message}\n'
     if 'b' in getattr(sys.stderr, 'mode', ''):
         output = output.encode(preferredencoding())
     sys.stderr.write(output)
@@ -74,7 +71,7 @@ def __init__(self, override=None):
         # Different instances of the downloader can't share the same dictionary
         # some test set the "sublang" parameter, which would break the md5 checks.
         params = get_params(override=override)
-        super(FakeYDL, self).__init__(params, auto_init=False)
+        super().__init__(params, auto_init=False)
         self.result = []
     def to_screen(self, s, skip_eol=None):
@@ -99,8 +96,7 @@ def report_warning(self, message):
 def gettestcases(include_onlymatching=False):
     for ie in yt_dlp.extractor.gen_extractors():
-        for tc in ie.get_testcases(include_onlymatching):
-            yield tc
+        yield from ie.get_testcases(include_onlymatching)
 md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
@@ -113,33 +109,30 @@ def expect_value(self, got, expected, field):
         self.assertTrue(
             isinstance(got, compat_str),
-            'Expected a %s object, but got %s for field %s' % (
-                compat_str.__name__, type(got).__name__, field))
+            f'Expected a {compat_str.__name__} object, but got {type(got).__name__} for field {field}')
         self.assertTrue(
             match_rex.match(got),
-            'field %s (value: %r) should match %r' % (field, got, match_str))
+            f'field {field} (value: {got!r}) should match {match_str!r}')
     elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
         start_str = expected[len('startswith:'):]
         self.assertTrue(
             isinstance(got, compat_str),
-            'Expected a %s object, but got %s for field %s' % (
-                compat_str.__name__, type(got).__name__, field))
+            f'Expected a {compat_str.__name__} object, but got {type(got).__name__} for field {field}')
         self.assertTrue(
             got.startswith(start_str),
-            'field %s (value: %r) should start with %r' % (field, got, start_str))
+            f'field {field} (value: {got!r}) should start with {start_str!r}')
     elif isinstance(expected, compat_str) and expected.startswith('contains:'):
         contains_str = expected[len('contains:'):]
         self.assertTrue(
             isinstance(got, compat_str),
-            'Expected a %s object, but got %s for field %s' % (
-                compat_str.__name__, type(got).__name__, field))
+            f'Expected a {compat_str.__name__} object, but got {type(got).__name__} for field {field}')
         self.assertTrue(
             contains_str in got,
-            'field %s (value: %r) should contain %r' % (field, got, contains_str))
+            f'field {field} (value: {got!r}) should contain {contains_str!r}')
     elif isinstance(expected, type):
         self.assertTrue(
             isinstance(got, expected),
-            'Expected type %r for field %s, but got value %r of type %r' % (expected, field, got, type(got)))
+            f'Expected type {expected!r} for field {field}, but got value {got!r} of type {type(got)!r}')
     elif isinstance(expected, dict) and isinstance(got, dict):
         expect_dict(self, got, expected)
     elif isinstance(expected, list) and isinstance(got, list):
@@ -159,13 +152,12 @@ def expect_value(self, got, expected, field):
     if isinstance(expected, compat_str) and expected.startswith('md5:'):
         self.assertTrue(
             isinstance(got, compat_str),
-            'Expected field %s to be a unicode object, but got value %r of type %r' % (field, got, type(got)))
+            f'Expected field {field} to be a unicode object, but got value {got!r} of type {type(got)!r}')
         got = 'md5:' + md5(got)
     elif isinstance(expected, compat_str) and re.match(r'^(?:min|max)?count:\d+', expected):
         self.assertTrue(
             isinstance(got, (list, dict)),
-            'Expected field %s to be a list or a dict, but it is of type %s' % (
-                field, type(got).__name__))
+            f'Expected field {field} to be a list or a dict, but it is of type {type(got).__name__}')
         op, _, expected_num = expected.partition(':')
         expected_num = int(expected_num)
         if op == 'mincount':
@@ -185,7 +177,7 @@ def expect_value(self, got, expected, field):
         return
     self.assertEqual(
         expected, got,
-        'Invalid value for field %s, expected %r, got %r' % (field, expected, got))
+        f'Invalid value for field {field}, expected {expected!r}, got {got!r}')
 def expect_dict(self, got_dict, expected_dict):
@@ -260,13 +252,13 @@ def _repr(v):
     info_dict_str = ''
     if len(missing_keys) != len(expected_dict):
         info_dict_str += ''.join(
-            '    %s: %s,\n' % (_repr(k), _repr(v))
+            f'    {_repr(k)}: {_repr(v)},\n'
             for k, v in test_info_dict.items() if k not in missing_keys)
         if info_dict_str:
             info_dict_str += '\n'
     info_dict_str += ''.join(
-        '    %s: %s,\n' % (_repr(k), _repr(test_info_dict[k]))
+        f'    {_repr(k)}: {_repr(test_info_dict[k])},\n'
         for k in missing_keys)
     write_string(
         '\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
@@ -295,21 +287,21 @@ def assertRegexpMatches(self, text, regexp, msg=None):
 def assertGreaterEqual(self, got, expected, msg=None):
     if not (got >= expected):
         if msg is None:
-            msg = '%r not greater than or equal to %r' % (got, expected)
+            msg = f'{got!r} not greater than or equal to {expected!r}'
         self.assertTrue(got >= expected, msg)
 def assertLessEqual(self, got, expected, msg=None):
     if not (got <= expected):
         if msg is None:
-            msg = '%r not less than or equal to %r' % (got, expected)
+            msg = f'{got!r} not less than or equal to {expected!r}'
         self.assertTrue(got <= expected, msg)
 def assertEqual(self, got, expected, msg=None):
     if not (got == expected):
         if msg is None:
-            msg = '%r not equal to %r' % (got, expected)
+            msg = f'{got!r} not equal to {expected!r}'
         self.assertTrue(got == expected, msg)

View file

@@ -1,9 +1,5 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
-import io
 import os
 import sys
 import unittest
@@ -1011,8 +1007,7 @@ def test_parse_m3u8_formats(self):
         ]
         for m3u8_file, m3u8_url, expected_formats, expected_subs in _TEST_CASES:
-            with io.open('./test/testdata/m3u8/%s.m3u8' % m3u8_file,
-                         mode='r', encoding='utf-8') as f:
+            with open('./test/testdata/m3u8/%s.m3u8' % m3u8_file, encoding='utf-8') as f:
                 formats, subs = self.ie._parse_m3u8_formats_and_subtitles(
                     f.read(), m3u8_url, ext='mp4')
                 self.ie._sort_formats(formats)
@@ -1357,8 +1352,7 @@ def test_parse_mpd_formats(self):
         ]
         for mpd_file, mpd_url, mpd_base_url, expected_formats, expected_subtitles in _TEST_CASES:
-            with io.open('./test/testdata/mpd/%s.mpd' % mpd_file,
-                         mode='r', encoding='utf-8') as f:
+            with open('./test/testdata/mpd/%s.mpd' % mpd_file, encoding='utf-8') as f:
                 formats, subtitles = self.ie._parse_mpd_formats_and_subtitles(
                     compat_etree_fromstring(f.read().encode('utf-8')),
                     mpd_base_url=mpd_base_url, mpd_url=mpd_url)
@@ -1549,8 +1543,7 @@ def test_parse_ism_formats(self):
         ]
         for ism_file, ism_url, expected_formats, expected_subtitles in _TEST_CASES:
-            with io.open('./test/testdata/ism/%s.Manifest' % ism_file,
-                         mode='r', encoding='utf-8') as f:
+            with open('./test/testdata/ism/%s.Manifest' % ism_file, encoding='utf-8') as f:
                 formats, subtitles = self.ie._parse_ism_formats_and_subtitles(
                     compat_etree_fromstring(f.read().encode('utf-8')), ism_url=ism_url)
                 self.ie._sort_formats(formats)
@@ -1576,8 +1569,7 @@ def test_parse_f4m_formats(self):
         ]
         for f4m_file, f4m_url, expected_formats in _TEST_CASES:
-            with io.open('./test/testdata/f4m/%s.f4m' % f4m_file,
-                         mode='r', encoding='utf-8') as f:
+            with open('./test/testdata/f4m/%s.f4m' % f4m_file, encoding='utf-8') as f:
                 formats = self.ie._parse_f4m_formats(
                     compat_etree_fromstring(f.read().encode('utf-8')),
                     f4m_url, None)
@@ -1624,8 +1616,7 @@ def test_parse_xspf(self):
         ]
         for xspf_file, xspf_url, expected_entries in _TEST_CASES:
-            with io.open('./test/testdata/xspf/%s.xspf' % xspf_file,
-                         mode='r', encoding='utf-8') as f:
+            with open('./test/testdata/xspf/%s.xspf' % xspf_file, encoding='utf-8') as f:
                 entries = self.ie._parse_xspf(
                     compat_etree_fromstring(f.read().encode('utf-8')),
                     xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)

View file

@@ -1,8 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
@@ -25,7 +21,7 @@
 class YDL(FakeYDL):
     def __init__(self, *args, **kwargs):
-        super(YDL, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.downloaded_info_dicts = []
         self.msgs = []
@@ -551,11 +547,11 @@ def test_subtitles(self):
         def s_formats(lang, autocaption=False):
             return [{
                 'ext': ext,
-                'url': 'http://localhost/video.%s.%s' % (lang, ext),
+                'url': f'http://localhost/video.{lang}.{ext}',
                 '_auto': autocaption,
             } for ext in ['vtt', 'srt', 'ass']]
-        subtitles = dict((l, s_formats(l)) for l in ['en', 'fr', 'es'])
-        auto_captions = dict((l, s_formats(l, True)) for l in ['it', 'pt', 'es'])
+        subtitles = {l: s_formats(l) for l in ['en', 'fr', 'es']}
+        auto_captions = {l: s_formats(l, True) for l in ['it', 'pt', 'es']}
         info_dict = {
             'id': 'test',
             'title': 'Test',
@@ -580,7 +576,7 @@ def get_info(params={}):
         result = get_info({'writesubtitles': True})
         subs = result['requested_subtitles']
         self.assertTrue(subs)
-        self.assertEqual(set(subs.keys()), set(['en']))
+        self.assertEqual(set(subs.keys()), {'en'})
         self.assertTrue(subs['en'].get('data') is None)
         self.assertEqual(subs['en']['ext'], 'ass')
@@ -591,39 +587,39 @@ def get_info(params={}):
         result = get_info({'writesubtitles': True, 'subtitleslangs': ['es', 'fr', 'it']})
         subs = result['requested_subtitles']
         self.assertTrue(subs)
-        self.assertEqual(set(subs.keys()), set(['es', 'fr']))
+        self.assertEqual(set(subs.keys()), {'es', 'fr'})
         result = get_info({'writesubtitles': True, 'subtitleslangs': ['all', '-en']})
         subs = result['requested_subtitles']
         self.assertTrue(subs)
-        self.assertEqual(set(subs.keys()), set(['es', 'fr']))
+        self.assertEqual(set(subs.keys()), {'es', 'fr'})
         result = get_info({'writesubtitles': True, 'subtitleslangs': ['en', 'fr', '-en']})
         subs = result['requested_subtitles']
         self.assertTrue(subs)
-        self.assertEqual(set(subs.keys()), set(['fr']))
+        self.assertEqual(set(subs.keys()), {'fr'})
         result = get_info({'writesubtitles': True, 'subtitleslangs': ['-en', 'en']})
         subs = result['requested_subtitles']
         self.assertTrue(subs)
-        self.assertEqual(set(subs.keys()), set(['en']))
+        self.assertEqual(set(subs.keys()), {'en'})
         result = get_info({'writesubtitles': True, 'subtitleslangs': ['e.+']})
         subs = result['requested_subtitles']
         self.assertTrue(subs)
-        self.assertEqual(set(subs.keys()), set(['es', 'en']))
+        self.assertEqual(set(subs.keys()), {'es', 'en'})
         result = get_info({'writesubtitles': True, 'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
         subs = result['requested_subtitles']
         self.assertTrue(subs)
-        self.assertEqual(set(subs.keys()), set(['es', 'pt']))
+        self.assertEqual(set(subs.keys()), {'es', 'pt'})
         self.assertFalse(subs['es']['_auto'])
         self.assertTrue(subs['pt']['_auto'])
         result = get_info({'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
         subs = result['requested_subtitles']
         self.assertTrue(subs)
-        self.assertEqual(set(subs.keys()), set(['es', 'pt']))
+        self.assertEqual(set(subs.keys()), {'es', 'pt'})
         self.assertTrue(subs['es']['_auto'])
         self.assertTrue(subs['pt']['_auto'])
@@ -1082,7 +1078,7 @@ def test_ignoreerrors_for_playlist_with_url_transparent_iterable_entries(self):
     class _YDL(YDL):
         def __init__(self, *args, **kwargs):
-            super(_YDL, self).__init__(*args, **kwargs)
+            super().__init__(*args, **kwargs)
         def trouble(self, s, tb=None):
             pass

View file

@@ -1,8 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 import os
 import re
 import sys

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
@@ -81,11 +78,11 @@ def test_no_duplicates(self):
             url = tc['url']
             for ie in ies:
                 if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
-                    self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
+                    self.assertTrue(ie.suitable(url), f'{type(ie).__name__} should match URL {url!r}')
                 else:
                     self.assertFalse(
                         ie.suitable(url),
-                        '%s should not match URL %r . That URL belongs to %s.' % (type(ie).__name__, url, tc['name']))
+                        f'{type(ie).__name__} should not match URL {url!r} . That URL belongs to {tc["name"]}.')
     def test_keywords(self):
         self.assertMatch(':ytsubs', ['youtube:subscriptions'])
@@ -120,7 +117,7 @@ def test_no_duplicated_ie_names(self):
         for (ie_name, ie_list) in name_accu.items():
             self.assertEqual(
                 len(ie_list), 1,
-                'Multiple extractors with the same IE_NAME "%s" (%s)' % (ie_name, ', '.join(ie_list)))
+                f'Multiple extractors with the same IE_NAME "{ie_name}" ({", ".join(ie_list)})')
 if __name__ == '__main__':

View file

@@ -1,8 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 import shutil
 # Allow direct execution

View file

@@ -1,8 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
@@ -48,7 +44,7 @@ def test_all_present(self):
         all_names = yt_dlp.compat.__all__
         present_names = set(filter(
             lambda c: '_' in c and not c.startswith('_'),
-            dir(yt_dlp.compat))) - set(['unicode_literals'])
+            dir(yt_dlp.compat))) - {'unicode_literals'}
         self.assertEqual(all_names, sorted(present_names))
     def test_compat_urllib_parse_unquote(self):

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
@@ -21,7 +18,6 @@
 import hashlib
-import io
 import json
 import socket
@@ -46,7 +42,7 @@ class YoutubeDL(yt_dlp.YoutubeDL):
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
-        super(YoutubeDL, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
     def report_warning(self, message):
         # Don't accept warnings during tests
@@ -54,7 +50,7 @@ def report_warning(self, message):
     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict.copy())
-        return super(YoutubeDL, self).process_info(info_dict)
+        return super().process_info(info_dict)
 def _file_md5(fn):
@@ -80,7 +76,7 @@ def __str__(self):
         def strclass(cls):
             """From 2.7's unittest; 2.6 had _strclass so we can't import it."""
-            return '%s.%s' % (cls.__module__, cls.__name__)
+            return f'{cls.__module__}.{cls.__name__}'
         add_ie = getattr(self, self._testMethodName).add_ie
         return '%s (%s)%s:' % (self._testMethodName,
@@ -179,7 +175,7 @@ def try_rm_tcs_files(tcs=None):
                     report_warning('%s failed due to network errors, skipping...' % tname)
                     return
-                print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
+                print(f'Retrying: {try_num} failed tries\n\n##########\n\n')
                 try_num += 1
             else:
@@ -245,7 +241,7 @@ def try_rm_tcs_files(tcs=None):
                     self.assertTrue(
                         os.path.exists(info_json_fn),
                         'Missing info file %s' % info_json_fn)
-                    with io.open(info_json_fn, encoding='utf-8') as infof:
+                    with open(info_json_fn, encoding='utf-8') as infof:
                         info_dict = json.load(infof)
                     expect_info_dict(self, info_dict, tc.get('info_dict', {}))
             finally:

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import re
@@ -66,7 +63,7 @@ def do_GET(self):
         assert False
-class FakeLogger(object):
+class FakeLogger:
     def debug(self, msg):
         pass

View file

@@ -1,8 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 import unittest
 import sys
@@ -45,7 +41,7 @@ def test_lazy_extractors(self):
         finally:
             try:
                 os.remove('yt_dlp/extractor/lazy_extractors.py')
-            except (IOError, OSError):
+            except OSError:
                 pass

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
@@ -41,7 +38,7 @@ def do_GET(self):
         assert False
-class FakeLogger(object):
+class FakeLogger:
     def debug(self, msg):
         pass
@@ -117,23 +114,23 @@ def setUp(self):
         self.geo_proxy_thread.start()
     def test_proxy(self):
-        geo_proxy = '127.0.0.1:{0}'.format(self.geo_port)
+        geo_proxy = f'127.0.0.1:{self.geo_port}'
         ydl = YoutubeDL({
-            'proxy': '127.0.0.1:{0}'.format(self.port),
+            'proxy': f'127.0.0.1:{self.port}',
             'geo_verification_proxy': geo_proxy,
         })
         url = 'http://foo.com/bar'
         response = ydl.urlopen(url).read().decode('utf-8')
-        self.assertEqual(response, 'normal: {0}'.format(url))
+        self.assertEqual(response, f'normal: {url}')
         req = compat_urllib_request.Request(url)
         req.add_header('Ytdl-request-proxy', geo_proxy)
         response = ydl.urlopen(req).read().decode('utf-8')
-        self.assertEqual(response, 'geo: {0}'.format(url))
+        self.assertEqual(response, f'geo: {url}')
     def test_proxy_with_idn(self):
         ydl = YoutubeDL({
-            'proxy': '127.0.0.1:{0}'.format(self.port),
+            'proxy': f'127.0.0.1:{self.port}',
         })
         url = 'http://中文.tw/'
         response = ydl.urlopen(url).read().decode('utf-8')

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
@@ -12,7 +9,7 @@
 from yt_dlp.extractor import IqiyiIE
-class WarningLogger(object):
+class WarningLogger:
     def __init__(self):
         self.messages = []

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys

View file

@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import os
 import sys
 import unittest

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 import os
 from os.path import join
 import subprocess

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 import os
 import sys
 import unittest
@@ -14,7 +11,7 @@
 class YoutubeDL(yt_dlp.YoutubeDL):
     def __init__(self, *args, **kwargs):
-        super(YoutubeDL, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.to_stderr = self.to_screen

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
@@ -54,7 +52,7 @@ def getSubtitles(self):
         if sub_info.get('data') is None:
             uf = self.DL.urlopen(sub_info['url'])
             sub_info['data'] = uf.read().decode('utf-8')
-        return dict((l, sub_info['data']) for l, sub_info in subtitles.items())
+        return {l: sub_info['data'] for l, sub_info in subtitles.items()}
 @is_download_test
@@ -163,7 +161,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
+        self.assertEqual(set(subtitles.keys()), {'de', 'en', 'es', 'fr'})
         self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
         self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
@@ -186,7 +184,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['heb']))
+        self.assertEqual(set(subtitles.keys()), {'heb'})
         self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
     def test_nosubtitles(self):
@@ -208,7 +206,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['cs']))
+        self.assertEqual(set(subtitles.keys()), {'cs'})
         self.assertTrue(len(subtitles['cs']) > 20000)
     def test_nosubtitles(self):
@@ -229,7 +227,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(set(subtitles.keys()), {'en'})
         self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7')
@@ -242,7 +240,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['nl']))
+        self.assertEqual(set(subtitles.keys()), {'nl'})
         self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')
@@ -252,13 +250,13 @@ class TestMTVSubtitles(BaseTestSubtitles):
     IE = ComedyCentralIE
     def getInfoDict(self):
-        return super(TestMTVSubtitles, self).getInfoDict()['entries'][0]
+        return super().getInfoDict()['entries'][0]
     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(set(subtitles.keys()), {'en'})
         self.assertEqual(md5(subtitles['en']), '78206b8d8a0cfa9da64dc026eea48961')
@@ -271,7 +269,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['no']))
+        self.assertEqual(set(subtitles.keys()), {'no'})
         self.assertEqual(md5(subtitles['no']), '544fa917d3197fcbee64634559221cc2')
@@ -284,7 +282,7 @@ def test_subtitles_key(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['it']))
+        self.assertEqual(set(subtitles.keys()), {'it'})
         self.assertEqual(md5(subtitles['it']), 'b1d90a98755126b61e667567a1f6680a')
     def test_subtitles_array_key(self):
@@ -292,7 +290,7 @@ def test_subtitles_array_key(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['it']))
+        self.assertEqual(set(subtitles.keys()), {'it'})
         self.assertEqual(md5(subtitles['it']), '4b3264186fbb103508abe5311cfcb9cd')
@@ -305,7 +303,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(set(subtitles.keys()), {'en'})
         self.assertEqual(md5(subtitles['en']), '53cb083a5914b2d84ef1ab67b880d18a')
@@ -320,7 +318,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(set(subtitles.keys()), {'en'})
         self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
@@ -333,7 +331,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(set(subtitles.keys()), {'en'})
         self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade')
@@ -348,7 +346,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['es']))
+        self.assertEqual(set(subtitles.keys()), {'es'})
         self.assertEqual(md5(subtitles['es']), '69e70cae2d40574fb7316f31d6eb7fca')
@@ -361,7 +359,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(set(subtitles.keys()), {'en'})
         self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
     def test_subtitles_in_page(self):
@@ -369,7 +367,7 @@ def test_subtitles_in_page(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(set(subtitles.keys()), {'en'})
         self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
@@ -382,7 +380,7 @@ def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(set(subtitles.keys()), {'en'})
     def test_subtitles_dfxp_format(self):
         self.DL.params['writesubtitles'] = True

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys

View file

@@ -1,8 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
@@ -266,7 +262,7 @@ def test_extract_basic_auth(self):
     def test_expand_path(self):
         def env(var):
-            return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var)
+            return f'%{var}%' if sys.platform == 'win32' else f'${var}'
         compat_setenv('yt_dlp_EXPATH_PATH', 'expanded')
         self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')
@@ -666,8 +662,7 @@ def testPL(size, pagesize, sliceargs, expected):
             def get_page(pagenum):
                 firstid = pagenum * pagesize
                 upto = min(size, pagenum * pagesize + pagesize)
-                for i in range(firstid, upto):
-                    yield i
+                yield from range(firstid, upto)
             pl = OnDemandPagedList(get_page, pagesize)
             got = pl.getslice(*sliceargs)
@@ -736,7 +731,7 @@ def test_multipart_encode(self):
             multipart_encode({b'field': b'value'}, boundary='AAAAAA')[0],
             b'--AAAAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAAAA--\r\n')
         self.assertEqual(
-            multipart_encode({'欄位'.encode('utf-8'): '值'.encode('utf-8')}, boundary='AAAAAA')[0],
+            multipart_encode({'欄位'.encode(): '值'.encode()}, boundary='AAAAAA')[0],
             b'--AAAAAA\r\nContent-Disposition: form-data; name="\xe6\xac\x84\xe4\xbd\x8d"\r\n\r\n\xe5\x80\xbc\r\n--AAAAAA--\r\n')
         self.assertRaises(
             ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
@@ -1397,7 +1392,7 @@ def test_dfxp2srt(self):
                 <p begin="3" dur="-1">Ignored, three</p>
             </div>
         </body>
-        </tt>'''.encode('utf-8')
+        </tt>'''.encode()
         srt_data = '''1
 00:00:00,000 --> 00:00:01,000
 The following line contains Chinese characters and special symbols
@@ -1415,14 +1410,14 @@ def test_dfxp2srt(self):
 '''
         self.assertEqual(dfxp2srt(dfxp_data), srt_data)
-        dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
+        dfxp_data_no_default_namespace = b'''<?xml version="1.0" encoding="UTF-8"?>
             <tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
             <body>
                 <div xml:lang="en">
                     <p begin="0" end="1">The first line</p>
                 </div>
             </body>
-            </tt>'''.encode('utf-8')
+            </tt>'''
         srt_data = '''1
 00:00:00,000 --> 00:00:01,000
 The first line
@@ -1430,7 +1425,7 @@ def test_dfxp2srt(self):
 '''
         self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
-        dfxp_data_with_style = '''<?xml version="1.0" encoding="utf-8"?>
+        dfxp_data_with_style = b'''<?xml version="1.0" encoding="utf-8"?>
 <tt xmlns="http://www.w3.org/2006/10/ttaf1" xmlns:ttp="http://www.w3.org/2006/10/ttaf1#parameter" ttp:timeBase="media" xmlns:tts="http://www.w3.org/2006/10/ttaf1#style" xml:lang="en" xmlns:ttm="http://www.w3.org/2006/10/ttaf1#metadata">
     <head>
         <styling>
@@ -1448,7 +1443,7 @@ def test_dfxp2srt(self):
         <p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
     </div>
 </body>
-</tt>'''.encode('utf-8')
+</tt>'''
         srt_data = '''1
 00:00:02,080 --> 00:00:05,840
 <font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>

View file

@@ -1,8 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 import unittest
 import sys

View file

@@ -1,7 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
@@ -21,7 +18,7 @@ import yt_dlp.extractor
 class YoutubeDL(yt_dlp.YoutubeDL):
     def __init__(self, *args, **kwargs):
-        super(YoutubeDL, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.to_stderr = self.to_screen
@@ -52,7 +49,7 @@ class TestAnnotations(unittest.TestCase):
         ydl.download([TEST_ID])
         self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
         annoxml = None
-        with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
+        with open(ANNOTATIONS_FILE, encoding='utf-8') as annof:
             annoxml = xml.etree.ElementTree.parse(annof)
         self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
         root = annoxml.getroot()

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys

View file

@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys

View file

@@ -1,14 +1,10 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Allow direct execution
 import os
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import io
 import re
 import string
 import urllib.request
@@ -149,7 +145,7 @@ def test_func(self):
     if not os.path.exists(fn):
         urllib.request.urlretrieve(url, fn)
-    with io.open(fn, encoding='utf-8') as testf:
+    with open(fn, encoding='utf-8') as testf:
         jscode = testf.read()
     self.assertEqual(sig_func(jscode, sig_input), expected_sig)
@@ -1,8 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
-from __future__ import absolute_import, unicode_literals
 import collections
 import contextlib
 import datetime
@@ -165,7 +161,7 @@
 import ctypes
-class YoutubeDL(object):
+class YoutubeDL:
 """YoutubeDL class.
 YoutubeDL objects are the ones responsible of downloading the
@@ -501,7 +497,7 @@ class YoutubeDL(object):
 care about HLS. (only for youtube)
 """
-_NUMERIC_FIELDS = set((
+_NUMERIC_FIELDS = {
 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
 'timestamp', 'release_timestamp',
 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
@@ -509,7 +505,7 @@ class YoutubeDL(object):
 'start_time', 'end_time',
 'chapter_number', 'season_number', 'episode_number',
 'track_number', 'disc_number', 'release_year',
-))
+}
 _format_fields = {
 # NB: Keep in sync with the docstring of extractor/common.py
@@ -576,7 +572,7 @@ def __init__(self, params=None, auto_init=True):
 def check_deprecated(param, option, suggestion):
 if self.params.get(param) is not None:
-self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
+self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
 return True
 return False
@@ -693,7 +689,7 @@ def preload_download_archive(fn):
 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
 for line in archive_file:
 self.archive.add(line.strip())
-except IOError as ioe:
+except OSError as ioe:
 if ioe.errno != errno.ENOENT:
 raise
 return False
@@ -990,11 +986,9 @@ def parse_outtmpl(self):
 outtmpl_dict.update({
 k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items()
 if outtmpl_dict.get(k) is None})
-for key, val in outtmpl_dict.items():
+for _, val in outtmpl_dict.items():
 if isinstance(val, bytes):
-self.report_warning(
-'Parameter outtmpl is bytes, but should be a unicode string. '
-'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
+self.report_warning('Parameter outtmpl is bytes, but should be a unicode string')
 return outtmpl_dict
 def get_output_path(self, dir_type='', filename=None):
@@ -1013,7 +1007,7 @@ def _outtmpl_expandpath(outtmpl):
 # '%%' intact for template dict substitution step. Working around
 # with boundary-alike separator hack.
 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
-outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
+outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
 # outtmpl should be expand_path'ed before template dict substitution
 # because meta fields may contain env variables we don't want to
@@ -1173,7 +1167,7 @@ def create_key(outer_mobj):
 fmt = outer_mobj.group('format')
 if fmt == 's' and value is not None and key in field_size_compat_map.keys():
-fmt = '0{:d}d'.format(field_size_compat_map[key])
+fmt = f'0{field_size_compat_map[key]:d}d'
 value = default if value is None else value if replacement is None else replacement
@@ -1188,7 +1182,7 @@ def create_key(outer_mobj):
 value = map(str, variadic(value) if '#' in flags else [value])
 value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
 elif fmt[-1] == 'B': # bytes
-value = f'%{str_fmt}'.encode('utf-8') % str(value).encode('utf-8')
+value = f'%{str_fmt}'.encode() % str(value).encode('utf-8')
 value, fmt = value.decode('utf-8', 'ignore'), 's'
 elif fmt[-1] == 'U': # unicode normalized
 value, fmt = unicodedata.normalize(
@@ -1301,7 +1295,7 @@ def check_filter():
 if date is not None:
 dateRange = self.params.get('daterange', DateRange())
 if date not in dateRange:
-return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
+return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
 view_count = info_dict.get('view_count')
 if view_count is not None:
 min_views = self.params.get('min_views')
@@ -1765,14 +1759,14 @@ def get_entry(i):
 x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
-self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
+self.to_screen(f'[{ie_result["extractor"]}] playlist {playlist}: {msg % n_entries}')
 failures = 0
 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
 for i, entry_tuple in enumerate(entries, 1):
 playlist_index, entry = entry_tuple
 if 'playlist-index' in self.params.get('compat_opts', []):
 playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
-self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
+self.to_screen(f'[download] Downloading video {i} of {n_entries}')
 # This __x_forwarded_for_ip thing is a bit ugly but requires
 # minimal changes
 if x_forwarded_for:
@@ -1940,7 +1934,7 @@ def build_format_selector(self, format_spec):
 def syntax_error(note, start):
 message = (
 'Invalid format specification: '
-'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
+'{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
 return SyntaxError(message)
 PICKFIRST = 'PICKFIRST'
@@ -2044,7 +2038,7 @@ def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, ins
 raise syntax_error('Expected a selector', start)
 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
 else:
-raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
+raise syntax_error(f'Operator not recognized: "{string}"', start)
 elif type == tokenize.ENDMARKER:
 break
 if current_selector:
@@ -2244,7 +2238,7 @@ def final_selector(ctx):
 except tokenize.TokenError:
 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
-class TokenIterator(object):
+class TokenIterator:
 def __init__(self, tokens):
 self.tokens = tokens
 self.counter = 0
@@ -2644,7 +2638,7 @@ def is_wellformed(f):
 if max_downloads_reached:
 break
-write_archive = set(f.get('__write_download_archive', False) for f in formats_to_download)
+write_archive = {f.get('__write_download_archive', False) for f in formats_to_download}
 assert write_archive.issubset({True, False, 'ignore'})
 if True in write_archive and False not in write_archive:
 self.record_download_archive(info_dict)
@@ -2712,7 +2706,7 @@ def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
 for lang in requested_langs:
 formats = available_subs.get(lang)
 if formats is None:
-self.report_warning('%s subtitles not available for %s' % (lang, video_id))
+self.report_warning(f'{lang} subtitles not available for {video_id}')
 continue
 for ext in formats_preference:
 if ext == 'best':
@@ -2755,7 +2749,7 @@ def format_tmpl(tmpl):
 tmpl = format_tmpl(tmpl)
 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
 if self._ensure_dir_exists(filename):
-with io.open(filename, 'a', encoding='utf-8') as f:
+with open(filename, 'a', encoding='utf-8') as f:
 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
 def __forced_printings(self, info_dict, filename, incomplete):
@@ -2920,11 +2914,11 @@ def process_info(self, info_dict):
 else:
 try:
 self.to_screen('[info] Writing video annotations to: ' + annofn)
-with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
+with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
 annofile.write(info_dict['annotations'])
 except (KeyError, TypeError):
 self.report_warning('There are no annotations to write.')
-except (OSError, IOError):
+except OSError:
 self.report_error('Cannot write annotations file: ' + annofn)
 return
@@ -2943,13 +2937,13 @@ def _write_link_file(link_type):
 return True
 try:
 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
-with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
+with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
 template_vars = {'url': url}
 if link_type == 'desktop':
 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
-except (OSError, IOError):
+except OSError:
 self.report_error(f'Cannot write internet shortcut {linkfn}')
 return False
 return True
@@ -3014,10 +3008,10 @@ def compatible_formats(formats):
 return False
 # Check extension
-exts = set(format.get('ext') for format in formats)
+exts = {format.get('ext') for format in formats}
 COMPATIBLE_EXTS = (
-set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
-set(('webm',)),
+{'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
+{'webm'},
 )
 for ext_sets in COMPATIBLE_EXTS:
 if ext_sets.issuperset(exts):
@@ -3050,7 +3044,7 @@ def correct_ext(filename, ext=new_ext):
 os.path.splitext(filename)[0]
 if filename_real_ext in (old_ext, new_ext)
 else filename)
-return '%s.%s' % (filename_wo_ext, ext)
+return f'{filename_wo_ext}.{ext}'
 # Ensure filename always has a correct extension for successful merge
 full_filename = correct_ext(full_filename)
@@ -3135,10 +3129,10 @@ def correct_ext(filename, ext=new_ext):
 except network_exceptions as err:
 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
 return
-except (OSError, IOError) as err:
+except OSError as err:
 raise UnavailableVideoError(err)
 except (ContentTooShortError, ) as err:
-self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
 return
 if success and full_filename != '-':
@@ -3343,7 +3337,7 @@ def run_pp(self, pp, infodict):
 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
 try:
 os.remove(encodeFilename(old_filename))
-except (IOError, OSError):
+except OSError:
 self.report_warning('Unable to remove downloaded original file')
 if old_filename in infodict['__files_to_move']:
 del infodict['__files_to_move'][old_filename]
@@ -3388,7 +3382,7 @@ def _make_archive_id(self, info_dict):
 break
 else:
 return
-return '%s %s' % (extractor.lower(), video_id)
+return f'{extractor.lower()} {video_id}'
 def in_download_archive(self, info_dict):
 fn = self.params.get('download_archive')
@@ -3791,7 +3785,7 @@ def _write_info_json(self, label, ie_result, infofn, overwrite=None):
 try:
 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
 return True
-except (OSError, IOError):
+except OSError:
 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
 return None
@@ -3812,9 +3806,9 @@ def _write_description(self, label, ie_result, descfn):
 else:
 try:
 self.to_screen(f'[info] Writing {label} description to: {descfn}')
-with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
+with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
 descfile.write(ie_result['description'])
-except (OSError, IOError):
+except OSError:
 self.report_error(f'Cannot write {label} description file {descfn}')
 return None
 return True
@@ -3848,12 +3842,12 @@ def _write_subtitles(self, info_dict, filename):
 try:
 # Use newline='' to prevent conversion of newline characters
 # See https://github.com/ytdl-org/youtube-dl/issues/10268
-with io.open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
+with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
 subfile.write(sub_info['data'])
 sub_info['filepath'] = sub_filename
 ret.append((sub_filename, sub_filename_final))
 continue
-except (OSError, IOError):
+except OSError:
 self.report_error(f'Cannot write video subtitles file {sub_filename}')
 return None
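A few rewrites recur throughout the hunks above. The sketch below pairs each legacy spelling with its modern replacement (a standalone illustration, not code from the file); in particular, `IOError` has been an alias of `OSError` since Python 3.3, which is why `except (OSError, IOError)` collapses to `except OSError` without changing behaviour:

```python
# Inheriting from `object` is implicit in Python 3
class Legacy(object):
    pass


class Modern:
    pass


# Set literals and set comprehensions replace set() calls
assert set(('mp3', 'mp4')) == {'mp3', 'mp4'}
formats = [{'ext': 'mp4'}, {'ext': 'webm'}]
assert set(f['ext'] for f in formats) == {f['ext'] for f in formats}

# f-strings replace %-interpolation
lang, video_id = 'en', 'abc123'
assert ('%s subtitles not available for %s' % (lang, video_id)
        == f'{lang} subtitles not available for {video_id}')

# IOError is an alias of OSError since Python 3.3
assert IOError is OSError
```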
@@ -1,11 +1,8 @@
 #!/usr/bin/env python3
-# coding: utf-8
 f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp' # noqa: F541
 __license__ = 'Public Domain'
-import io
 import itertools
 import os
 import random
@@ -67,13 +64,12 @@ def get_urls(urls, batchfile, verbose):
 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'))
 batchfd = sys.stdin
 else:
-batchfd = io.open(
-expand_path(batchfile),
-'r', encoding='utf-8', errors='ignore')
+batchfd = open(
+expand_path(batchfile), encoding='utf-8', errors='ignore')
 batch_urls = read_batch_urls(batchfd)
 if verbose:
 write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
-except IOError:
+except OSError:
 sys.exit('ERROR: batch file %s could not be read' % batchfile)
 _enc = preferredencoding()
 return [
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import unicode_literals
 # Execute with
 # $ python -m yt_dlp
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 from math import ceil
 from .compat import (
@@ -1,7 +1,4 @@
-from __future__ import unicode_literals
 import errno
-import io
 import json
 import os
 import re
@@ -15,7 +12,7 @@
 )
-class Cache(object):
+class Cache:
 def __init__(self, ydl):
 self._ydl = ydl
@@ -31,7 +28,7 @@ def _get_cache_fn(self, section, key, dtype):
 'invalid section %r' % section
 assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
 return os.path.join(
-self._get_root_dir(), section, '%s.%s' % (key, dtype))
+self._get_root_dir(), section, f'{key}.{dtype}')
 @property
 def enabled(self):
@@ -54,8 +51,7 @@ def store(self, section, key, data, dtype='json'):
 write_json_file(data, fn)
 except Exception:
 tb = traceback.format_exc()
-self._ydl.report_warning(
-'Writing cache to %r failed: %s' % (fn, tb))
+self._ydl.report_warning(f'Writing cache to {fn!r} failed: {tb}')
 def load(self, section, key, dtype='json', default=None):
 assert dtype in ('json',)
@@ -66,17 +62,16 @@ def load(self, section, key, dtype='json', default=None):
 cache_fn = self._get_cache_fn(section, key, dtype)
 try:
 try:
-with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
+with open(cache_fn, encoding='utf-8') as cachef:
 self._ydl.write_debug(f'Loading {section}.{key} from cache')
 return json.load(cachef)
 except ValueError:
 try:
 file_size = os.path.getsize(cache_fn)
-except (OSError, IOError) as oe:
+except OSError as oe:
 file_size = str(oe)
-self._ydl.report_warning(
-'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
-except IOError:
+self._ydl.report_warning(f'Cache retrieval from {cache_fn} failed ({file_size})')
+except OSError:
 pass # No cache available
 return default
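The `io.open(...)` → `open(...)` changes above (and the dropped `import io`) are behaviour-preserving because in Python 3 the builtin `open` is the very same function as `io.open`, and `'r'` is already the default mode, so it can be omitted. A quick check:

```python
import io

# In Python 3 the builtin open is io.open, so the io. prefix is redundant
assert open is io.open

# 'r' is the default mode; the commented call below is equivalent to
# open('cache.json', 'r', encoding='utf-8') (file name is hypothetical):
# with open('cache.json', encoding='utf-8') as f:
#     data = f.read()
```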
@@ -1,5 +1,3 @@
-# coding: utf-8
 import asyncio
 import base64
 import collections
@@ -125,7 +125,7 @@ def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(),
 elif browser_name in CHROMIUM_BASED_BROWSERS:
 return _extract_chrome_cookies(browser_name, profile, keyring, logger)
 else:
-raise ValueError('unknown browser: {}'.format(browser_name))
+raise ValueError(f'unknown browser: {browser_name}')
 def _extract_firefox_cookies(profile, logger):
@@ -144,8 +144,8 @@ def _extract_firefox_cookies(profile, logger):
 cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger)
 if cookie_database_path is None:
-raise FileNotFoundError('could not find firefox cookies database in {}'.format(search_root))
-logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))
+raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
+logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
 with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
 cursor = None
@@ -164,7 +164,7 @@ def _extract_firefox_cookies(profile, logger):
 path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
 comment=None, comment_url=None, rest={})
 jar.set_cookie(cookie)
-logger.info('Extracted {} cookies from firefox'.format(len(jar)))
+logger.info(f'Extracted {len(jar)} cookies from firefox')
 return jar
 finally:
 if cursor is not None:
@@ -179,7 +179,7 @@ def _firefox_browser_dir():
 elif sys.platform == 'darwin':
 return os.path.expanduser('~/Library/Application Support/Firefox')
 else:
-raise ValueError('unsupported platform: {}'.format(sys.platform))
+raise ValueError(f'unsupported platform: {sys.platform}')
 def _get_chromium_based_browser_settings(browser_name):
@@ -219,7 +219,7 @@ def _get_chromium_based_browser_settings(browser_name):
 }[browser_name]
 else:
-raise ValueError('unsupported platform: {}'.format(sys.platform))
+raise ValueError(f'unsupported platform: {sys.platform}')
 # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
 # dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
@@ -242,7 +242,7 @@ def _get_chromium_based_browser_settings(browser_name):
 def _extract_chrome_cookies(browser_name, profile, keyring, logger):
-logger.info('Extracting cookies from {}'.format(browser_name))
+logger.info(f'Extracting cookies from {browser_name}')
 if not SQLITE_AVAILABLE:
 logger.warning(('Cannot extract cookies from {} without sqlite3 support. '
@@ -260,13 +260,13 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
 if config['supports_profiles']:
 search_root = os.path.join(config['browser_dir'], profile)
 else:
-logger.error('{} does not support profiles'.format(browser_name))
+logger.error(f'{browser_name} does not support profiles')
 search_root = config['browser_dir']
 cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger)
 if cookie_database_path is None:
-raise FileNotFoundError('could not find {} cookies database in "{}"'.format(browser_name, search_root))
-logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))
+raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"')
+logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
 decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)
@@ -295,13 +295,13 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
 unencrypted_cookies += 1
 jar.set_cookie(cookie)
 if failed_cookies > 0:
-failed_message = ' ({} could not be decrypted)'.format(failed_cookies)
+failed_message = f' ({failed_cookies} could not be decrypted)'
 else:
 failed_message = ''
-logger.info('Extracted {} cookies from {}{}'.format(len(jar), browser_name, failed_message))
+logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}')
 counts = decryptor.cookie_counts.copy()
 counts['unencrypted'] = unencrypted_cookies
-logger.debug('cookie version breakdown: {}'.format(counts))
+logger.debug(f'cookie version breakdown: {counts}')
 return jar
 finally:
 if cursor is not None:
@@ -492,7 +492,7 @@ def _extract_safari_cookies(profile, logger):
 if profile is not None:
 logger.error('safari does not support profiles')
 if sys.platform != 'darwin':
-raise ValueError('unsupported platform: {}'.format(sys.platform))
+raise ValueError(f'unsupported platform: {sys.platform}')
 cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies')
@@ -506,7 +506,7 @@ def _extract_safari_cookies(profile, logger):
 cookies_data = f.read()
 jar = parse_safari_cookies(cookies_data, logger=logger)
-logger.info('Extracted {} cookies from safari'.format(len(jar)))
+logger.info(f'Extracted {len(jar)} cookies from safari')
 return jar
@@ -522,7 +522,7 @@ def __init__(self, data, logger):
 def read_bytes(self, num_bytes):
 if num_bytes < 0:
-raise ParserError('invalid read of {} bytes'.format(num_bytes))
+raise ParserError(f'invalid read of {num_bytes} bytes')
 end = self.cursor + num_bytes
 if end > len(self._data):
 raise ParserError('reached end of input')
@@ -533,7 +533,7 @@ def read_bytes(self, num_bytes):
 def expect_bytes(self, expected_value, message):
 value = self.read_bytes(len(expected_value))
 if value != expected_value:
-raise ParserError('unexpected value: {} != {} ({})'.format(value, expected_value, message))
+raise ParserError(f'unexpected value: {value} != {expected_value} ({message})')
 def read_uint(self, big_endian=False):
 data_format = '>I' if big_endian else '<I'
@@ -557,7 +557,7 @@ def skip(self, num_bytes, description='unknown'):
 self._logger.debug('skipping {} bytes ({}): {}'.format(
 num_bytes, description, self.read_bytes(num_bytes)))
 elif num_bytes < 0:
-raise ParserError('invalid skip of {} bytes'.format(num_bytes))
+raise ParserError(f'invalid skip of {num_bytes} bytes')
 def skip_to(self, offset, description='unknown'):
 self.skip(offset - self.cursor, description)
@@ -584,7 +584,7 @@ def _parse_safari_cookies_page(data, jar, logger):
 number_of_cookies = p.read_uint()
 record_offsets = [p.read_uint() for _ in range(number_of_cookies)]
 if number_of_cookies == 0:
-logger.debug('a cookies page of size {} has no cookies'.format(len(data)))
+logger.debug(f'a cookies page of size {len(data)} has no cookies')
 return
 p.skip_to(record_offsets[0], 'unknown page header field')
@@ -730,7 +730,7 @@ def _choose_linux_keyring(logger):
 SelectBackend
 """
 desktop_environment = _get_linux_desktop_environment(os.environ)
-logger.debug('detected desktop environment: {}'.format(desktop_environment.name))
+logger.debug(f'detected desktop environment: {desktop_environment.name}')
 if desktop_environment == _LinuxDesktopEnvironment.KDE:
 linux_keyring = _LinuxKeyring.KWALLET
 elif desktop_environment == _LinuxDesktopEnvironment.OTHER:
@@ -764,10 +764,10 @@ def _get_kwallet_network_wallet(logger):
 return default_wallet
 else:
 network_wallet = stdout.decode('utf-8').strip()
-logger.debug('NetworkWallet = "{}"'.format(network_wallet))
+logger.debug(f'NetworkWallet = "{network_wallet}"')
 return network_wallet
 except Exception as e:
-logger.warning('exception while obtaining NetworkWallet: {}'.format(e))
+logger.warning(f'exception while obtaining NetworkWallet: {e}')
 return default_wallet
@@ -785,8 +785,8 @@ def _get_kwallet_password(browser_keyring_name, logger):
 try:
 proc = Popen([
 'kwallet-query',
-'--read-password', '{} Safe Storage'.format(browser_keyring_name),
-'--folder', '{} Keys'.format(browser_keyring_name),
+'--read-password', f'{browser_keyring_name} Safe Storage',
+'--folder', f'{browser_keyring_name} Keys',
 network_wallet
 ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
@@ -818,7 +818,7 @@ def _get_kwallet_password(browser_keyring_name, logger):
 def _get_gnome_keyring_password(browser_keyring_name, logger):
 if not SECRETSTORAGE_AVAILABLE:
-logger.error('secretstorage not available {}'.format(SECRETSTORAGE_UNAVAILABLE_REASON))
+logger.error(f'secretstorage not available {SECRETSTORAGE_UNAVAILABLE_REASON}')
 return b''
 # the Gnome keyring does not seem to organise keys in the same way as KWallet,
 # using `dbus-monitor` during startup, it can be observed that chromium lists all keys
@@ -827,7 +827,7 @@ def _get_gnome_keyring_password(browser_keyring_name, logger):
 with contextlib.closing(secretstorage.dbus_init()) as con:
 col = secretstorage.get_default_collection(con)
 for item in col.get_all_items():
-if item.get_label() == '{} Safe Storage'.format(browser_keyring_name):
+if item.get_label() == f'{browser_keyring_name} Safe Storage':
 return item.get_secret()
 else:
 logger.error('failed to read from keyring')
@@ -861,7 +861,7 @@ def _get_mac_keyring_password(browser_keyring_name, logger):
 ['security', 'find-generic-password',
 '-w', # write password to stdout
 '-a', browser_keyring_name, # match 'account'
-'-s', '{} Safe Storage'.format(browser_keyring_name)], # match 'service'
+'-s', f'{browser_keyring_name} Safe Storage'], # match 'service'
 stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
 stdout, stderr = proc.communicate_or_kill()
@@ -879,7 +879,7 @@ def _get_windows_v10_key(browser_root, logger):
 logger.error('could not find local state file')
 return None
 logger.debug(f'Found local state file at "{path}"')
-with open(path, 'r', encoding='utf8') as f:
+with open(path, encoding='utf8') as f:
 data = json.load(f)
 try:
 base64_key = data['os_crypt']['encrypted_key']
@@ -966,7 +966,7 @@ def _open_database_copy(database_path, tmpdir):
 def _get_column_names(cursor, table_name):
-table_info = cursor.execute('PRAGMA table_info({})'.format(table_name)).fetchall()
+table_info = cursor.execute(f'PRAGMA table_info({table_name})').fetchall()
 return [row[1].decode('utf-8') for row in table_info]
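The cookies.py hunks are almost entirely `str.format` → f-string conversions. For straightforward substitutions like these the two forms produce identical strings; the f-string simply inlines the expression (the values below are made up for the demonstration):

```python
browser_name = 'firefox'
jar = ['cookie1', 'cookie2']

old_style = 'Extracted {} cookies from {}'.format(len(jar), browser_name)
new_style = f'Extracted {len(jar)} cookies from {browser_name}'
assert old_style == new_style == 'Extracted 2 cookies from firefox'
```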
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 from ..compat import compat_str
 from ..utils import (
 determine_protocol,
@@ -1,5 +1,3 @@
-from __future__ import division, unicode_literals
 import os
 import re
 import time
@@ -25,7 +23,7 @@
 )
-class FileDownloader(object):
+class FileDownloader:
 """File Downloader class.
 File downloader objects are the ones responsible of downloading the
@@ -219,7 +217,7 @@ def inner(self, *args, **kwargs):
 while True:
 try:
 return func(self, *args, **kwargs)
-except (IOError, OSError) as err:
+except OSError as err:
 retry = retry + 1
 if retry > file_access_retries or err.errno not in (errno.EACCES, errno.EINVAL):
 if not fatal:
@@ -486,4 +484,4 @@ def _debug_cmd(self, args, exe=None):
 if exe is None:
 exe = os.path.basename(str_args[0])
-self.write_debug('%s command line: %s' % (exe, shell_quote(str_args)))
+self.write_debug(f'{exe} command line: {shell_quote(str_args)}')
@@ -1,4 +1,3 @@
-from __future__ import unicode_literals
 import time
 from ..downloader import get_suitable_downloader
@@ -46,7 +45,7 @@ def real_download(self, filename, info_dict):
 if real_downloader:
 self.to_screen(
-'[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
+f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')
 info_dict['fragments'] = list(fragments_to_download)
 fd = real_downloader(self.ydl, self.params)
 return fd.real_download(filename, info_dict)
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import os.path
 import re
 import subprocess
@@ -56,7 +54,7 @@ def real_download(self, filename, info_dict):
 }
 if filename != '-':
 fsize = os.path.getsize(encodeFilename(tmpfilename))
-self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize))
+self.to_screen(f'\r[{self.get_basename()}] Downloaded {fsize} bytes')
 self.try_rename(tmpfilename, filename)
 status.update({
 'downloaded_bytes': fsize,
@@ -157,7 +155,7 @@ def _call_downloader(self, tmpfilename, info_dict):
 fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
 try:
 src, _ = self.sanitize_open(fragment_filename, 'rb')
-except IOError as err:
+except OSError as err:
 if skip_unavailable_fragments and frag_index > 1:
 self.report_skip_fragment(frag_index, err)
 continue
@@ -179,7 +177,7 @@ def _make_cmd(self, tmpfilename, info_dict):
 cmd = [self.exe, '--location', '-o', tmpfilename, '--compressed']
 if info_dict.get('http_headers') is not None:
 for key, val in info_dict['http_headers'].items():
-cmd += ['--header', '%s: %s' % (key, val)]
+cmd += ['--header', f'{key}: {val}']
 cmd += self._bool_option('--continue-at', 'continuedl', '-', '0')
 cmd += self._valueless_option('--silent', 'noprogress')
@@ -216,7 +214,7 @@ def _make_cmd(self, tmpfilename, info_dict):
 cmd = [self.exe, '-o', tmpfilename]
 if info_dict.get('http_headers') is not None:
 for key, val in info_dict['http_headers'].items():
-cmd += ['-H', '%s: %s' % (key, val)]
+cmd += ['-H', f'{key}: {val}']
 cmd += self._configuration_args()
 cmd += ['--', info_dict['url']]
 return cmd
@@ -229,7 +227,7 @@ def _make_cmd(self, tmpfilename, info_dict):
 cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies', '--compression=auto']
 if info_dict.get('http_headers') is not None:
 for key, val in info_dict['http_headers'].items():
-cmd += ['--header', '%s: %s' % (key, val)]
+cmd += ['--header', f'{key}: {val}']
 cmd += self._option('--limit-rate', 'ratelimit')
 retry = self._option('--tries', 'retries')
 if len(retry) == 2:
@@ -240,7 +238,7 @@ def _make_cmd(self, tmpfilename, info_dict):
 proxy = self.params.get('proxy')
 if proxy:
 for var in ('http_proxy', 'https_proxy'):
-cmd += ['--execute', '%s=%s' % (var, proxy)]
+cmd += ['--execute', f'{var}={proxy}']
 cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
 cmd += self._configuration_args()
 cmd += ['--', info_dict['url']]
@@ -271,7 +269,7 @@ def _make_cmd(self, tmpfilename, info_dict):
 if info_dict.get('http_headers') is not None:
 for key, val in info_dict['http_headers'].items():
-cmd += ['--header', '%s: %s' % (key, val)]
+cmd += ['--header', f'{key}: {val}']
 cmd += self._option('--max-overall-download-limit', 'ratelimit')
 cmd += self._option('--interface', 'source_address')
 cmd += self._option('--all-proxy', 'proxy')
@@ -289,10 +287,10 @@ def _make_cmd(self, tmpfilename, info_dict):
 dn = os.path.dirname(tmpfilename)
 if dn:
 if not os.path.isabs(dn):
-dn = '.%s%s' % (os.path.sep, dn)
+dn = f'.{os.path.sep}{dn}'
 cmd += ['--dir', dn + os.path.sep]
 if 'fragments' not in info_dict:
-cmd += ['--out', '.%s%s' % (os.path.sep, os.path.basename(tmpfilename))]
+cmd += ['--out', f'.{os.path.sep}{os.path.basename(tmpfilename)}']
 cmd += ['--auto-file-renaming=false']
 if 'fragments' in info_dict:
@@ -320,7 +318,7 @@ def _make_cmd(self, tmpfilename, info_dict):
 if info_dict.get('http_headers') is not None:
 for key, val in info_dict['http_headers'].items():
-cmd += ['%s:%s' % (key, val)]
+cmd += [f'{key}:{val}']
 return cmd
@@ -393,7 +391,7 @@ def _call_downloader(self, tmpfilename, info_dict):
 headers = handle_youtubedl_headers(info_dict['http_headers'])
 args += [
 '-headers',
-''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
+''.join(f'{key}: {val}\r\n' for key, val in headers.items())]
 env = None
 proxy = self.params.get('proxy')
@@ -1,5 +1,3 @@
-from __future__ import division, unicode_literals
 import io
 import itertools
 import time
@@ -1,5 +1,3 @@
-from __future__ import division, unicode_literals
 import threading
 from .common import FileDownloader
@@ -1,5 +1,3 @@
-from __future__ import division, unicode_literals
 import http.client
 import json
 import math
@@ -172,8 +170,7 @@ def _prepare_frag_download(self, ctx):
 total_frags_str += ' (not including %d ad)' % ad_frags
 else:
 total_frags_str = 'unknown (live)'
-self.to_screen(
-'[%s] Total fragments: %s' % (self.FD_NAME, total_frags_str))
+self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')
 self.report_destination(ctx['filename'])
 dl = HttpQuietDownloader(
 self.ydl,
@@ -342,8 +339,7 @@ def _prepare_external_frag_download(self, ctx):
 total_frags_str += ' (not including %d ad)' % ad_frags
 else:
 total_frags_str = 'unknown (live)'
-self.to_screen(
-'[%s] Total fragments: %s' % (self.FD_NAME, total_frags_str))
+self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')
 tmpfilename = self.temp_name(ctx['filename'])
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import re
 import io
 import binascii
@@ -102,8 +100,7 @@ def real_download(self, filename, info_dict):
 if real_downloader and not real_downloader.supports_manifest(s):
 real_downloader = None
 if real_downloader:
-self.to_screen(
-'[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
+self.to_screen(f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')
 def is_ad_fragment_start(s):
 return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import os
 import ssl
 import time
@@ -221,10 +219,12 @@ def download():
 min_data_len = self.params.get('min_filesize')
 max_data_len = self.params.get('max_filesize')
 if min_data_len is not None and data_len < min_data_len:
-self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
+self.to_screen(
+f'\r[download] File is smaller than min-filesize ({data_len} bytes < {min_data_len} bytes). Aborting.')
 return False
 if max_data_len is not None and data_len > max_data_len:
-self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
+self.to_screen(
+f'\r[download] File is larger than max-filesize ({data_len} bytes > {max_data_len} bytes). Aborting.')
 return False
 byte_counter = 0 + ctx.resume_len
@@ -265,7 +265,7 @@ def retry(e):
 assert ctx.stream is not None
 ctx.filename = self.undo_temp_name(ctx.tmpfilename)
 self.report_destination(ctx.filename)
-except (OSError, IOError) as err:
+except OSError as err:
 self.report_error('unable to open for writing: %s' % str(err))
 return False
@@ -277,7 +277,7 @@ def retry(e):
 try:
 ctx.stream.write(data_block)
-except (IOError, OSError) as err:
+except OSError as err:
 self.to_stderr('\n')
 self.report_error('unable to write data: %s' % str(err))
 return False
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import time
 import binascii
 import io
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import io
 import quopri
 import re
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import threading
 from .common import FileDownloader
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import os
 import re
 import subprocess
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import os
 import subprocess
@@ -32,7 +30,7 @@ def real_download(self, filename, info_dict):
 retval = subprocess.call(args)
 if retval == 0:
 fsize = os.path.getsize(encodeFilename(tmpfilename))
-self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
+self.to_screen(f'\r[{args[0]}] {fsize} bytes')
 self.try_rename(tmpfilename, filename)
 self._hook_progress({
 'downloaded_bytes': fsize,
@@ -1,5 +1,3 @@
-from __future__ import division, unicode_literals
 import json
 import time
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import hashlib
 import hmac
 import re
@@ -1,7 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .amp import AMPIE
 from .common import InfoExtractor
 from ..utils import (
@@ -1,7 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import re
 from .common import InfoExtractor
@@ -1,7 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..utils import (
 clean_html,
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import base64
 import binascii
 import json
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import (
 compat_parse_qs,
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import json
 import re
 import time
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import functools
 import re
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import json
 from .turner import TurnerBaseIE
@@ -1,7 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .theplatform import ThePlatformIE
 from ..utils import (
 ExtractorError,
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import functools
 import re
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import re
 from .common import InfoExtractor
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import json
 from .common import InfoExtractor
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..utils import (
 parse_iso8601,
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..utils import (
 clean_html,
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import re
 from .common import InfoExtractor
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from .youtube import YoutubeIE
 from .vimeo import VimeoIE
@@ -1,4 +1,3 @@
-# coding: utf-8
 from .common import InfoExtractor
 from ..utils import int_or_none
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import re
 from .theplatform import ThePlatformIE
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import json
 from .common import InfoExtractor
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..utils import (
 determine_ext,
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..utils import (
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import re
 from .common import InfoExtractor
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import re
 import urllib.parse
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import base64
 import hashlib
 import json
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 from .nfl import NFLTokenGenerator
 __all__ = [
@@ -1,6 +1,3 @@
-from __future__ import unicode_literals
 class TokenGenerator:
 def generate(self, anvack, mcp_id):
 raise NotImplementedError('This method must be implemented by subclasses')
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
 import json
 from .common import TokenGenerator
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import re
 from .yahoo import YahooIE
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
 import re
Some files were not shown because too many files have changed in this diff
Some files were not shown because too many files have changed in this diff Show more