Fix typos

Closes #8200.
This commit is contained in:
Jakub Wilk 2016-01-10 16:17:47 +01:00 committed by Jaime Marquínez Ferrándiz
parent 3c91e41614
commit dfb1b1468c
16 changed files with 20 additions and 20 deletions

View file

@@ -5,7 +5,7 @@
import datetime import datetime
import glob import glob
import io # For Python 2 compatibilty import io # For Python 2 compatibility
import os import os
import re import re

View file

@@ -66,7 +66,7 @@ def test_info_json(self):
textTag = a.find('TEXT') textTag = a.find('TEXT')
text = textTag.text text = textTag.text
self.assertTrue(text in expected) # assertIn only added in python 2.7 self.assertTrue(text in expected) # assertIn only added in python 2.7
# remove the first occurance, there could be more than one annotation with the same text # remove the first occurrence, there could be more than one annotation with the same text
expected.remove(text) expected.remove(text)
# We should have seen (and removed) all the expected annotation texts. # We should have seen (and removed) all the expected annotation texts.
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.') self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')

View file

@@ -1312,7 +1312,7 @@ def process_video_result(self, info_dict, download=True):
# only set the 'formats' fields if the original info_dict list them # only set the 'formats' fields if the original info_dict list them
# otherwise we end up with a circular reference, the first (and unique) # otherwise we end up with a circular reference, the first (and unique)
# element in the 'formats' field in info_dict is info_dict itself, # element in the 'formats' field in info_dict is info_dict itself,
# wich can't be exported to json # which can't be exported to json
info_dict['formats'] = formats info_dict['formats'] = formats
if self.params.get('listformats'): if self.params.get('listformats'):
self.list_formats(info_dict) self.list_formats(info_dict)

View file

@@ -313,9 +313,9 @@ def extract(self, url):
except ExtractorError: except ExtractorError:
raise raise
except compat_http_client.IncompleteRead as e: except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occured.', cause=e, expected=True) raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e: except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occured.', cause=e) raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader): def set_downloader(self, downloader):
"""Sets the downloader for this IE.""" """Sets the downloader for this IE."""

View file

@@ -105,7 +105,7 @@ def _login(self):
login_results, 'login error', default=None, group='error') login_results, 'login error', default=None, group='error')
if error: if error:
raise ExtractorError('Unable to login: %s' % error, expected=True) raise ExtractorError('Unable to login: %s' % error, expected=True)
self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.') self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return return
fb_dtsg = self._search_regex( fb_dtsg = self._search_regex(
@@ -126,7 +126,7 @@ def _login(self):
check_response = self._download_webpage(check_req, None, check_response = self._download_webpage(check_req, None,
note='Confirming login') note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None: if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.') self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err)) self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
return return

View file

@@ -487,7 +487,7 @@ class GenericIE(InfoExtractor):
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9', 'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
} }
}, },
# Embeded Ustream video # Embedded Ustream video
{ {
'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm', 'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm',
'md5': '27b99cdb639c9b12a79bca876a073417', 'md5': '27b99cdb639c9b12a79bca876a073417',
@@ -1644,7 +1644,7 @@ def _playlist_from_matches(matches, getter=None, ie=None):
if myvi_url: if myvi_url:
return self.url_result(myvi_url) return self.url_result(myvi_url)
# Look for embeded soundcloud player # Look for embedded soundcloud player
mobj = re.search( mobj = re.search(
r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"', r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
webpage) webpage)

View file

@@ -32,7 +32,7 @@ class IviIE(InfoExtractor):
}, },
'skip': 'Only works from Russia', 'skip': 'Only works from Russia',
}, },
# Serial's serie # Serial's series
{ {
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549', 'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
'md5': '221f56b35e3ed815fde2df71032f4b3e', 'md5': '221f56b35e3ed815fde2df71032f4b3e',

View file

@@ -17,7 +17,7 @@ class MDRIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html' _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html'
_TESTS = [{ _TESTS = [{
# MDR regularily deletes its videos # MDR regularly deletes its videos
'url': 'http://www.mdr.de/fakt/video189002.html', 'url': 'http://www.mdr.de/fakt/video189002.html',
'only_matching': True, 'only_matching': True,
}, { }, {

View file

@@ -100,7 +100,7 @@ def _real_extract(self, url):
class NBCSportsIE(InfoExtractor): class NBCSportsIE(InfoExtractor):
# Does not include https becuase its certificate is invalid # Does not include https because its certificate is invalid
_VALID_URL = r'http://www\.nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)' _VALID_URL = r'http://www\.nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)'
_TEST = { _TEST = {

View file

@@ -223,7 +223,7 @@ def _real_extract(self, url):
response = self._download_webpage(request_url, playlist_title) response = self._download_webpage(request_url, playlist_title)
response = self._fix_json(response) response = self._fix_json(response)
if not response.strip(): if not response.strip():
self._downloader.report_warning('Got an empty reponse, trying ' self._downloader.report_warning('Got an empty response, trying '
'adding the "newvideos" parameter') 'adding the "newvideos" parameter')
response = self._download_webpage(request_url + '&newvideos=true', response = self._download_webpage(request_url + '&newvideos=true',
playlist_title) playlist_title)

View file

@@ -37,7 +37,7 @@ def _real_extract(self, url):
formats = self._extract_m3u8_formats( formats = self._extract_m3u8_formats(
m3u8_url, display_id, 'mp4', 'm3u8_native', m3u8_url, display_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False) m3u8_id='hls', fatal=False)
# simular to GameSpotIE # similar to GameSpotIE
m3u8_path = compat_urlparse.urlparse(m3u8_url).path m3u8_path = compat_urlparse.urlparse(m3u8_url).path
QUALITIES_RE = r'((,[a-z]+\d+)+,?)' QUALITIES_RE = r'((,[a-z]+\d+)+,?)'
available_qualities = self._search_regex( available_qualities = self._search_regex(

View file

@@ -7,7 +7,7 @@
class TestURLIE(InfoExtractor): class TestURLIE(InfoExtractor):
""" Allows adressing of the test cases as test:yout.*be_1 """ """ Allows addressing of the test cases as test:yout.*be_1 """
IE_DESC = False # Do not list IE_DESC = False # Do not list
_VALID_URL = r'test(?:url)?:(?P<id>(?P<extractor>.+?)(?:_(?P<num>[0-9]+))?)$' _VALID_URL = r'test(?:url)?:(?P<id>(?P<extractor>.+?)(?:_(?P<num>[0-9]+))?)$'

View file

@@ -67,7 +67,7 @@ def _real_extract(self, url):
info = self._download_json( info = self._download_json(
'http://www.tv4play.se/player/assets/%s.json' % video_id, video_id, 'Downloading video info JSON') 'http://www.tv4play.se/player/assets/%s.json' % video_id, video_id, 'Downloading video info JSON')
# If is_geo_restricted is true, it doesn't neceserally mean we can't download it # If is_geo_restricted is true, it doesn't necessarily mean we can't download it
if info['is_geo_restricted']: if info['is_geo_restricted']:
self.report_warning('This content might not be available in your country due to licensing restrictions.') self.report_warning('This content might not be available in your country due to licensing restrictions.')
if info['requires_subscription']: if info['requires_subscription']:

View file

@@ -170,7 +170,7 @@ class VideomoreVideoIE(InfoExtractor):
'skip_download': True, 'skip_download': True,
}, },
}, { }, {
# season single serie with og:video:iframe # season single series with og:video:iframe
'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya', 'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya',
'only_matching': True, 'only_matching': True,
}, { }, {

View file

@@ -689,7 +689,7 @@ def resfunc(args):
elif mname in _builtin_classes: elif mname in _builtin_classes:
res = _builtin_classes[mname] res = _builtin_classes[mname]
else: else:
# Assume unitialized # Assume uninitialized
# TODO warn here # TODO warn here
res = undefined res = undefined
stack.append(res) stack.append(res)

View file

@@ -984,7 +984,7 @@ def date_from_str(date_str):
if sign == '-': if sign == '-':
time = -time time = -time
unit = match.group('unit') unit = match.group('unit')
# A bad aproximation? # A bad approximation?
if unit == 'month': if unit == 'month':
unit = 'day' unit = 'day'
time *= 30 time *= 30
@@ -1307,7 +1307,7 @@ def parse_filesize(s):
if s is None: if s is None:
return None return None
# The lower-case forms are of course incorrect and inofficial, # The lower-case forms are of course incorrect and unofficial,
# but we support those too # but we support those too
_UNIT_TABLE = { _UNIT_TABLE = {
'B': 1, 'B': 1,