
Fix typos

Closes #8200.
Author: Jakub Wilk
Committed by: Jaime Marquínez Ferrándiz
Commit: dfb1b1468c
16 changed files with 20 additions and 20 deletions
 1. devscripts/gh-pages/update-copyright.py  (+1, -1)
 2. test/test_write_annotations.py  (+1, -1)
 3. youtube_dl/YoutubeDL.py  (+1, -1)
 4. youtube_dl/extractor/common.py  (+2, -2)
 5. youtube_dl/extractor/facebook.py  (+2, -2)
 6. youtube_dl/extractor/generic.py  (+2, -2)
 7. youtube_dl/extractor/ivi.py  (+1, -1)
 8. youtube_dl/extractor/mdr.py  (+1, -1)
 9. youtube_dl/extractor/nbc.py  (+1, -1)
10. youtube_dl/extractor/nhl.py  (+1, -1)
11. youtube_dl/extractor/ora.py  (+1, -1)
12. youtube_dl/extractor/testurl.py  (+1, -1)
13. youtube_dl/extractor/tv4.py  (+1, -1)
14. youtube_dl/extractor/videomore.py  (+1, -1)
15. youtube_dl/swfinterp.py  (+1, -1)
16. youtube_dl/utils.py  (+2, -2)

devscripts/gh-pages/update-copyright.py  (+1, -1)

@@ -5,7 +5,7 @@ from __future__ import with_statement, unicode_literals
 import datetime
 import glob
-import io  # For Python 2 compatibilty
+import io  # For Python 2 compatibility
 import os
 import re


test/test_write_annotations.py  (+1, -1)

@@ -66,7 +66,7 @@ class TestAnnotations(unittest.TestCase):
 textTag = a.find('TEXT')
 text = textTag.text
 self.assertTrue(text in expected)  # assertIn only added in python 2.7
-# remove the first occurance, there could be more than one annotation with the same text
+# remove the first occurrence, there could be more than one annotation with the same text
 expected.remove(text)
 # We should have seen (and removed) all the expected annotation texts.
 self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
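Removing each matched text with `list.remove()` is what makes this comparison duplicate-safe: `remove()` drops only the first occurrence, so an annotation text that is expected twice must also be seen twice. A quick illustration with made-up values (not the real annotation data):

    expected = ['intro', 'outro', 'intro']
    seen = ['intro', 'intro', 'outro']

    for text in seen:
        assert text in expected
        expected.remove(text)  # drops only the first matching entry

    assert expected == []  # every expected text was seen exactly once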


youtube_dl/YoutubeDL.py  (+1, -1)

@@ -1312,7 +1312,7 @@ class YoutubeDL(object):
 # only set the 'formats' fields if the original info_dict list them
 # otherwise we end up with a circular reference, the first (and unique)
 # element in the 'formats' field in info_dict is info_dict itself,
-# wich can't be exported to json
+# which can't be exported to json
 info_dict['formats'] = formats
 if self.params.get('listformats'):
     self.list_formats(info_dict)
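The circular reference this comment warns about is easy to reproduce outside youtube-dl; `json` simply refuses to serialize a structure that contains itself. A minimal sketch (illustrative dict, not real extractor output):

    import json

    info_dict = {'id': 'demo', 'title': 'Demo video'}
    # A single-format video: the info_dict doubles as its own format entry ...
    info_dict['formats'] = [info_dict]

    try:
        json.dumps(info_dict)
    except ValueError as exc:
        # ... and the cycle makes JSON export impossible.
        print(exc)  # Circular reference detected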


youtube_dl/extractor/common.py  (+2, -2)

@@ -313,9 +313,9 @@ class InfoExtractor(object):
 except ExtractorError:
     raise
 except compat_http_client.IncompleteRead as e:
-    raise ExtractorError('A network error has occured.', cause=e, expected=True)
+    raise ExtractorError('A network error has occurred.', cause=e, expected=True)
 except (KeyError, StopIteration) as e:
-    raise ExtractorError('An extractor error has occured.', cause=e)
+    raise ExtractorError('An extractor error has occurred.', cause=e)

 def set_downloader(self, downloader):
     """Sets the downloader for this IE."""


youtube_dl/extractor/facebook.py  (+2, -2)

@@ -105,7 +105,7 @@ class FacebookIE(InfoExtractor):
     login_results, 'login error', default=None, group='error')
 if error:
     raise ExtractorError('Unable to login: %s' % error, expected=True)
-self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
+self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
 return
 fb_dtsg = self._search_regex(
@@ -126,7 +126,7 @@ class FacebookIE(InfoExtractor):
 check_response = self._download_webpage(check_req, None,
     note='Confirming login')
 if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
-    self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.')
+    self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
     self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
     return


youtube_dl/extractor/generic.py  (+2, -2)

@@ -487,7 +487,7 @@ class GenericIE(InfoExtractor):
 'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
 }
 },
-# Embeded Ustream video
+# Embedded Ustream video
 {
 'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm',
 'md5': '27b99cdb639c9b12a79bca876a073417',
@@ -1644,7 +1644,7 @@ class GenericIE(InfoExtractor):
 if myvi_url:
     return self.url_result(myvi_url)
-# Look for embeded soundcloud player
+# Look for embedded soundcloud player
 mobj = re.search(
     r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
     webpage)
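The SoundCloud iframe pattern above can be exercised on its own; a small sketch with a made-up embed snippet (the HTML is illustrative, not taken from a real page):

    import re

    SOUNDCLOUD_IFRAME_RE = (
        r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*'
        r'src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"'
    )

    # Hypothetical embed markup of the kind the regex is meant to find.
    webpage = (
        '<iframe width="100%" height="166" scrolling="no" '
        'src="https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/tracks/1234"></iframe>'
    )

    mobj = re.search(SOUNDCLOUD_IFRAME_RE, webpage)
    if mobj is not None:
        print(mobj.group('url'))  # the player URL, ready for url_result()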


youtube_dl/extractor/ivi.py  (+1, -1)

@@ -32,7 +32,7 @@ class IviIE(InfoExtractor):
 },
 'skip': 'Only works from Russia',
 },
-# Serial's serie
+# Serial's series
 {
 'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
 'md5': '221f56b35e3ed815fde2df71032f4b3e',


youtube_dl/extractor/mdr.py  (+1, -1)

@@ -17,7 +17,7 @@ class MDRIE(InfoExtractor):
 _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html'
 _TESTS = [{
-    # MDR regularily deletes its videos
+    # MDR regularly deletes its videos
     'url': 'http://www.mdr.de/fakt/video189002.html',
     'only_matching': True,
 }, {


youtube_dl/extractor/nbc.py  (+1, -1)

@@ -100,7 +100,7 @@ class NBCSportsVPlayerIE(InfoExtractor):
 class NBCSportsIE(InfoExtractor):
-    # Does not include https becuase its certificate is invalid
+    # Does not include https because its certificate is invalid
     _VALID_URL = r'http://www\.nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)'
     _TEST = {


youtube_dl/extractor/nhl.py  (+1, -1)

@@ -223,7 +223,7 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
 response = self._download_webpage(request_url, playlist_title)
 response = self._fix_json(response)
 if not response.strip():
-    self._downloader.report_warning('Got an empty reponse, trying '
+    self._downloader.report_warning('Got an empty response, trying '
         'adding the "newvideos" parameter')
     response = self._download_webpage(request_url + '&newvideos=true',
         playlist_title)


youtube_dl/extractor/ora.py  (+1, -1)

@@ -37,7 +37,7 @@ class OraTVIE(InfoExtractor):
 formats = self._extract_m3u8_formats(
     m3u8_url, display_id, 'mp4', 'm3u8_native',
     m3u8_id='hls', fatal=False)
-# simular to GameSpotIE
+# similar to GameSpotIE
 m3u8_path = compat_urlparse.urlparse(m3u8_url).path
 QUALITIES_RE = r'((,[a-z]+\d+)+,?)'
 available_qualities = self._search_regex(
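The QUALITIES_RE trick borrowed from the GameSpot extractor pulls the comma-separated quality list out of the HLS manifest path; a standalone sketch with an invented m3u8 URL:

    import re
    from urllib.parse import urlparse

    QUALITIES_RE = r'((,[a-z]+\d+)+,?)'

    # Invented URL shaped like the multi-bitrate manifests the extractor sees.
    m3u8_url = 'https://example.com/vod/discussion,mobile400,highdef1200,.mp4.m3u8'
    m3u8_path = urlparse(m3u8_url).path

    m = re.search(QUALITIES_RE, m3u8_path)
    if m:
        available_qualities = m.group(1).strip(',').split(',')
        print(available_qualities)  # ['mobile400', 'highdef1200']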


youtube_dl/extractor/testurl.py  (+1, -1)

@@ -7,7 +7,7 @@ from ..utils import ExtractorError
 class TestURLIE(InfoExtractor):
-    """ Allows adressing of the test cases as test:yout.*be_1 """
+    """ Allows addressing of the test cases as test:yout.*be_1 """
     IE_DESC = False  # Do not list
     _VALID_URL = r'test(?:url)?:(?P<id>(?P<extractor>.+?)(?:_(?P<num>[0-9]+))?)$'
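The `test:yout.*be_1` form packs an extractor-name regex and an optional test index into one pseudo-URL, and `_VALID_URL` splits them apart again. A quick check of that pattern in isolation:

    import re

    _VALID_URL = r'test(?:url)?:(?P<id>(?P<extractor>.+?)(?:_(?P<num>[0-9]+))?)$'

    # An extractor-name regex ('yout.*be') plus an optional test index ('1').
    mobj = re.match(_VALID_URL, 'test:yout.*be_1')
    print(mobj.group('extractor'))  # yout.*be
    print(mobj.group('num'))        # 1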


youtube_dl/extractor/tv4.py  (+1, -1)

@@ -67,7 +67,7 @@ class TV4IE(InfoExtractor):
 info = self._download_json(
     'http://www.tv4play.se/player/assets/%s.json' % video_id, video_id, 'Downloading video info JSON')
-# If is_geo_restricted is true, it doesn't neceserally mean we can't download it
+# If is_geo_restricted is true, it doesn't necessarily mean we can't download it
 if info['is_geo_restricted']:
     self.report_warning('This content might not be available in your country due to licensing restrictions.')
 if info['requires_subscription']:


youtube_dl/extractor/videomore.py  (+1, -1)

@@ -170,7 +170,7 @@ class VideomoreVideoIE(InfoExtractor):
 'skip_download': True,
 },
 }, {
-# season single serie with og:video:iframe
+# season single series with og:video:iframe
 'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya',
 'only_matching': True,
 }, {


youtube_dl/swfinterp.py  (+1, -1)

@@ -689,7 +689,7 @@ class SWFInterpreter(object):
 elif mname in _builtin_classes:
     res = _builtin_classes[mname]
 else:
-    # Assume unitialized
+    # Assume uninitialized
     # TODO warn here
     res = undefined
 stack.append(res)


youtube_dl/utils.py  (+2, -2)

@@ -984,7 +984,7 @@ def date_from_str(date_str):
 if sign == '-':
     time = -time
 unit = match.group('unit')
-# A bad aproximation?
+# A bad approximation?
 if unit == 'month':
     unit = 'day'
     time *= 30
@@ -1307,7 +1307,7 @@ def parse_filesize(s):
 if s is None:
     return None
-# The lower-case forms are of course incorrect and inofficial,
+# The lower-case forms are of course incorrect and unofficial,
 # but we support those too
 _UNIT_TABLE = {
     'B': 1,
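The "bad approximation" in the date_from_str hunk above is that relative offsets such as 'now-1month' are converted to a fixed 30 days, because timedelta has no month unit. A compressed sketch of just that branch (simplified; the real function also parses absolute dates and more forms):

    import datetime
    import re

    def relative_date(date_str):
        # Handles only the 'now-3months' style of input, as an illustration.
        m = re.match(r'(?:now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)s?$', date_str)
        if m is None:
            return datetime.date.today()
        time = int(m.group('time'))
        if m.group('sign') == '-':
            time = -time
        unit = m.group('unit')
        # Months and years have no fixed length, so approximate them in days.
        if unit == 'month':
            unit, time = 'day', time * 30
        elif unit == 'year':
            unit, time = 'day', time * 365
        return datetime.date.today() + datetime.timedelta(**{unit + 's': time})

    print(relative_date('now-1month'))  # roughly one month ago, i.e. 30 days back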

