#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import itertools
import io
import json
import locale
import math
import operator
import os
import pipes
import platform
import re
import ssl
import socket
import struct
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib

from .compat import (
    compat_basestring,
    compat_chr,
    compat_html_entities,
    compat_http_client,
    compat_kwargs,
    compat_parse_qs,
    compat_socket_create_connection,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
    compat_urlparse,
    shlex_quote,
)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise


if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        if val:
            assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        # Here comes the crazy part: In 2.6, if the xpath is a unicode,
        # .//node does not match if a node is a direct child of . !
        if isinstance(xpath, compat_str):
            xpath = xpath.encode('ascii')

        for f in node.findall(xpath):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None


# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)
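

# Illustrative usage (not part of the original module): xpath_with_ns expands
# each prefixed path component using the supplied prefix -> URI mapping, e.g.
#   >>> xpath_with_ns('media:group/media:title', {'media': 'http://search.yahoo.com/mrss/'})
#   '{http://search.yahoo.com/mrss/}group/{http://search.yahoo.com/mrss/}title'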


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    if sys.version_info < (2, 7):  # Crazy 2.6
        xpath = xpath.encode('ascii')

    n = node.find(xpath)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute("id", id, html)


def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    m = re.search(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), re.escape(value)), html)

    if not m:
        return None
    res = m.group('content')

    if res.startswith('"') or res.startswith("'"):
        res = res[1:-1]

    return unescapeHTML(res)


def extract_attributes(attributes_str, attributes_regex=r'(?s)\s*([^\s=]+)\s*=\s*["\']([^"\']+)["\']'):
    attributes = re.findall(attributes_regex, attributes_str)
    attributes_dict = {}
    if attributes:
        for (attribute_name, attribute_value) in attributes:
            attributes_dict[attribute_name] = attribute_value
    return attributes_dict


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
    """
    def replace_insane(char):
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
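

# Illustrative examples (not part of the original module); the exact output of
# sanitize_filename depends on the flags described in its docstring:
#   >>> sanitize_filename('A/B: C?')
#   'A_B - C'
#   >>> sanitize_filename('A/B: C?', restricted=True)
#   'A_B_-_C'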


def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        drive_or_unc, _ = os.path.splitunc(s)
    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|\.$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res


def _htmlentity_transform(entity):
    """Transforms an HTML entity to a character."""
    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return ('&%s;' % entity)


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s)
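

# Illustrative examples (not part of the original module): named, decimal and
# hexadecimal entities are resolved, unknown names are kept literally:
#   >>> unescapeHTML('&amp;')           # -> '&'
#   >>> unescapeHTML('&#x2F;')          # -> '/'
#   >>> unescapeHTML('&notanentity;')   # -> '&notanentity;'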


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs


def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg


class ExtractorError(Exception):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        self.msg = msg


class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = compat_socket_create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-No-Compression", which will be
    removed before making the real request.

    Part of this code was copied from:
    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        return self.do_open(functools.partial(
            _create_http_connection, self, compat_http_client.HTTPConnection, False),
            req)

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
            new_req = req_type(
                url_escaped, data=req.data, headers=req.headers,
                origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
            new_req.timeout = req.timeout
            req = new_req

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)
        if 'Youtubedl-no-compression' in req.headers:
            if 'Accept-encoding' in req.headers:
                del req.headers['Accept-encoding']
            del req.headers['Youtubedl-no-compression']

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname
        return self.do_open(functools.partial(
            _create_http_connection, self, self._https_conn_class, True),
            req, **kwargs)


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    if timezone is None:
        m = re.search(
            r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
            date_str)
        if not m:
            timezone = datetime.timedelta()
        else:
            date_str = date_str[:-len(m.group(0))]
            if not m.group('sign'):
                timezone = datetime.timedelta()
            else:
                sign = 1 if m.group('sign') == '+' else -1
                timezone = datetime.timedelta(
                    hours=sign * int(m.group('hours')),
                    minutes=sign * int(m.group('minutes')))
    date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
    dt = datetime.datetime.strptime(date_str, date_format) - timezone
    return calendar.timegm(dt.timetuple())
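

# Illustrative examples (not part of the original module): ISO 8601 timestamps
# with 'Z' or a numeric UTC offset are converted to UNIX timestamps:
#   >>> parse_iso8601('2015-09-29T08:30:00Z')         # -> 1443515400
#   >>> parse_iso8601('2015-09-29T10:30:00+02:00')    # -> 1443515400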


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # %z (UTC offset) is only supported in python>=3.2
    if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str):
        date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    format_expressions = [
        '%d %B %Y',
        '%d %b %Y',
        '%B %d %Y',
        '%b %d %Y',
        '%b %dst %Y %I:%M%p',
        '%b %dnd %Y %I:%M%p',
        '%b %dth %Y %I:%M%p',
        '%Y %m %d',
        '%Y-%m-%d',
        '%Y/%m/%d',
        '%Y/%m/%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
        '%d.%m.%Y %H:%M',
        '%d.%m.%Y %H.%M',
        '%Y-%m-%dT%H:%M:%SZ',
        '%Y-%m-%dT%H:%M:%S.%fZ',
        '%Y-%m-%dT%H:%M:%S.%f0Z',
        '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%f',
        '%Y-%m-%dT%H:%M',
    ]
    if day_first:
        format_expressions.extend([
            '%d-%m-%Y',
            '%d.%m.%Y',
            '%d/%m/%Y',
            '%d/%m/%y',
            '%d/%m/%Y %H:%M:%S',
        ])
    else:
        format_expressions.extend([
            '%m-%d-%Y',
            '%m.%d.%Y',
            '%m/%d/%Y',
            '%m/%d/%y',
            '%m/%d/%Y %H:%M:%S',
        ])
    for expression in format_expressions:
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    return upload_date
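

# Illustrative examples (not part of the original module); day_first controls
# how ambiguous numeric dates are read:
#   >>> unified_strdate('December 21, 2010')           # -> '20101221'
#   >>> unified_strdate('8/7/2009')                    # -> '20090708'
#   >>> unified_strdate('8/7/2009', day_first=False)   # -> '20090807'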


def determine_ext(url, default_ext='unknown_video'):
    if url is None:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    else:
        return default_ext


def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format


def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, "%Y%m%d").date()
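

# Illustrative examples (not part of the original module): besides plain
# YYYYMMDD strings, relative expressions are accepted:
#   >>> date_from_str('20150101')    # -> datetime.date(2015, 1, 1)
#   >>> date_from_str('now-1week')   # -> today minus seven days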


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
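

# Illustrative usage (not part of the original module): membership testing
# accepts either datetime.date objects or strings understood by date_from_str:
#   >>> '20150615' in DateRange('20150101', '20151231')   # -> True
#   >>> '20160101' in DateRange.day('20150615')           # -> False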


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res


def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b"GetStdHandle", ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b"GetConsoleMode", ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True


def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return struct_pack('%dB' % len(xs), *xs)


# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    import fcntl

    def _lock_file(f, exclusive):
        fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

    def _unlock_file(f):
        fcntl.flock(f, fcntl.LOCK_UN)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    sdata = compat_urllib_parse.urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
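

# Illustrative round trip (not part of the original module): the extra data is
# carried in the URL fragment and recovered unchanged:
#   >>> smuggled = smuggle_url('http://example.com/video', {'referer': 'http://example.com/'})
#   >>> unsmuggle_url(smuggled)
#   ('http://example.com/video', {'referer': 'http://example.com/'})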


def format_bytes(bytes):
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)


def parse_filesize(s):
    if s is None:
        return None
    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'KiB': 1024,
        'KB': 1000,
        'kB': 1024,
        'Kb': 1000,
        'MiB': 1024 ** 2,
        'MB': 1000 ** 2,
        'mB': 1024 ** 2,
        'Mb': 1000 ** 2,
        'GiB': 1024 ** 3,
        'GB': 1000 ** 3,
        'gB': 1024 ** 3,
        'Gb': 1000 ** 3,
        'TiB': 1024 ** 4,
        'TB': 1000 ** 4,
        'tB': 1024 ** 4,
        'Tb': 1000 ** 4,
        'PiB': 1024 ** 5,
        'PB': 1000 ** 5,
        'pB': 1024 ** 5,
        'Pb': 1000 ** 5,
        'EiB': 1024 ** 6,
        'EB': 1000 ** 6,
        'eB': 1024 ** 6,
        'Eb': 1000 ** 6,
        'ZiB': 1024 ** 7,
        'ZB': 1000 ** 7,
        'zB': 1024 ** 7,
        'Zb': 1000 ** 7,
        'YiB': 1024 ** 8,
        'YB': 1000 ** 8,
        'yB': 1024 ** 8,
        'Yb': 1000 ** 8,
    }

    units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s)
    if not m:
        return None

    num_str = m.group('num').replace(',', '.')
    mult = _UNIT_TABLE[m.group('unit')]
    return int(float(num_str) * mult)
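

# Illustrative examples (not part of the original module): decimal and binary
# unit suffixes map to different multipliers:
#   >>> parse_filesize('1.2MB')    # -> 1200000
#   >>> parse_filesize('1.2MiB')   # -> 1258291
#   >>> format_bytes(1258291)      # -> '1.20MiB'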


def month_by_name(name):
    """ Return the number of a month by (locale-independently) English name """

    try:
        return ENGLISH_MONTH_NAMES.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviations """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)


def setproctitle(title):
    assert isinstance(title, compat_str)
    try:
        libc = ctypes.cdll.LoadLibrary("libc.so.6")
    except OSError:
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    if s.startswith(start):
        return s[len(start):]
    return s


def remove_end(s, end):
    if s.endswith(end):
        return s[:-len(end)]
    return s


def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return "HEAD"


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    return default if v is None else (int(v) * invscale // scale)


def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    return default if v is None else (float(v) * invscale / scale)


def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    m = re.match(
        r'''(?ix)(?:P?T)?
        (?:
            (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*|
            (?P<only_hours>[0-9.]+)\s*(?:hours?)|
            \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*|
            (?:
                (?:
                    (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)?
                    (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*
                )?
                (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
            )?
            (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
        )$''', s)
    if not m:
        return None
    res = 0
    if m.group('only_mins'):
        return float_or_none(m.group('only_mins'), invscale=60)
    if m.group('only_hours'):
        return float_or_none(m.group('only_hours'), invscale=60 * 60)
    if m.group('secs'):
        res += int(m.group('secs'))
    if m.group('mins_reversed'):
        res += int(m.group('mins_reversed')) * 60
    if m.group('mins'):
        res += int(m.group('mins')) * 60
    if m.group('hours'):
        res += int(m.group('hours')) * 60 * 60
    if m.group('hours_reversed'):
        res += int(m.group('hours_reversed')) * 60 * 60
    if m.group('days'):
        res += int(m.group('days')) * 24 * 60 * 60
    if m.group('ms'):
        res += float(m.group('ms'))
    return res
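

# Illustrative examples (not part of the original module): several textual and
# colon-separated notations are normalised to seconds:
#   >>> parse_duration('9:54:21')    # -> 35661
#   >>> parse_duration('3 min')      # -> 180.0
#   >>> parse_duration('PT1H30M')    # -> 5400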


def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)
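

# Illustrative examples (not part of the original module): the optional
# expected_real_ext guards against mangling filenames with unexpected suffixes:
#   >>> prepend_extension('video.mp4', 'temp')           # -> 'video.temp.mp4'
#   >>> replace_extension('video.mp4', 'mp3', 'mp4')     # -> 'video.mp3'
#   >>> replace_extension('video.dat', 'mp3', 'mp4')     # -> 'video.dat.mp3'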


def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized


class PagedList(object):
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())


class OnDemandPagedList(PagedList):
    def __init__(self, pagefunc, pagesize):
        self._pagefunc = pagefunc
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = list(self._pagefunc(pagenum))

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)

            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res


class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    page = page[:only_more]
                    res.extend(page)
                    break
            res.extend(page)
        return res


def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()

try:
    struct.pack('!I', 0)
except TypeError:
    # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
    def struct_pack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.pack(spec, *args)

    def struct_unpack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.unpack(spec, *args)
else:
    struct_pack = struct.pack
    struct_unpack = struct.unpack


def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')


def encode_dict(d, encoding='utf-8'):
    return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())


try:
    etree_iter = xml.etree.ElementTree.Element.iter
except AttributeError:  # Python <=2.6
    etree_iter = lambda n: n.findall('.//*')


def parse_xml(s):
    class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
        def doctype(self, name, pubid, system):
            pass  # Ignore doctypes

    parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
    kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
    tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
    # Fix up XML parser in Python 2.x
    if sys.version_info < (3, 0):
        for n in etree_iter(tree):
            if n.text is not None:
                if not isinstance(n.text, compat_str):
                    n.text = n.text.decode('utf-8')
    return tree


US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


def parse_age_limit(s):
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s, None)


def strip_jsonp(code):
    return re.sub(
        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)


def js_to_json(code):
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        if v.startswith('"'):
            return v
        if v.startswith("'"):
            v = v[1:-1]
            v = re.sub(r"\\\\|\\'|\"", lambda m: {
                '\\\\': '\\\\',
                "\\'": "'",
                '"': '\\"',
            }[m.group(0)], v)
        return '"%s"' % v

    res = re.sub(r'''(?x)
        "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'|
        [a-zA-Z_][.a-zA-Z_0-9]*
        ''', fix_kv, code)
    res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res)
    return res
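

# Illustrative examples (not part of the original module): single-quoted
# strings, bare identifiers and trailing commas are rewritten so json.loads
# accepts the result:
#   >>> js_to_json("{'abc': true}")    # -> '{"abc": true}'
#   >>> js_to_json('{"abc": def,}')    # -> '{"abc": "def"}'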


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q
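

# Illustrative usage (not part of the original module): the returned callable
# ranks known format identifiers by their position in the list:
#   >>> order = qualities(['240p', '480p', '720p'])
#   >>> order('720p')      # -> 2
#   >>> order('unknown')   # -> -1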
  1411. DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
  1412. def limit_length(s, length):
  1413. """ Add ellipses to overly long strings """
  1414. if s is None:
  1415. return None
  1416. ELLIPSES = '...'
  1417. if len(s) > length:
  1418. return s[:length - len(ELLIPSES)] + ELLIPSES
  1419. return s
  1420. def version_tuple(v):
  1421. return tuple(int(e) for e in re.split(r'[-.]', v))
  1422. def is_outdated_version(version, limit, assume_new=True):
  1423. if not version:
  1424. return not assume_new
  1425. try:
  1426. return version_tuple(version) < version_tuple(limit)
  1427. except ValueError:
  1428. return not assume_new
def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(shlex_quote(a) for a in args)


def mimetype2ext(mt):
    _, _, res = mt.rpartition('/')

    return {
        'x-ms-wmv': 'wmv',
        'x-mp4-fragmented': 'mp4',
        'ttml+xml': 'ttml',
    }.get(res, res)

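# Illustrative examples for mimetype2ext() (added comments): the part after
# the final '/' is looked up in the table above and returned unchanged when
# unmapped, e.g.
#   mimetype2ext('video/x-ms-wmv') -> 'wmv'
#   mimetype2ext('video/mp4')      -> 'mp4'
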
def urlhandle_detect_ext(url_handle):
    try:
        url_handle.headers
        getheader = lambda h: url_handle.headers[h]
    except AttributeError:  # Python < 3
        getheader = url_handle.info().getheader

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)


def determine_protocol(info_dict):
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8'
    elif ext == 'f4m':
        return 'f4m'

    return compat_urllib_parse_urlparse(url).scheme


def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    table = [header_row] + data
    max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
    format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
    return '\n'.join(format_str % tuple(row) for row in table)

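# Illustrative usage of render_table() (added comment): every column is
# left-aligned and padded to the width of its longest cell plus one space:
#   render_table(['format', 'note'], [['mp4', 'best'], ['flv', '240p']])
# yields three aligned lines with 'format'/'note' as the header row.
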
def _match_one(filter_part, dct):
    COMPARISON_OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = COMPARISON_OPERATORS[m.group('op')]
        if m.group('strval') is not None:
            if m.group('op') not in ('=', '!='):
                raise ValueError(
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('strval')
        else:
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        actual_value = dct.get(m.group('key'))
        if actual_value is None:
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

    UNARY_OPERATORS = {
        '': lambda v: v is not None,
        '!': lambda v: v is None,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)


def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or False """

    return all(
        _match_one(filter_part, dct) for filter_part in filter_str.split('&'))

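# Illustrative usage of the filter syntax (added comment, mirrors the
# behaviour of _match_one above):
#   match_str('like_count > 100 & dislike_count <? 50 & description',
#             {'like_count': 190, 'dislike_count': 10, 'description': 'foo'})
# is True: '&' separates clauses, '?' lets a comparison pass when the field
# is missing, and a bare key only requires the field to be present.
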
def match_filter_func(filter_str):
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return 0.0

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)

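# Illustrative examples (added comments):
#   parse_dfxp_time_expr('00:01:30.5') == 90.5         # seconds
#   srt_subtitles_timecode(90.5) == '00:01:30,500'     # SRT timestamp
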
def dfxp2srt(dfxp_data):
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
    })

    def parse_node(node):
        str_or_empty = functools.partial(str_or_none, default='')

        out = str_or_empty(node.text)

        for child in node:
            if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                out += '\n' + str_or_empty(child.tail)
            elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'):
                out += str_or_empty(parse_node(child))
            else:
                out += str_or_empty(xml.etree.ElementTree.tostring(child))

        return out

    dfxp = xml.etree.ElementTree.fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib['begin'])
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        if not end_time:
            end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur'])
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)

def cli_option(params, command_option, param):
    param = params.get(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]

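# Illustrative examples for cli_bool_option() (added comments; the option and
# parameter names are hypothetical):
#   cli_bool_option({'check_cert': True}, '--check-certificate', 'check_cert')
#     -> ['--check-certificate', 'true']
#   cli_bool_option({'check_cert': False}, '--check-certificate', 'check_cert', separator='=')
#     -> ['--check-certificate=false']
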
def cli_valueless_option(params, command_option, param, expected_value=True):
    param = params.get(param)
    return [command_option] if param == expected_value else []


def cli_configuration_args(params, param, default=[]):
    ex_args = params.get(param)
    if ex_args is None:
        return default
    assert isinstance(ex_args, list)
    return ex_args

class ISO639Utils(object):
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name

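# Illustrative examples for ISO639Utils (added comments):
#   ISO639Utils.short2long('en')  -> 'eng'
#   ISO639Utils.long2short('deu') -> 'de'
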
class ISO3166Utils(object):
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AL': 'Albania',
        'DZ': 'Algeria',
        'AS': 'American Samoa',
        'AD': 'Andorra',
        'AO': 'Angola',
        'AI': 'Anguilla',
        'AQ': 'Antarctica',
        'AG': 'Antigua and Barbuda',
        'AR': 'Argentina',
        'AM': 'Armenia',
        'AW': 'Aruba',
        'AU': 'Australia',
        'AT': 'Austria',
        'AZ': 'Azerbaijan',
        'BS': 'Bahamas',
        'BH': 'Bahrain',
        'BD': 'Bangladesh',
        'BB': 'Barbados',
        'BY': 'Belarus',
        'BE': 'Belgium',
        'BZ': 'Belize',
        'BJ': 'Benin',
        'BM': 'Bermuda',
        'BT': 'Bhutan',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana',
        'BV': 'Bouvet Island',
        'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BG': 'Bulgaria',
        'BF': 'Burkina Faso',
        'BI': 'Burundi',
        'KH': 'Cambodia',
        'CM': 'Cameroon',
        'CA': 'Canada',
        'CV': 'Cape Verde',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'TD': 'Chad',
        'CL': 'Chile',
        'CN': 'China',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CO': 'Colombia',
        'KM': 'Comoros',
        'CG': 'Congo',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CR': 'Costa Rica',
        'CI': 'Côte d\'Ivoire',
        'HR': 'Croatia',
        'CU': 'Cuba',
        'CW': 'Curaçao',
        'CY': 'Cyprus',
        'CZ': 'Czech Republic',
        'DK': 'Denmark',
        'DJ': 'Djibouti',
        'DM': 'Dominica',
        'DO': 'Dominican Republic',
        'EC': 'Ecuador',
        'EG': 'Egypt',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea',
        'EE': 'Estonia',
        'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'FJ': 'Fiji',
        'FI': 'Finland',
        'FR': 'France',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GA': 'Gabon',
        'GM': 'Gambia',
        'GE': 'Georgia',
        'DE': 'Germany',
        'GH': 'Ghana',
        'GI': 'Gibraltar',
        'GR': 'Greece',
        'GL': 'Greenland',
        'GD': 'Grenada',
        'GP': 'Guadeloupe',
        'GU': 'Guam',
        'GT': 'Guatemala',
        'GG': 'Guernsey',
        'GN': 'Guinea',
        'GW': 'Guinea-Bissau',
        'GY': 'Guyana',
        'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'HN': 'Honduras',
        'HK': 'Hong Kong',
        'HU': 'Hungary',
        'IS': 'Iceland',
        'IN': 'India',
        'ID': 'Indonesia',
        'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq',
        'IE': 'Ireland',
        'IM': 'Isle of Man',
        'IL': 'Israel',
        'IT': 'Italy',
        'JM': 'Jamaica',
        'JP': 'Japan',
        'JE': 'Jersey',
        'JO': 'Jordan',
        'KZ': 'Kazakhstan',
        'KE': 'Kenya',
        'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'KW': 'Kuwait',
        'KG': 'Kyrgyzstan',
        'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia',
        'LB': 'Lebanon',
        'LS': 'Lesotho',
        'LR': 'Liberia',
        'LY': 'Libya',
        'LI': 'Liechtenstein',
        'LT': 'Lithuania',
        'LU': 'Luxembourg',
        'MO': 'Macao',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar',
        'MW': 'Malawi',
        'MY': 'Malaysia',
        'MV': 'Maldives',
        'ML': 'Mali',
        'MT': 'Malta',
        'MH': 'Marshall Islands',
        'MQ': 'Martinique',
        'MR': 'Mauritania',
        'MU': 'Mauritius',
        'YT': 'Mayotte',
        'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'MC': 'Monaco',
        'MN': 'Mongolia',
        'ME': 'Montenegro',
        'MS': 'Montserrat',
        'MA': 'Morocco',
        'MZ': 'Mozambique',
        'MM': 'Myanmar',
        'NA': 'Namibia',
        'NR': 'Nauru',
        'NP': 'Nepal',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NI': 'Nicaragua',
        'NE': 'Niger',
        'NG': 'Nigeria',
        'NU': 'Niue',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'NO': 'Norway',
        'OM': 'Oman',
        'PK': 'Pakistan',
        'PW': 'Palau',
        'PS': 'Palestine, State of',
        'PA': 'Panama',
        'PG': 'Papua New Guinea',
        'PY': 'Paraguay',
        'PE': 'Peru',
        'PH': 'Philippines',
        'PN': 'Pitcairn',
        'PL': 'Poland',
        'PT': 'Portugal',
        'PR': 'Puerto Rico',
        'QA': 'Qatar',
        'RE': 'Réunion',
        'RO': 'Romania',
        'RU': 'Russian Federation',
        'RW': 'Rwanda',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'WS': 'Samoa',
        'SM': 'San Marino',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SN': 'Senegal',
        'RS': 'Serbia',
        'SC': 'Seychelles',
        'SL': 'Sierra Leone',
        'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)',
        'SK': 'Slovakia',
        'SI': 'Slovenia',
        'SB': 'Solomon Islands',
        'SO': 'Somalia',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'ES': 'Spain',
        'LK': 'Sri Lanka',
        'SD': 'Sudan',
        'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen',
        'SZ': 'Swaziland',
        'SE': 'Sweden',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of',
        'TH': 'Thailand',
        'TL': 'Timor-Leste',
        'TG': 'Togo',
        'TK': 'Tokelau',
        'TO': 'Tonga',
        'TT': 'Trinidad and Tobago',
        'TN': 'Tunisia',
        'TR': 'Turkey',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'TV': 'Tuvalu',
        'UG': 'Uganda',
        'UA': 'Ukraine',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay',
        'UZ': 'Uzbekistan',
        'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-1 alpha-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())

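# Illustrative example for ISO3166Utils (added comment): the lookup is
# case-insensitive on the input code, e.g.
#   ISO3166Utils.short2full('de') -> 'Germany'
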
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy

        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)