#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

import base64
import binascii
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import pipes
import platform
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib

from .compat import (
    compat_HTMLParser,
    compat_basestring,
    compat_chr,
    compat_etree_fromstring,
    compat_html_entities,
    compat_http_client,
    compat_kwargs,
    compat_parse_qs,
    compat_shlex_quote,
    compat_socket_create_connection,
    compat_str,
    compat_struct_pack,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_xpath,
)
from .socks import (
    ProxyType,
    sockssocket,
)


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/44.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØŒÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøœùúûüýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOO', ['OE'], 'UUUUYP', ['ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionoooooo', ['oe'], 'uuuuypy')))


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non-ASCII characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise


if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None


# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)
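

# Usage sketch (illustrative values): prefixed path components are expanded
# into ElementTree's Clark notation, so
#   xpath_with_ns('ns0:media/ns0:url', {'ns0': 'http://example.com/ns'})
# should yield '{http://example.com/ns}media/{http://example.com/ns}url'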


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    m = re.search(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), re.escape(value)), html)

    if not m:
        return None
    res = m.group('content')

    if res.startswith('"') or res.startswith("'"):
        res = res[1:-1]

    return unescapeHTML(res)


class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""
    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    parser.feed(html_element)
    parser.close()
    return parser.attrs
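

# Usage sketch (illustrative element): attribute names are lower-cased by
# HTMLParser, unquoted values come back as strings, bare attributes as None:
#   extract_attributes('<video SRC="demo.mp4" width=640 controls>')
# should yield {'src': 'demo.mp4', 'width': '640', 'controls': None}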


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
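

# Usage sketch (illustrative values):
#   sanitize_filename('Foo: Bar?')                   -> 'Foo - Bar'
#   sanitize_filename('Foo: Bar?', restricted=True)  -> 'Foo_-_Bar'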


def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        drive_or_unc, _ = os.path.splitunc(s)
    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)


# Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
# unwanted failures due to missing protocol
def sanitize_url(url):
    return 'http:%s' % url if url.startswith('//') else url


def sanitized_Request(url, *args, **kwargs):
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res


def _htmlentity_transform(entity):
    """Transforms an HTML entity to a character."""
    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s)
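

# Usage sketch (illustrative values): named, hex and decimal entities are all
# resolved, while unknown names are kept literally:
#   unescapeHTML('&amp; &#x41; &#65; &bogus;')
# should yield '& A A &bogus;'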


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs


def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg


class ExtractorError(Exception):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        self.msg = msg


class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs[b'strict'] = True
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = compat_socket_create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers
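

# Usage sketch (illustrative header dict): the internal marker header strips
# Accept-Encoding and is itself removed before the real request goes out:
#   handle_youtubedl_headers(
#       {'Youtubedl-no-compression': 'True', 'Accept-Encoding': 'gzip'})
# should yield {}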


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        m = re.search(
            r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
            date_str)
        if not m:
            timezone = datetime.timedelta()
        else:
            date_str = date_str[:-len(m.group(0))]
            if not m.group('sign'):
                timezone = datetime.timedelta()
            else:
                sign = 1 if m.group('sign') == '+' else -1
                timezone = datetime.timedelta(
                    hours=sign * int(m.group('hours')),
                    minutes=sign * int(m.group('minutes')))
    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass
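

# Usage sketch (illustrative value): the UTC offset is folded into the
# returned epoch timestamp:
#   parse_iso8601('2015-12-23T10:00:00+08:00')
# should yield 1450836000 (i.e. 2015-12-23 02:00:00 UTC)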


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # %z (UTC offset) is only supported in python>=3.2
    if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str):
        date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    format_expressions = [
        '%d %B %Y',
        '%d %b %Y',
        '%B %d %Y',
        '%b %d %Y',
        '%b %dst %Y %I:%M',
        '%b %dnd %Y %I:%M',
        '%b %dth %Y %I:%M',
        '%Y %m %d',
        '%Y-%m-%d',
        '%Y/%m/%d',
        '%Y/%m/%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
        '%d.%m.%Y %H:%M',
        '%d.%m.%Y %H.%M',
        '%Y-%m-%dT%H:%M:%SZ',
        '%Y-%m-%dT%H:%M:%S.%fZ',
        '%Y-%m-%dT%H:%M:%S.%f0Z',
        '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%f',
        '%Y-%m-%dT%H:%M',
    ]
    if day_first:
        format_expressions.extend([
            '%d-%m-%Y',
            '%d.%m.%Y',
            '%d/%m/%Y',
            '%d/%m/%y',
            '%d/%m/%Y %H:%M:%S',
        ])
    else:
        format_expressions.extend([
            '%m-%d-%Y',
            '%m.%d.%Y',
            '%m/%d/%Y',
            '%m/%d/%y',
            '%m/%d/%Y %H:%M:%S',
        ])
    for expression in format_expressions:
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if upload_date is not None:
        return compat_str(upload_date)


def determine_ext(url, default_ext='unknown_video'):
    if url is None:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext
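

# Usage sketch (illustrative values):
#   determine_ext('http://example.com/foo/bar.mp4/?download')  -> 'mp4'
#   determine_ext('http://example.com/play?video=1')           -> 'unknown_video'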


def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format


def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()
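

# Usage sketch (illustrative values):
#   date_from_str('20160101')  -> datetime.date(2016, 1, 1)
#   date_from_str('now-1week') -> today minus seven days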


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res


def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b'GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b'GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True


def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)


# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
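

# Usage sketch (illustrative values): the data survives a round trip through
# the URL fragment:
#   smuggled = smuggle_url('http://example.com/video', {'referer': 'http://example.com/'})
#   unsmuggle_url(smuggled)
# should yield ('http://example.com/video', {'referer': 'http://example.com/'})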


def format_bytes(bytes):
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
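

# Usage sketch (illustrative values):
#   format_bytes(1536)  -> '1.50KiB'
#   format_bytes(None)  -> 'N/A'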


def lookup_unit_table(unit_table, s):
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if not m:
        return None
    num_str = m.group('num').replace(',', '.')
    mult = unit_table[m.group('unit')]
    return int(float(num_str) * mult)


def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'KiB': 1024,
        'KB': 1000,
        'kB': 1024,
        'Kb': 1000,
        'MiB': 1024 ** 2,
        'MB': 1000 ** 2,
        'mB': 1024 ** 2,
        'Mb': 1000 ** 2,
        'GiB': 1024 ** 3,
        'GB': 1000 ** 3,
        'gB': 1024 ** 3,
        'Gb': 1000 ** 3,
        'TiB': 1024 ** 4,
        'TB': 1000 ** 4,
        'tB': 1024 ** 4,
        'Tb': 1000 ** 4,
        'PiB': 1024 ** 5,
        'PB': 1000 ** 5,
        'pB': 1024 ** 5,
        'Pb': 1000 ** 5,
        'EiB': 1024 ** 6,
        'EB': 1000 ** 6,
        'eB': 1024 ** 6,
        'Eb': 1000 ** 6,
        'ZiB': 1024 ** 7,
        'ZB': 1000 ** 7,
        'zB': 1024 ** 7,
        'Zb': 1000 ** 7,
        'YiB': 1024 ** 8,
        'YB': 1000 ** 8,
        'yB': 1024 ** 8,
        'Yb': 1000 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)
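

# Usage sketch (illustrative values): binary and decimal prefixes resolve to
# different byte counts:
#   parse_filesize('1.5GiB') -> 1610612736
#   parse_filesize('1.5GB')  -> 1500000000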


def parse_count(s):
    if s is None:
        return None

    s = s.strip()

    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    _UNIT_TABLE = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'kk': 1000 ** 2,
        'KK': 1000 ** 2,
    }

    return lookup_unit_table(_UNIT_TABLE, s)


def month_by_name(name):
    """ Return the number of a month by (locale-independently) English name """

    try:
        return ENGLISH_MONTH_NAMES.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviations """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)


def setproctitle(title):
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    return s[len(start):] if s is not None and s.startswith(start) else s


def remove_end(s, end):
    return s[:-len(end)] if s is not None and s.endswith(end) else s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s


def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'HEAD'


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except ValueError:
        return default


def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except ValueError:
        return default

def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?$', s)
    if m:
        days, hours, mins, secs, ms = m.groups()
    else:
        m = re.match(
            r'''(?ix)(?:P?T)?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        duration += float(ms)
    return duration
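
# Formats accepted by parse_duration (illustrative):
#     parse_duration('3')         # -> 3.0 (plain seconds)
#     parse_duration('1:30')      # -> 90.0 (mm:ss)
#     parse_duration('01:30:45')  # -> 5445.0 (hh:mm:ss)
#     parse_duration('PT1H30M')   # -> 5400.0 (ISO 8601 style)
#     parse_duration('2 min')     # -> 120.0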

def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)
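
# For example (illustrative):
#     prepend_extension('video.mp4', 'temp')         # -> 'video.temp.mp4'
#     replace_extension('video.mp4', 'mkv')          # -> 'video.mkv'
#     replace_extension('video.mp4', 'mkv', 'webm')  # -> 'video.mp4.mkv'
#     (the real extension differs from the expected one, so it is kept)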

def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized
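
# The default pattern picks the token after the word "version" (illustrative):
#     detect_exe_version('ffmpeg version 2.8.4 Copyright ...')  # -> '2.8.4'
#     detect_exe_version('no recognizable banner')              # -> 'present'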

class PagedList(object):
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())


class OnDemandPagedList(PagedList):
    def __init__(self, pagefunc, pagesize, use_cache=False):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache
        if use_cache:
            self._cache = {}

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = None
            if self._use_cache:
                page_results = self._cache.get(pagenum)
            if page_results is None:
                page_results = list(self._pagefunc(pagenum))
            if self._use_cache:
                self._cache[pagenum] = page_results

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)

            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization: if the current page is not "full", i.e. does
            # not contain page_size videos, then we can assume that this page
            # is the last one - there are no more ids on further pages and
            # no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res


class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    page = page[:only_more]
                    res.extend(page)
                    break
            res.extend(page)
        return res
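
# Usage sketch (illustrative; a page function yielding three ids per page):
#     pages = OnDemandPagedList(lambda n: range(n * 3, (n + 1) * 3), 3)
#     pages.getslice(2, 7)  # -> [2, 3, 4, 5, 6]
# Pages are fetched lazily, so only page numbers 0-2 are ever requested here.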

def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()
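
# Illustrative example: non-ASCII and whitespace characters in the path and
# query are percent-encoded, the host is IDNA-encoded, and reserved URL
# punctuation is left alone:
#     escape_url('http://example.com/a b?x=\u00e9')
#     # -> 'http://example.com/a%20b?x=%C3%A9' (assumed output)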

def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')


def update_url_query(url, query):
    if not query:
        return url
    parsed_url = compat_urlparse.urlparse(url)
    qs = compat_parse_qs(parsed_url.query)
    qs.update(query)
    return compat_urlparse.urlunparse(parsed_url._replace(
        query=compat_urllib_parse_urlencode(qs, True)))
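
# Illustrative: existing parameters are kept and merged with the new ones
# (the resulting parameter order is not guaranteed):
#     update_url_query('http://example.com/path?a=1', {'b': '2'})
#     # -> 'http://example.com/path?a=1&b=2'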

def update_Request(req, url=None, data=None, headers={}, query={}):
    req_headers = req.headers.copy()
    req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req


def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    if isinstance(key_or_keys, (list, tuple)):
        for key in key_or_keys:
            if key not in d or d[key] is None or skip_false_values and not d[key]:
                continue
            return d[key]
        return default
    return d.get(key_or_keys, default)
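
# dict_get returns the first usable value among several candidate keys, e.g.
# (illustrative):
#     dict_get({'a': None, 'b': '', 'c': 'x'}, ('a', 'b', 'c'))  # -> 'x'
#     dict_get({'a': 0}, ('a',), default=-1)                     # -> -1
#     dict_get({'a': 0}, ('a',), default=-1,
#              skip_false_values=False)                          # -> 0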

def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)


US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


def parse_age_limit(s):
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s)
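
# Accepts both a bare age and a US rating label (illustrative):
#     parse_age_limit('18+')    # -> 18
#     parse_age_limit('PG-13')  # -> 13
#     parse_age_limit('X')      # -> None (unknown rating)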

def strip_jsonp(code):
    return re.sub(
        r'(?s)^[a-zA-Z0-9_.]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)


def js_to_json(code):
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v == ',':
            return ""

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])

        INTEGER_TABLE = (
            (r'^0[xX][0-9a-fA-F]+', 16),
            (r'^0+[0-7]+', 8),
        )

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(0), base)
                return '"%d":' % i if v.endswith(':') else '%d' % i

        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        /\*.*?\*/|,(?=\s*[\]}])|
        [a-zA-Z_][.a-zA-Z_0-9]*|
        (?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?|
        [0-9]+(?=\s*:)
        ''', fix_kv, code)
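
# Illustrative round trip from JavaScript-ish input to strict JSON:
#     strip_jsonp('callback({"status": "ok"});')  # -> '{"status": "ok"}'
#     js_to_json("{abc: 'def', n: 0x10}")         # -> '{"abc": "def", "n": 16}'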

def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q
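
# Illustrative: the later an id appears in the list, the higher its quality:
#     q = qualities(['240p', '480p', '720p'])
#     q('720p')   # -> 2
#     q('1080p')  # -> -1 (unknown ids sort below everything)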

DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns True if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_compat_str(err):
    err_str = str(err)
    # On Python 2, error byte strings must be decoded with the proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
    return err_str

def mimetype2ext(mt):
    if mt is None:
        return None

    ext = {
        'audio/mp4': 'm4a',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'srt': 'srt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'vtt': 'vtt',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
    }.get(res, res)


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """

    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)

def determine_protocol(info_dict):
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8'
    elif ext == 'f4m':
        return 'f4m'

    return compat_urllib_parse_urlparse(url).scheme


def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    table = [header_row] + data
    max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
    format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
    return '\n'.join(format_str % tuple(row) for row in table)

def _match_one(filter_part, dct):
    COMPARISON_OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = COMPARISON_OPERATORS[m.group('op')]
        if m.group('strval') is not None:
            if m.group('op') not in ('=', '!='):
                raise ValueError(
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('strval')
        else:
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        actual_value = dct.get(m.group('key'))
        if actual_value is None:
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

    UNARY_OPERATORS = {
        '': lambda v: v is not None,
        '!': lambda v: v is None,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)


def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or False """
    return all(
        _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
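
# The filter syntax combines comparisons with '&' (illustrative):
#     match_str('like_count > 100 & dislike_count <? 50',
#               {'like_count': 190, 'dislike_count': 10})  # -> True
# The '?' suffix makes a comparison pass when the field is missing.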

def match_filter_func(filter_str):
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
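
# Illustrative conversions:
#     parse_dfxp_time_expr('4.5s')        # -> 4.5
#     parse_dfxp_time_expr('00:01:30.5')  # -> 90.5
#     srt_subtitles_timecode(90.5)        # -> '00:01:30,500'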

def dfxp2srt(dfxp_data):
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
        'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1',
    })

    class TTMLPElementParser(object):
        out = ''

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                self.out += '\n'

        def end(self, tag):
            pass

        def data(self, data):
            self.out += data

        def close(self):
            return self.out.strip()

    def parse_node(node):
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)

def cli_option(params, command_option, param):
    param = params.get(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]


def cli_valueless_option(params, command_option, param, expected_value=True):
    param = params.get(param)
    return [command_option] if param == expected_value else []


def cli_configuration_args(params, param, default=[]):
    ex_args = params.get(param)
    if ex_args is None:
        return default
    assert isinstance(ex_args, list)
    return ex_args
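
# Illustrative translations from a params dict to CLI arguments:
#     cli_option({'proxy': 'http://localhost:3128'}, '--proxy', 'proxy')
#     # -> ['--proxy', 'http://localhost:3128']
#     cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate',
#                     'nocheckcertificate')
#     # -> ['--no-check-certificate', 'true']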

class ISO639Utils(object):
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name
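
# Illustrative lookups in both directions:
#     ISO639Utils.short2long('en')   # -> 'eng'
#     ISO639Utils.long2short('fra')  # -> 'fr'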

class ISO3166Utils(object):
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AL': 'Albania',
        'DZ': 'Algeria',
        'AS': 'American Samoa',
        'AD': 'Andorra',
        'AO': 'Angola',
        'AI': 'Anguilla',
        'AQ': 'Antarctica',
        'AG': 'Antigua and Barbuda',
        'AR': 'Argentina',
        'AM': 'Armenia',
        'AW': 'Aruba',
        'AU': 'Australia',
        'AT': 'Austria',
        'AZ': 'Azerbaijan',
        'BS': 'Bahamas',
        'BH': 'Bahrain',
        'BD': 'Bangladesh',
        'BB': 'Barbados',
        'BY': 'Belarus',
        'BE': 'Belgium',
        'BZ': 'Belize',
        'BJ': 'Benin',
        'BM': 'Bermuda',
        'BT': 'Bhutan',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana',
        'BV': 'Bouvet Island',
        'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BG': 'Bulgaria',
        'BF': 'Burkina Faso',
        'BI': 'Burundi',
        'KH': 'Cambodia',
        'CM': 'Cameroon',
        'CA': 'Canada',
        'CV': 'Cape Verde',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'TD': 'Chad',
        'CL': 'Chile',
        'CN': 'China',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CO': 'Colombia',
        'KM': 'Comoros',
        'CG': 'Congo',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CR': 'Costa Rica',
        'CI': 'Côte d\'Ivoire',
        'HR': 'Croatia',
        'CU': 'Cuba',
        'CW': 'Curaçao',
        'CY': 'Cyprus',
        'CZ': 'Czech Republic',
        'DK': 'Denmark',
        'DJ': 'Djibouti',
        'DM': 'Dominica',
        'DO': 'Dominican Republic',
        'EC': 'Ecuador',
        'EG': 'Egypt',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea',
        'EE': 'Estonia',
        'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'FJ': 'Fiji',
        'FI': 'Finland',
        'FR': 'France',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GA': 'Gabon',
        'GM': 'Gambia',
        'GE': 'Georgia',
        'DE': 'Germany',
        'GH': 'Ghana',
        'GI': 'Gibraltar',
        'GR': 'Greece',
        'GL': 'Greenland',
        'GD': 'Grenada',
        'GP': 'Guadeloupe',
        'GU': 'Guam',
        'GT': 'Guatemala',
        'GG': 'Guernsey',
        'GN': 'Guinea',
        'GW': 'Guinea-Bissau',
        'GY': 'Guyana',
        'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'HN': 'Honduras',
        'HK': 'Hong Kong',
        'HU': 'Hungary',
        'IS': 'Iceland',
        'IN': 'India',
        'ID': 'Indonesia',
        'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq',
        'IE': 'Ireland',
        'IM': 'Isle of Man',
        'IL': 'Israel',
        'IT': 'Italy',
        'JM': 'Jamaica',
        'JP': 'Japan',
        'JE': 'Jersey',
        'JO': 'Jordan',
        'KZ': 'Kazakhstan',
        'KE': 'Kenya',
        'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'KW': 'Kuwait',
        'KG': 'Kyrgyzstan',
        'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia',
        'LB': 'Lebanon',
        'LS': 'Lesotho',
        'LR': 'Liberia',
        'LY': 'Libya',
        'LI': 'Liechtenstein',
        'LT': 'Lithuania',
        'LU': 'Luxembourg',
        'MO': 'Macao',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar',
        'MW': 'Malawi',
        'MY': 'Malaysia',
        'MV': 'Maldives',
        'ML': 'Mali',
        'MT': 'Malta',
        'MH': 'Marshall Islands',
        'MQ': 'Martinique',
        'MR': 'Mauritania',
        'MU': 'Mauritius',
        'YT': 'Mayotte',
        'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'MC': 'Monaco',
        'MN': 'Mongolia',
        'ME': 'Montenegro',
        'MS': 'Montserrat',
        'MA': 'Morocco',
        'MZ': 'Mozambique',
        'MM': 'Myanmar',
        'NA': 'Namibia',
        'NR': 'Nauru',
        'NP': 'Nepal',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NI': 'Nicaragua',
        'NE': 'Niger',
        'NG': 'Nigeria',
        'NU': 'Niue',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'NO': 'Norway',
        'OM': 'Oman',
        'PK': 'Pakistan',
        'PW': 'Palau',
        'PS': 'Palestine, State of',
        'PA': 'Panama',
        'PG': 'Papua New Guinea',
        'PY': 'Paraguay',
        'PE': 'Peru',
        'PH': 'Philippines',
        'PN': 'Pitcairn',
        'PL': 'Poland',
        'PT': 'Portugal',
        'PR': 'Puerto Rico',
        'QA': 'Qatar',
        'RE': 'Réunion',
        'RO': 'Romania',
        'RU': 'Russian Federation',
        'RW': 'Rwanda',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'WS': 'Samoa',
        'SM': 'San Marino',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SN': 'Senegal',
        'RS': 'Serbia',
        'SC': 'Seychelles',
        'SL': 'Sierra Leone',
        'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)',
        'SK': 'Slovakia',
        'SI': 'Slovenia',
        'SB': 'Solomon Islands',
        'SO': 'Somalia',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'ES': 'Spain',
        'LK': 'Sri Lanka',
        'SD': 'Sudan',
        'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen',
        'SZ': 'Swaziland',
        'SE': 'Sweden',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of',
        'TH': 'Thailand',
        'TL': 'Timor-Leste',
        'TG': 'Togo',
        'TK': 'Tokelau',
        'TO': 'Tonga',
        'TT': 'Trinidad and Tobago',
        'TN': 'Tunisia',
        'TR': 'Turkey',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'TV': 'Tuvalu',
        'UG': 'Uganda',
        'UA': 'Ukraine',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay',
        'UZ': 'Uzbekistan',
        'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-1 alpha-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())
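
# For example (illustrative):
#     ISO3166Utils.short2full('DE')  # -> 'Germany'
#     ISO3166Utils.short2full('zz')  # -> None (unknown code)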

class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # youtube-dl's http/https handlers do the actual wrapping of the
            # socket with SOCKS
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)

def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameters e and N of the RSA algorithm, both integers
    Output: hex string of the encrypted data

    Limitation: supports encryption of a single block only
    '''
    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted
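
# The plaintext is reversed, read as a big-endian integer, and encrypted as
# pow(payload, e, N), e.g. with toy parameters (illustrative, not a real key):
#     ohdave_rsa_encrypt(b'aa', 3, 33)  # -> '5'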

def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret
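
# Illustrative conversions with the default digit table:
#     encode_base_n(255, 16)  # -> 'ff'
#     encode_base_n(35, 36)   # -> 'z'
#     encode_base_n(0, 2)     # -> '0'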

def decode_packed_codes(code):
    # Decode JavaScript obfuscated with a Dean Edwards style packer:
    # the payload is a template whose base-n tokens index into a symbol table
    mobj = re.search(
        r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)",
        code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)
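
# A minimal sketch of the packed format this unpacks (assumed input; real
# inputs are eval(function(p,a,c,k,e,d){...}(...)) blobs):
#     decode_packed_codes("}('0 1',2,2,'var|x'.split('|')")  # -> 'var x'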