#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

import base64
import binascii
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib

from .compat import (
    compat_HTMLParseError,
    compat_HTMLParser,
    compat_basestring,
    compat_chr,
    compat_ctypes_WINFUNCTYPE,
    compat_etree_fromstring,
    compat_expanduser,
    compat_html_entities,
    compat_html_entities_html5,
    compat_http_client,
    compat_kwargs,
    compat_os_name,
    compat_parse_qs,
    compat_shlex_quote,
    compat_socket_create_connection,
    compat_str,
    compat_struct_pack,
    compat_struct_unpack,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_xpath,
)

from .socks import (
    ProxyType,
    sockssocket,
)
def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None


# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]
def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    value = re.escape(value) if escape_value else value

    retlist = []
    for m in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        res = m.group('content')

        if res.startswith('"') or res.startswith("'"):
            res = res[1:-1]

        retlist.append(unescapeHTML(res))

    return retlist
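# Illustrative usage of the HTML element helpers above (hypothetical input;
# the expected value assumes standard CPython regex behaviour):
#   get_element_by_id('foo', '<div id="foo">bar</div>')  ->  'bar'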
class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""
    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        parser.feed(html_element)
        parser.close()
    # Older Python may throw HTMLParseError in case of malformed HTML
    except compat_HTMLParseError:
        pass
    return parser.attrs
def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
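# Illustrative behaviour of sanitize_filename (expected values assume standard
# CPython string handling):
#   sanitize_filename('New World record at 0:12:34')  ->  'New World record at 0_12_34'
#   sanitize_filename('ä', restricted=True)           ->  'a'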
def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        drive_or_unc, _ = os.path.splitunc(s)
    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)
# Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
# unwanted failures due to missing protocol
def sanitize_url(url):
    return 'http:%s' % url if url.startswith('//') else url


def sanitized_Request(url, *args, **kwargs):
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
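# Illustrative behaviour of unescapeHTML (expected values assume the standard
# HTML entity tables shipped with Python):
#   unescapeHTML('&eacute;')  ->  'é'
#   unescapeHTML('&#x2F;')    ->  '/'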
def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs
def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg
class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    pass


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """
    def __init__(self, msg, countries=None):
        super(GeoRestrictedError, self).__init__(msg, expected=True)
        self.msg = msg
        self.countries = countries


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        super(PostProcessingError, self).__init__(msg)
        self.msg = msg


class MaxDownloadsReached(YoutubeDLError):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected
class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
                'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = compat_socket_create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req
    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response
def extract_timezone(date_str):
    m = re.search(
        r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
        date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass
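# Illustrative behaviour of parse_iso8601 (UTC epoch seconds; assumes standard
# CPython datetime/calendar behaviour):
#   parse_iso8601('2014-03-23T23:04:26+0100')  ->  1395612266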
def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if upload_date is not None:
        return compat_str(upload_date)
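# Illustrative behaviour of unified_strdate (assumes standard CPython strptime
# whitespace handling):
#   unified_strdate('Dec 14, 2012')  ->  '20121214'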
def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
        except ValueError:
            pass
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600
def determine_ext(url, default_ext='unknown_video'):
    if url is None:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext
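# Illustrative behaviour of determine_ext (hypothetical URLs):
#   determine_ext('http://example.com/foo/bar.mp4/?download')  ->  'mp4'
#   determine_ext('foo.m3u8')                                   ->  'm3u8'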
def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()
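# Illustrative behaviour of date_from_str (relative dates are computed against
# datetime.date.today()):
#   date_from_str('20150101')   ->  datetime.date(2015, 1, 1)
#   date_from_str('now-1week')  ->  today minus seven days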
def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str
class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)
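# Illustrative round trip between the two helpers above:
#   bytes_to_intlist(b'abc')        ->  [97, 98, 99]
#   intlist_to_bytes([97, 98, 99])  ->  b'abc'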
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(compat_shlex_quote(a))
    return ' '.join(quoted_args)
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
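# Illustrative round trip (the smuggled payload is carried in the URL fragment):
#   unsmuggle_url(smuggle_url('http://example.com/v', {'a': 1}))
#       ->  ('http://example.com/v', {'a': 1})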
def format_bytes(bytes):
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
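# Illustrative behaviour of format_bytes (binary prefixes, two decimals):
#   format_bytes(None)  ->  'N/A'
#   format_bytes(1536)  ->  '1.50KiB'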
def lookup_unit_table(unit_table, s):
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if not m:
        return None
    num_str = m.group('num').replace(',', '.')
    mult = unit_table[m.group('unit')]
    return int(float(num_str) * mult)
  1286. def parse_filesize(s):
  1287. if s is None:
  1288. return None
  1289. # The lower-case forms are of course incorrect and unofficial,
  1290. # but we support those too
  1291. _UNIT_TABLE = {
  1292. 'B': 1,
  1293. 'b': 1,
  1294. 'bytes': 1,
  1295. 'KiB': 1024,
  1296. 'KB': 1000,
  1297. 'kB': 1024,
  1298. 'Kb': 1000,
  1299. 'kb': 1000,
  1300. 'kilobytes': 1000,
  1301. 'kibibytes': 1024,
  1302. 'MiB': 1024 ** 2,
  1303. 'MB': 1000 ** 2,
  1304. 'mB': 1024 ** 2,
  1305. 'Mb': 1000 ** 2,
  1306. 'mb': 1000 ** 2,
  1307. 'megabytes': 1000 ** 2,
  1308. 'mebibytes': 1024 ** 2,
  1309. 'GiB': 1024 ** 3,
  1310. 'GB': 1000 ** 3,
  1311. 'gB': 1024 ** 3,
  1312. 'Gb': 1000 ** 3,
  1313. 'gb': 1000 ** 3,
  1314. 'gigabytes': 1000 ** 3,
  1315. 'gibibytes': 1024 ** 3,
  1316. 'TiB': 1024 ** 4,
  1317. 'TB': 1000 ** 4,
  1318. 'tB': 1024 ** 4,
  1319. 'Tb': 1000 ** 4,
  1320. 'tb': 1000 ** 4,
  1321. 'terabytes': 1000 ** 4,
  1322. 'tebibytes': 1024 ** 4,
  1323. 'PiB': 1024 ** 5,
  1324. 'PB': 1000 ** 5,
  1325. 'pB': 1024 ** 5,
  1326. 'Pb': 1000 ** 5,
  1327. 'pb': 1000 ** 5,
  1328. 'petabytes': 1000 ** 5,
  1329. 'pebibytes': 1024 ** 5,
  1330. 'EiB': 1024 ** 6,
  1331. 'EB': 1000 ** 6,
  1332. 'eB': 1024 ** 6,
  1333. 'Eb': 1000 ** 6,
  1334. 'eb': 1000 ** 6,
  1335. 'exabytes': 1000 ** 6,
  1336. 'exbibytes': 1024 ** 6,
  1337. 'ZiB': 1024 ** 7,
  1338. 'ZB': 1000 ** 7,
  1339. 'zB': 1024 ** 7,
  1340. 'Zb': 1000 ** 7,
  1341. 'zb': 1000 ** 7,
  1342. 'zettabytes': 1000 ** 7,
  1343. 'zebibytes': 1024 ** 7,
  1344. 'YiB': 1024 ** 8,
  1345. 'YB': 1000 ** 8,
  1346. 'yB': 1024 ** 8,
  1347. 'Yb': 1000 ** 8,
  1348. 'yb': 1000 ** 8,
  1349. 'yottabytes': 1000 ** 8,
  1350. 'yobibytes': 1024 ** 8,
  1351. }
  1352. return lookup_unit_table(_UNIT_TABLE, s)
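# Expected behaviour sketch: decimal and binary suffixes map to different
# multipliers, and unrecognised strings yield None.
#
#     parse_filesize('1.5 MiB')   # -> 1572864
#     parse_filesize('500 KB')    # -> 500000
#     parse_filesize('unknown')   # -> None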
  1353. def parse_count(s):
  1354. if s is None:
  1355. return None
  1356. s = s.strip()
  1357. if re.match(r'^[\d,.]+$', s):
  1358. return str_to_int(s)
  1359. _UNIT_TABLE = {
  1360. 'k': 1000,
  1361. 'K': 1000,
  1362. 'm': 1000 ** 2,
  1363. 'M': 1000 ** 2,
  1364. 'kk': 1000 ** 2,
  1365. 'KK': 1000 ** 2,
  1366. }
  1367. return lookup_unit_table(_UNIT_TABLE, s)
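# Expected behaviour sketch for typical view-count strings:
#
#     parse_count('1,234,567')   # -> 1234567
#     parse_count('15K')         # -> 15000
#     parse_count('3.5m')        # -> 3500000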
  1368. def month_by_name(name, lang='en'):
  1369. """ Return the number of a month by (locale-independently) English name """
  1370. month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
  1371. try:
  1372. return month_names.index(name) + 1
  1373. except ValueError:
  1374. return None
  1375. def month_by_abbreviation(abbrev):
  1376. """ Return the number of a month by (locale-independently) English
  1377. abbreviations """
  1378. try:
  1379. return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
  1380. except ValueError:
  1381. return None
  1382. def fix_xml_ampersands(xml_str):
  1383. """Replace all the '&' by '&amp;' in XML"""
  1384. return re.sub(
  1385. r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
  1386. '&amp;',
  1387. xml_str)
  1388. def setproctitle(title):
  1389. assert isinstance(title, compat_str)
  1390. # ctypes in Jython is not complete
  1391. # http://bugs.jython.org/issue2148
  1392. if sys.platform.startswith('java'):
  1393. return
  1394. try:
  1395. libc = ctypes.cdll.LoadLibrary('libc.so.6')
  1396. except OSError:
  1397. return
  1398. except TypeError:
  1399. # LoadLibrary in Windows Python 2.7.13 only expects
  1400. # a bytestring, but since unicode_literals turns
  1401. # every string into a unicode string, it fails.
  1402. return
  1403. title_bytes = title.encode('utf-8')
  1404. buf = ctypes.create_string_buffer(len(title_bytes))
  1405. buf.value = title_bytes
  1406. try:
  1407. libc.prctl(15, buf, 0, 0, 0)
  1408. except AttributeError:
  1409. return # Strange libc, just skip this
  1410. def remove_start(s, start):
  1411. return s[len(start):] if s is not None and s.startswith(start) else s
  1412. def remove_end(s, end):
  1413. return s[:-len(end)] if s is not None and s.endswith(end) else s
  1414. def remove_quotes(s):
  1415. if s is None or len(s) < 2:
  1416. return s
  1417. for quote in ('"', "'", ):
  1418. if s[0] == quote and s[-1] == quote:
  1419. return s[1:-1]
  1420. return s
  1421. def url_basename(url):
  1422. path = compat_urlparse.urlparse(url).path
  1423. return path.strip('/').split('/')[-1]
  1424. def base_url(url):
  1425. return re.match(r'https?://[^?#&]+/', url).group()
  1426. def urljoin(base, path):
  1427. if isinstance(path, bytes):
  1428. path = path.decode('utf-8')
  1429. if not isinstance(path, compat_str) or not path:
  1430. return None
  1431. if re.match(r'^(?:https?:)?//', path):
  1432. return path
  1433. if isinstance(base, bytes):
  1434. base = base.decode('utf-8')
  1435. if not isinstance(base, compat_str) or not re.match(
  1436. r'^(?:https?:)?//', base):
  1437. return None
  1438. return compat_urlparse.urljoin(base, path)
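# Behaviour sketch (made-up URLs): relative paths are resolved against the
# base, protocol-relative paths are returned unchanged, and an unusable base
# gives None.
#
#     urljoin('http://example.com/a/b.html', 'c.mp4')         # -> 'http://example.com/a/c.mp4'
#     urljoin('http://example.com/', '//cdn.example.com/x')   # -> '//cdn.example.com/x'
#     urljoin(None, '/relative/path')                         # -> None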
  1439. class HEADRequest(compat_urllib_request.Request):
  1440. def get_method(self):
  1441. return 'HEAD'
  1442. class PUTRequest(compat_urllib_request.Request):
  1443. def get_method(self):
  1444. return 'PUT'
  1445. def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
  1446. if get_attr:
  1447. if v is not None:
  1448. v = getattr(v, get_attr, None)
  1449. if v == '':
  1450. v = None
  1451. if v is None:
  1452. return default
  1453. try:
  1454. return int(v) * invscale // scale
  1455. except ValueError:
  1456. return default
  1457. def str_or_none(v, default=None):
  1458. return default if v is None else compat_str(v)
  1459. def str_to_int(int_str):
  1460. """ A more relaxed version of int_or_none """
  1461. if int_str is None:
  1462. return None
  1463. int_str = re.sub(r'[,\.\+]', '', int_str)
  1464. return int(int_str)
  1465. def float_or_none(v, scale=1, invscale=1, default=None):
  1466. if v is None:
  1467. return default
  1468. try:
  1469. return float(v) * invscale / scale
  1470. except ValueError:
  1471. return default
  1472. def bool_or_none(v, default=None):
  1473. return v if isinstance(v, bool) else default
  1474. def strip_or_none(v):
  1475. return None if v is None else v.strip()
  1476. def parse_duration(s):
  1477. if not isinstance(s, compat_basestring):
  1478. return None
  1479. s = s.strip()
  1480. days, hours, mins, secs, ms = [None] * 5
  1481. m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
  1482. if m:
  1483. days, hours, mins, secs, ms = m.groups()
  1484. else:
  1485. m = re.match(
  1486. r'''(?ix)(?:P?
  1487. (?:
  1488. [0-9]+\s*y(?:ears?)?\s*
  1489. )?
  1490. (?:
  1491. [0-9]+\s*m(?:onths?)?\s*
  1492. )?
  1493. (?:
  1494. [0-9]+\s*w(?:eeks?)?\s*
  1495. )?
  1496. (?:
  1497. (?P<days>[0-9]+)\s*d(?:ays?)?\s*
  1498. )?
  1499. T)?
  1500. (?:
  1501. (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
  1502. )?
  1503. (?:
  1504. (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
  1505. )?
  1506. (?:
  1507. (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
  1508. )?Z?$''', s)
  1509. if m:
  1510. days, hours, mins, secs, ms = m.groups()
  1511. else:
  1512. m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
  1513. if m:
  1514. hours, mins = m.groups()
  1515. else:
  1516. return None
  1517. duration = 0
  1518. if secs:
  1519. duration += float(secs)
  1520. if mins:
  1521. duration += float(mins) * 60
  1522. if hours:
  1523. duration += float(hours) * 60 * 60
  1524. if days:
  1525. duration += float(days) * 24 * 60 * 60
  1526. if ms:
  1527. duration += float(ms)
  1528. return duration
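# Behaviour sketch for the three accepted notations (clock-style, ISO 8601 and
# plain-English durations), all returned as seconds (float):
#
#     parse_duration('1:02:30')    # -> 3750.0
#     parse_duration('PT1H30M')    # -> 5400.0
#     parse_duration('2.5 mins')   # -> 150.0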
  1529. def prepend_extension(filename, ext, expected_real_ext=None):
  1530. name, real_ext = os.path.splitext(filename)
  1531. return (
  1532. '{0}.{1}{2}'.format(name, ext, real_ext)
  1533. if not expected_real_ext or real_ext[1:] == expected_real_ext
  1534. else '{0}.{1}'.format(filename, ext))
  1535. def replace_extension(filename, ext, expected_real_ext=None):
  1536. name, real_ext = os.path.splitext(filename)
  1537. return '{0}.{1}'.format(
  1538. name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
  1539. ext)
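# Behaviour sketch: expected_real_ext guards against mangling filenames whose
# actual extension is not the one the caller assumes.
#
#     prepend_extension('video.mp4', 'temp')              # -> 'video.temp.mp4'
#     prepend_extension('video.unknown', 'temp', 'mp4')   # -> 'video.unknown.temp'
#     replace_extension('video.mp4', 'mkv')               # -> 'video.mkv'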
  1540. def check_executable(exe, args=[]):
  1541. """ Checks if the given binary is installed somewhere in PATH, and returns its name.
  1542. args can be a list of arguments for a short output (like -version) """
  1543. try:
  1544. subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
  1545. except OSError:
  1546. return False
  1547. return exe
  1548. def get_exe_version(exe, args=['--version'],
  1549. version_re=None, unrecognized='present'):
  1550. """ Returns the version of the specified executable,
  1551. or False if the executable is not present """
  1552. try:
  1553. # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
  1554. # SIGTTOU if youtube-dl is run in the background.
  1555. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
  1556. out, _ = subprocess.Popen(
  1557. [encodeArgument(exe)] + args,
  1558. stdin=subprocess.PIPE,
  1559. stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
  1560. except OSError:
  1561. return False
  1562. if isinstance(out, bytes): # Python 2.x
  1563. out = out.decode('ascii', 'ignore')
  1564. return detect_exe_version(out, version_re, unrecognized)
  1565. def detect_exe_version(output, version_re=None, unrecognized='present'):
  1566. assert isinstance(output, compat_str)
  1567. if version_re is None:
  1568. version_re = r'version\s+([-0-9._a-zA-Z]+)'
  1569. m = re.search(version_re, output)
  1570. if m:
  1571. return m.group(1)
  1572. else:
  1573. return unrecognized
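# Behaviour sketch with a made-up ffmpeg banner line:
#
#     detect_exe_version('ffmpeg version 2.8.4 Copyright (c) 2000-2015')   # -> '2.8.4'
#     detect_exe_version('no recognisable banner')                         # -> 'present'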
  1574. class PagedList(object):
  1575. def __len__(self):
  1576. # This is only useful for tests
  1577. return len(self.getslice())
  1578. class OnDemandPagedList(PagedList):
  1579. def __init__(self, pagefunc, pagesize, use_cache=True):
  1580. self._pagefunc = pagefunc
  1581. self._pagesize = pagesize
  1582. self._use_cache = use_cache
  1583. if use_cache:
  1584. self._cache = {}
  1585. def getslice(self, start=0, end=None):
  1586. res = []
  1587. for pagenum in itertools.count(start // self._pagesize):
  1588. firstid = pagenum * self._pagesize
  1589. nextfirstid = pagenum * self._pagesize + self._pagesize
  1590. if start >= nextfirstid:
  1591. continue
  1592. page_results = None
  1593. if self._use_cache:
  1594. page_results = self._cache.get(pagenum)
  1595. if page_results is None:
  1596. page_results = list(self._pagefunc(pagenum))
  1597. if self._use_cache:
  1598. self._cache[pagenum] = page_results
  1599. startv = (
  1600. start % self._pagesize
  1601. if firstid <= start < nextfirstid
  1602. else 0)
  1603. endv = (
  1604. ((end - 1) % self._pagesize) + 1
  1605. if (end is not None and firstid <= end <= nextfirstid)
  1606. else None)
  1607. if startv != 0 or endv is not None:
  1608. page_results = page_results[startv:endv]
  1609. res.extend(page_results)
1610. # A little optimization - if the current page is not "full", i.e. does
1611. # not contain page_size videos, then we can assume that this page
1612. # is the last one - there are no more ids on further pages -
1613. # i.e. no need to query again.
  1614. if len(page_results) + startv < self._pagesize:
  1615. break
  1616. # If we got the whole page, but the next page is not interesting,
  1617. # break out early as well
  1618. if end == nextfirstid:
  1619. break
  1620. return res
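# Usage sketch; fetch_page stands in for a real per-page API call. Pages are
# only requested as getslice() needs them (and cached when use_cache is on):
#
#     def fetch_page(pagenum):
#         return range(pagenum * 10, (pagenum + 1) * 10)
#
#     playlist = OnDemandPagedList(fetch_page, 10)
#     playlist.getslice(5, 12)   # -> [5, 6, 7, 8, 9, 10, 11] (only pages 0 and 1 fetched)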
  1621. class InAdvancePagedList(PagedList):
  1622. def __init__(self, pagefunc, pagecount, pagesize):
  1623. self._pagefunc = pagefunc
  1624. self._pagecount = pagecount
  1625. self._pagesize = pagesize
  1626. def getslice(self, start=0, end=None):
  1627. res = []
  1628. start_page = start // self._pagesize
  1629. end_page = (
  1630. self._pagecount if end is None else (end // self._pagesize + 1))
  1631. skip_elems = start - start_page * self._pagesize
  1632. only_more = None if end is None else end - start
  1633. for pagenum in range(start_page, end_page):
  1634. page = list(self._pagefunc(pagenum))
  1635. if skip_elems:
  1636. page = page[skip_elems:]
  1637. skip_elems = None
  1638. if only_more is not None:
  1639. if len(page) < only_more:
  1640. only_more -= len(page)
  1641. else:
  1642. page = page[:only_more]
  1643. res.extend(page)
  1644. break
  1645. res.extend(page)
  1646. return res
  1647. def uppercase_escape(s):
  1648. unicode_escape = codecs.getdecoder('unicode_escape')
  1649. return re.sub(
  1650. r'\\U[0-9a-fA-F]{8}',
  1651. lambda m: unicode_escape(m.group(0))[0],
  1652. s)
  1653. def lowercase_escape(s):
  1654. unicode_escape = codecs.getdecoder('unicode_escape')
  1655. return re.sub(
  1656. r'\\u[0-9a-fA-F]{4}',
  1657. lambda m: unicode_escape(m.group(0))[0],
  1658. s)
  1659. def escape_rfc3986(s):
  1660. """Escape non-ASCII characters as suggested by RFC 3986"""
  1661. if sys.version_info < (3, 0) and isinstance(s, compat_str):
  1662. s = s.encode('utf-8')
  1663. return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
  1664. def escape_url(url):
  1665. """Escape URL as suggested by RFC 3986"""
  1666. url_parsed = compat_urllib_parse_urlparse(url)
  1667. return url_parsed._replace(
  1668. netloc=url_parsed.netloc.encode('idna').decode('ascii'),
  1669. path=escape_rfc3986(url_parsed.path),
  1670. params=escape_rfc3986(url_parsed.params),
  1671. query=escape_rfc3986(url_parsed.query),
  1672. fragment=escape_rfc3986(url_parsed.fragment)
  1673. ).geturl()
  1674. def read_batch_urls(batch_fd):
  1675. def fixup(url):
  1676. if not isinstance(url, compat_str):
  1677. url = url.decode('utf-8', 'replace')
  1678. BOM_UTF8 = '\xef\xbb\xbf'
  1679. if url.startswith(BOM_UTF8):
  1680. url = url[len(BOM_UTF8):]
  1681. url = url.strip()
  1682. if url.startswith(('#', ';', ']')):
  1683. return False
  1684. return url
  1685. with contextlib.closing(batch_fd) as fd:
  1686. return [url for url in map(fixup, fd) if url]
  1687. def urlencode_postdata(*args, **kargs):
  1688. return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
  1689. def update_url_query(url, query):
  1690. if not query:
  1691. return url
  1692. parsed_url = compat_urlparse.urlparse(url)
  1693. qs = compat_parse_qs(parsed_url.query)
  1694. qs.update(query)
  1695. return compat_urlparse.urlunparse(parsed_url._replace(
  1696. query=compat_urllib_parse_urlencode(qs, True)))
  1697. def update_Request(req, url=None, data=None, headers={}, query={}):
  1698. req_headers = req.headers.copy()
  1699. req_headers.update(headers)
  1700. req_data = data or req.data
  1701. req_url = update_url_query(url or req.get_full_url(), query)
  1702. req_get_method = req.get_method()
  1703. if req_get_method == 'HEAD':
  1704. req_type = HEADRequest
  1705. elif req_get_method == 'PUT':
  1706. req_type = PUTRequest
  1707. else:
  1708. req_type = compat_urllib_request.Request
  1709. new_req = req_type(
  1710. req_url, data=req_data, headers=req_headers,
  1711. origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
  1712. if hasattr(req, 'timeout'):
  1713. new_req.timeout = req.timeout
  1714. return new_req
  1715. def _multipart_encode_impl(data, boundary):
  1716. content_type = 'multipart/form-data; boundary=%s' % boundary
  1717. out = b''
  1718. for k, v in data.items():
  1719. out += b'--' + boundary.encode('ascii') + b'\r\n'
  1720. if isinstance(k, compat_str):
  1721. k = k.encode('utf-8')
  1722. if isinstance(v, compat_str):
  1723. v = v.encode('utf-8')
  1724. # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
  1725. # suggests sending UTF-8 directly. Firefox sends UTF-8, too
  1726. content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
  1727. if boundary.encode('ascii') in content:
  1728. raise ValueError('Boundary overlaps with data')
  1729. out += content
  1730. out += b'--' + boundary.encode('ascii') + b'--\r\n'
  1731. return out, content_type
  1732. def multipart_encode(data, boundary=None):
  1733. '''
  1734. Encode a dict to RFC 7578-compliant form-data
  1735. data:
  1736. A dict where keys and values can be either Unicode or bytes-like
  1737. objects.
  1738. boundary:
1739. If specified, a Unicode object to be used as the boundary. Otherwise
1740. a random boundary is generated.
  1741. Reference: https://tools.ietf.org/html/rfc7578
  1742. '''
  1743. has_specified_boundary = boundary is not None
  1744. while True:
  1745. if boundary is None:
  1746. boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
  1747. try:
  1748. out, content_type = _multipart_encode_impl(data, boundary)
  1749. break
  1750. except ValueError:
  1751. if has_specified_boundary:
  1752. raise
  1753. boundary = None
  1754. return out, content_type
  1755. def dict_get(d, key_or_keys, default=None, skip_false_values=True):
  1756. if isinstance(key_or_keys, (list, tuple)):
  1757. for key in key_or_keys:
  1758. if key not in d or d[key] is None or skip_false_values and not d[key]:
  1759. continue
  1760. return d[key]
  1761. return default
  1762. return d.get(key_or_keys, default)
  1763. def try_get(src, getter, expected_type=None):
  1764. if not isinstance(getter, (list, tuple)):
  1765. getter = [getter]
  1766. for get in getter:
  1767. try:
  1768. v = get(src)
  1769. except (AttributeError, KeyError, TypeError, IndexError):
  1770. pass
  1771. else:
  1772. if expected_type is None or isinstance(v, expected_type):
  1773. return v
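# Behaviour sketch (made-up metadata dict): dict_get skips falsy candidates,
# try_get swallows lookup errors instead of raising.
#
#     meta = {'title': '', 'fulltitle': 'Some title', 'stats': None}
#     dict_get(meta, ('title', 'fulltitle'))              # -> 'Some title'
#     try_get(meta, lambda x: x['stats']['views'], int)   # -> None (no TypeError)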
  1774. def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
  1775. return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
  1776. US_RATINGS = {
  1777. 'G': 0,
  1778. 'PG': 10,
  1779. 'PG-13': 13,
  1780. 'R': 16,
  1781. 'NC': 18,
  1782. }
  1783. TV_PARENTAL_GUIDELINES = {
  1784. 'TV-Y': 0,
  1785. 'TV-Y7': 7,
  1786. 'TV-G': 0,
  1787. 'TV-PG': 0,
  1788. 'TV-14': 14,
  1789. 'TV-MA': 17,
  1790. }
  1791. def parse_age_limit(s):
  1792. if type(s) == int:
  1793. return s if 0 <= s <= 21 else None
  1794. if not isinstance(s, compat_basestring):
  1795. return None
  1796. m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
  1797. if m:
  1798. return int(m.group('age'))
  1799. if s in US_RATINGS:
  1800. return US_RATINGS[s]
  1801. return TV_PARENTAL_GUIDELINES.get(s)
  1802. def strip_jsonp(code):
  1803. return re.sub(
  1804. r'''(?sx)^
  1805. (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]+)
  1806. (?:\s*&&\s*(?P=func_name))?
  1807. \s*\(\s*(?P<callback_data>.*)\);?
  1808. \s*?(?://[^\n]*)*$''',
  1809. r'\g<callback_data>', code)
  1810. def js_to_json(code):
  1811. COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
  1812. SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
  1813. INTEGER_TABLE = (
  1814. (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
  1815. (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
  1816. )
  1817. def fix_kv(m):
  1818. v = m.group(0)
  1819. if v in ('true', 'false', 'null'):
  1820. return v
  1821. elif v.startswith('/*') or v.startswith('//') or v == ',':
  1822. return ""
  1823. if v[0] in ("'", '"'):
  1824. v = re.sub(r'(?s)\\.|"', lambda m: {
  1825. '"': '\\"',
  1826. "\\'": "'",
  1827. '\\\n': '',
  1828. '\\x': '\\u00',
  1829. }.get(m.group(0), m.group(0)), v[1:-1])
  1830. for regex, base in INTEGER_TABLE:
  1831. im = re.match(regex, v)
  1832. if im:
  1833. i = int(im.group(1), base)
  1834. return '"%d":' % i if v.endswith(':') else '%d' % i
  1835. return '"%s"' % v
  1836. return re.sub(r'''(?sx)
  1837. "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
  1838. '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
  1839. {comment}|,(?={skip}[\]}}])|
  1840. (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
  1841. \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
  1842. [0-9]+(?={skip}:)
  1843. '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
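# Behaviour sketch: unquoted keys, single quotes, hex literals and trailing
# commas are rewritten so the result is loadable with json.loads():
#
#     js_to_json("{foo: 'bar', count: 0x0A,}")
#     # -> '{"foo": "bar", "count": 10}'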
  1844. def qualities(quality_ids):
  1845. """ Get a numeric quality value out of a list of possible values """
  1846. def q(qid):
  1847. try:
  1848. return quality_ids.index(qid)
  1849. except ValueError:
  1850. return -1
  1851. return q
  1852. DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
  1853. def limit_length(s, length):
  1854. """ Add ellipses to overly long strings """
  1855. if s is None:
  1856. return None
  1857. ELLIPSES = '...'
  1858. if len(s) > length:
  1859. return s[:length - len(ELLIPSES)] + ELLIPSES
  1860. return s
  1861. def version_tuple(v):
  1862. return tuple(int(e) for e in re.split(r'[-.]', v))
  1863. def is_outdated_version(version, limit, assume_new=True):
  1864. if not version:
  1865. return not assume_new
  1866. try:
  1867. return version_tuple(version) < version_tuple(limit)
  1868. except ValueError:
  1869. return not assume_new
  1870. def ytdl_is_updateable():
  1871. """ Returns if youtube-dl can be updated with -U """
  1872. from zipimport import zipimporter
  1873. return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
  1874. def args_to_str(args):
  1875. # Get a short string representation for a subprocess command
  1876. return ' '.join(compat_shlex_quote(a) for a in args)
  1877. def error_to_compat_str(err):
  1878. err_str = str(err)
  1879. # On python 2 error byte string must be decoded with proper
  1880. # encoding rather than ascii
  1881. if sys.version_info[0] < 3:
  1882. err_str = err_str.decode(preferredencoding())
  1883. return err_str
  1884. def mimetype2ext(mt):
  1885. if mt is None:
  1886. return None
  1887. ext = {
  1888. 'audio/mp4': 'm4a',
  1889. # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
  1890. # it's the most popular one
  1891. 'audio/mpeg': 'mp3',
  1892. }.get(mt)
  1893. if ext is not None:
  1894. return ext
  1895. _, _, res = mt.rpartition('/')
  1896. res = res.split(';')[0].strip().lower()
  1897. return {
  1898. '3gpp': '3gp',
  1899. 'smptett+xml': 'tt',
  1900. 'ttaf+xml': 'dfxp',
  1901. 'ttml+xml': 'ttml',
  1902. 'x-flv': 'flv',
  1903. 'x-mp4-fragmented': 'mp4',
  1904. 'x-ms-sami': 'sami',
  1905. 'x-ms-wmv': 'wmv',
  1906. 'mpegurl': 'm3u8',
  1907. 'x-mpegurl': 'm3u8',
  1908. 'vnd.apple.mpegurl': 'm3u8',
  1909. 'dash+xml': 'mpd',
  1910. 'f4m+xml': 'f4m',
  1911. 'hds+xml': 'f4m',
  1912. 'vnd.ms-sstr+xml': 'ism',
  1913. 'quicktime': 'mov',
  1914. 'mp2t': 'ts',
  1915. }.get(res, res)
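# Behaviour sketch: known MIME types map to the preferred extension, anything
# else falls back to the bare subtype.
#
#     mimetype2ext('audio/mp4')                  # -> 'm4a'
#     mimetype2ext('application/x-mpegURL')      # -> 'm3u8'
#     mimetype2ext('text/html; charset=utf-8')   # -> 'html'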
  1916. def parse_codecs(codecs_str):
  1917. # http://tools.ietf.org/html/rfc6381
  1918. if not codecs_str:
  1919. return {}
  1920. splited_codecs = list(filter(None, map(
  1921. lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
  1922. vcodec, acodec = None, None
  1923. for full_codec in splited_codecs:
  1924. codec = full_codec.split('.')[0]
  1925. if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1'):
  1926. if not vcodec:
  1927. vcodec = full_codec
  1928. elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
  1929. if not acodec:
  1930. acodec = full_codec
  1931. else:
  1932. write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
  1933. if not vcodec and not acodec:
  1934. if len(splited_codecs) == 2:
  1935. return {
  1936. 'vcodec': vcodec,
  1937. 'acodec': acodec,
  1938. }
  1939. elif len(splited_codecs) == 1:
  1940. return {
  1941. 'vcodec': 'none',
  1942. 'acodec': vcodec,
  1943. }
  1944. else:
  1945. return {
  1946. 'vcodec': vcodec or 'none',
  1947. 'acodec': acodec or 'none',
  1948. }
  1949. return {}
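# Behaviour sketch for a typical RFC 6381 codecs string:
#
#     parse_codecs('avc1.64001f, mp4a.40.2')
#     # -> {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}
#     parse_codecs('opus')
#     # -> {'vcodec': 'none', 'acodec': 'opus'}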
  1950. def urlhandle_detect_ext(url_handle):
  1951. getheader = url_handle.headers.get
  1952. cd = getheader('Content-Disposition')
  1953. if cd:
  1954. m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
  1955. if m:
  1956. e = determine_ext(m.group('filename'), default_ext=None)
  1957. if e:
  1958. return e
  1959. return mimetype2ext(getheader('Content-Type'))
  1960. def encode_data_uri(data, mime_type):
  1961. return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
  1962. def age_restricted(content_limit, age_limit):
  1963. """ Returns True iff the content should be blocked """
  1964. if age_limit is None: # No limit set
  1965. return False
  1966. if content_limit is None:
  1967. return False # Content available for everyone
  1968. return age_limit < content_limit
  1969. def is_html(first_bytes):
  1970. """ Detect whether a file contains HTML by examining its first bytes. """
  1971. BOMS = [
  1972. (b'\xef\xbb\xbf', 'utf-8'),
  1973. (b'\x00\x00\xfe\xff', 'utf-32-be'),
  1974. (b'\xff\xfe\x00\x00', 'utf-32-le'),
  1975. (b'\xff\xfe', 'utf-16-le'),
  1976. (b'\xfe\xff', 'utf-16-be'),
  1977. ]
  1978. for bom, enc in BOMS:
  1979. if first_bytes.startswith(bom):
  1980. s = first_bytes[len(bom):].decode(enc, 'replace')
  1981. break
  1982. else:
  1983. s = first_bytes.decode('utf-8', 'replace')
  1984. return re.match(r'^\s*<', s)
  1985. def determine_protocol(info_dict):
  1986. protocol = info_dict.get('protocol')
  1987. if protocol is not None:
  1988. return protocol
  1989. url = info_dict['url']
  1990. if url.startswith('rtmp'):
  1991. return 'rtmp'
  1992. elif url.startswith('mms'):
  1993. return 'mms'
  1994. elif url.startswith('rtsp'):
  1995. return 'rtsp'
  1996. ext = determine_ext(url)
  1997. if ext == 'm3u8':
  1998. return 'm3u8'
  1999. elif ext == 'f4m':
  2000. return 'f4m'
  2001. return compat_urllib_parse_urlparse(url).scheme
  2002. def render_table(header_row, data):
  2003. """ Render a list of rows, each as a list of values """
  2004. table = [header_row] + data
  2005. max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
  2006. format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
  2007. return '\n'.join(format_str % tuple(row) for row in table)
  2008. def _match_one(filter_part, dct):
  2009. COMPARISON_OPERATORS = {
  2010. '<': operator.lt,
  2011. '<=': operator.le,
  2012. '>': operator.gt,
  2013. '>=': operator.ge,
  2014. '=': operator.eq,
  2015. '!=': operator.ne,
  2016. }
  2017. operator_rex = re.compile(r'''(?x)\s*
  2018. (?P<key>[a-z_]+)
  2019. \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
  2020. (?:
  2021. (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
  2022. (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
  2023. (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
  2024. )
  2025. \s*$
  2026. ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
  2027. m = operator_rex.search(filter_part)
  2028. if m:
  2029. op = COMPARISON_OPERATORS[m.group('op')]
  2030. actual_value = dct.get(m.group('key'))
  2031. if (m.group('quotedstrval') is not None or
  2032. m.group('strval') is not None or
2033. # If the original field is a string and the matching comparison value is
2034. # a number, we should respect the origin of the original field
  2035. # and process comparison value as a string (see
  2036. # https://github.com/rg3/youtube-dl/issues/11082).
  2037. actual_value is not None and m.group('intval') is not None and
  2038. isinstance(actual_value, compat_str)):
  2039. if m.group('op') not in ('=', '!='):
  2040. raise ValueError(
  2041. 'Operator %s does not support string values!' % m.group('op'))
  2042. comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
  2043. quote = m.group('quote')
  2044. if quote is not None:
  2045. comparison_value = comparison_value.replace(r'\%s' % quote, quote)
  2046. else:
  2047. try:
  2048. comparison_value = int(m.group('intval'))
  2049. except ValueError:
  2050. comparison_value = parse_filesize(m.group('intval'))
  2051. if comparison_value is None:
  2052. comparison_value = parse_filesize(m.group('intval') + 'B')
  2053. if comparison_value is None:
  2054. raise ValueError(
  2055. 'Invalid integer value %r in filter part %r' % (
  2056. m.group('intval'), filter_part))
  2057. if actual_value is None:
  2058. return m.group('none_inclusive')
  2059. return op(actual_value, comparison_value)
  2060. UNARY_OPERATORS = {
  2061. '': lambda v: v is not None,
  2062. '!': lambda v: v is None,
  2063. }
  2064. operator_rex = re.compile(r'''(?x)\s*
  2065. (?P<op>%s)\s*(?P<key>[a-z_]+)
  2066. \s*$
  2067. ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
  2068. m = operator_rex.search(filter_part)
  2069. if m:
  2070. op = UNARY_OPERATORS[m.group('op')]
  2071. actual_value = dct.get(m.group('key'))
  2072. return op(actual_value)
  2073. raise ValueError('Invalid filter part %r' % filter_part)
  2074. def match_str(filter_str, dct):
  2075. """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
  2076. return all(
  2077. _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
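# Behaviour sketch (made-up info dict): '&' joins conditions and a trailing
# '?' makes a comparison pass when the field is missing or None.
#
#     video = {'like_count': 190, 'dislike_count': None}
#     match_str('like_count > 100 & dislike_count <? 50', video)   # -> True
#     match_str('like_count > 1000', video)                        # -> False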
  2078. def match_filter_func(filter_str):
  2079. def _match_func(info_dict):
  2080. if match_str(filter_str, info_dict):
  2081. return None
  2082. else:
  2083. video_title = info_dict.get('title', info_dict.get('id', 'video'))
  2084. return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
  2085. return _match_func
  2086. def parse_dfxp_time_expr(time_expr):
  2087. if not time_expr:
  2088. return
  2089. mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
  2090. if mobj:
  2091. return float(mobj.group('time_offset'))
  2092. mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
  2093. if mobj:
  2094. return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
  2095. def srt_subtitles_timecode(seconds):
  2096. return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
  2097. def dfxp2srt(dfxp_data):
  2098. '''
  2099. @param dfxp_data A bytes-like object containing DFXP data
  2100. @returns A unicode object containing converted SRT data
  2101. '''
  2102. LEGACY_NAMESPACES = (
  2103. (b'http://www.w3.org/ns/ttml', [
  2104. b'http://www.w3.org/2004/11/ttaf1',
  2105. b'http://www.w3.org/2006/04/ttaf1',
  2106. b'http://www.w3.org/2006/10/ttaf1',
  2107. ]),
  2108. (b'http://www.w3.org/ns/ttml#styling', [
  2109. b'http://www.w3.org/ns/ttml#style',
  2110. ]),
  2111. )
  2112. SUPPORTED_STYLING = [
  2113. 'color',
  2114. 'fontFamily',
  2115. 'fontSize',
  2116. 'fontStyle',
  2117. 'fontWeight',
  2118. 'textDecoration'
  2119. ]
  2120. _x = functools.partial(xpath_with_ns, ns_map={
  2121. 'ttml': 'http://www.w3.org/ns/ttml',
  2122. 'tts': 'http://www.w3.org/ns/ttml#styling',
  2123. })
  2124. styles = {}
  2125. default_style = {}
  2126. class TTMLPElementParser(object):
  2127. _out = ''
  2128. _unclosed_elements = []
  2129. _applied_styles = []
  2130. def start(self, tag, attrib):
  2131. if tag in (_x('ttml:br'), 'br'):
  2132. self._out += '\n'
  2133. else:
  2134. unclosed_elements = []
  2135. style = {}
  2136. element_style_id = attrib.get('style')
  2137. if default_style:
  2138. style.update(default_style)
  2139. if element_style_id:
  2140. style.update(styles.get(element_style_id, {}))
  2141. for prop in SUPPORTED_STYLING:
  2142. prop_val = attrib.get(_x('tts:' + prop))
  2143. if prop_val:
  2144. style[prop] = prop_val
  2145. if style:
  2146. font = ''
  2147. for k, v in sorted(style.items()):
  2148. if self._applied_styles and self._applied_styles[-1].get(k) == v:
  2149. continue
  2150. if k == 'color':
  2151. font += ' color="%s"' % v
  2152. elif k == 'fontSize':
  2153. font += ' size="%s"' % v
  2154. elif k == 'fontFamily':
  2155. font += ' face="%s"' % v
  2156. elif k == 'fontWeight' and v == 'bold':
  2157. self._out += '<b>'
  2158. unclosed_elements.append('b')
  2159. elif k == 'fontStyle' and v == 'italic':
  2160. self._out += '<i>'
  2161. unclosed_elements.append('i')
  2162. elif k == 'textDecoration' and v == 'underline':
  2163. self._out += '<u>'
  2164. unclosed_elements.append('u')
  2165. if font:
  2166. self._out += '<font' + font + '>'
  2167. unclosed_elements.append('font')
  2168. applied_style = {}
  2169. if self._applied_styles:
  2170. applied_style.update(self._applied_styles[-1])
  2171. applied_style.update(style)
  2172. self._applied_styles.append(applied_style)
  2173. self._unclosed_elements.append(unclosed_elements)
  2174. def end(self, tag):
  2175. if tag not in (_x('ttml:br'), 'br'):
  2176. unclosed_elements = self._unclosed_elements.pop()
  2177. for element in reversed(unclosed_elements):
  2178. self._out += '</%s>' % element
  2179. if unclosed_elements and self._applied_styles:
  2180. self._applied_styles.pop()
  2181. def data(self, data):
  2182. self._out += data
  2183. def close(self):
  2184. return self._out.strip()
  2185. def parse_node(node):
  2186. target = TTMLPElementParser()
  2187. parser = xml.etree.ElementTree.XMLParser(target=target)
  2188. parser.feed(xml.etree.ElementTree.tostring(node))
  2189. return parser.close()
  2190. for k, v in LEGACY_NAMESPACES:
  2191. for ns in v:
  2192. dfxp_data = dfxp_data.replace(ns, k)
  2193. dfxp = compat_etree_fromstring(dfxp_data)
  2194. out = []
  2195. paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
  2196. if not paras:
  2197. raise ValueError('Invalid dfxp/TTML subtitle')
  2198. repeat = False
  2199. while True:
  2200. for style in dfxp.findall(_x('.//ttml:style')):
  2201. style_id = style.get('id')
  2202. parent_style_id = style.get('style')
  2203. if parent_style_id:
  2204. if parent_style_id not in styles:
  2205. repeat = True
  2206. continue
  2207. styles[style_id] = styles[parent_style_id].copy()
  2208. for prop in SUPPORTED_STYLING:
  2209. prop_val = style.get(_x('tts:' + prop))
  2210. if prop_val:
  2211. styles.setdefault(style_id, {})[prop] = prop_val
  2212. if repeat:
  2213. repeat = False
  2214. else:
  2215. break
  2216. for p in ('body', 'div'):
  2217. ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
  2218. if ele is None:
  2219. continue
  2220. style = styles.get(ele.get('style'))
  2221. if not style:
  2222. continue
  2223. default_style.update(style)
  2224. for para, index in zip(paras, itertools.count(1)):
  2225. begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
  2226. end_time = parse_dfxp_time_expr(para.attrib.get('end'))
  2227. dur = parse_dfxp_time_expr(para.attrib.get('dur'))
  2228. if begin_time is None:
  2229. continue
  2230. if not end_time:
  2231. if not dur:
  2232. continue
  2233. end_time = begin_time + dur
  2234. out.append('%d\n%s --> %s\n%s\n\n' % (
  2235. index,
  2236. srt_subtitles_timecode(begin_time),
  2237. srt_subtitles_timecode(end_time),
  2238. parse_node(para)))
  2239. return ''.join(out)
  2240. def cli_option(params, command_option, param):
  2241. param = params.get(param)
  2242. if param:
  2243. param = compat_str(param)
  2244. return [command_option, param] if param is not None else []
  2245. def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
  2246. param = params.get(param)
  2247. if param is None:
  2248. return []
  2249. assert isinstance(param, bool)
  2250. if separator:
  2251. return [command_option + separator + (true_value if param else false_value)]
  2252. return [command_option, true_value if param else false_value]
  2253. def cli_valueless_option(params, command_option, param, expected_value=True):
  2254. param = params.get(param)
  2255. return [command_option] if param == expected_value else []
  2256. def cli_configuration_args(params, param, default=[]):
  2257. ex_args = params.get(param)
  2258. if ex_args is None:
  2259. return default
  2260. assert isinstance(ex_args, list)
  2261. return ex_args
  2262. class ISO639Utils(object):
  2263. # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
  2264. _lang_map = {
  2265. 'aa': 'aar',
  2266. 'ab': 'abk',
  2267. 'ae': 'ave',
  2268. 'af': 'afr',
  2269. 'ak': 'aka',
  2270. 'am': 'amh',
  2271. 'an': 'arg',
  2272. 'ar': 'ara',
  2273. 'as': 'asm',
  2274. 'av': 'ava',
  2275. 'ay': 'aym',
  2276. 'az': 'aze',
  2277. 'ba': 'bak',
  2278. 'be': 'bel',
  2279. 'bg': 'bul',
  2280. 'bh': 'bih',
  2281. 'bi': 'bis',
  2282. 'bm': 'bam',
  2283. 'bn': 'ben',
  2284. 'bo': 'bod',
  2285. 'br': 'bre',
  2286. 'bs': 'bos',
  2287. 'ca': 'cat',
  2288. 'ce': 'che',
  2289. 'ch': 'cha',
  2290. 'co': 'cos',
  2291. 'cr': 'cre',
  2292. 'cs': 'ces',
  2293. 'cu': 'chu',
  2294. 'cv': 'chv',
  2295. 'cy': 'cym',
  2296. 'da': 'dan',
  2297. 'de': 'deu',
  2298. 'dv': 'div',
  2299. 'dz': 'dzo',
  2300. 'ee': 'ewe',
  2301. 'el': 'ell',
  2302. 'en': 'eng',
  2303. 'eo': 'epo',
  2304. 'es': 'spa',
  2305. 'et': 'est',
  2306. 'eu': 'eus',
  2307. 'fa': 'fas',
  2308. 'ff': 'ful',
  2309. 'fi': 'fin',
  2310. 'fj': 'fij',
  2311. 'fo': 'fao',
  2312. 'fr': 'fra',
  2313. 'fy': 'fry',
  2314. 'ga': 'gle',
  2315. 'gd': 'gla',
  2316. 'gl': 'glg',
  2317. 'gn': 'grn',
  2318. 'gu': 'guj',
  2319. 'gv': 'glv',
  2320. 'ha': 'hau',
  2321. 'he': 'heb',
  2322. 'hi': 'hin',
  2323. 'ho': 'hmo',
  2324. 'hr': 'hrv',
  2325. 'ht': 'hat',
  2326. 'hu': 'hun',
  2327. 'hy': 'hye',
  2328. 'hz': 'her',
  2329. 'ia': 'ina',
  2330. 'id': 'ind',
  2331. 'ie': 'ile',
  2332. 'ig': 'ibo',
  2333. 'ii': 'iii',
  2334. 'ik': 'ipk',
  2335. 'io': 'ido',
  2336. 'is': 'isl',
  2337. 'it': 'ita',
  2338. 'iu': 'iku',
  2339. 'ja': 'jpn',
  2340. 'jv': 'jav',
  2341. 'ka': 'kat',
  2342. 'kg': 'kon',
  2343. 'ki': 'kik',
  2344. 'kj': 'kua',
  2345. 'kk': 'kaz',
  2346. 'kl': 'kal',
  2347. 'km': 'khm',
  2348. 'kn': 'kan',
  2349. 'ko': 'kor',
  2350. 'kr': 'kau',
  2351. 'ks': 'kas',
  2352. 'ku': 'kur',
  2353. 'kv': 'kom',
  2354. 'kw': 'cor',
  2355. 'ky': 'kir',
  2356. 'la': 'lat',
  2357. 'lb': 'ltz',
  2358. 'lg': 'lug',
  2359. 'li': 'lim',
  2360. 'ln': 'lin',
  2361. 'lo': 'lao',
  2362. 'lt': 'lit',
  2363. 'lu': 'lub',
  2364. 'lv': 'lav',
  2365. 'mg': 'mlg',
  2366. 'mh': 'mah',
  2367. 'mi': 'mri',
  2368. 'mk': 'mkd',
  2369. 'ml': 'mal',
  2370. 'mn': 'mon',
  2371. 'mr': 'mar',
  2372. 'ms': 'msa',
  2373. 'mt': 'mlt',
  2374. 'my': 'mya',
  2375. 'na': 'nau',
  2376. 'nb': 'nob',
  2377. 'nd': 'nde',
  2378. 'ne': 'nep',
  2379. 'ng': 'ndo',
  2380. 'nl': 'nld',
  2381. 'nn': 'nno',
  2382. 'no': 'nor',
  2383. 'nr': 'nbl',
  2384. 'nv': 'nav',
  2385. 'ny': 'nya',
  2386. 'oc': 'oci',
  2387. 'oj': 'oji',
  2388. 'om': 'orm',
  2389. 'or': 'ori',
  2390. 'os': 'oss',
  2391. 'pa': 'pan',
  2392. 'pi': 'pli',
  2393. 'pl': 'pol',
  2394. 'ps': 'pus',
  2395. 'pt': 'por',
  2396. 'qu': 'que',
  2397. 'rm': 'roh',
  2398. 'rn': 'run',
  2399. 'ro': 'ron',
  2400. 'ru': 'rus',
  2401. 'rw': 'kin',
  2402. 'sa': 'san',
  2403. 'sc': 'srd',
  2404. 'sd': 'snd',
  2405. 'se': 'sme',
  2406. 'sg': 'sag',
  2407. 'si': 'sin',
  2408. 'sk': 'slk',
  2409. 'sl': 'slv',
  2410. 'sm': 'smo',
  2411. 'sn': 'sna',
  2412. 'so': 'som',
  2413. 'sq': 'sqi',
  2414. 'sr': 'srp',
  2415. 'ss': 'ssw',
  2416. 'st': 'sot',
  2417. 'su': 'sun',
  2418. 'sv': 'swe',
  2419. 'sw': 'swa',
  2420. 'ta': 'tam',
  2421. 'te': 'tel',
  2422. 'tg': 'tgk',
  2423. 'th': 'tha',
  2424. 'ti': 'tir',
  2425. 'tk': 'tuk',
  2426. 'tl': 'tgl',
  2427. 'tn': 'tsn',
  2428. 'to': 'ton',
  2429. 'tr': 'tur',
  2430. 'ts': 'tso',
  2431. 'tt': 'tat',
  2432. 'tw': 'twi',
  2433. 'ty': 'tah',
  2434. 'ug': 'uig',
  2435. 'uk': 'ukr',
  2436. 'ur': 'urd',
  2437. 'uz': 'uzb',
  2438. 've': 'ven',
  2439. 'vi': 'vie',
  2440. 'vo': 'vol',
  2441. 'wa': 'wln',
  2442. 'wo': 'wol',
  2443. 'xh': 'xho',
  2444. 'yi': 'yid',
  2445. 'yo': 'yor',
  2446. 'za': 'zha',
  2447. 'zh': 'zho',
  2448. 'zu': 'zul',
  2449. }
  2450. @classmethod
  2451. def short2long(cls, code):
  2452. """Convert language code from ISO 639-1 to ISO 639-2/T"""
  2453. return cls._lang_map.get(code[:2])
  2454. @classmethod
  2455. def long2short(cls, code):
  2456. """Convert language code from ISO 639-2/T to ISO 639-1"""
  2457. for short_name, long_name in cls._lang_map.items():
  2458. if long_name == code:
  2459. return short_name
  2460. class ISO3166Utils(object):
  2461. # From http://data.okfn.org/data/core/country-list
  2462. _country_map = {
  2463. 'AF': 'Afghanistan',
  2464. 'AX': 'Åland Islands',
  2465. 'AL': 'Albania',
  2466. 'DZ': 'Algeria',
  2467. 'AS': 'American Samoa',
  2468. 'AD': 'Andorra',
  2469. 'AO': 'Angola',
  2470. 'AI': 'Anguilla',
  2471. 'AQ': 'Antarctica',
  2472. 'AG': 'Antigua and Barbuda',
  2473. 'AR': 'Argentina',
  2474. 'AM': 'Armenia',
  2475. 'AW': 'Aruba',
  2476. 'AU': 'Australia',
  2477. 'AT': 'Austria',
  2478. 'AZ': 'Azerbaijan',
  2479. 'BS': 'Bahamas',
  2480. 'BH': 'Bahrain',
  2481. 'BD': 'Bangladesh',
  2482. 'BB': 'Barbados',
  2483. 'BY': 'Belarus',
  2484. 'BE': 'Belgium',
  2485. 'BZ': 'Belize',
  2486. 'BJ': 'Benin',
  2487. 'BM': 'Bermuda',
  2488. 'BT': 'Bhutan',
  2489. 'BO': 'Bolivia, Plurinational State of',
  2490. 'BQ': 'Bonaire, Sint Eustatius and Saba',
  2491. 'BA': 'Bosnia and Herzegovina',
  2492. 'BW': 'Botswana',
  2493. 'BV': 'Bouvet Island',
  2494. 'BR': 'Brazil',
  2495. 'IO': 'British Indian Ocean Territory',
  2496. 'BN': 'Brunei Darussalam',
  2497. 'BG': 'Bulgaria',
  2498. 'BF': 'Burkina Faso',
  2499. 'BI': 'Burundi',
  2500. 'KH': 'Cambodia',
  2501. 'CM': 'Cameroon',
  2502. 'CA': 'Canada',
  2503. 'CV': 'Cape Verde',
  2504. 'KY': 'Cayman Islands',
  2505. 'CF': 'Central African Republic',
  2506. 'TD': 'Chad',
  2507. 'CL': 'Chile',
  2508. 'CN': 'China',
  2509. 'CX': 'Christmas Island',
  2510. 'CC': 'Cocos (Keeling) Islands',
  2511. 'CO': 'Colombia',
  2512. 'KM': 'Comoros',
  2513. 'CG': 'Congo',
  2514. 'CD': 'Congo, the Democratic Republic of the',
  2515. 'CK': 'Cook Islands',
  2516. 'CR': 'Costa Rica',
  2517. 'CI': 'Côte d\'Ivoire',
  2518. 'HR': 'Croatia',
  2519. 'CU': 'Cuba',
  2520. 'CW': 'Curaçao',
  2521. 'CY': 'Cyprus',
  2522. 'CZ': 'Czech Republic',
  2523. 'DK': 'Denmark',
  2524. 'DJ': 'Djibouti',
  2525. 'DM': 'Dominica',
  2526. 'DO': 'Dominican Republic',
  2527. 'EC': 'Ecuador',
  2528. 'EG': 'Egypt',
  2529. 'SV': 'El Salvador',
  2530. 'GQ': 'Equatorial Guinea',
  2531. 'ER': 'Eritrea',
  2532. 'EE': 'Estonia',
  2533. 'ET': 'Ethiopia',
  2534. 'FK': 'Falkland Islands (Malvinas)',
  2535. 'FO': 'Faroe Islands',
  2536. 'FJ': 'Fiji',
  2537. 'FI': 'Finland',
  2538. 'FR': 'France',
  2539. 'GF': 'French Guiana',
  2540. 'PF': 'French Polynesia',
  2541. 'TF': 'French Southern Territories',
  2542. 'GA': 'Gabon',
  2543. 'GM': 'Gambia',
  2544. 'GE': 'Georgia',
  2545. 'DE': 'Germany',
  2546. 'GH': 'Ghana',
  2547. 'GI': 'Gibraltar',
  2548. 'GR': 'Greece',
  2549. 'GL': 'Greenland',
  2550. 'GD': 'Grenada',
  2551. 'GP': 'Guadeloupe',
  2552. 'GU': 'Guam',
  2553. 'GT': 'Guatemala',
  2554. 'GG': 'Guernsey',
  2555. 'GN': 'Guinea',
  2556. 'GW': 'Guinea-Bissau',
  2557. 'GY': 'Guyana',
  2558. 'HT': 'Haiti',
  2559. 'HM': 'Heard Island and McDonald Islands',
  2560. 'VA': 'Holy See (Vatican City State)',
  2561. 'HN': 'Honduras',
  2562. 'HK': 'Hong Kong',
  2563. 'HU': 'Hungary',
  2564. 'IS': 'Iceland',
  2565. 'IN': 'India',
  2566. 'ID': 'Indonesia',
  2567. 'IR': 'Iran, Islamic Republic of',
  2568. 'IQ': 'Iraq',
  2569. 'IE': 'Ireland',
  2570. 'IM': 'Isle of Man',
  2571. 'IL': 'Israel',
  2572. 'IT': 'Italy',
  2573. 'JM': 'Jamaica',
  2574. 'JP': 'Japan',
  2575. 'JE': 'Jersey',
  2576. 'JO': 'Jordan',
  2577. 'KZ': 'Kazakhstan',
  2578. 'KE': 'Kenya',
  2579. 'KI': 'Kiribati',
  2580. 'KP': 'Korea, Democratic People\'s Republic of',
  2581. 'KR': 'Korea, Republic of',
  2582. 'KW': 'Kuwait',
  2583. 'KG': 'Kyrgyzstan',
  2584. 'LA': 'Lao People\'s Democratic Republic',
  2585. 'LV': 'Latvia',
  2586. 'LB': 'Lebanon',
  2587. 'LS': 'Lesotho',
  2588. 'LR': 'Liberia',
  2589. 'LY': 'Libya',
  2590. 'LI': 'Liechtenstein',
  2591. 'LT': 'Lithuania',
  2592. 'LU': 'Luxembourg',
  2593. 'MO': 'Macao',
  2594. 'MK': 'Macedonia, the Former Yugoslav Republic of',
  2595. 'MG': 'Madagascar',
  2596. 'MW': 'Malawi',
  2597. 'MY': 'Malaysia',
  2598. 'MV': 'Maldives',
  2599. 'ML': 'Mali',
  2600. 'MT': 'Malta',
  2601. 'MH': 'Marshall Islands',
  2602. 'MQ': 'Martinique',
  2603. 'MR': 'Mauritania',
  2604. 'MU': 'Mauritius',
  2605. 'YT': 'Mayotte',
  2606. 'MX': 'Mexico',
  2607. 'FM': 'Micronesia, Federated States of',
  2608. 'MD': 'Moldova, Republic of',
  2609. 'MC': 'Monaco',
  2610. 'MN': 'Mongolia',
  2611. 'ME': 'Montenegro',
  2612. 'MS': 'Montserrat',
  2613. 'MA': 'Morocco',
  2614. 'MZ': 'Mozambique',
  2615. 'MM': 'Myanmar',
  2616. 'NA': 'Namibia',
  2617. 'NR': 'Nauru',
  2618. 'NP': 'Nepal',
  2619. 'NL': 'Netherlands',
  2620. 'NC': 'New Caledonia',
  2621. 'NZ': 'New Zealand',
  2622. 'NI': 'Nicaragua',
  2623. 'NE': 'Niger',
  2624. 'NG': 'Nigeria',
  2625. 'NU': 'Niue',
  2626. 'NF': 'Norfolk Island',
  2627. 'MP': 'Northern Mariana Islands',
  2628. 'NO': 'Norway',
  2629. 'OM': 'Oman',
  2630. 'PK': 'Pakistan',
  2631. 'PW': 'Palau',
  2632. 'PS': 'Palestine, State of',
  2633. 'PA': 'Panama',
  2634. 'PG': 'Papua New Guinea',
  2635. 'PY': 'Paraguay',
  2636. 'PE': 'Peru',
  2637. 'PH': 'Philippines',
  2638. 'PN': 'Pitcairn',
  2639. 'PL': 'Poland',
  2640. 'PT': 'Portugal',
  2641. 'PR': 'Puerto Rico',
  2642. 'QA': 'Qatar',
  2643. 'RE': 'Réunion',
  2644. 'RO': 'Romania',
  2645. 'RU': 'Russian Federation',
  2646. 'RW': 'Rwanda',
  2647. 'BL': 'Saint Barthélemy',
  2648. 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
  2649. 'KN': 'Saint Kitts and Nevis',
  2650. 'LC': 'Saint Lucia',
  2651. 'MF': 'Saint Martin (French part)',
  2652. 'PM': 'Saint Pierre and Miquelon',
  2653. 'VC': 'Saint Vincent and the Grenadines',
  2654. 'WS': 'Samoa',
  2655. 'SM': 'San Marino',
  2656. 'ST': 'Sao Tome and Principe',
  2657. 'SA': 'Saudi Arabia',
  2658. 'SN': 'Senegal',
  2659. 'RS': 'Serbia',
  2660. 'SC': 'Seychelles',
  2661. 'SL': 'Sierra Leone',
  2662. 'SG': 'Singapore',
  2663. 'SX': 'Sint Maarten (Dutch part)',
  2664. 'SK': 'Slovakia',
  2665. 'SI': 'Slovenia',
  2666. 'SB': 'Solomon Islands',
  2667. 'SO': 'Somalia',
  2668. 'ZA': 'South Africa',
  2669. 'GS': 'South Georgia and the South Sandwich Islands',
  2670. 'SS': 'South Sudan',
  2671. 'ES': 'Spain',
  2672. 'LK': 'Sri Lanka',
  2673. 'SD': 'Sudan',
  2674. 'SR': 'Suriname',
  2675. 'SJ': 'Svalbard and Jan Mayen',
  2676. 'SZ': 'Swaziland',
  2677. 'SE': 'Sweden',
  2678. 'CH': 'Switzerland',
  2679. 'SY': 'Syrian Arab Republic',
  2680. 'TW': 'Taiwan, Province of China',
  2681. 'TJ': 'Tajikistan',
  2682. 'TZ': 'Tanzania, United Republic of',
  2683. 'TH': 'Thailand',
  2684. 'TL': 'Timor-Leste',
  2685. 'TG': 'Togo',
  2686. 'TK': 'Tokelau',
  2687. 'TO': 'Tonga',
  2688. 'TT': 'Trinidad and Tobago',
  2689. 'TN': 'Tunisia',
  2690. 'TR': 'Turkey',
  2691. 'TM': 'Turkmenistan',
  2692. 'TC': 'Turks and Caicos Islands',
  2693. 'TV': 'Tuvalu',
  2694. 'UG': 'Uganda',
  2695. 'UA': 'Ukraine',
  2696. 'AE': 'United Arab Emirates',
  2697. 'GB': 'United Kingdom',
  2698. 'US': 'United States',
  2699. 'UM': 'United States Minor Outlying Islands',
  2700. 'UY': 'Uruguay',
  2701. 'UZ': 'Uzbekistan',
  2702. 'VU': 'Vanuatu',
  2703. 'VE': 'Venezuela, Bolivarian Republic of',
  2704. 'VN': 'Viet Nam',
  2705. 'VG': 'Virgin Islands, British',
  2706. 'VI': 'Virgin Islands, U.S.',
  2707. 'WF': 'Wallis and Futuna',
  2708. 'EH': 'Western Sahara',
  2709. 'YE': 'Yemen',
  2710. 'ZM': 'Zambia',
  2711. 'ZW': 'Zimbabwe',
  2712. }
  2713. @classmethod
  2714. def short2full(cls, code):
  2715. """Convert an ISO 3166-2 country code to the corresponding full name"""
  2716. return cls._country_map.get(code.upper())
  2717. class GeoUtils(object):
  2718. # Major IPv4 address blocks per country
  2719. _country_ip_map = {
  2720. 'AD': '85.94.160.0/19',
  2721. 'AE': '94.200.0.0/13',
  2722. 'AF': '149.54.0.0/17',
  2723. 'AG': '209.59.64.0/18',
  2724. 'AI': '204.14.248.0/21',
  2725. 'AL': '46.99.0.0/16',
  2726. 'AM': '46.70.0.0/15',
  2727. 'AO': '105.168.0.0/13',
  2728. 'AP': '159.117.192.0/21',
  2729. 'AR': '181.0.0.0/12',
  2730. 'AS': '202.70.112.0/20',
  2731. 'AT': '84.112.0.0/13',
  2732. 'AU': '1.128.0.0/11',
  2733. 'AW': '181.41.0.0/18',
  2734. 'AZ': '5.191.0.0/16',
  2735. 'BA': '31.176.128.0/17',
  2736. 'BB': '65.48.128.0/17',
  2737. 'BD': '114.130.0.0/16',
  2738. 'BE': '57.0.0.0/8',
  2739. 'BF': '129.45.128.0/17',
  2740. 'BG': '95.42.0.0/15',
  2741. 'BH': '37.131.0.0/17',
  2742. 'BI': '154.117.192.0/18',
  2743. 'BJ': '137.255.0.0/16',
  2744. 'BL': '192.131.134.0/24',
  2745. 'BM': '196.12.64.0/18',
  2746. 'BN': '156.31.0.0/16',
  2747. 'BO': '161.56.0.0/16',
  2748. 'BQ': '161.0.80.0/20',
  2749. 'BR': '152.240.0.0/12',
  2750. 'BS': '24.51.64.0/18',
  2751. 'BT': '119.2.96.0/19',
  2752. 'BW': '168.167.0.0/16',
  2753. 'BY': '178.120.0.0/13',
  2754. 'BZ': '179.42.192.0/18',
  2755. 'CA': '99.224.0.0/11',
  2756. 'CD': '41.243.0.0/16',
  2757. 'CF': '196.32.200.0/21',
  2758. 'CG': '197.214.128.0/17',
  2759. 'CH': '85.0.0.0/13',
  2760. 'CI': '154.232.0.0/14',
  2761. 'CK': '202.65.32.0/19',
  2762. 'CL': '152.172.0.0/14',
  2763. 'CM': '165.210.0.0/15',
  2764. 'CN': '36.128.0.0/10',
  2765. 'CO': '181.240.0.0/12',
  2766. 'CR': '201.192.0.0/12',
  2767. 'CU': '152.206.0.0/15',
  2768. 'CV': '165.90.96.0/19',
  2769. 'CW': '190.88.128.0/17',
  2770. 'CY': '46.198.0.0/15',
  2771. 'CZ': '88.100.0.0/14',
  2772. 'DE': '53.0.0.0/8',
  2773. 'DJ': '197.241.0.0/17',
  2774. 'DK': '87.48.0.0/12',
  2775. 'DM': '192.243.48.0/20',
  2776. 'DO': '152.166.0.0/15',
  2777. 'DZ': '41.96.0.0/12',
  2778. 'EC': '186.68.0.0/15',
  2779. 'EE': '90.190.0.0/15',
  2780. 'EG': '156.160.0.0/11',
  2781. 'ER': '196.200.96.0/20',
  2782. 'ES': '88.0.0.0/11',
  2783. 'ET': '196.188.0.0/14',
  2784. 'EU': '2.16.0.0/13',
  2785. 'FI': '91.152.0.0/13',
  2786. 'FJ': '144.120.0.0/16',
  2787. 'FM': '119.252.112.0/20',
  2788. 'FO': '88.85.32.0/19',
  2789. 'FR': '90.0.0.0/9',
  2790. 'GA': '41.158.0.0/15',
  2791. 'GB': '25.0.0.0/8',
  2792. 'GD': '74.122.88.0/21',
  2793. 'GE': '31.146.0.0/16',
  2794. 'GF': '161.22.64.0/18',
  2795. 'GG': '62.68.160.0/19',
  2796. 'GH': '45.208.0.0/14',
  2797. 'GI': '85.115.128.0/19',
  2798. 'GL': '88.83.0.0/19',
  2799. 'GM': '160.182.0.0/15',
  2800. 'GN': '197.149.192.0/18',
  2801. 'GP': '104.250.0.0/19',
  2802. 'GQ': '105.235.224.0/20',
  2803. 'GR': '94.64.0.0/13',
  2804. 'GT': '168.234.0.0/16',
  2805. 'GU': '168.123.0.0/16',
  2806. 'GW': '197.214.80.0/20',
  2807. 'GY': '181.41.64.0/18',
  2808. 'HK': '113.252.0.0/14',
  2809. 'HN': '181.210.0.0/16',
  2810. 'HR': '93.136.0.0/13',
  2811. 'HT': '148.102.128.0/17',
  2812. 'HU': '84.0.0.0/14',
  2813. 'ID': '39.192.0.0/10',
  2814. 'IE': '87.32.0.0/12',
  2815. 'IL': '79.176.0.0/13',
  2816. 'IM': '5.62.80.0/20',
  2817. 'IN': '117.192.0.0/10',
  2818. 'IO': '203.83.48.0/21',
  2819. 'IQ': '37.236.0.0/14',
  2820. 'IR': '2.176.0.0/12',
  2821. 'IS': '82.221.0.0/16',
  2822. 'IT': '79.0.0.0/10',
  2823. 'JE': '87.244.64.0/18',
  2824. 'JM': '72.27.0.0/17',
  2825. 'JO': '176.29.0.0/16',
  2826. 'JP': '126.0.0.0/8',
  2827. 'KE': '105.48.0.0/12',
  2828. 'KG': '158.181.128.0/17',
  2829. 'KH': '36.37.128.0/17',
  2830. 'KI': '103.25.140.0/22',
  2831. 'KM': '197.255.224.0/20',
  2832. 'KN': '198.32.32.0/19',
  2833. 'KP': '175.45.176.0/22',
  2834. 'KR': '175.192.0.0/10',
  2835. 'KW': '37.36.0.0/14',
  2836. 'KY': '64.96.0.0/15',
  2837. 'KZ': '2.72.0.0/13',
  2838. 'LA': '115.84.64.0/18',
  2839. 'LB': '178.135.0.0/16',
  2840. 'LC': '192.147.231.0/24',
  2841. 'LI': '82.117.0.0/19',
  2842. 'LK': '112.134.0.0/15',
  2843. 'LR': '41.86.0.0/19',
  2844. 'LS': '129.232.0.0/17',
  2845. 'LT': '78.56.0.0/13',
  2846. 'LU': '188.42.0.0/16',
  2847. 'LV': '46.109.0.0/16',
  2848. 'LY': '41.252.0.0/14',
  2849. 'MA': '105.128.0.0/11',
  2850. 'MC': '88.209.64.0/18',
  2851. 'MD': '37.246.0.0/16',
  2852. 'ME': '178.175.0.0/17',
  2853. 'MF': '74.112.232.0/21',
  2854. 'MG': '154.126.0.0/17',
  2855. 'MH': '117.103.88.0/21',
  2856. 'MK': '77.28.0.0/15',
  2857. 'ML': '154.118.128.0/18',
  2858. 'MM': '37.111.0.0/17',
  2859. 'MN': '49.0.128.0/17',
  2860. 'MO': '60.246.0.0/16',
  2861. 'MP': '202.88.64.0/20',
  2862. 'MQ': '109.203.224.0/19',
  2863. 'MR': '41.188.64.0/18',
  2864. 'MS': '208.90.112.0/22',
  2865. 'MT': '46.11.0.0/16',
  2866. 'MU': '105.16.0.0/12',
  2867. 'MV': '27.114.128.0/18',
  2868. 'MW': '105.234.0.0/16',
  2869. 'MX': '187.192.0.0/11',
  2870. 'MY': '175.136.0.0/13',
  2871. 'MZ': '197.218.0.0/15',
  2872. 'NA': '41.182.0.0/16',
  2873. 'NC': '101.101.0.0/18',
  2874. 'NE': '197.214.0.0/18',
  2875. 'NF': '203.17.240.0/22',
  2876. 'NG': '105.112.0.0/12',
  2877. 'NI': '186.76.0.0/15',
  2878. 'NL': '145.96.0.0/11',
  2879. 'NO': '84.208.0.0/13',
  2880. 'NP': '36.252.0.0/15',
  2881. 'NR': '203.98.224.0/19',
  2882. 'NU': '49.156.48.0/22',
  2883. 'NZ': '49.224.0.0/14',
  2884. 'OM': '5.36.0.0/15',
  2885. 'PA': '186.72.0.0/15',
  2886. 'PE': '186.160.0.0/14',
  2887. 'PF': '123.50.64.0/18',
  2888. 'PG': '124.240.192.0/19',
  2889. 'PH': '49.144.0.0/13',
  2890. 'PK': '39.32.0.0/11',
  2891. 'PL': '83.0.0.0/11',
  2892. 'PM': '70.36.0.0/20',
  2893. 'PR': '66.50.0.0/16',
  2894. 'PS': '188.161.0.0/16',
  2895. 'PT': '85.240.0.0/13',
  2896. 'PW': '202.124.224.0/20',
  2897. 'PY': '181.120.0.0/14',
  2898. 'QA': '37.210.0.0/15',
  2899. 'RE': '139.26.0.0/16',
  2900. 'RO': '79.112.0.0/13',
  2901. 'RS': '178.220.0.0/14',
  2902. 'RU': '5.136.0.0/13',
  2903. 'RW': '105.178.0.0/15',
  2904. 'SA': '188.48.0.0/13',
  2905. 'SB': '202.1.160.0/19',
  2906. 'SC': '154.192.0.0/11',
  2907. 'SD': '154.96.0.0/13',
  2908. 'SE': '78.64.0.0/12',
  2909. 'SG': '152.56.0.0/14',
  2910. 'SI': '188.196.0.0/14',
  2911. 'SK': '78.98.0.0/15',
  2912. 'SL': '197.215.0.0/17',
  2913. 'SM': '89.186.32.0/19',
  2914. 'SN': '41.82.0.0/15',
  2915. 'SO': '197.220.64.0/19',
  2916. 'SR': '186.179.128.0/17',
  2917. 'SS': '105.235.208.0/21',
  2918. 'ST': '197.159.160.0/19',
  2919. 'SV': '168.243.0.0/16',
  2920. 'SX': '190.102.0.0/20',
  2921. 'SY': '5.0.0.0/16',
  2922. 'SZ': '41.84.224.0/19',
  2923. 'TC': '65.255.48.0/20',
  2924. 'TD': '154.68.128.0/19',
  2925. 'TG': '196.168.0.0/14',
  2926. 'TH': '171.96.0.0/13',
  2927. 'TJ': '85.9.128.0/18',
  2928. 'TK': '27.96.24.0/21',
  2929. 'TL': '180.189.160.0/20',
  2930. 'TM': '95.85.96.0/19',
  2931. 'TN': '197.0.0.0/11',
  2932. 'TO': '175.176.144.0/21',
  2933. 'TR': '78.160.0.0/11',
  2934. 'TT': '186.44.0.0/15',
  2935. 'TV': '202.2.96.0/19',
  2936. 'TW': '120.96.0.0/11',
  2937. 'TZ': '156.156.0.0/14',
  2938. 'UA': '93.72.0.0/13',
  2939. 'UG': '154.224.0.0/13',
  2940. 'US': '3.0.0.0/8',
  2941. 'UY': '167.56.0.0/13',
  2942. 'UZ': '82.215.64.0/18',
  2943. 'VA': '212.77.0.0/19',
  2944. 'VC': '24.92.144.0/20',
  2945. 'VE': '186.88.0.0/13',
  2946. 'VG': '172.103.64.0/18',
  2947. 'VI': '146.226.0.0/16',
  2948. 'VN': '14.160.0.0/11',
  2949. 'VU': '202.80.32.0/20',
  2950. 'WF': '117.20.32.0/21',
  2951. 'WS': '202.4.32.0/19',
  2952. 'YE': '134.35.0.0/16',
  2953. 'YT': '41.242.116.0/22',
  2954. 'ZA': '41.0.0.0/11',
  2955. 'ZM': '165.56.0.0/13',
  2956. 'ZW': '41.85.192.0/19',
  2957. }
  2958. @classmethod
  2959. def random_ipv4(cls, code):
  2960. block = cls._country_ip_map.get(code.upper())
  2961. if not block:
  2962. return None
  2963. addr, preflen = block.split('/')
  2964. addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
  2965. addr_max = addr_min | (0xffffffff >> int(preflen))
  2966. return compat_str(socket.inet_ntoa(
  2967. compat_struct_pack('!L', random.randint(addr_min, addr_max))))

class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # youtube-dl's http/https handlers take care of wrapping the socket with SOCKS
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
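
# Illustrative usage sketch (not part of the original module), assuming the
# handler is installed into a urllib opener: the per-request proxy is selected
# via the 'Ytdl-request-proxy' header that proxy_open() pops, and the special
# value '__noproxy__' bypasses the opener-wide proxy for that request only.
# The proxy URL below is hypothetical.
def _per_request_proxy_example():
    opener = compat_urllib_request.build_opener(
        PerRequestProxyHandler({'http': 'http://proxy.example:3128'}))
    req = compat_urllib_request.Request('http://example.com/')
    req.add_header('Ytdl-request-proxy', '__noproxy__')
    # opener.open(req) would now go direct, ignoring the default proxy
    return opener, req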

# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387

def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s

def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc
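
# Illustrative round-trip sketch (not part of the original module) for the two
# PyCrypto-derived helpers above: front padding to the requested block size,
# then back-conversion to the original integer.
def _long_bytes_roundtrip_example():
    n = 0xdeadbeefcafe
    packed = long_to_bytes(n, blocksize=8)
    assert packed == b'\x00\x00\xde\xad\xbe\xef\xca\xfe'
    assert bytes_to_long(packed) == n
    return packed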

def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameters e and N of the RSA algorithm, both integers
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''

    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted
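
# Illustrative sketch (not part of the original module), using textbook toy
# RSA numbers (N=3233, e=17) only to show the byte order: the input bytes are
# reversed (little-endian) before being interpreted as an integer, and the
# result comes back as a lowercase hex string. Real sites supply much larger
# exponent/modulus values.
def _ohdave_rsa_example():
    # b'\x02\x01' reversed is b'\x01\x02', i.e. payload 0x0102 == 258
    assert ohdave_rsa_encrypt(b'\x02\x01', 17, 3233) == '%x' % pow(258, 17, 3233)
    return ohdave_rsa_encrypt(b'\x02\x01', 17, 3233)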

def pkcs1pad(data, length):
    """
    Pad input data with the PKCS#1 scheme

    @param {int[]} data        input data
    @param {int}   length      target length
    @returns {int[]}           padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
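
# Illustrative sketch (not part of the original module): the PKCS#1 v1.5
# block-type-2 layout produced above is 0x00 0x02, random filler, 0x00, then
# the message, padded out to the requested total length.
def _pkcs1pad_example():
    padded = pkcs1pad([1, 2, 3], 16)
    assert len(padded) == 16
    assert padded[:2] == [0, 2] and padded[-4:] == [0, 1, 2, 3]
    return padded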

def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret
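
# Illustrative sketch (not part of the original module): with the default
# table, encode_base_n() produces the digit strings used when expanding
# packer-obfuscated JavaScript below.
def _encode_base_n_example():
    assert encode_base_n(255, 16) == 'ff'
    assert encode_base_n(61, 62) == 'Z'
    return encode_base_n(255, 16)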

def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)
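
# Illustrative sketch (not part of the original module): the substitution step
# of decode_packed_codes() on a hand-made example, bypassing PACKED_CODES_RE.
# The template '0("1")' with symbol list ['alert', 'hi'] expands to
# 'alert("hi")', mirroring how Dean Edwards style "p,a,c,k,e,d" JavaScript is
# unpacked.
def _decode_packed_codes_example():
    symbols = ['alert', 'hi']
    table = dict((encode_base_n(i, 36), symbols[i]) for i in range(len(symbols)))
    return re.sub(r'\b(\w+)\b', lambda m: table.get(m.group(0), m.group(0)), '0("1")')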

def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info
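
# Illustrative sketch (not part of the original module): splitting a typical
# EXT-X-STREAM-INF attribute list, including a quoted value that itself
# contains commas.
def _parse_m3u8_attributes_example():
    attrs = parse_m3u8_attributes(
        'BANDWIDTH=1280000,CODECS="avc1.4d401f,mp4a.40.2",RESOLUTION=1280x720')
    assert attrs['BANDWIDTH'] == '1280000'
    assert attrs['CODECS'] == 'avc1.4d401f,mp4a.40.2'
    assert attrs['RESOLUTION'] == '1280x720'
    return attrs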

def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n
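
# Illustrative sketch (not part of the original module): urshift() emulates
# JavaScript's 32-bit unsigned right shift (>>>) for values that may be
# negative Python ints.
def _urshift_example():
    assert urshift(-1, 1) == 0x7fffffff
    assert urshift(16, 2) == 4
    return urshift(-1, 1)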

# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels
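
# Illustrative sketch (not part of the original module): decode_png() applied
# to a minimal, hand-built 1x1 red RGB PNG. The chunk CRCs are dummy bytes,
# which is fine because the parser above skips them.
def _decode_png_example():
    ihdr = compat_struct_pack('>IIBBBBB', 1, 1, 8, 2, 0, 0, 0)
    idat = zlib.compress(b'\x00\xff\x00\x00')  # filter byte 0 + one RGB pixel
    png = (b'\x89PNG\x0d\x0a\x1a\x0a'
           + compat_struct_pack('>I', len(ihdr)) + b'IHDR' + ihdr + b'\x00' * 4
           + compat_struct_pack('>I', len(idat)) + b'IDAT' + idat + b'\x00' * 4
           + compat_struct_pack('>I', 0) + b'IEND' + b'\x00' * 4)
    assert decode_png(png) == (1, 1, [[0xff, 0x00, 0x00]])
    return decode_png(png)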

def write_xattr(path, key, value):
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/rg3/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'youtube-dl requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:
                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)] +
                       [encodeArgument(o) for o in opts] +
                       [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate()
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)
            else:
                # On Unix, but we can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")

def random_birthday(year_field, month_field, day_field):
    return {
        year_field: str(random.randint(1950, 1995)),
        month_field: str(random.randint(1, 12)),
        day_field: str(random.randint(1, 31)),
    }