#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

import base64
import binascii
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import pipes
import platform
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib

from .compat import (
    compat_HTMLParser,
    compat_basestring,
    compat_chr,
    compat_etree_fromstring,
    compat_html_entities,
    compat_html_entities_html5,
    compat_http_client,
    compat_kwargs,
    compat_os_name,
    compat_parse_qs,
    compat_shlex_quote,
    compat_socket_create_connection,
    compat_str,
    compat_struct_pack,
    compat_struct_unpack,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_xpath,
)

from .socks import (
    ProxyType,
    sockssocket,
)


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%b %d %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non-ASCII characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise


if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None


# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)
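
# Illustrative usage (the prefix and namespace URI below are hypothetical):
#     >>> xpath_with_ns('root/p:q', {'p': 'http://example.com/ns'})
#     'root/{http://example.com/ns}q'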


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text
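
# Illustrative usage with a hypothetical document (element names are examples only):
#     >>> doc = compat_etree_fromstring('<root><title>hello</title></root>')
#     >>> xpath_text(doc, './title')
#     'hello'
#     >>> xpath_text(doc, './missing', default=None) is None
#     True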


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    return get_element_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_element_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    value = re.escape(value) if escape_value else value

    m = re.search(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html)

    if not m:
        return None
    res = m.group('content')

    if res.startswith('"') or res.startswith("'"):
        res = res[1:-1]

    return unescapeHTML(res)
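
# Illustrative usage (the markup is a hypothetical example):
#     >>> get_element_by_id('foo', '<div id="foo">bar</div>')
#     'bar'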


class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""
    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&#98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    parser.feed(html_element)
    parser.close()
    return parser.attrs


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
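
# Illustrative usage (the snippet is a hypothetical example):
#     >>> clean_html('<p>foo<br/>bar &amp; baz</p>')
#     'foo\nbar & baz'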


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
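
# Illustrative usage (the input string is a hypothetical example):
#     >>> sanitize_filename('A/B: C?')
#     'A_B - C'
#     >>> sanitize_filename('A/B: C?', restricted=True)
#     'A_B_-_C'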


def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        drive_or_unc, _ = os.path.splitunc(s)
    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)


# Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
# unwanted failures due to missing protocol
def sanitize_url(url):
    return 'http:%s' % url if url.startswith('//') else url


def sanitized_Request(url, *args, **kwargs):
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs
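
# Illustrative usage:
#     >>> formatSeconds(3661)
#     '1:01:01'
#     >>> formatSeconds(61)
#     '1:01'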


def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg


class ExtractorError(Exception):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        self.msg = msg


class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(Exception):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
                'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(Exception):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs[b'strict'] = True
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = compat_socket_create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:
    http://techknack.net/python-urllib2-handlers/
    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # According to RFC 3986, URLs cannot contain non-ASCII characters; however, this is
        # not always respected by websites: some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412]).
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991).
        # To work around the aforementioned issue we will replace the request's original URL with
        # a percent-encoded one.
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09),
        # the code of this workaround has been moved here from YoutubeDL.urlopen().
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # According to RFC 2616 the default charset is iso-8859-1, which Python 3 respects
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


def extract_timezone(date_str):
    m = re.search(
        r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
        date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass
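
# Illustrative usage (2016-01-01T00:00:00+01:00 is 2015-12-31T23:00:00 UTC):
#     >>> parse_iso8601('2016-01-01T00:00:00+01:00')
#     1451602800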


def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if upload_date is not None:
        return compat_str(upload_date)


def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = date_str.replace(',', ' ')

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
        except ValueError:
            pass
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600


def determine_ext(url, default_ext='unknown_video'):
    if url is None:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext
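
# Illustrative usage (the URLs are hypothetical examples):
#     >>> determine_ext('http://example.com/foo/bar.mp4/?download')
#     'mp4'
#     >>> determine_ext('http://example.com/play?id=42')
#     'unknown_video'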


def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format


def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()
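
# Illustrative usage:
#     >>> date_from_str('20160101')
#     datetime.date(2016, 1, 1)
#     >>> date_from_str('now-1week') == datetime.date.today() - datetime.timedelta(weeks=1)
#     True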


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res


def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b'GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b'GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True


def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)
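
# Illustrative usage (the two helpers are inverses of each other):
#     >>> bytes_to_intlist(b'abc')
#     [97, 98, 99]
#     >>> intlist_to_bytes([97, 98, 99])
#     b'abc'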


# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
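
# Illustrative round trip (URL and payload are hypothetical examples):
#     >>> smug = smuggle_url('http://example.com/video', {'referer': 'http://example.com'})
#     >>> unsmuggle_url(smug)
#     ('http://example.com/video', {'referer': 'http://example.com'})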


def format_bytes(bytes):
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
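
# Illustrative usage:
#     >>> format_bytes(1536)
#     '1.50KiB'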


def lookup_unit_table(unit_table, s):
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if not m:
        return None
    num_str = m.group('num').replace(',', '.')
    mult = unit_table[m.group('unit')]
    return int(float(num_str) * mult)


def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'bytes': 1,
        'KiB': 1024,
        'KB': 1000,
        'kB': 1024,
        'Kb': 1000,
        'kb': 1000,
        'kilobytes': 1000,
        'kibibytes': 1024,
        'MiB': 1024 ** 2,
        'MB': 1000 ** 2,
        'mB': 1024 ** 2,
        'Mb': 1000 ** 2,
        'mb': 1000 ** 2,
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'GiB': 1024 ** 3,
        'GB': 1000 ** 3,
        'gB': 1024 ** 3,
        'Gb': 1000 ** 3,
        'gb': 1000 ** 3,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'TiB': 1024 ** 4,
        'TB': 1000 ** 4,
        'tB': 1024 ** 4,
        'Tb': 1000 ** 4,
        'tb': 1000 ** 4,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'PiB': 1024 ** 5,
        'PB': 1000 ** 5,
        'pB': 1024 ** 5,
        'Pb': 1000 ** 5,
        'pb': 1000 ** 5,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'EiB': 1024 ** 6,
        'EB': 1000 ** 6,
        'eB': 1024 ** 6,
        'Eb': 1000 ** 6,
        'eb': 1000 ** 6,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'ZiB': 1024 ** 7,
        'ZB': 1000 ** 7,
        'zB': 1024 ** 7,
        'Zb': 1000 ** 7,
        'zb': 1000 ** 7,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'YiB': 1024 ** 8,
        'YB': 1000 ** 8,
        'yB': 1024 ** 8,
        'Yb': 1000 ** 8,
        'yb': 1000 ** 8,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)
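
# Illustrative usage (binary vs. decimal units):
#     >>> parse_filesize('1.5 GiB')
#     1610612736
#     >>> parse_filesize('500 KB')
#     500000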
  1306. def parse_count(s):
  1307. if s is None:
  1308. return None
  1309. s = s.strip()
  1310. if re.match(r'^[\d,.]+$', s):
  1311. return str_to_int(s)
  1312. _UNIT_TABLE = {
  1313. 'k': 1000,
  1314. 'K': 1000,
  1315. 'm': 1000 ** 2,
  1316. 'M': 1000 ** 2,
  1317. 'kk': 1000 ** 2,
  1318. 'KK': 1000 ** 2,
  1319. }
  1320. return lookup_unit_table(_UNIT_TABLE, s)
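# A minimal sketch (hypothetical inputs):
#   >>> parse_count('1,480')
#   1480
#   >>> parse_count('2.5M')
#   2500000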
  1321. def month_by_name(name, lang='en'):
  1322. """ Return the number of a month by (locale-independently) English name """
  1323. month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
  1324. try:
  1325. return month_names.index(name) + 1
  1326. except ValueError:
  1327. return None
  1328. def month_by_abbreviation(abbrev):
  1329. """ Return the number of a month by (locale-independently) English
  1330. abbreviations """
  1331. try:
  1332. return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
  1333. except ValueError:
  1334. return None
  1335. def fix_xml_ampersands(xml_str):
  1336. """Replace all the '&' by '&amp;' in XML"""
  1337. return re.sub(
  1338. r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
  1339. '&amp;',
  1340. xml_str)
  1341. def setproctitle(title):
  1342. assert isinstance(title, compat_str)
  1343. # ctypes in Jython is not complete
  1344. # http://bugs.jython.org/issue2148
  1345. if sys.platform.startswith('java'):
  1346. return
  1347. try:
  1348. libc = ctypes.cdll.LoadLibrary('libc.so.6')
  1349. except OSError:
  1350. return
  1351. title_bytes = title.encode('utf-8')
  1352. buf = ctypes.create_string_buffer(len(title_bytes))
  1353. buf.value = title_bytes
  1354. try:
1355. libc.prctl(15, buf, 0, 0, 0)  # PR_SET_NAME = 15
  1356. except AttributeError:
  1357. return # Strange libc, just skip this
  1358. def remove_start(s, start):
  1359. return s[len(start):] if s is not None and s.startswith(start) else s
  1360. def remove_end(s, end):
  1361. return s[:-len(end)] if s is not None and s.endswith(end) else s
  1362. def remove_quotes(s):
  1363. if s is None or len(s) < 2:
  1364. return s
  1365. for quote in ('"', "'", ):
  1366. if s[0] == quote and s[-1] == quote:
  1367. return s[1:-1]
  1368. return s
  1369. def url_basename(url):
  1370. path = compat_urlparse.urlparse(url).path
  1371. return path.strip('/').split('/')[-1]
  1372. class HEADRequest(compat_urllib_request.Request):
  1373. def get_method(self):
  1374. return 'HEAD'
  1375. class PUTRequest(compat_urllib_request.Request):
  1376. def get_method(self):
  1377. return 'PUT'
  1378. def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
  1379. if get_attr:
  1380. if v is not None:
  1381. v = getattr(v, get_attr, None)
  1382. if v == '':
  1383. v = None
  1384. if v is None:
  1385. return default
  1386. try:
  1387. return int(v) * invscale // scale
  1388. except ValueError:
  1389. return default
  1390. def str_or_none(v, default=None):
  1391. return default if v is None else compat_str(v)
  1392. def str_to_int(int_str):
  1393. """ A more relaxed version of int_or_none """
  1394. if int_str is None:
  1395. return None
  1396. int_str = re.sub(r'[,\.\+]', '', int_str)
  1397. return int(int_str)
  1398. def float_or_none(v, scale=1, invscale=1, default=None):
  1399. if v is None:
  1400. return default
  1401. try:
  1402. return float(v) * invscale / scale
  1403. except ValueError:
  1404. return default
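# A minimal sketch of the coercion helpers above (hypothetical inputs):
#   >>> int_or_none('42')
#   42
#   >>> int_or_none('n/a', default=0)
#   0
#   >>> float_or_none('1.5', invscale=1000)
#   1500.0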
  1405. def strip_or_none(v):
  1406. return None if v is None else v.strip()
  1407. def parse_duration(s):
  1408. if not isinstance(s, compat_basestring):
  1409. return None
  1410. s = s.strip()
  1411. days, hours, mins, secs, ms = [None] * 5
  1412. m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?$', s)
  1413. if m:
  1414. days, hours, mins, secs, ms = m.groups()
  1415. else:
  1416. m = re.match(
  1417. r'''(?ix)(?:P?T)?
  1418. (?:
  1419. (?P<days>[0-9]+)\s*d(?:ays?)?\s*
  1420. )?
  1421. (?:
  1422. (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
  1423. )?
  1424. (?:
  1425. (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
  1426. )?
  1427. (?:
  1428. (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
  1429. )?$''', s)
  1430. if m:
  1431. days, hours, mins, secs, ms = m.groups()
  1432. else:
  1433. m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)$', s)
  1434. if m:
  1435. hours, mins = m.groups()
  1436. else:
  1437. return None
  1438. duration = 0
  1439. if secs:
  1440. duration += float(secs)
  1441. if mins:
  1442. duration += float(mins) * 60
  1443. if hours:
  1444. duration += float(hours) * 60 * 60
  1445. if days:
  1446. duration += float(days) * 24 * 60 * 60
  1447. if ms:
  1448. duration += float(ms)
  1449. return duration
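# A minimal sketch covering the three supported notations (hypothetical
# inputs): colon-separated, "Xh Ym Zs"-style and ISO-8601-like durations:
#   >>> parse_duration('1:02:03')
#   3723.0
#   >>> parse_duration('3 min')
#   180.0
#   >>> parse_duration('PT1H30M')
#   5400.0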
  1450. def prepend_extension(filename, ext, expected_real_ext=None):
  1451. name, real_ext = os.path.splitext(filename)
  1452. return (
  1453. '{0}.{1}{2}'.format(name, ext, real_ext)
  1454. if not expected_real_ext or real_ext[1:] == expected_real_ext
  1455. else '{0}.{1}'.format(filename, ext))
  1456. def replace_extension(filename, ext, expected_real_ext=None):
  1457. name, real_ext = os.path.splitext(filename)
  1458. return '{0}.{1}'.format(
  1459. name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
  1460. ext)
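# A minimal sketch (hypothetical filenames); when expected_real_ext does not
# match, the new extension is appended instead of spliced in:
#   >>> prepend_extension('video.mp4', 'temp')
#   'video.temp.mp4'
#   >>> replace_extension('video.mp4', 'mkv')
#   'video.mkv'
#   >>> prepend_extension('video.unknown', 'temp', expected_real_ext='mp4')
#   'video.unknown.temp'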
  1461. def check_executable(exe, args=[]):
  1462. """ Checks if the given binary is installed somewhere in PATH, and returns its name.
  1463. args can be a list of arguments for a short output (like -version) """
  1464. try:
  1465. subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
  1466. except OSError:
  1467. return False
  1468. return exe
  1469. def get_exe_version(exe, args=['--version'],
  1470. version_re=None, unrecognized='present'):
  1471. """ Returns the version of the specified executable,
  1472. or False if the executable is not present """
  1473. try:
  1474. out, _ = subprocess.Popen(
  1475. [encodeArgument(exe)] + args,
  1476. stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
  1477. except OSError:
  1478. return False
  1479. if isinstance(out, bytes): # Python 2.x
  1480. out = out.decode('ascii', 'ignore')
  1481. return detect_exe_version(out, version_re, unrecognized)
  1482. def detect_exe_version(output, version_re=None, unrecognized='present'):
  1483. assert isinstance(output, compat_str)
  1484. if version_re is None:
  1485. version_re = r'version\s+([-0-9._a-zA-Z]+)'
  1486. m = re.search(version_re, output)
  1487. if m:
  1488. return m.group(1)
  1489. else:
  1490. return unrecognized
  1491. class PagedList(object):
  1492. def __len__(self):
  1493. # This is only useful for tests
  1494. return len(self.getslice())
  1495. class OnDemandPagedList(PagedList):
  1496. def __init__(self, pagefunc, pagesize, use_cache=False):
  1497. self._pagefunc = pagefunc
  1498. self._pagesize = pagesize
  1499. self._use_cache = use_cache
  1500. if use_cache:
  1501. self._cache = {}
  1502. def getslice(self, start=0, end=None):
  1503. res = []
  1504. for pagenum in itertools.count(start // self._pagesize):
  1505. firstid = pagenum * self._pagesize
  1506. nextfirstid = pagenum * self._pagesize + self._pagesize
  1507. if start >= nextfirstid:
  1508. continue
  1509. page_results = None
  1510. if self._use_cache:
  1511. page_results = self._cache.get(pagenum)
  1512. if page_results is None:
  1513. page_results = list(self._pagefunc(pagenum))
  1514. if self._use_cache:
  1515. self._cache[pagenum] = page_results
  1516. startv = (
  1517. start % self._pagesize
  1518. if firstid <= start < nextfirstid
  1519. else 0)
  1520. endv = (
  1521. ((end - 1) % self._pagesize) + 1
  1522. if (end is not None and firstid <= end <= nextfirstid)
  1523. else None)
  1524. if startv != 0 or endv is not None:
  1525. page_results = page_results[startv:endv]
  1526. res.extend(page_results)
1527. # A little optimization - if the current page is not "full", i.e. does
1528. # not contain page_size videos, then we can assume that this page
1529. # is the last one - there are no more ids on further pages,
1530. # so there is no need to query again.
  1531. if len(page_results) + startv < self._pagesize:
  1532. break
  1533. # If we got the whole page, but the next page is not interesting,
  1534. # break out early as well
  1535. if end == nextfirstid:
  1536. break
  1537. return res
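# A minimal sketch of the pager (hypothetical page function yielding ten
# ids per page; only pages overlapping the requested slice are fetched):
#   >>> pager = OnDemandPagedList(lambda n: range(n * 10, (n + 1) * 10), 10)
#   >>> pager.getslice(5, 15)
#   [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]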
  1538. class InAdvancePagedList(PagedList):
  1539. def __init__(self, pagefunc, pagecount, pagesize):
  1540. self._pagefunc = pagefunc
  1541. self._pagecount = pagecount
  1542. self._pagesize = pagesize
  1543. def getslice(self, start=0, end=None):
  1544. res = []
  1545. start_page = start // self._pagesize
  1546. end_page = (
  1547. self._pagecount if end is None else (end // self._pagesize + 1))
  1548. skip_elems = start - start_page * self._pagesize
  1549. only_more = None if end is None else end - start
  1550. for pagenum in range(start_page, end_page):
  1551. page = list(self._pagefunc(pagenum))
  1552. if skip_elems:
  1553. page = page[skip_elems:]
  1554. skip_elems = None
  1555. if only_more is not None:
  1556. if len(page) < only_more:
  1557. only_more -= len(page)
  1558. else:
  1559. page = page[:only_more]
  1560. res.extend(page)
  1561. break
  1562. res.extend(page)
  1563. return res
  1564. def uppercase_escape(s):
  1565. unicode_escape = codecs.getdecoder('unicode_escape')
  1566. return re.sub(
  1567. r'\\U[0-9a-fA-F]{8}',
  1568. lambda m: unicode_escape(m.group(0))[0],
  1569. s)
  1570. def lowercase_escape(s):
  1571. unicode_escape = codecs.getdecoder('unicode_escape')
  1572. return re.sub(
  1573. r'\\u[0-9a-fA-F]{4}',
  1574. lambda m: unicode_escape(m.group(0))[0],
  1575. s)
  1576. def escape_rfc3986(s):
  1577. """Escape non-ASCII characters as suggested by RFC 3986"""
  1578. if sys.version_info < (3, 0) and isinstance(s, compat_str):
  1579. s = s.encode('utf-8')
  1580. return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
  1581. def escape_url(url):
  1582. """Escape URL as suggested by RFC 3986"""
  1583. url_parsed = compat_urllib_parse_urlparse(url)
  1584. return url_parsed._replace(
  1585. netloc=url_parsed.netloc.encode('idna').decode('ascii'),
  1586. path=escape_rfc3986(url_parsed.path),
  1587. params=escape_rfc3986(url_parsed.params),
  1588. query=escape_rfc3986(url_parsed.query),
  1589. fragment=escape_rfc3986(url_parsed.fragment)
  1590. ).geturl()
  1591. def read_batch_urls(batch_fd):
  1592. def fixup(url):
  1593. if not isinstance(url, compat_str):
  1594. url = url.decode('utf-8', 'replace')
  1595. BOM_UTF8 = '\xef\xbb\xbf'
  1596. if url.startswith(BOM_UTF8):
  1597. url = url[len(BOM_UTF8):]
  1598. url = url.strip()
  1599. if url.startswith(('#', ';', ']')):
  1600. return False
  1601. return url
  1602. with contextlib.closing(batch_fd) as fd:
  1603. return [url for url in map(fixup, fd) if url]
  1604. def urlencode_postdata(*args, **kargs):
  1605. return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
  1606. def update_url_query(url, query):
  1607. if not query:
  1608. return url
  1609. parsed_url = compat_urlparse.urlparse(url)
  1610. qs = compat_parse_qs(parsed_url.query)
  1611. qs.update(query)
  1612. return compat_urlparse.urlunparse(parsed_url._replace(
  1613. query=compat_urllib_parse_urlencode(qs, True)))
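# A minimal sketch (hypothetical URL; the resulting parameter order may
# vary across Python versions since the query goes through a dict):
#   >>> update_url_query('http://example.com/path?a=1', {'b': '2'})
#   'http://example.com/path?a=1&b=2'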
  1614. def update_Request(req, url=None, data=None, headers={}, query={}):
  1615. req_headers = req.headers.copy()
  1616. req_headers.update(headers)
  1617. req_data = data or req.data
  1618. req_url = update_url_query(url or req.get_full_url(), query)
  1619. req_get_method = req.get_method()
  1620. if req_get_method == 'HEAD':
  1621. req_type = HEADRequest
  1622. elif req_get_method == 'PUT':
  1623. req_type = PUTRequest
  1624. else:
  1625. req_type = compat_urllib_request.Request
  1626. new_req = req_type(
  1627. req_url, data=req_data, headers=req_headers,
  1628. origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
  1629. if hasattr(req, 'timeout'):
  1630. new_req.timeout = req.timeout
  1631. return new_req
  1632. def dict_get(d, key_or_keys, default=None, skip_false_values=True):
  1633. if isinstance(key_or_keys, (list, tuple)):
  1634. for key in key_or_keys:
  1635. if key not in d or d[key] is None or skip_false_values and not d[key]:
  1636. continue
  1637. return d[key]
  1638. return default
  1639. return d.get(key_or_keys, default)
  1640. def try_get(src, getter, expected_type=None):
  1641. try:
  1642. v = getter(src)
  1643. except (AttributeError, KeyError, TypeError, IndexError):
  1644. pass
  1645. else:
  1646. if expected_type is None or isinstance(v, expected_type):
  1647. return v
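# A minimal sketch of the two lookup helpers (hypothetical data): dict_get
# skips None and falsy candidates, try_get swallows lookup errors:
#   >>> dict_get({'a': None, 'b': 0, 'c': 5}, ('a', 'b', 'c'))
#   5
#   >>> try_get({'a': [{'b': 3}]}, lambda x: x['a'][0]['b'], int)
#   3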
  1648. def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
  1649. return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
  1650. US_RATINGS = {
  1651. 'G': 0,
  1652. 'PG': 10,
  1653. 'PG-13': 13,
  1654. 'R': 16,
  1655. 'NC': 18,
  1656. }
  1657. TV_PARENTAL_GUIDELINES = {
  1658. 'TV-Y': 0,
  1659. 'TV-Y7': 7,
  1660. 'TV-G': 0,
  1661. 'TV-PG': 0,
  1662. 'TV-14': 14,
  1663. 'TV-MA': 17,
  1664. }
  1665. def parse_age_limit(s):
  1666. if type(s) == int:
  1667. return s if 0 <= s <= 21 else None
  1668. if not isinstance(s, compat_basestring):
  1669. return None
  1670. m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
  1671. if m:
  1672. return int(m.group('age'))
  1673. if s in US_RATINGS:
  1674. return US_RATINGS[s]
  1675. return TV_PARENTAL_GUIDELINES.get(s)
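# A minimal sketch (hypothetical ratings):
#   >>> parse_age_limit('18+')
#   18
#   >>> parse_age_limit('PG-13')
#   13
#   >>> parse_age_limit('TV-MA')
#   17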
  1676. def strip_jsonp(code):
  1677. return re.sub(
  1678. r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
  1679. def js_to_json(code):
  1680. def fix_kv(m):
  1681. v = m.group(0)
  1682. if v in ('true', 'false', 'null'):
  1683. return v
  1684. elif v.startswith('/*') or v == ',':
  1685. return ""
  1686. if v[0] in ("'", '"'):
  1687. v = re.sub(r'(?s)\\.|"', lambda m: {
  1688. '"': '\\"',
  1689. "\\'": "'",
  1690. '\\\n': '',
  1691. '\\x': '\\u00',
  1692. }.get(m.group(0), m.group(0)), v[1:-1])
  1693. INTEGER_TABLE = (
  1694. (r'^(0[xX][0-9a-fA-F]+)\s*:?$', 16),
  1695. (r'^(0+[0-7]+)\s*:?$', 8),
  1696. )
  1697. for regex, base in INTEGER_TABLE:
  1698. im = re.match(regex, v)
  1699. if im:
  1700. i = int(im.group(1), base)
  1701. return '"%d":' % i if v.endswith(':') else '%d' % i
  1702. return '"%s"' % v
  1703. return re.sub(r'''(?sx)
  1704. "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
  1705. '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
  1706. /\*.*?\*/|,(?=\s*[\]}])|
  1707. [a-zA-Z_][.a-zA-Z_0-9]*|
  1708. \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?|
  1709. [0-9]+(?=\s*:)
  1710. ''', fix_kv, code)
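# A minimal sketch of typical fixups (hypothetical snippets): unquoted keys,
# single quotes and hex integers become valid JSON:
#   >>> js_to_json("{abc: 'def'}")
#   '{"abc": "def"}'
#   >>> js_to_json('{"x": 0x10}')
#   '{"x": 16}'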
  1711. def qualities(quality_ids):
  1712. """ Get a numeric quality value out of a list of possible values """
  1713. def q(qid):
  1714. try:
  1715. return quality_ids.index(qid)
  1716. except ValueError:
  1717. return -1
  1718. return q
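# A minimal sketch (hypothetical quality list; unknown ids rank lowest):
#   >>> q = qualities(['240p', '360p', '720p'])
#   >>> q('720p'), q('1080p')
#   (2, -1)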
  1719. DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
  1720. def limit_length(s, length):
  1721. """ Add ellipses to overly long strings """
  1722. if s is None:
  1723. return None
  1724. ELLIPSES = '...'
  1725. if len(s) > length:
  1726. return s[:length - len(ELLIPSES)] + ELLIPSES
  1727. return s
  1728. def version_tuple(v):
  1729. return tuple(int(e) for e in re.split(r'[-.]', v))
  1730. def is_outdated_version(version, limit, assume_new=True):
  1731. if not version:
  1732. return not assume_new
  1733. try:
  1734. return version_tuple(version) < version_tuple(limit)
  1735. except ValueError:
  1736. return not assume_new
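# A minimal sketch (hypothetical versions; the numeric comparison handles
# '1.10' > '1.2' correctly, unlike a plain string comparison):
#   >>> version_tuple('2015.12.23')
#   (2015, 12, 23)
#   >>> is_outdated_version('1.2', '1.10')
#   True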
  1737. def ytdl_is_updateable():
  1738. """ Returns if youtube-dl can be updated with -U """
  1739. from zipimport import zipimporter
  1740. return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
  1741. def args_to_str(args):
  1742. # Get a short string representation for a subprocess command
  1743. return ' '.join(compat_shlex_quote(a) for a in args)
  1744. def error_to_compat_str(err):
  1745. err_str = str(err)
1746. # On Python 2, error byte strings must be decoded with the proper
1747. # encoding rather than ascii
  1748. if sys.version_info[0] < 3:
  1749. err_str = err_str.decode(preferredencoding())
  1750. return err_str
  1751. def mimetype2ext(mt):
  1752. if mt is None:
  1753. return None
  1754. ext = {
  1755. 'audio/mp4': 'm4a',
1756. # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. We use .mp3 as
1757. # it's the most popular one
  1758. 'audio/mpeg': 'mp3',
  1759. }.get(mt)
  1760. if ext is not None:
  1761. return ext
  1762. _, _, res = mt.rpartition('/')
  1763. res = res.split(';')[0].strip().lower()
  1764. return {
  1765. '3gpp': '3gp',
  1766. 'smptett+xml': 'tt',
  1767. 'srt': 'srt',
  1768. 'ttaf+xml': 'dfxp',
  1769. 'ttml+xml': 'ttml',
  1770. 'vtt': 'vtt',
  1771. 'x-flv': 'flv',
  1772. 'x-mp4-fragmented': 'mp4',
  1773. 'x-ms-wmv': 'wmv',
  1774. 'mpegurl': 'm3u8',
  1775. 'x-mpegurl': 'm3u8',
  1776. 'vnd.apple.mpegurl': 'm3u8',
  1777. 'dash+xml': 'mpd',
  1778. 'f4m': 'f4m',
  1779. 'f4m+xml': 'f4m',
  1780. 'hds+xml': 'f4m',
  1781. 'vnd.ms-sstr+xml': 'ism',
  1782. 'quicktime': 'mov',
  1783. }.get(res, res)
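# A minimal sketch (hypothetical MIME types; parameters after ';' are
# stripped before the lookup, unknown subtypes fall through unchanged):
#   >>> mimetype2ext('audio/mp4')
#   'm4a'
#   >>> mimetype2ext('application/x-mpegURL')
#   'm3u8'
#   >>> mimetype2ext('video/mp4; codecs="avc1.42E01E"')
#   'mp4'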
  1784. def parse_codecs(codecs_str):
  1785. # http://tools.ietf.org/html/rfc6381
  1786. if not codecs_str:
  1787. return {}
1788. split_codecs = list(filter(None, map(
1789. lambda s: s.strip(), codecs_str.strip().strip(',').split(','))))
1790. vcodec, acodec = None, None
1791. for full_codec in split_codecs:
1792. codec = full_codec.split('.')[0]
1793. if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
1794. if not vcodec:
1795. vcodec = full_codec
1796. elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'):
1797. if not acodec:
1798. acodec = full_codec
1799. else:
1800. write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
1801. if not vcodec and not acodec:
1802. if len(split_codecs) == 2:
1803. return {
1804. 'vcodec': split_codecs[0],
1805. 'acodec': split_codecs[1],
1806. }
1807. elif len(split_codecs) == 1:
1808. return {
1809. 'vcodec': 'none',
1810. 'acodec': split_codecs[0],
1811. }
1812. else:
1813. return {
1814. 'vcodec': vcodec or 'none',
1815. 'acodec': acodec or 'none',
1816. }
1817. return {}
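# A minimal sketch (hypothetical RFC 6381 codecs string; dict key order may
# vary across Python versions). For two unrecognized codecs the fallback
# above assumes video-then-audio order; a single unrecognized codec is
# treated as audio-only:
#   >>> parse_codecs('avc1.64001f, mp4a.40.2')
#   {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}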
  1818. def urlhandle_detect_ext(url_handle):
  1819. getheader = url_handle.headers.get
  1820. cd = getheader('Content-Disposition')
  1821. if cd:
  1822. m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
  1823. if m:
  1824. e = determine_ext(m.group('filename'), default_ext=None)
  1825. if e:
  1826. return e
  1827. return mimetype2ext(getheader('Content-Type'))
  1828. def encode_data_uri(data, mime_type):
  1829. return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
  1830. def age_restricted(content_limit, age_limit):
  1831. """ Returns True iff the content should be blocked """
  1832. if age_limit is None: # No limit set
  1833. return False
  1834. if content_limit is None:
  1835. return False # Content available for everyone
  1836. return age_limit < content_limit
  1837. def is_html(first_bytes):
  1838. """ Detect whether a file contains HTML by examining its first bytes. """
  1839. BOMS = [
  1840. (b'\xef\xbb\xbf', 'utf-8'),
  1841. (b'\x00\x00\xfe\xff', 'utf-32-be'),
  1842. (b'\xff\xfe\x00\x00', 'utf-32-le'),
  1843. (b'\xff\xfe', 'utf-16-le'),
  1844. (b'\xfe\xff', 'utf-16-be'),
  1845. ]
  1846. for bom, enc in BOMS:
  1847. if first_bytes.startswith(bom):
  1848. s = first_bytes[len(bom):].decode(enc, 'replace')
  1849. break
  1850. else:
  1851. s = first_bytes.decode('utf-8', 'replace')
  1852. return re.match(r'^\s*<', s)
  1853. def determine_protocol(info_dict):
  1854. protocol = info_dict.get('protocol')
  1855. if protocol is not None:
  1856. return protocol
  1857. url = info_dict['url']
  1858. if url.startswith('rtmp'):
  1859. return 'rtmp'
  1860. elif url.startswith('mms'):
  1861. return 'mms'
  1862. elif url.startswith('rtsp'):
  1863. return 'rtsp'
  1864. ext = determine_ext(url)
  1865. if ext == 'm3u8':
  1866. return 'm3u8'
  1867. elif ext == 'f4m':
  1868. return 'f4m'
  1869. return compat_urllib_parse_urlparse(url).scheme
  1870. def render_table(header_row, data):
  1871. """ Render a list of rows, each as a list of values """
  1872. table = [header_row] + data
  1873. max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
  1874. format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
  1875. return '\n'.join(format_str % tuple(row) for row in table)
  1876. def _match_one(filter_part, dct):
  1877. COMPARISON_OPERATORS = {
  1878. '<': operator.lt,
  1879. '<=': operator.le,
  1880. '>': operator.gt,
  1881. '>=': operator.ge,
  1882. '=': operator.eq,
  1883. '!=': operator.ne,
  1884. }
  1885. operator_rex = re.compile(r'''(?x)\s*
  1886. (?P<key>[a-z_]+)
  1887. \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
  1888. (?:
  1889. (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
  1890. (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
  1891. )
  1892. \s*$
  1893. ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
  1894. m = operator_rex.search(filter_part)
  1895. if m:
  1896. op = COMPARISON_OPERATORS[m.group('op')]
  1897. if m.group('strval') is not None:
  1898. if m.group('op') not in ('=', '!='):
  1899. raise ValueError(
  1900. 'Operator %s does not support string values!' % m.group('op'))
  1901. comparison_value = m.group('strval')
  1902. else:
  1903. try:
  1904. comparison_value = int(m.group('intval'))
  1905. except ValueError:
  1906. comparison_value = parse_filesize(m.group('intval'))
  1907. if comparison_value is None:
  1908. comparison_value = parse_filesize(m.group('intval') + 'B')
  1909. if comparison_value is None:
  1910. raise ValueError(
  1911. 'Invalid integer value %r in filter part %r' % (
  1912. m.group('intval'), filter_part))
  1913. actual_value = dct.get(m.group('key'))
  1914. if actual_value is None:
  1915. return m.group('none_inclusive')
  1916. return op(actual_value, comparison_value)
  1917. UNARY_OPERATORS = {
  1918. '': lambda v: v is not None,
  1919. '!': lambda v: v is None,
  1920. }
  1921. operator_rex = re.compile(r'''(?x)\s*
  1922. (?P<op>%s)\s*(?P<key>[a-z_]+)
  1923. \s*$
  1924. ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
  1925. m = operator_rex.search(filter_part)
  1926. if m:
  1927. op = UNARY_OPERATORS[m.group('op')]
  1928. actual_value = dct.get(m.group('key'))
  1929. return op(actual_value)
  1930. raise ValueError('Invalid filter part %r' % filter_part)
  1931. def match_str(filter_str, dct):
  1932. """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
  1933. return all(
  1934. _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
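# A minimal sketch of the filter mini-language (hypothetical dict): '&'
# separates clauses, comparisons work on numbers/filesizes, and a bare key
# tests for presence:
#   >>> match_str('duration > 30 & description', {'duration': 60, 'description': 'x'})
#   True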
  1935. def match_filter_func(filter_str):
  1936. def _match_func(info_dict):
  1937. if match_str(filter_str, info_dict):
  1938. return None
  1939. else:
  1940. video_title = info_dict.get('title', info_dict.get('id', 'video'))
  1941. return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
  1942. return _match_func
  1943. def parse_dfxp_time_expr(time_expr):
  1944. if not time_expr:
  1945. return
  1946. mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
  1947. if mobj:
  1948. return float(mobj.group('time_offset'))
  1949. mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
  1950. if mobj:
  1951. return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
  1952. def srt_subtitles_timecode(seconds):
  1953. return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
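# A minimal sketch (hypothetical timestamp):
#   >>> srt_subtitles_timecode(3661.5)
#   '01:01:01,500'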
  1954. def dfxp2srt(dfxp_data):
  1955. _x = functools.partial(xpath_with_ns, ns_map={
  1956. 'ttml': 'http://www.w3.org/ns/ttml',
  1957. 'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
  1958. 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1',
  1959. })
  1960. class TTMLPElementParser(object):
  1961. out = ''
  1962. def start(self, tag, attrib):
  1963. if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
  1964. self.out += '\n'
  1965. def end(self, tag):
  1966. pass
  1967. def data(self, data):
  1968. self.out += data
  1969. def close(self):
  1970. return self.out.strip()
  1971. def parse_node(node):
  1972. target = TTMLPElementParser()
  1973. parser = xml.etree.ElementTree.XMLParser(target=target)
  1974. parser.feed(xml.etree.ElementTree.tostring(node))
  1975. return parser.close()
  1976. dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
  1977. out = []
  1978. paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p')
  1979. if not paras:
  1980. raise ValueError('Invalid dfxp/TTML subtitle')
  1981. for para, index in zip(paras, itertools.count(1)):
  1982. begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
  1983. end_time = parse_dfxp_time_expr(para.attrib.get('end'))
  1984. dur = parse_dfxp_time_expr(para.attrib.get('dur'))
  1985. if begin_time is None:
  1986. continue
  1987. if not end_time:
  1988. if not dur:
  1989. continue
  1990. end_time = begin_time + dur
  1991. out.append('%d\n%s --> %s\n%s\n\n' % (
  1992. index,
  1993. srt_subtitles_timecode(begin_time),
  1994. srt_subtitles_timecode(end_time),
  1995. parse_node(para)))
  1996. return ''.join(out)
1997. def cli_option(params, command_option, param):
1998. param = params.get(param)
1999. if param is not None:
2000. param = compat_str(param)
2001. return [command_option, param] if param is not None else []
  2002. def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
  2003. param = params.get(param)
  2004. assert isinstance(param, bool)
  2005. if separator:
  2006. return [command_option + separator + (true_value if param else false_value)]
  2007. return [command_option, true_value if param else false_value]
  2008. def cli_valueless_option(params, command_option, param, expected_value=True):
  2009. param = params.get(param)
  2010. return [command_option] if param == expected_value else []
  2011. def cli_configuration_args(params, param, default=[]):
  2012. ex_args = params.get(param)
  2013. if ex_args is None:
  2014. return default
  2015. assert isinstance(ex_args, list)
  2016. return ex_args
  2017. class ISO639Utils(object):
  2018. # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
  2019. _lang_map = {
  2020. 'aa': 'aar',
  2021. 'ab': 'abk',
  2022. 'ae': 'ave',
  2023. 'af': 'afr',
  2024. 'ak': 'aka',
  2025. 'am': 'amh',
  2026. 'an': 'arg',
  2027. 'ar': 'ara',
  2028. 'as': 'asm',
  2029. 'av': 'ava',
  2030. 'ay': 'aym',
  2031. 'az': 'aze',
  2032. 'ba': 'bak',
  2033. 'be': 'bel',
  2034. 'bg': 'bul',
  2035. 'bh': 'bih',
  2036. 'bi': 'bis',
  2037. 'bm': 'bam',
  2038. 'bn': 'ben',
  2039. 'bo': 'bod',
  2040. 'br': 'bre',
  2041. 'bs': 'bos',
  2042. 'ca': 'cat',
  2043. 'ce': 'che',
  2044. 'ch': 'cha',
  2045. 'co': 'cos',
  2046. 'cr': 'cre',
  2047. 'cs': 'ces',
  2048. 'cu': 'chu',
  2049. 'cv': 'chv',
  2050. 'cy': 'cym',
  2051. 'da': 'dan',
  2052. 'de': 'deu',
  2053. 'dv': 'div',
  2054. 'dz': 'dzo',
  2055. 'ee': 'ewe',
  2056. 'el': 'ell',
  2057. 'en': 'eng',
  2058. 'eo': 'epo',
  2059. 'es': 'spa',
  2060. 'et': 'est',
  2061. 'eu': 'eus',
  2062. 'fa': 'fas',
  2063. 'ff': 'ful',
  2064. 'fi': 'fin',
  2065. 'fj': 'fij',
  2066. 'fo': 'fao',
  2067. 'fr': 'fra',
  2068. 'fy': 'fry',
  2069. 'ga': 'gle',
  2070. 'gd': 'gla',
  2071. 'gl': 'glg',
  2072. 'gn': 'grn',
  2073. 'gu': 'guj',
  2074. 'gv': 'glv',
  2075. 'ha': 'hau',
  2076. 'he': 'heb',
  2077. 'hi': 'hin',
  2078. 'ho': 'hmo',
  2079. 'hr': 'hrv',
  2080. 'ht': 'hat',
  2081. 'hu': 'hun',
  2082. 'hy': 'hye',
  2083. 'hz': 'her',
  2084. 'ia': 'ina',
  2085. 'id': 'ind',
  2086. 'ie': 'ile',
  2087. 'ig': 'ibo',
  2088. 'ii': 'iii',
  2089. 'ik': 'ipk',
  2090. 'io': 'ido',
  2091. 'is': 'isl',
  2092. 'it': 'ita',
  2093. 'iu': 'iku',
  2094. 'ja': 'jpn',
  2095. 'jv': 'jav',
  2096. 'ka': 'kat',
  2097. 'kg': 'kon',
  2098. 'ki': 'kik',
  2099. 'kj': 'kua',
  2100. 'kk': 'kaz',
  2101. 'kl': 'kal',
  2102. 'km': 'khm',
  2103. 'kn': 'kan',
  2104. 'ko': 'kor',
  2105. 'kr': 'kau',
  2106. 'ks': 'kas',
  2107. 'ku': 'kur',
  2108. 'kv': 'kom',
  2109. 'kw': 'cor',
  2110. 'ky': 'kir',
  2111. 'la': 'lat',
  2112. 'lb': 'ltz',
  2113. 'lg': 'lug',
  2114. 'li': 'lim',
  2115. 'ln': 'lin',
  2116. 'lo': 'lao',
  2117. 'lt': 'lit',
  2118. 'lu': 'lub',
  2119. 'lv': 'lav',
  2120. 'mg': 'mlg',
  2121. 'mh': 'mah',
  2122. 'mi': 'mri',
  2123. 'mk': 'mkd',
  2124. 'ml': 'mal',
  2125. 'mn': 'mon',
  2126. 'mr': 'mar',
  2127. 'ms': 'msa',
  2128. 'mt': 'mlt',
  2129. 'my': 'mya',
  2130. 'na': 'nau',
  2131. 'nb': 'nob',
  2132. 'nd': 'nde',
  2133. 'ne': 'nep',
  2134. 'ng': 'ndo',
  2135. 'nl': 'nld',
  2136. 'nn': 'nno',
  2137. 'no': 'nor',
  2138. 'nr': 'nbl',
  2139. 'nv': 'nav',
  2140. 'ny': 'nya',
  2141. 'oc': 'oci',
  2142. 'oj': 'oji',
  2143. 'om': 'orm',
  2144. 'or': 'ori',
  2145. 'os': 'oss',
  2146. 'pa': 'pan',
  2147. 'pi': 'pli',
  2148. 'pl': 'pol',
  2149. 'ps': 'pus',
  2150. 'pt': 'por',
  2151. 'qu': 'que',
  2152. 'rm': 'roh',
  2153. 'rn': 'run',
  2154. 'ro': 'ron',
  2155. 'ru': 'rus',
  2156. 'rw': 'kin',
  2157. 'sa': 'san',
  2158. 'sc': 'srd',
  2159. 'sd': 'snd',
  2160. 'se': 'sme',
  2161. 'sg': 'sag',
  2162. 'si': 'sin',
  2163. 'sk': 'slk',
  2164. 'sl': 'slv',
  2165. 'sm': 'smo',
  2166. 'sn': 'sna',
  2167. 'so': 'som',
  2168. 'sq': 'sqi',
  2169. 'sr': 'srp',
  2170. 'ss': 'ssw',
  2171. 'st': 'sot',
  2172. 'su': 'sun',
  2173. 'sv': 'swe',
  2174. 'sw': 'swa',
  2175. 'ta': 'tam',
  2176. 'te': 'tel',
  2177. 'tg': 'tgk',
  2178. 'th': 'tha',
  2179. 'ti': 'tir',
  2180. 'tk': 'tuk',
  2181. 'tl': 'tgl',
  2182. 'tn': 'tsn',
  2183. 'to': 'ton',
  2184. 'tr': 'tur',
  2185. 'ts': 'tso',
  2186. 'tt': 'tat',
  2187. 'tw': 'twi',
  2188. 'ty': 'tah',
  2189. 'ug': 'uig',
  2190. 'uk': 'ukr',
  2191. 'ur': 'urd',
  2192. 'uz': 'uzb',
  2193. 've': 'ven',
  2194. 'vi': 'vie',
  2195. 'vo': 'vol',
  2196. 'wa': 'wln',
  2197. 'wo': 'wol',
  2198. 'xh': 'xho',
  2199. 'yi': 'yid',
  2200. 'yo': 'yor',
  2201. 'za': 'zha',
  2202. 'zh': 'zho',
  2203. 'zu': 'zul',
  2204. }
  2205. @classmethod
  2206. def short2long(cls, code):
  2207. """Convert language code from ISO 639-1 to ISO 639-2/T"""
  2208. return cls._lang_map.get(code[:2])
  2209. @classmethod
  2210. def long2short(cls, code):
  2211. """Convert language code from ISO 639-2/T to ISO 639-1"""
  2212. for short_name, long_name in cls._lang_map.items():
  2213. if long_name == code:
  2214. return short_name
  2215. class ISO3166Utils(object):
  2216. # From http://data.okfn.org/data/core/country-list
  2217. _country_map = {
  2218. 'AF': 'Afghanistan',
  2219. 'AX': 'Åland Islands',
  2220. 'AL': 'Albania',
  2221. 'DZ': 'Algeria',
  2222. 'AS': 'American Samoa',
  2223. 'AD': 'Andorra',
  2224. 'AO': 'Angola',
  2225. 'AI': 'Anguilla',
  2226. 'AQ': 'Antarctica',
  2227. 'AG': 'Antigua and Barbuda',
  2228. 'AR': 'Argentina',
  2229. 'AM': 'Armenia',
  2230. 'AW': 'Aruba',
  2231. 'AU': 'Australia',
  2232. 'AT': 'Austria',
  2233. 'AZ': 'Azerbaijan',
  2234. 'BS': 'Bahamas',
  2235. 'BH': 'Bahrain',
  2236. 'BD': 'Bangladesh',
  2237. 'BB': 'Barbados',
  2238. 'BY': 'Belarus',
  2239. 'BE': 'Belgium',
  2240. 'BZ': 'Belize',
  2241. 'BJ': 'Benin',
  2242. 'BM': 'Bermuda',
  2243. 'BT': 'Bhutan',
  2244. 'BO': 'Bolivia, Plurinational State of',
  2245. 'BQ': 'Bonaire, Sint Eustatius and Saba',
  2246. 'BA': 'Bosnia and Herzegovina',
  2247. 'BW': 'Botswana',
  2248. 'BV': 'Bouvet Island',
  2249. 'BR': 'Brazil',
  2250. 'IO': 'British Indian Ocean Territory',
  2251. 'BN': 'Brunei Darussalam',
  2252. 'BG': 'Bulgaria',
  2253. 'BF': 'Burkina Faso',
  2254. 'BI': 'Burundi',
  2255. 'KH': 'Cambodia',
  2256. 'CM': 'Cameroon',
  2257. 'CA': 'Canada',
  2258. 'CV': 'Cape Verde',
  2259. 'KY': 'Cayman Islands',
  2260. 'CF': 'Central African Republic',
  2261. 'TD': 'Chad',
  2262. 'CL': 'Chile',
  2263. 'CN': 'China',
  2264. 'CX': 'Christmas Island',
  2265. 'CC': 'Cocos (Keeling) Islands',
  2266. 'CO': 'Colombia',
  2267. 'KM': 'Comoros',
  2268. 'CG': 'Congo',
  2269. 'CD': 'Congo, the Democratic Republic of the',
  2270. 'CK': 'Cook Islands',
  2271. 'CR': 'Costa Rica',
  2272. 'CI': 'Côte d\'Ivoire',
  2273. 'HR': 'Croatia',
  2274. 'CU': 'Cuba',
  2275. 'CW': 'Curaçao',
  2276. 'CY': 'Cyprus',
  2277. 'CZ': 'Czech Republic',
  2278. 'DK': 'Denmark',
  2279. 'DJ': 'Djibouti',
  2280. 'DM': 'Dominica',
  2281. 'DO': 'Dominican Republic',
  2282. 'EC': 'Ecuador',
  2283. 'EG': 'Egypt',
  2284. 'SV': 'El Salvador',
  2285. 'GQ': 'Equatorial Guinea',
  2286. 'ER': 'Eritrea',
  2287. 'EE': 'Estonia',
  2288. 'ET': 'Ethiopia',
  2289. 'FK': 'Falkland Islands (Malvinas)',
  2290. 'FO': 'Faroe Islands',
  2291. 'FJ': 'Fiji',
  2292. 'FI': 'Finland',
  2293. 'FR': 'France',
  2294. 'GF': 'French Guiana',
  2295. 'PF': 'French Polynesia',
  2296. 'TF': 'French Southern Territories',
  2297. 'GA': 'Gabon',
  2298. 'GM': 'Gambia',
  2299. 'GE': 'Georgia',
  2300. 'DE': 'Germany',
  2301. 'GH': 'Ghana',
  2302. 'GI': 'Gibraltar',
  2303. 'GR': 'Greece',
  2304. 'GL': 'Greenland',
  2305. 'GD': 'Grenada',
  2306. 'GP': 'Guadeloupe',
  2307. 'GU': 'Guam',
  2308. 'GT': 'Guatemala',
  2309. 'GG': 'Guernsey',
  2310. 'GN': 'Guinea',
  2311. 'GW': 'Guinea-Bissau',
  2312. 'GY': 'Guyana',
  2313. 'HT': 'Haiti',
  2314. 'HM': 'Heard Island and McDonald Islands',
  2315. 'VA': 'Holy See (Vatican City State)',
  2316. 'HN': 'Honduras',
  2317. 'HK': 'Hong Kong',
  2318. 'HU': 'Hungary',
  2319. 'IS': 'Iceland',
  2320. 'IN': 'India',
  2321. 'ID': 'Indonesia',
  2322. 'IR': 'Iran, Islamic Republic of',
  2323. 'IQ': 'Iraq',
  2324. 'IE': 'Ireland',
  2325. 'IM': 'Isle of Man',
  2326. 'IL': 'Israel',
  2327. 'IT': 'Italy',
  2328. 'JM': 'Jamaica',
  2329. 'JP': 'Japan',
  2330. 'JE': 'Jersey',
  2331. 'JO': 'Jordan',
  2332. 'KZ': 'Kazakhstan',
  2333. 'KE': 'Kenya',
  2334. 'KI': 'Kiribati',
  2335. 'KP': 'Korea, Democratic People\'s Republic of',
  2336. 'KR': 'Korea, Republic of',
  2337. 'KW': 'Kuwait',
  2338. 'KG': 'Kyrgyzstan',
  2339. 'LA': 'Lao People\'s Democratic Republic',
  2340. 'LV': 'Latvia',
  2341. 'LB': 'Lebanon',
  2342. 'LS': 'Lesotho',
  2343. 'LR': 'Liberia',
  2344. 'LY': 'Libya',
  2345. 'LI': 'Liechtenstein',
  2346. 'LT': 'Lithuania',
  2347. 'LU': 'Luxembourg',
  2348. 'MO': 'Macao',
  2349. 'MK': 'Macedonia, the Former Yugoslav Republic of',
  2350. 'MG': 'Madagascar',
  2351. 'MW': 'Malawi',
  2352. 'MY': 'Malaysia',
  2353. 'MV': 'Maldives',
  2354. 'ML': 'Mali',
  2355. 'MT': 'Malta',
  2356. 'MH': 'Marshall Islands',
  2357. 'MQ': 'Martinique',
  2358. 'MR': 'Mauritania',
  2359. 'MU': 'Mauritius',
  2360. 'YT': 'Mayotte',
  2361. 'MX': 'Mexico',
  2362. 'FM': 'Micronesia, Federated States of',
  2363. 'MD': 'Moldova, Republic of',
  2364. 'MC': 'Monaco',
  2365. 'MN': 'Mongolia',
  2366. 'ME': 'Montenegro',
  2367. 'MS': 'Montserrat',
  2368. 'MA': 'Morocco',
  2369. 'MZ': 'Mozambique',
  2370. 'MM': 'Myanmar',
  2371. 'NA': 'Namibia',
  2372. 'NR': 'Nauru',
  2373. 'NP': 'Nepal',
  2374. 'NL': 'Netherlands',
  2375. 'NC': 'New Caledonia',
  2376. 'NZ': 'New Zealand',
  2377. 'NI': 'Nicaragua',
  2378. 'NE': 'Niger',
  2379. 'NG': 'Nigeria',
  2380. 'NU': 'Niue',
  2381. 'NF': 'Norfolk Island',
  2382. 'MP': 'Northern Mariana Islands',
  2383. 'NO': 'Norway',
  2384. 'OM': 'Oman',
  2385. 'PK': 'Pakistan',
  2386. 'PW': 'Palau',
  2387. 'PS': 'Palestine, State of',
  2388. 'PA': 'Panama',
  2389. 'PG': 'Papua New Guinea',
  2390. 'PY': 'Paraguay',
  2391. 'PE': 'Peru',
  2392. 'PH': 'Philippines',
  2393. 'PN': 'Pitcairn',
  2394. 'PL': 'Poland',
  2395. 'PT': 'Portugal',
  2396. 'PR': 'Puerto Rico',
  2397. 'QA': 'Qatar',
  2398. 'RE': 'Réunion',
  2399. 'RO': 'Romania',
  2400. 'RU': 'Russian Federation',
  2401. 'RW': 'Rwanda',
  2402. 'BL': 'Saint Barthélemy',
  2403. 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
  2404. 'KN': 'Saint Kitts and Nevis',
  2405. 'LC': 'Saint Lucia',
  2406. 'MF': 'Saint Martin (French part)',
  2407. 'PM': 'Saint Pierre and Miquelon',
  2408. 'VC': 'Saint Vincent and the Grenadines',
  2409. 'WS': 'Samoa',
  2410. 'SM': 'San Marino',
  2411. 'ST': 'Sao Tome and Principe',
  2412. 'SA': 'Saudi Arabia',
  2413. 'SN': 'Senegal',
  2414. 'RS': 'Serbia',
  2415. 'SC': 'Seychelles',
  2416. 'SL': 'Sierra Leone',
  2417. 'SG': 'Singapore',
  2418. 'SX': 'Sint Maarten (Dutch part)',
  2419. 'SK': 'Slovakia',
  2420. 'SI': 'Slovenia',
  2421. 'SB': 'Solomon Islands',
  2422. 'SO': 'Somalia',
  2423. 'ZA': 'South Africa',
  2424. 'GS': 'South Georgia and the South Sandwich Islands',
  2425. 'SS': 'South Sudan',
  2426. 'ES': 'Spain',
  2427. 'LK': 'Sri Lanka',
  2428. 'SD': 'Sudan',
  2429. 'SR': 'Suriname',
  2430. 'SJ': 'Svalbard and Jan Mayen',
  2431. 'SZ': 'Swaziland',
  2432. 'SE': 'Sweden',
  2433. 'CH': 'Switzerland',
  2434. 'SY': 'Syrian Arab Republic',
  2435. 'TW': 'Taiwan, Province of China',
  2436. 'TJ': 'Tajikistan',
  2437. 'TZ': 'Tanzania, United Republic of',
  2438. 'TH': 'Thailand',
  2439. 'TL': 'Timor-Leste',
  2440. 'TG': 'Togo',
  2441. 'TK': 'Tokelau',
  2442. 'TO': 'Tonga',
  2443. 'TT': 'Trinidad and Tobago',
  2444. 'TN': 'Tunisia',
  2445. 'TR': 'Turkey',
  2446. 'TM': 'Turkmenistan',
  2447. 'TC': 'Turks and Caicos Islands',
  2448. 'TV': 'Tuvalu',
  2449. 'UG': 'Uganda',
  2450. 'UA': 'Ukraine',
  2451. 'AE': 'United Arab Emirates',
  2452. 'GB': 'United Kingdom',
  2453. 'US': 'United States',
  2454. 'UM': 'United States Minor Outlying Islands',
  2455. 'UY': 'Uruguay',
  2456. 'UZ': 'Uzbekistan',
  2457. 'VU': 'Vanuatu',
  2458. 'VE': 'Venezuela, Bolivarian Republic of',
  2459. 'VN': 'Viet Nam',
  2460. 'VG': 'Virgin Islands, British',
  2461. 'VI': 'Virgin Islands, U.S.',
  2462. 'WF': 'Wallis and Futuna',
  2463. 'EH': 'Western Sahara',
  2464. 'YE': 'Yemen',
  2465. 'ZM': 'Zambia',
  2466. 'ZW': 'Zimbabwe',
  2467. }
  2468. @classmethod
  2469. def short2full(cls, code):
  2470. """Convert an ISO 3166-2 country code to the corresponding full name"""
  2471. return cls._country_map.get(code.upper())
  2472. class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
  2473. def __init__(self, proxies=None):
  2474. # Set default handlers
  2475. for type in ('http', 'https'):
  2476. setattr(self, '%s_open' % type,
  2477. lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
  2478. meth(r, proxy, type))
  2479. return compat_urllib_request.ProxyHandler.__init__(self, proxies)
  2480. def proxy_open(self, req, proxy, type):
  2481. req_proxy = req.headers.get('Ytdl-request-proxy')
  2482. if req_proxy is not None:
  2483. proxy = req_proxy
  2484. del req.headers['Ytdl-request-proxy']
  2485. if proxy == '__noproxy__':
  2486. return None # No Proxy
  2487. if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
  2488. req.add_header('Ytdl-socks-proxy', proxy)
2489. # youtube-dl's http/https handlers wrap the socket with SOCKS themselves
  2490. return None
  2491. return compat_urllib_request.ProxyHandler.proxy_open(
  2492. self, req, proxy, type)
  2493. def ohdave_rsa_encrypt(data, exponent, modulus):
  2494. '''
  2495. Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
  2496. Input:
  2497. data: data to encrypt, bytes-like object
  2498. exponent, modulus: parameter e and N of RSA algorithm, both integer
  2499. Output: hex string of encrypted data
  2500. Limitation: supports one block encryption only
  2501. '''
  2502. payload = int(binascii.hexlify(data[::-1]), 16)
  2503. encrypted = pow(payload, exponent, modulus)
  2504. return '%x' % encrypted
  2505. def encode_base_n(num, n, table=None):
  2506. FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
  2507. if not table:
  2508. table = FULL_TABLE[:n]
  2509. if n > len(table):
  2510. raise ValueError('base %d exceeds table length %d' % (n, len(table)))
  2511. if num == 0:
  2512. return table[0]
  2513. ret = ''
  2514. while num:
  2515. ret = table[num % n] + ret
  2516. num = num // n
  2517. return ret
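# A minimal sketch (hypothetical values; with the default table, base 16
# behaves like hex without the '0x' prefix):
#   >>> encode_base_n(255, 16)
#   'ff'
#   >>> encode_base_n(0, 2)
#   '0'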
  2518. def decode_packed_codes(code):
  2519. mobj = re.search(
  2520. r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)",
  2521. code)
2522. obfuscated_code, base, count, symbols = mobj.groups()
  2523. base = int(base)
  2524. count = int(count)
  2525. symbols = symbols.split('|')
  2526. symbol_table = {}
  2527. while count:
  2528. count -= 1
  2529. base_n_count = encode_base_n(count, base)
  2530. symbol_table[base_n_count] = symbols[count] or base_n_count
  2531. return re.sub(
  2532. r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
2533. obfuscated_code)
  2534. def parse_m3u8_attributes(attrib):
  2535. info = {}
  2536. for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
  2537. if val.startswith('"'):
  2538. val = val[1:-1]
  2539. info[key] = val
  2540. return info
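# A minimal sketch (hypothetical attribute list; quoted values may contain
# commas, and dict key order may vary across Python versions):
#   >>> parse_m3u8_attributes('BANDWIDTH=800000,CODECS="avc1.4d401e,mp4a.40.2"')
#   {'BANDWIDTH': '800000', 'CODECS': 'avc1.4d401e,mp4a.40.2'}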
  2541. def urshift(val, n):
  2542. return val >> n if val >= 0 else (val + 0x100000000) >> n
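# A minimal sketch: urshift emulates JavaScript's unsigned right shift
# ('>>>') for 32-bit values (hypothetical operands):
#   >>> urshift(-1, 28)
#   15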
  2543. # Based on png2str() written by @gdkchan and improved by @yokrysty
  2544. # Originally posted at https://github.com/rg3/youtube-dl/issues/9706
  2545. def decode_png(png_data):
  2546. # Reference: https://www.w3.org/TR/PNG/
  2547. header = png_data[8:]
  2548. if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
  2549. raise IOError('Not a valid PNG file.')
  2550. int_map = {1: '>B', 2: '>H', 4: '>I'}
  2551. unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
  2552. chunks = []
  2553. while header:
  2554. length = unpack_integer(header[:4])
  2555. header = header[4:]
  2556. chunk_type = header[:4]
  2557. header = header[4:]
  2558. chunk_data = header[:length]
  2559. header = header[length:]
  2560. header = header[4:] # Skip CRC
  2561. chunks.append({
  2562. 'type': chunk_type,
  2563. 'length': length,
  2564. 'data': chunk_data
  2565. })
  2566. ihdr = chunks[0]['data']
  2567. width = unpack_integer(ihdr[:4])
  2568. height = unpack_integer(ihdr[4:8])
  2569. idat = b''
  2570. for chunk in chunks:
  2571. if chunk['type'] == b'IDAT':
  2572. idat += chunk['data']
  2573. if not idat:
  2574. raise IOError('Unable to read PNG data.')
  2575. decompressed_data = bytearray(zlib.decompress(idat))
  2576. stride = width * 3
  2577. pixels = []
  2578. def _get_pixel(idx):
  2579. x = idx % stride
  2580. y = idx // stride
  2581. return pixels[y][x]
  2582. for y in range(height):
  2583. basePos = y * (1 + stride)
  2584. filter_type = decompressed_data[basePos]
  2585. current_row = []
  2586. pixels.append(current_row)
  2587. for x in range(stride):
  2588. color = decompressed_data[1 + basePos + x]
  2589. basex = y * stride + x
  2590. left = 0
  2591. up = 0
  2592. if x > 2:
  2593. left = _get_pixel(basex - 3)
  2594. if y > 0:
  2595. up = _get_pixel(basex - stride)
  2596. if filter_type == 1: # Sub
  2597. color = (color + left) & 0xff
  2598. elif filter_type == 2: # Up
  2599. color = (color + up) & 0xff
  2600. elif filter_type == 3: # Average
  2601. color = (color + ((left + up) >> 1)) & 0xff
  2602. elif filter_type == 4: # Paeth
  2603. a = left
  2604. b = up
  2605. c = 0
  2606. if x > 2 and y > 0:
  2607. c = _get_pixel(basex - stride - 3)
  2608. p = a + b - c
  2609. pa = abs(p - a)
  2610. pb = abs(p - b)
  2611. pc = abs(p - c)
  2612. if pa <= pb and pa <= pc:
  2613. color = (color + a) & 0xff
  2614. elif pb <= pc:
  2615. color = (color + b) & 0xff
  2616. else:
  2617. color = (color + c) & 0xff
  2618. current_row.append(color)
  2619. return width, height, pixels
  2620. def write_xattr(path, key, value):
  2621. # This mess below finds the best xattr tool for the job
  2622. try:
  2623. # try the pyxattr module...
  2624. import xattr
  2625. # Unicode arguments are not supported in python-pyxattr until
  2626. # version 0.5.0
  2627. # See https://github.com/rg3/youtube-dl/issues/5498
  2628. pyxattr_required_version = '0.5.0'
  2629. if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
  2630. # TODO: fallback to CLI tools
  2631. raise XAttrUnavailableError(
  2632. 'python-pyxattr is detected but is too old. '
  2633. 'youtube-dl requires %s or above while your version is %s. '
  2634. 'Falling back to other xattr implementations' % (
  2635. pyxattr_required_version, xattr.__version__))
  2636. try:
  2637. xattr.set(path, key, value)
  2638. except EnvironmentError as e:
  2639. raise XAttrMetadataError(e.errno, e.strerror)
  2640. except ImportError:
  2641. if compat_os_name == 'nt':
  2642. # Write xattrs to NTFS Alternate Data Streams:
  2643. # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
  2644. assert ':' not in key
  2645. assert os.path.exists(path)
  2646. ads_fn = path + ':' + key
  2647. try:
  2648. with open(ads_fn, 'wb') as f:
  2649. f.write(value)
  2650. except EnvironmentError as e:
  2651. raise XAttrMetadataError(e.errno, e.strerror)
  2652. else:
  2653. user_has_setfattr = check_executable('setfattr', ['--version'])
  2654. user_has_xattr = check_executable('xattr', ['-h'])
  2655. if user_has_setfattr or user_has_xattr:
  2656. value = value.decode('utf-8')
  2657. if user_has_setfattr:
  2658. executable = 'setfattr'
  2659. opts = ['-n', key, '-v', value]
  2660. elif user_has_xattr:
  2661. executable = 'xattr'
  2662. opts = ['-w', key, value]
  2663. cmd = ([encodeFilename(executable, True)] +
  2664. [encodeArgument(o) for o in opts] +
  2665. [encodeFilename(path, True)])
  2666. try:
  2667. p = subprocess.Popen(
  2668. cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
  2669. except EnvironmentError as e:
  2670. raise XAttrMetadataError(e.errno, e.strerror)
  2671. stdout, stderr = p.communicate()
  2672. stderr = stderr.decode('utf-8', 'replace')
  2673. if p.returncode != 0:
  2674. raise XAttrMetadataError(p.returncode, stderr)
  2675. else:
2676. # On Unix, but we can't find pyxattr, setfattr, or xattr.
  2677. if sys.platform.startswith('linux'):
  2678. raise XAttrUnavailableError(
  2679. "Couldn't find a tool to set the xattrs. "
  2680. "Install either the python 'pyxattr' or 'xattr' "
  2681. "modules, or the GNU 'attr' package "
  2682. "(which contains the 'setfattr' tool).")
  2683. else:
  2684. raise XAttrUnavailableError(
  2685. "Couldn't find a tool to set the xattrs. "
  2686. "Install either the python 'xattr' module, "
  2687. "or the 'xattr' binary.")