from __future__ import unicode_literals

import binascii
import collections
import email
import getpass
import io
import itertools
import optparse
import os
import re
import shlex
import shutil
import socket
import subprocess
import sys
import xml.etree.ElementTree

try:
    import urllib.request as compat_urllib_request
except ImportError:  # Python 2
    import urllib2 as compat_urllib_request

try:
    import urllib.error as compat_urllib_error
except ImportError:  # Python 2
    import urllib2 as compat_urllib_error

try:
    import urllib.parse as compat_urllib_parse
except ImportError:  # Python 2
    import urllib as compat_urllib_parse

try:
    from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError:  # Python 2
    from urlparse import urlparse as compat_urllib_parse_urlparse

try:
    import urllib.parse as compat_urlparse
except ImportError:  # Python 2
    import urlparse as compat_urlparse

try:
    import urllib.response as compat_urllib_response
except ImportError:  # Python 2
    import urllib as compat_urllib_response

try:
    import http.cookiejar as compat_cookiejar
except ImportError:  # Python 2
    import cookielib as compat_cookiejar

try:
    import http.cookies as compat_cookies
except ImportError:  # Python 2
    import Cookie as compat_cookies

try:
    import html.entities as compat_html_entities
except ImportError:  # Python 2
    import htmlentitydefs as compat_html_entities

try:
    import http.client as compat_http_client
except ImportError:  # Python 2
    import httplib as compat_http_client

try:
    from urllib.error import HTTPError as compat_HTTPError
except ImportError:  # Python 2
    from urllib2 import HTTPError as compat_HTTPError

try:
    from urllib.request import urlretrieve as compat_urlretrieve
except ImportError:  # Python 2
    from urllib import urlretrieve as compat_urlretrieve

try:
    from html.parser import HTMLParser as compat_HTMLParser
except ImportError:  # Python 2
    from HTMLParser import HTMLParser as compat_HTMLParser

try:
    from subprocess import DEVNULL
    compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')

try:
    import http.server as compat_http_server
except ImportError:
    import BaseHTTPServer as compat_http_server

try:
    compat_str = unicode  # Python 2
except NameError:
    compat_str = str

try:
    from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
    from urllib.parse import unquote as compat_urllib_parse_unquote
    from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError:  # Python 2
    _asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
                else re.compile('([\x00-\x7f]+)'))

    # HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
    # implementations from cpython 3.4.3's stdlib. Python 2's version
    # is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244)

    def compat_urllib_parse_unquote_to_bytes(string):
        """unquote_to_bytes('abc%20def') -> b'abc def'."""
        # Note: strings are encoded as UTF-8. This is only an issue if it contains
        # unescaped non-ASCII characters, which URIs should not.
        if not string:
            # Is it a string-like object?
            string.split
            return b''
        if isinstance(string, compat_str):
            string = string.encode('utf-8')
        bits = string.split(b'%')
        if len(bits) == 1:
            return string
        res = [bits[0]]
        append = res.append
        for item in bits[1:]:
            try:
                append(compat_urllib_parse._hextochr[item[:2]])
                append(item[2:])
            except KeyError:
                append(b'%')
                append(item)
        return b''.join(res)

    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
        """Replace %xx escapes by their single-character equivalent. The optional
        encoding and errors parameters specify how to decode percent-encoded
        sequences into Unicode characters, as accepted by the bytes.decode()
        method.
        By default, percent-encoded sequences are decoded with UTF-8, and invalid
        sequences are replaced by a placeholder character.

        unquote('abc%20def') -> 'abc def'.
        """
        if '%' not in string:
            string.split
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        bits = _asciire.split(string)
        res = [bits[0]]
        append = res.append
        for i in range(1, len(bits), 2):
            append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
            append(bits[i + 1])
        return ''.join(res)

    def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
        """Like unquote(), but also replace plus signs by spaces, as required for
        unquoting HTML form values.

        unquote_plus('%7e/abc+def') -> '~/abc def'
        """
        string = string.replace('+', ' ')
        return compat_urllib_parse_unquote(string, encoding, errors)
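
# Illustrative usage (added for clarity; not part of the original module, the
# examples mirror the docstrings above; bytes results print without the b
# prefix on Python 2):
#     >>> compat_urllib_parse_unquote_to_bytes('abc%20def')
#     b'abc def'
#     >>> compat_urllib_parse_unquote('abc%20def')
#     'abc def'
#     >>> compat_urllib_parse_unquote_plus('%7e/abc+def')
#     '~/abc def'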

try:
    from urllib.parse import urlencode as compat_urllib_parse_urlencode
except ImportError:  # Python 2
    # Python 2 will choke in urlencode on a mixture of byte and unicode strings.
    # Possible solutions are to either port it from Python 3 with all
    # its friends or manually ensure the input query contains only byte strings.
    # We will stick with the latter, thus recursively encoding the whole query.
    def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
        def encode_elem(e):
            if isinstance(e, dict):
                e = encode_dict(e)
            elif isinstance(e, (list, tuple,)):
                list_e = encode_list(e)
                e = tuple(list_e) if isinstance(e, tuple) else list_e
            elif isinstance(e, compat_str):
                e = e.encode(encoding)
            return e

        def encode_dict(d):
            return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())

        def encode_list(l):
            return [encode_elem(e) for e in l]

        return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
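
# Illustrative usage (added; not in the original source): the shim behaves like
# urllib.parse.urlencode on Python 3 and recursively byte-encodes text
# keys/values as UTF-8 on Python 2:
#     >>> compat_urllib_parse_urlencode({'q': 'two words'})
#     'q=two+words'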

try:
    from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError:  # Python < 3.4
    # Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
    class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
        def data_open(self, req):
            # data URLs as specified in RFC 2397.
            #
            # ignores POSTed data
            #
            # syntax:
            #   dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
            #   mediatype := [ type "/" subtype ] *( ";" parameter )
            #   data      := *urlchar
            #   parameter := attribute "=" value
            url = req.get_full_url()

            scheme, data = url.split(':', 1)
            mediatype, data = data.split(',', 1)

            # even base64 encoded data URLs might be quoted so unquote in any case:
            data = compat_urllib_parse_unquote_to_bytes(data)
            if mediatype.endswith(';base64'):
                data = binascii.a2b_base64(data)
                mediatype = mediatype[:-7]

            if not mediatype:
                mediatype = 'text/plain;charset=US-ASCII'

            headers = email.message_from_string(
                'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))

            return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
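
# Illustrative usage (added; assumes only the standard urllib handler protocol):
#     >>> opener = compat_urllib_request.build_opener(compat_urllib_request_DataHandler())
#     >>> opener.open('data:text/plain;base64,SGVsbG8=').read()
#     b'Hello'
# (a byte string; plain str on Python 2)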

try:
    compat_basestring = basestring  # Python 2
except NameError:
    compat_basestring = str

try:
    compat_chr = unichr  # Python 2
except NameError:
    compat_chr = chr

try:
    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError:  # Python 2.6
    from xml.parsers.expat import ExpatError as compat_xml_parse_error

if sys.version_info[0] >= 3:
    compat_etree_fromstring = xml.etree.ElementTree.fromstring
else:
    # python 2.x tries to encode unicode strings with ascii (see the
    # XMLParser._fixtext method)
    etree = xml.etree.ElementTree

    try:
        _etree_iter = etree.Element.iter
    except AttributeError:  # Python <=2.6
        def _etree_iter(root):
            for el in root.findall('*'):
                yield el
                for sub in _etree_iter(el):
                    yield sub

    # on 2.6 XML doesn't have a parser argument, function copied from CPython
    # 2.7 source
    def _XML(text, parser=None):
        if not parser:
            parser = etree.XMLParser(target=etree.TreeBuilder())
        parser.feed(text)
        return parser.close()

    def _element_factory(*args, **kwargs):
        el = etree.Element(*args, **kwargs)
        for k, v in el.items():
            if isinstance(v, bytes):
                el.set(k, v.decode('utf-8'))
        return el

    def compat_etree_fromstring(text):
        doc = _XML(text, parser=etree.XMLParser(target=etree.TreeBuilder(element_factory=_element_factory)))
        for el in _etree_iter(doc):
            if el.text is not None and isinstance(el.text, bytes):
                el.text = el.text.decode('utf-8')
        return doc

if sys.version_info < (2, 7):
    # Here comes the crazy part: In 2.6, if the xpath is a unicode,
    # .//node does not match if a node is a direct child of . !
    def compat_xpath(xpath):
        if isinstance(xpath, compat_str):
            xpath = xpath.encode('ascii')
        return xpath
else:
    compat_xpath = lambda xpath: xpath

try:
    from urllib.parse import parse_qs as compat_parse_qs
except ImportError:  # Python 2
    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
    # Python 2's version is apparently totally broken
    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                   encoding='utf-8', errors='replace'):
        qs, _coerce_result = qs, compat_str
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value and not strict_parsing:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                if strict_parsing:
                    raise ValueError('bad query field: %r' % (name_value,))
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                name = nv[0].replace('+', ' ')
                name = compat_urllib_parse_unquote(
                    name, encoding=encoding, errors=errors)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = compat_urllib_parse_unquote(
                    value, encoding=encoding, errors=errors)
                value = _coerce_result(value)
                r.append((name, value))
        return r

    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
                        encoding='utf-8', errors='replace'):
        parsed_result = {}
        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
                           encoding=encoding, errors=errors)
        for name, value in pairs:
            if name in parsed_result:
                parsed_result[name].append(value)
            else:
                parsed_result[name] = [value]
        return parsed_result
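
# Illustrative usage (added; not in the original source). Repeated keys are
# collected into lists; blank values are dropped unless keep_blank_values is
# set:
#     >>> compat_parse_qs('a=1&a=2&b=')
#     {'a': ['1', '2']}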

try:
    from shlex import quote as shlex_quote
except ImportError:  # Python < 3.3
    def shlex_quote(s):
        if re.match(r'^[-_\w./]+$', s):
            return s
        else:
            return "'" + s.replace("'", "'\"'\"'") + "'"

if sys.version_info >= (2, 7, 3):
    compat_shlex_split = shlex.split
else:
    # Working around shlex issue with unicode strings on some python 2
    # versions (see http://bugs.python.org/issue1548891)
    def compat_shlex_split(s, comments=False, posix=True):
        if isinstance(s, compat_str):
            s = s.encode('utf-8')
        return shlex.split(s, comments, posix)


def compat_ord(c):
    if type(c) is int:
        return c
    else:
        return ord(c)
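
# Illustrative usage (added; not in the original source):
#     >>> compat_ord(b'\x0a'[0])    # indexing bytes gives an int on 3.x, a str on 2.x
#     10
#     >>> shlex_quote('file name.txt')
#     "'file name.txt'"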

compat_os_name = os._name if os.name == 'java' else os.name

if sys.version_info >= (3, 0):
    compat_getenv = os.getenv
    compat_expanduser = os.path.expanduser
else:
    # Environment variables should be decoded with filesystem encoding.
    # Otherwise it will fail if any non-ASCII characters are present (see #3854 #3217 #2918)
    def compat_getenv(key, default=None):
        from .utils import get_filesystem_encoding
        env = os.getenv(key, default)
        if env:
            env = env.decode(get_filesystem_encoding())
        return env

    # HACK: The default implementations of os.path.expanduser from cpython do not decode
    # environment variables with filesystem encoding. We will work around this by
    # providing adjusted implementations.
    # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
    # for different platforms with correct environment variables decoding.

    if compat_os_name == 'posix':
        def compat_expanduser(path):
            """Expand ~ and ~user constructions.  If user or $HOME is unknown,
            do nothing."""
            if not path.startswith('~'):
                return path
            i = path.find('/', 1)
            if i < 0:
                i = len(path)
            if i == 1:
                if 'HOME' not in os.environ:
                    import pwd
                    userhome = pwd.getpwuid(os.getuid()).pw_dir
                else:
                    userhome = compat_getenv('HOME')
            else:
                import pwd
                try:
                    pwent = pwd.getpwnam(path[1:i])
                except KeyError:
                    return path
                userhome = pwent.pw_dir
            userhome = userhome.rstrip('/')
            return (userhome + path[i:]) or '/'
    elif compat_os_name == 'nt' or compat_os_name == 'ce':
        def compat_expanduser(path):
            """Expand ~ and ~user constructs.

            If user or $HOME is unknown, do nothing."""
            if path[:1] != '~':
                return path
            i, n = 1, len(path)
            while i < n and path[i] not in '/\\':
                i = i + 1

            if 'HOME' in os.environ:
                userhome = compat_getenv('HOME')
            elif 'USERPROFILE' in os.environ:
                userhome = compat_getenv('USERPROFILE')
            elif 'HOMEPATH' not in os.environ:
                return path
            else:
                try:
                    drive = compat_getenv('HOMEDRIVE')
                except KeyError:
                    drive = ''
                userhome = os.path.join(drive, compat_getenv('HOMEPATH'))

            if i != 1:  # ~user
                userhome = os.path.join(os.path.dirname(userhome), path[1:i])

            return userhome + path[i:]
    else:
        compat_expanduser = os.path.expanduser
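
# Illustrative usage (added; the exact result depends on the environment):
#     >>> compat_expanduser('~/.config')
#     '/home/user/.config'   # e.g. on POSIX with HOME=/home/user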

if sys.version_info < (3, 0):
    def compat_print(s):
        from .utils import preferredencoding
        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
    def compat_print(s):
        assert isinstance(s, compat_str)
        print(s)

try:
    subprocess_check_output = subprocess.check_output
except AttributeError:
    def subprocess_check_output(*args, **kwargs):
        assert 'input' not in kwargs
        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
        output, _ = p.communicate()
        ret = p.poll()
        if ret:
            raise subprocess.CalledProcessError(ret, p.args, output=output)
        return output

if sys.version_info < (3, 0) and sys.platform == 'win32':
    def compat_getpass(prompt, *args, **kwargs):
        if isinstance(prompt, compat_str):
            from .utils import preferredencoding
            prompt = prompt.encode(preferredencoding())
        return getpass.getpass(prompt, *args, **kwargs)
else:
    compat_getpass = getpass.getpass

# Python < 2.6.5 requires kwargs keys to be bytes
try:
    def _testfunc(x):
        pass
    _testfunc(**{'x': 0})
except TypeError:
    def compat_kwargs(kwargs):
        return dict((bytes(k), v) for k, v in kwargs.items())
else:
    compat_kwargs = lambda kwargs: kwargs
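
# Illustrative usage (added; not in the original source): wrap a keyword dict
# before ** expansion so very old interpreters accept unicode keys:
#     >>> def _f(**kw):
#     ...     return kw
#     >>> _f(**compat_kwargs({'x': 1}))
#     {'x': 1}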

if sys.version_info < (2, 7):
    def compat_socket_create_connection(address, timeout, source_address=None):
        host, port = address
        err = None
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                sock.settimeout(timeout)
                if source_address:
                    sock.bind(source_address)
                sock.connect(sa)
                return sock
            except socket.error as _:
                err = _
                if sock is not None:
                    sock.close()
        if err is not None:
            raise err
        else:
            raise socket.error('getaddrinfo returns an empty list')
else:
    compat_socket_create_connection = socket.create_connection

# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
    op = optparse.OptionParser()
    og = optparse.OptionGroup(op, 'foo')
    try:
        og.add_option('-t')
    except TypeError:
        real_add_option = optparse.OptionGroup.add_option

        def _compat_add_option(self, *args, **kwargs):
            enc = lambda v: (
                v.encode('ascii', 'replace') if isinstance(v, compat_str)
                else v)
            bargs = [enc(a) for a in args]
            bkwargs = dict(
                (k, enc(v)) for k, v in kwargs.items())
            return real_add_option(self, *bargs, **bkwargs)
        optparse.OptionGroup.add_option = _compat_add_option

if hasattr(shutil, 'get_terminal_size'):  # Python >= 3.3
    compat_get_terminal_size = shutil.get_terminal_size
else:
    _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])

    def compat_get_terminal_size(fallback=(80, 24)):
        columns = compat_getenv('COLUMNS')
        if columns:
            columns = int(columns)
        else:
            columns = None
        lines = compat_getenv('LINES')
        if lines:
            lines = int(lines)
        else:
            lines = None

        if columns is None or lines is None or columns <= 0 or lines <= 0:
            try:
                sp = subprocess.Popen(
                    ['stty', 'size'],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, err = sp.communicate()
                _lines, _columns = map(int, out.split())
            except Exception:
                _columns, _lines = _terminal_size(*fallback)

            if columns is None or columns <= 0:
                columns = _columns
            if lines is None or lines <= 0:
                lines = _lines
        return _terminal_size(columns, lines)
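
# Illustrative usage (added; values depend on the terminal, falling back to
# 80x24 when no size can be determined):
#     >>> size = compat_get_terminal_size()
#     >>> size.columns, size.lines
#     (80, 24)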

try:
    itertools.count(start=0, step=1)
    compat_itertools_count = itertools.count
except TypeError:  # Python 2.6
    def compat_itertools_count(start=0, step=1):
        n = start
        while True:
            yield n
            n += step
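
# Illustrative usage (added; not in the original source):
#     >>> counter = compat_itertools_count(start=5, step=10)
#     >>> next(counter), next(counter), next(counter)
#     (5, 15, 25)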

if sys.version_info >= (3, 0):
    from tokenize import tokenize as compat_tokenize_tokenize
else:
    from tokenize import generate_tokens as compat_tokenize_tokenize

__all__ = [
    'compat_HTMLParser',
    'compat_HTTPError',
    'compat_basestring',
    'compat_chr',
    'compat_cookiejar',
    'compat_cookies',
    'compat_etree_fromstring',
    'compat_expanduser',
    'compat_get_terminal_size',
    'compat_getenv',
    'compat_getpass',
    'compat_html_entities',
    'compat_http_client',
    'compat_http_server',
    'compat_itertools_count',
    'compat_kwargs',
    'compat_ord',
    'compat_os_name',
    'compat_parse_qs',
    'compat_print',
    'compat_shlex_split',
    'compat_socket_create_connection',
    'compat_str',
    'compat_subprocess_get_DEVNULL',
    'compat_tokenize_tokenize',
    'compat_urllib_error',
    'compat_urllib_parse',
    'compat_urllib_parse_unquote',
    'compat_urllib_parse_unquote_plus',
    'compat_urllib_parse_unquote_to_bytes',
    'compat_urllib_parse_urlencode',
    'compat_urllib_parse_urlparse',
    'compat_urllib_request',
    'compat_urllib_request_DataHandler',
    'compat_urllib_response',
    'compat_urlparse',
    'compat_urlretrieve',
    'compat_xml_parse_error',
    'compat_xpath',
    'shlex_quote',
    'subprocess_check_output',
    'workaround_optparse_bug9161',
]