  1. # coding: utf-8
  2. import collections
  3. import errno
  4. import io
  5. import itertools
  6. import json
  7. import os.path
  8. import re
  9. import string
  10. import struct
  11. import traceback
  12. import zlib
  13. from .common import InfoExtractor, SearchInfoExtractor
  14. from .subtitles import SubtitlesInfoExtractor
  15. from ..utils import (
  16. compat_chr,
  17. compat_parse_qs,
  18. compat_urllib_parse,
  19. compat_urllib_request,
  20. compat_urlparse,
  21. compat_str,
  22. clean_html,
  23. get_cachedir,
  24. get_element_by_id,
  25. get_element_by_attribute,
  26. ExtractorError,
  27. int_or_none,
  28. PagedList,
  29. unescapeHTML,
  30. unified_strdate,
  31. orderedSet,
  32. write_json_file,
  33. uppercase_escape,
  34. )
  35. class YoutubeBaseInfoExtractor(InfoExtractor):
  36. """Provide base functions for Youtube extractors"""
  37. _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
  38. _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
  39. _AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
  40. _NETRC_MACHINE = 'youtube'
  41. # If True it will raise an error if no login info is provided
  42. _LOGIN_REQUIRED = False
  43. def _set_language(self):
  44. return bool(self._download_webpage(
  45. self._LANG_URL, None,
  46. note=u'Setting language', errnote='unable to set language',
  47. fatal=False))
  48. def _login(self):
  49. (username, password) = self._get_login_info()
  50. # No authentication to be performed
  51. if username is None:
  52. if self._LOGIN_REQUIRED:
  53. raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
  54. return False
  55. login_page = self._download_webpage(
  56. self._LOGIN_URL, None,
  57. note=u'Downloading login page',
  58. errnote=u'unable to fetch login page', fatal=False)
  59. if login_page is False:
  60. return
  61. galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
  62. login_page, u'Login GALX parameter')
  63. # Log in
  64. login_form_strs = {
  65. u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
  66. u'Email': username,
  67. u'GALX': galx,
  68. u'Passwd': password,
  69. u'PersistentCookie': u'yes',
  70. u'_utf8': u'',
  71. u'bgresponse': u'js_disabled',
  72. u'checkConnection': u'',
  73. u'checkedDomains': u'youtube',
  74. u'dnConn': u'',
  75. u'pstMsg': u'0',
  76. u'rmShown': u'1',
  77. u'secTok': u'',
  78. u'signIn': u'Sign in',
  79. u'timeStmp': u'',
  80. u'service': u'youtube',
  81. u'uilel': u'3',
  82. u'hl': u'en_US',
  83. }
  84. # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
  85. # chokes on unicode
  86. login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
  87. login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
  88. req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
  89. login_results = self._download_webpage(
  90. req, None,
  91. note=u'Logging in', errnote=u'unable to log in', fatal=False)
  92. if login_results is False:
  93. return False
  94. if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
  95. self._downloader.report_warning(u'unable to log in: bad username or password')
  96. return False
  97. return True
  98. def _confirm_age(self):
  99. age_form = {
  100. 'next_url': '/',
  101. 'action_confirm': 'Confirm',
  102. }
  103. req = compat_urllib_request.Request(self._AGE_URL,
  104. compat_urllib_parse.urlencode(age_form).encode('ascii'))
  105. self._download_webpage(
  106. req, None,
  107. note=u'Confirming age', errnote=u'Unable to confirm age')
  108. return True
  109. def _real_initialize(self):
  110. if self._downloader is None:
  111. return
  112. if not self._set_language():
  113. return
  114. if not self._login():
  115. return
  116. self._confirm_age()
  117. class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
  118. IE_DESC = u'YouTube.com'
  119. _VALID_URL = r"""(?x)^
  120. (
  121. (?:https?://|//)? # http(s):// or protocol-independent URL (optional)
  122. (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
  123. (?:www\.)?deturl\.com/www\.youtube\.com/|
  124. (?:www\.)?pwnyoutube\.com/|
  125. (?:www\.)?yourepeat\.com/|
  126. tube\.majestyc\.net/|
  127. youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
  128. (?:.*?\#/)? # handle anchor (#/) redirect urls
  129. (?: # the various things that can precede the ID:
  130. (?:(?:v|embed|e)/) # v/ or embed/ or e/
  131. |(?: # or the v= param in all its forms
  132. (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
  133. (?:\?|\#!?) # the params delimiter ? or # or #!
  134. (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
  135. v=
  136. )
  137. ))
  138. |youtu\.be/ # just youtu.be/xxxx
  139. )
  140. )? # all until now is optional -> you can pass the naked ID
141. ([0-9A-Za-z_-]{11}) # here it is! the YouTube video ID
  142. (?(1).+)? # if we found the ID, everything can follow
  143. $"""
  144. _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
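# Known itags and the metadata used to annotate formats extracted from the stream maps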
  145. _formats = {
  146. '5': {'ext': 'flv', 'width': 400, 'height': 240},
  147. '6': {'ext': 'flv', 'width': 450, 'height': 270},
  148. '13': {'ext': '3gp'},
  149. '17': {'ext': '3gp', 'width': 176, 'height': 144},
  150. '18': {'ext': 'mp4', 'width': 640, 'height': 360},
  151. '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
  152. '34': {'ext': 'flv', 'width': 640, 'height': 360},
  153. '35': {'ext': 'flv', 'width': 854, 'height': 480},
  154. '36': {'ext': '3gp', 'width': 320, 'height': 240},
  155. '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
  156. '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
  157. '43': {'ext': 'webm', 'width': 640, 'height': 360},
  158. '44': {'ext': 'webm', 'width': 854, 'height': 480},
  159. '45': {'ext': 'webm', 'width': 1280, 'height': 720},
  160. '46': {'ext': 'webm', 'width': 1920, 'height': 1080},
  161. # 3d videos
  162. '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
  163. '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
  164. '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
  165. '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
  166. '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
  167. '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
  168. '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},
  169. # Apple HTTP Live Streaming
  170. '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
  171. '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
  172. '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
  173. '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
  174. '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
  175. '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
  176. '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},
  177. # DASH mp4 video
  178. '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
  179. '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
  180. '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
  181. '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
  182. '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
  183. '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
  184. '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
  185. '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
  186. # Dash mp4 audio
  187. '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
  188. '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50},
  189. '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},
  190. # Dash webm
191. '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
192. '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
193. '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
194. '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
195. '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
196. '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
  197. '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH webm', 'preference': -40},
  198. '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH webm', 'preference': -40},
  199. '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH webm', 'preference': -40},
  200. '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH webm', 'preference': -40},
  201. '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH webm', 'preference': -40},
  202. '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH webm', 'preference': -40},
  203. '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH webm', 'preference': -40},
  204. # Dash webm audio
  205. '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH webm audio', 'abr': 48, 'preference': -50},
  206. '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH webm audio', 'abr': 256, 'preference': -50},
  207. # RTMP (unnamed)
  208. '_rtmp': {'protocol': 'rtmp'},
  209. }
  210. IE_NAME = u'youtube'
  211. _TESTS = [
  212. {
  213. u"url": u"http://www.youtube.com/watch?v=BaW_jenozKc",
  214. u"file": u"BaW_jenozKc.mp4",
  215. u"info_dict": {
  216. u"title": u"youtube-dl test video \"'/\\ä↭𝕐",
  217. u"uploader": u"Philipp Hagemeister",
  218. u"uploader_id": u"phihag",
  219. u"upload_date": u"20121002",
  220. u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
  221. }
  222. },
  223. {
  224. u"url": u"http://www.youtube.com/watch?v=UxxajLWwzqY",
  225. u"file": u"UxxajLWwzqY.mp4",
  226. u"note": u"Test generic use_cipher_signature video (#897)",
  227. u"info_dict": {
  228. u"upload_date": u"20120506",
  229. u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]",
  230. u"description": u"md5:5b292926389560516e384ac437c0ec07",
  231. u"uploader": u"Icona Pop",
  232. u"uploader_id": u"IconaPop"
  233. }
  234. },
  235. {
  236. u"url": u"https://www.youtube.com/watch?v=07FYdnEawAQ",
  237. u"file": u"07FYdnEawAQ.mp4",
  238. u"note": u"Test VEVO video with age protection (#956)",
  239. u"info_dict": {
  240. u"upload_date": u"20130703",
  241. u"title": u"Justin Timberlake - Tunnel Vision (Explicit)",
  242. u"description": u"md5:64249768eec3bc4276236606ea996373",
  243. u"uploader": u"justintimberlakeVEVO",
  244. u"uploader_id": u"justintimberlakeVEVO"
  245. }
  246. },
  247. {
  248. u"url": u"//www.YouTube.com/watch?v=yZIXLfi8CZQ",
  249. u"file": u"yZIXLfi8CZQ.mp4",
  250. u"note": u"Embed-only video (#1746)",
  251. u"info_dict": {
  252. u"upload_date": u"20120608",
  253. u"title": u"Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012",
  254. u"description": u"md5:09b78bd971f1e3e289601dfba15ca4f7",
  255. u"uploader": u"SET India",
  256. u"uploader_id": u"setindia"
  257. }
  258. },
  259. {
  260. u"url": u"http://www.youtube.com/watch?v=a9LDPn-MO4I",
  261. u"file": u"a9LDPn-MO4I.m4a",
  262. u"note": u"256k DASH audio (format 141) via DASH manifest",
  263. u"info_dict": {
  264. u"upload_date": "20121002",
  265. u"uploader_id": "8KVIDEO",
  266. u"description": "No description available.",
  267. u"uploader": "8KVIDEO",
  268. u"title": "UHDTV TEST 8K VIDEO.mp4"
  269. },
  270. u"params": {
  271. u"youtube_include_dash_manifest": True,
  272. u"format": "141",
  273. },
  274. },
  275. # DASH manifest with encrypted signature
  276. {
  277. u'url': u'https://www.youtube.com/watch?v=IB3lcPjvWLA',
  278. u'info_dict': {
  279. u'id': u'IB3lcPjvWLA',
  280. u'ext': u'm4a',
  281. u'title': u'Afrojack - The Spark ft. Spree Wilson',
  282. u'description': u'md5:3199ed45ee8836572865580804d7ac0f',
  283. u'uploader': u'AfrojackVEVO',
  284. u'uploader_id': u'AfrojackVEVO',
  285. u'upload_date': u'20131011',
  286. },
  287. u"params": {
  288. u'youtube_include_dash_manifest': True,
  289. u'format': '141',
  290. },
  291. },
  292. ]
  293. @classmethod
  294. def suitable(cls, url):
  295. """Receives a URL and returns True if suitable for this IE."""
  296. if YoutubePlaylistIE.suitable(url): return False
  297. return re.match(cls._VALID_URL, url) is not None
  298. def __init__(self, *args, **kwargs):
  299. super(YoutubeIE, self).__init__(*args, **kwargs)
  300. self._player_cache = {}
  301. def report_video_info_webpage_download(self, video_id):
  302. """Report attempt to download video info webpage."""
  303. self.to_screen(u'%s: Downloading video info webpage' % video_id)
  304. def report_information_extraction(self, video_id):
  305. """Report attempt to extract video information."""
  306. self.to_screen(u'%s: Extracting video information' % video_id)
  307. def report_unavailable_format(self, video_id, format):
  308. """Report extracted video URL."""
  309. self.to_screen(u'%s: Format %s not available' % (video_id, format))
  310. def report_rtmp_download(self):
  311. """Indicate the download will use the RTMP protocol."""
  312. self.to_screen(u'RTMP download detected')
  313. def _extract_signature_function(self, video_id, player_url, slen):
  314. id_m = re.match(r'.*-(?P<id>[a-zA-Z0-9_-]+)\.(?P<ext>[a-z]+)$',
  315. player_url)
  316. player_type = id_m.group('ext')
  317. player_id = id_m.group('id')
  318. # Read from filesystem cache
  319. func_id = '%s_%s_%d' % (player_type, player_id, slen)
  320. assert os.path.basename(func_id) == func_id
  321. cache_dir = get_cachedir(self._downloader.params)
  322. cache_enabled = cache_dir is not None
  323. if cache_enabled:
  324. cache_fn = os.path.join(os.path.expanduser(cache_dir),
  325. u'youtube-sigfuncs',
  326. func_id + '.json')
  327. try:
  328. with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
  329. cache_spec = json.load(cachef)
  330. return lambda s: u''.join(s[i] for i in cache_spec)
  331. except IOError:
  332. pass # No cache available
  333. if player_type == 'js':
  334. code = self._download_webpage(
  335. player_url, video_id,
  336. note=u'Downloading %s player %s' % (player_type, player_id),
  337. errnote=u'Download of %s failed' % player_url)
  338. res = self._parse_sig_js(code)
  339. elif player_type == 'swf':
  340. urlh = self._request_webpage(
  341. player_url, video_id,
  342. note=u'Downloading %s player %s' % (player_type, player_id),
  343. errnote=u'Download of %s failed' % player_url)
  344. code = urlh.read()
  345. res = self._parse_sig_swf(code)
  346. else:
  347. assert False, 'Invalid player type %r' % player_type
  348. if cache_enabled:
  349. try:
  350. test_string = u''.join(map(compat_chr, range(slen)))
  351. cache_res = res(test_string)
  352. cache_spec = [ord(c) for c in cache_res]
  353. try:
  354. os.makedirs(os.path.dirname(cache_fn))
  355. except OSError as ose:
  356. if ose.errno != errno.EEXIST:
  357. raise
  358. write_json_file(cache_spec, cache_fn)
  359. except Exception:
  360. tb = traceback.format_exc()
  361. self._downloader.report_warning(
  362. u'Writing cache to %r failed: %s' % (cache_fn, tb))
  363. return res
  364. def _print_sig_code(self, func, slen):
  365. def gen_sig_code(idxs):
  366. def _genslice(start, end, step):
  367. starts = u'' if start == 0 else str(start)
  368. ends = (u':%d' % (end+step)) if end + step >= 0 else u':'
  369. steps = u'' if step == 1 else (u':%d' % step)
  370. return u's[%s%s%s]' % (starts, ends, steps)
  371. step = None
372. start = '(Never used)' # Quell pyflakes warnings - start will be
  373. # set as soon as step is set
  374. for i, prev in zip(idxs[1:], idxs[:-1]):
  375. if step is not None:
  376. if i - prev == step:
  377. continue
  378. yield _genslice(start, prev, step)
  379. step = None
  380. continue
  381. if i - prev in [-1, 1]:
  382. step = i - prev
  383. start = prev
  384. continue
  385. else:
  386. yield u's[%d]' % prev
  387. if step is None:
  388. yield u's[%d]' % i
  389. else:
  390. yield _genslice(start, i, step)
  391. test_string = u''.join(map(compat_chr, range(slen)))
  392. cache_res = func(test_string)
  393. cache_spec = [ord(c) for c in cache_res]
  394. expr_code = u' + '.join(gen_sig_code(cache_spec))
  395. code = u'if len(s) == %d:\n return %s\n' % (slen, expr_code)
  396. self.to_screen(u'Extracted signature function:\n' + code)
  397. def _parse_sig_js(self, jscode):
  398. funcname = self._search_regex(
  399. r'signature=([a-zA-Z]+)', jscode,
  400. u'Initial JS player signature function name')
  401. functions = {}
  402. def argidx(varname):
  403. return string.lowercase.index(varname)
  404. def interpret_statement(stmt, local_vars, allow_recursion=20):
  405. if allow_recursion < 0:
  406. raise ExtractorError(u'Recursion limit reached')
  407. if stmt.startswith(u'var '):
  408. stmt = stmt[len(u'var '):]
  409. ass_m = re.match(r'^(?P<out>[a-z]+)(?:\[(?P<index>[^\]]+)\])?' +
  410. r'=(?P<expr>.*)$', stmt)
  411. if ass_m:
  412. if ass_m.groupdict().get('index'):
  413. def assign(val):
  414. lvar = local_vars[ass_m.group('out')]
  415. idx = interpret_expression(ass_m.group('index'),
  416. local_vars, allow_recursion)
  417. assert isinstance(idx, int)
  418. lvar[idx] = val
  419. return val
  420. expr = ass_m.group('expr')
  421. else:
  422. def assign(val):
  423. local_vars[ass_m.group('out')] = val
  424. return val
  425. expr = ass_m.group('expr')
  426. elif stmt.startswith(u'return '):
  427. assign = lambda v: v
  428. expr = stmt[len(u'return '):]
  429. else:
  430. raise ExtractorError(
  431. u'Cannot determine left side of statement in %r' % stmt)
  432. v = interpret_expression(expr, local_vars, allow_recursion)
  433. return assign(v)
  434. def interpret_expression(expr, local_vars, allow_recursion):
  435. if expr.isdigit():
  436. return int(expr)
  437. if expr.isalpha():
  438. return local_vars[expr]
  439. m = re.match(r'^(?P<in>[a-z]+)\.(?P<member>.*)$', expr)
  440. if m:
  441. member = m.group('member')
  442. val = local_vars[m.group('in')]
  443. if member == 'split("")':
  444. return list(val)
  445. if member == 'join("")':
  446. return u''.join(val)
  447. if member == 'length':
  448. return len(val)
  449. if member == 'reverse()':
  450. return val[::-1]
  451. slice_m = re.match(r'slice\((?P<idx>.*)\)', member)
  452. if slice_m:
  453. idx = interpret_expression(
  454. slice_m.group('idx'), local_vars, allow_recursion-1)
  455. return val[idx:]
  456. m = re.match(
  457. r'^(?P<in>[a-z]+)\[(?P<idx>.+)\]$', expr)
  458. if m:
  459. val = local_vars[m.group('in')]
  460. idx = interpret_expression(m.group('idx'), local_vars,
  461. allow_recursion-1)
  462. return val[idx]
  463. m = re.match(r'^(?P<a>.+?)(?P<op>[%])(?P<b>.+?)$', expr)
  464. if m:
  465. a = interpret_expression(m.group('a'),
  466. local_vars, allow_recursion)
  467. b = interpret_expression(m.group('b'),
  468. local_vars, allow_recursion)
  469. return a % b
  470. m = re.match(
  471. r'^(?P<func>[a-zA-Z$]+)\((?P<args>[a-z0-9,]+)\)$', expr)
  472. if m:
  473. fname = m.group('func')
  474. if fname not in functions:
  475. functions[fname] = extract_function(fname)
  476. argvals = [int(v) if v.isdigit() else local_vars[v]
  477. for v in m.group('args').split(',')]
  478. return functions[fname](argvals)
  479. raise ExtractorError(u'Unsupported JS expression %r' % expr)
  480. def extract_function(funcname):
  481. func_m = re.search(
  482. r'function ' + re.escape(funcname) +
  483. r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
  484. jscode)
  485. argnames = func_m.group('args').split(',')
  486. def resf(args):
  487. local_vars = dict(zip(argnames, args))
  488. for stmt in func_m.group('code').split(';'):
  489. res = interpret_statement(stmt, local_vars)
  490. return res
  491. return resf
  492. initial_function = extract_function(funcname)
  493. return lambda s: initial_function([s])
  494. def _parse_sig_swf(self, file_contents):
  495. if file_contents[1:3] != b'WS':
  496. raise ExtractorError(
  497. u'Not an SWF file; header is %r' % file_contents[:3])
  498. if file_contents[:1] == b'C':
  499. content = zlib.decompress(file_contents[8:])
  500. else:
  501. raise NotImplementedError(u'Unsupported compression format %r' %
  502. file_contents[:1])
  503. def extract_tags(content):
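# Each SWF tag begins with a 16-bit header: the upper 10 bits are the tag code,
# the lower 6 bits the length; a length of 0x3f means a 32-bit length field follows.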
  504. pos = 0
  505. while pos < len(content):
  506. header16 = struct.unpack('<H', content[pos:pos+2])[0]
  507. pos += 2
  508. tag_code = header16 >> 6
  509. tag_len = header16 & 0x3f
  510. if tag_len == 0x3f:
  511. tag_len = struct.unpack('<I', content[pos:pos+4])[0]
  512. pos += 4
  513. assert pos+tag_len <= len(content)
  514. yield (tag_code, content[pos:pos+tag_len])
  515. pos += tag_len
  516. code_tag = next(tag
  517. for tag_code, tag in extract_tags(content)
  518. if tag_code == 82)
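# Tag code 82 is DoABC: 4 bytes of flags, a null-terminated name, then the ABC bytecode body.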
  519. p = code_tag.index(b'\0', 4) + 1
  520. code_reader = io.BytesIO(code_tag[p:])
  521. # Parse ABC (AVM2 ByteCode)
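# ABC stores integers as variable-length values: up to five bytes, seven payload bits
# per byte, with the high bit set on every byte except the last.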
  522. def read_int(reader=None):
  523. if reader is None:
  524. reader = code_reader
  525. res = 0
  526. shift = 0
  527. for _ in range(5):
  528. buf = reader.read(1)
  529. assert len(buf) == 1
  530. b = struct.unpack('<B', buf)[0]
  531. res = res | ((b & 0x7f) << shift)
  532. if b & 0x80 == 0:
  533. break
  534. shift += 7
  535. return res
  536. def u30(reader=None):
  537. res = read_int(reader)
  538. assert res & 0xf0000000 == 0
  539. return res
  540. u32 = read_int
  541. def s32(reader=None):
  542. v = read_int(reader)
  543. if v & 0x80000000 != 0:
  544. v = - ((v ^ 0xffffffff) + 1)
  545. return v
  546. def read_string(reader=None):
  547. if reader is None:
  548. reader = code_reader
  549. slen = u30(reader)
  550. resb = reader.read(slen)
  551. assert len(resb) == slen
  552. return resb.decode('utf-8')
  553. def read_bytes(count, reader=None):
  554. if reader is None:
  555. reader = code_reader
  556. resb = reader.read(count)
  557. assert len(resb) == count
  558. return resb
  559. def read_byte(reader=None):
  560. resb = read_bytes(1, reader=reader)
  561. res = struct.unpack('<B', resb)[0]
  562. return res
  563. # minor_version + major_version
  564. read_bytes(2 + 2)
  565. # Constant pool
  566. int_count = u30()
  567. for _c in range(1, int_count):
  568. s32()
  569. uint_count = u30()
  570. for _c in range(1, uint_count):
  571. u32()
  572. double_count = u30()
  573. read_bytes((double_count-1) * 8)
  574. string_count = u30()
  575. constant_strings = [u'']
  576. for _c in range(1, string_count):
  577. s = read_string()
  578. constant_strings.append(s)
  579. namespace_count = u30()
  580. for _c in range(1, namespace_count):
  581. read_bytes(1) # kind
  582. u30() # name
  583. ns_set_count = u30()
  584. for _c in range(1, ns_set_count):
  585. count = u30()
  586. for _c2 in range(count):
  587. u30()
  588. multiname_count = u30()
  589. MULTINAME_SIZES = {
  590. 0x07: 2, # QName
  591. 0x0d: 2, # QNameA
  592. 0x0f: 1, # RTQName
  593. 0x10: 1, # RTQNameA
  594. 0x11: 0, # RTQNameL
  595. 0x12: 0, # RTQNameLA
  596. 0x09: 2, # Multiname
  597. 0x0e: 2, # MultinameA
  598. 0x1b: 1, # MultinameL
  599. 0x1c: 1, # MultinameLA
  600. }
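# The table above gives how many u30 fields follow each multiname kind; only QName (0x07)
# carries a constant-pool name we need, the other kinds are skipped.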
  601. multinames = [u'']
  602. for _c in range(1, multiname_count):
  603. kind = u30()
  604. assert kind in MULTINAME_SIZES, u'Invalid multiname kind %r' % kind
  605. if kind == 0x07:
  606. u30() # namespace_idx
  607. name_idx = u30()
  608. multinames.append(constant_strings[name_idx])
  609. else:
  610. multinames.append('[MULTINAME kind: %d]' % kind)
  611. for _c2 in range(MULTINAME_SIZES[kind]):
  612. u30()
  613. # Methods
  614. method_count = u30()
  615. MethodInfo = collections.namedtuple(
  616. 'MethodInfo',
  617. ['NEED_ARGUMENTS', 'NEED_REST'])
  618. method_infos = []
  619. for method_id in range(method_count):
  620. param_count = u30()
  621. u30() # return type
  622. for _ in range(param_count):
  623. u30() # param type
  624. u30() # name index (always 0 for youtube)
  625. flags = read_byte()
  626. if flags & 0x08 != 0:
  627. # Options present
  628. option_count = u30()
  629. for c in range(option_count):
  630. u30() # val
  631. read_bytes(1) # kind
  632. if flags & 0x80 != 0:
  633. # Param names present
  634. for _ in range(param_count):
  635. u30() # param name
  636. mi = MethodInfo(flags & 0x01 != 0, flags & 0x04 != 0)
  637. method_infos.append(mi)
  638. # Metadata
  639. metadata_count = u30()
  640. for _c in range(metadata_count):
  641. u30() # name
  642. item_count = u30()
  643. for _c2 in range(item_count):
  644. u30() # key
  645. u30() # value
  646. def parse_traits_info():
  647. trait_name_idx = u30()
  648. kind_full = read_byte()
  649. kind = kind_full & 0x0f
  650. attrs = kind_full >> 4
  651. methods = {}
  652. if kind in [0x00, 0x06]: # Slot or Const
  653. u30() # Slot id
  654. u30() # type_name_idx
  655. vindex = u30()
  656. if vindex != 0:
  657. read_byte() # vkind
  658. elif kind in [0x01, 0x02, 0x03]: # Method / Getter / Setter
  659. u30() # disp_id
  660. method_idx = u30()
  661. methods[multinames[trait_name_idx]] = method_idx
  662. elif kind == 0x04: # Class
  663. u30() # slot_id
  664. u30() # classi
  665. elif kind == 0x05: # Function
  666. u30() # slot_id
  667. function_idx = u30()
  668. methods[function_idx] = multinames[trait_name_idx]
  669. else:
  670. raise ExtractorError(u'Unsupported trait kind %d' % kind)
  671. if attrs & 0x4 != 0: # Metadata present
  672. metadata_count = u30()
  673. for _c3 in range(metadata_count):
  674. u30() # metadata index
  675. return methods
  676. # Classes
  677. TARGET_CLASSNAME = u'SignatureDecipher'
  678. searched_idx = multinames.index(TARGET_CLASSNAME)
  679. searched_class_id = None
  680. class_count = u30()
  681. for class_id in range(class_count):
  682. name_idx = u30()
  683. if name_idx == searched_idx:
  684. # We found the class we're looking for!
  685. searched_class_id = class_id
  686. u30() # super_name idx
  687. flags = read_byte()
  688. if flags & 0x08 != 0: # Protected namespace is present
  689. u30() # protected_ns_idx
  690. intrf_count = u30()
  691. for _c2 in range(intrf_count):
  692. u30()
  693. u30() # iinit
  694. trait_count = u30()
  695. for _c2 in range(trait_count):
  696. parse_traits_info()
  697. if searched_class_id is None:
  698. raise ExtractorError(u'Target class %r not found' %
  699. TARGET_CLASSNAME)
  700. method_names = {}
  701. method_idxs = {}
  702. for class_id in range(class_count):
  703. u30() # cinit
  704. trait_count = u30()
  705. for _c2 in range(trait_count):
  706. trait_methods = parse_traits_info()
  707. if class_id == searched_class_id:
  708. method_names.update(trait_methods.items())
  709. method_idxs.update(dict(
  710. (idx, name)
  711. for name, idx in trait_methods.items()))
  712. # Scripts
  713. script_count = u30()
  714. for _c in range(script_count):
  715. u30() # init
  716. trait_count = u30()
  717. for _c2 in range(trait_count):
  718. parse_traits_info()
  719. # Method bodies
  720. method_body_count = u30()
  721. Method = collections.namedtuple('Method', ['code', 'local_count'])
  722. methods = {}
  723. for _c in range(method_body_count):
  724. method_idx = u30()
  725. u30() # max_stack
  726. local_count = u30()
  727. u30() # init_scope_depth
  728. u30() # max_scope_depth
  729. code_length = u30()
  730. code = read_bytes(code_length)
  731. if method_idx in method_idxs:
  732. m = Method(code, local_count)
  733. methods[method_idxs[method_idx]] = m
  734. exception_count = u30()
  735. for _c2 in range(exception_count):
  736. u30() # from
  737. u30() # to
  738. u30() # target
  739. u30() # exc_type
  740. u30() # var_name
  741. trait_count = u30()
  742. for _c2 in range(trait_count):
  743. parse_traits_info()
  744. assert p + code_reader.tell() == len(code_tag)
  745. assert len(methods) == len(method_idxs)
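# What follows is a minimal AVM2 interpreter: only the opcodes used by the signature
# cipher functions are implemented, anything else raises NotImplementedError.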
  746. method_pyfunctions = {}
  747. def extract_function(func_name):
  748. if func_name in method_pyfunctions:
  749. return method_pyfunctions[func_name]
  750. if func_name not in methods:
  751. raise ExtractorError(u'Cannot find function %r' % func_name)
  752. m = methods[func_name]
  753. def resfunc(args):
  754. registers = ['(this)'] + list(args) + [None] * m.local_count
  755. stack = []
  756. coder = io.BytesIO(m.code)
  757. while True:
  758. opcode = struct.unpack('!B', coder.read(1))[0]
  759. if opcode == 36: # pushbyte
  760. v = struct.unpack('!B', coder.read(1))[0]
  761. stack.append(v)
  762. elif opcode == 44: # pushstring
  763. idx = u30(coder)
  764. stack.append(constant_strings[idx])
  765. elif opcode == 48: # pushscope
  766. # We don't implement the scope register, so we'll just
  767. # ignore the popped value
  768. stack.pop()
  769. elif opcode == 70: # callproperty
  770. index = u30(coder)
  771. mname = multinames[index]
  772. arg_count = u30(coder)
  773. args = list(reversed(
  774. [stack.pop() for _ in range(arg_count)]))
  775. obj = stack.pop()
  776. if mname == u'split':
  777. assert len(args) == 1
  778. assert isinstance(args[0], compat_str)
  779. assert isinstance(obj, compat_str)
  780. if args[0] == u'':
  781. res = list(obj)
  782. else:
  783. res = obj.split(args[0])
  784. stack.append(res)
  785. elif mname == u'slice':
  786. assert len(args) == 1
  787. assert isinstance(args[0], int)
  788. assert isinstance(obj, list)
  789. res = obj[args[0]:]
  790. stack.append(res)
  791. elif mname == u'join':
  792. assert len(args) == 1
  793. assert isinstance(args[0], compat_str)
  794. assert isinstance(obj, list)
  795. res = args[0].join(obj)
  796. stack.append(res)
  797. elif mname in method_pyfunctions:
  798. stack.append(method_pyfunctions[mname](args))
  799. else:
  800. raise NotImplementedError(
  801. u'Unsupported property %r on %r'
  802. % (mname, obj))
  803. elif opcode == 72: # returnvalue
  804. res = stack.pop()
  805. return res
  806. elif opcode == 79: # callpropvoid
  807. index = u30(coder)
  808. mname = multinames[index]
  809. arg_count = u30(coder)
  810. args = list(reversed(
  811. [stack.pop() for _ in range(arg_count)]))
  812. obj = stack.pop()
  813. if mname == u'reverse':
  814. assert isinstance(obj, list)
  815. obj.reverse()
  816. else:
  817. raise NotImplementedError(
  818. u'Unsupported (void) property %r on %r'
  819. % (mname, obj))
  820. elif opcode == 93: # findpropstrict
  821. index = u30(coder)
  822. mname = multinames[index]
  823. res = extract_function(mname)
  824. stack.append(res)
  825. elif opcode == 97: # setproperty
  826. index = u30(coder)
  827. value = stack.pop()
  828. idx = stack.pop()
  829. obj = stack.pop()
  830. assert isinstance(obj, list)
  831. assert isinstance(idx, int)
  832. obj[idx] = value
  833. elif opcode == 98: # getlocal
  834. index = u30(coder)
  835. stack.append(registers[index])
  836. elif opcode == 99: # setlocal
  837. index = u30(coder)
  838. value = stack.pop()
  839. registers[index] = value
  840. elif opcode == 102: # getproperty
  841. index = u30(coder)
  842. pname = multinames[index]
  843. if pname == u'length':
  844. obj = stack.pop()
  845. assert isinstance(obj, list)
  846. stack.append(len(obj))
  847. else: # Assume attribute access
  848. idx = stack.pop()
  849. assert isinstance(idx, int)
  850. obj = stack.pop()
  851. assert isinstance(obj, list)
  852. stack.append(obj[idx])
  853. elif opcode == 128: # coerce
  854. u30(coder)
  855. elif opcode == 133: # coerce_s
  856. assert isinstance(stack[-1], (type(None), compat_str))
  857. elif opcode == 164: # modulo
  858. value2 = stack.pop()
  859. value1 = stack.pop()
  860. res = value1 % value2
  861. stack.append(res)
  862. elif opcode == 208: # getlocal_0
  863. stack.append(registers[0])
  864. elif opcode == 209: # getlocal_1
  865. stack.append(registers[1])
  866. elif opcode == 210: # getlocal_2
  867. stack.append(registers[2])
  868. elif opcode == 211: # getlocal_3
  869. stack.append(registers[3])
  870. elif opcode == 214: # setlocal_2
  871. registers[2] = stack.pop()
  872. elif opcode == 215: # setlocal_3
  873. registers[3] = stack.pop()
  874. else:
  875. raise NotImplementedError(
  876. u'Unsupported opcode %d' % opcode)
  877. method_pyfunctions[func_name] = resfunc
  878. return resfunc
  879. initial_function = extract_function(u'decipher')
  880. return lambda s: initial_function([s])
  881. def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
  882. """Turn the encrypted s field into a working signature"""
  883. if player_url is not None:
  884. if player_url.startswith(u'//'):
  885. player_url = u'https:' + player_url
  886. try:
  887. player_id = (player_url, len(s))
  888. if player_id not in self._player_cache:
  889. func = self._extract_signature_function(
  890. video_id, player_url, len(s)
  891. )
  892. self._player_cache[player_id] = func
  893. func = self._player_cache[player_id]
  894. if self._downloader.params.get('youtube_print_sig_code'):
  895. self._print_sig_code(func, len(s))
  896. return func(s)
  897. except Exception:
  898. tb = traceback.format_exc()
  899. self._downloader.report_warning(
  900. u'Automatic signature extraction failed: ' + tb)
  901. self._downloader.report_warning(
  902. u'Warning: Falling back to static signature algorithm')
  903. return self._static_decrypt_signature(
  904. s, video_id, player_url, age_gate)
  905. def _static_decrypt_signature(self, s, video_id, player_url, age_gate):
  906. if age_gate:
  907. # The videos with age protection use another player, so the
  908. # algorithms can be different.
  909. if len(s) == 86:
  910. return s[2:63] + s[82] + s[64:82] + s[63]
  911. if len(s) == 93:
  912. return s[86:29:-1] + s[88] + s[28:5:-1]
  913. elif len(s) == 92:
  914. return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83]
  915. elif len(s) == 91:
  916. return s[84:27:-1] + s[86] + s[26:5:-1]
  917. elif len(s) == 90:
  918. return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + s[78:81]
  919. elif len(s) == 89:
  920. return s[84:78:-1] + s[87] + s[77:60:-1] + s[0] + s[59:3:-1]
  921. elif len(s) == 88:
  922. return s[7:28] + s[87] + s[29:45] + s[55] + s[46:55] + s[2] + s[56:87] + s[28]
  923. elif len(s) == 87:
  924. return s[6:27] + s[4] + s[28:39] + s[27] + s[40:59] + s[2] + s[60:]
  925. elif len(s) == 86:
  926. return s[80:72:-1] + s[16] + s[71:39:-1] + s[72] + s[38:16:-1] + s[82] + s[15::-1]
  927. elif len(s) == 85:
  928. return s[3:11] + s[0] + s[12:55] + s[84] + s[56:84]
  929. elif len(s) == 84:
  930. return s[78:70:-1] + s[14] + s[69:37:-1] + s[70] + s[36:14:-1] + s[80] + s[:14][::-1]
  931. elif len(s) == 83:
  932. return s[80:63:-1] + s[0] + s[62:0:-1] + s[63]
  933. elif len(s) == 82:
  934. return s[80:37:-1] + s[7] + s[36:7:-1] + s[0] + s[6:0:-1] + s[37]
  935. elif len(s) == 81:
  936. return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
  937. elif len(s) == 80:
  938. return s[1:19] + s[0] + s[20:68] + s[19] + s[69:80]
  939. elif len(s) == 79:
  940. return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
  941. else:
  942. raise ExtractorError(u'Unable to decrypt signature, key length %d not supported; retrying might work' % (len(s)))
  943. def _get_available_subtitles(self, video_id, webpage):
  944. try:
  945. sub_list = self._download_webpage(
  946. 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
  947. video_id, note=False)
  948. except ExtractorError as err:
  949. self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
  950. return {}
  951. lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
  952. sub_lang_list = {}
  953. for l in lang_list:
  954. lang = l[1]
  955. params = compat_urllib_parse.urlencode({
  956. 'lang': lang,
  957. 'v': video_id,
  958. 'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
  959. 'name': unescapeHTML(l[0]).encode('utf-8'),
  960. })
  961. url = u'https://www.youtube.com/api/timedtext?' + params
  962. sub_lang_list[lang] = url
  963. if not sub_lang_list:
  964. self._downloader.report_warning(u'video doesn\'t have subtitles')
  965. return {}
  966. return sub_lang_list
  967. def _get_available_automatic_caption(self, video_id, webpage):
  968. """We need the webpage for getting the captions url, pass it as an
  969. argument to speed up the process."""
  970. sub_format = self._downloader.params.get('subtitlesformat', 'srt')
  971. self.to_screen(u'%s: Looking for automatic captions' % video_id)
  972. mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
  973. err_msg = u'Couldn\'t find automatic captions for %s' % video_id
  974. if mobj is None:
  975. self._downloader.report_warning(err_msg)
  976. return {}
  977. player_config = json.loads(mobj.group(1))
  978. try:
  979. args = player_config[u'args']
  980. caption_url = args[u'ttsurl']
  981. timestamp = args[u'timestamp']
  982. # We get the available subtitles
  983. list_params = compat_urllib_parse.urlencode({
  984. 'type': 'list',
  985. 'tlangs': 1,
  986. 'asrs': 1,
  987. })
  988. list_url = caption_url + '&' + list_params
  989. caption_list = self._download_xml(list_url, video_id)
  990. original_lang_node = caption_list.find('track')
  991. if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
  992. self._downloader.report_warning(u'Video doesn\'t have automatic captions')
  993. return {}
  994. original_lang = original_lang_node.attrib['lang_code']
  995. sub_lang_list = {}
  996. for lang_node in caption_list.findall('target'):
  997. sub_lang = lang_node.attrib['lang_code']
  998. params = compat_urllib_parse.urlencode({
  999. 'lang': original_lang,
  1000. 'tlang': sub_lang,
  1001. 'fmt': sub_format,
  1002. 'ts': timestamp,
  1003. 'kind': 'asr',
  1004. })
  1005. sub_lang_list[sub_lang] = caption_url + '&' + params
  1006. return sub_lang_list
1007. # An extractor error can be raised by the download process if there are
  1008. # no automatic captions but there are subtitles
  1009. except (KeyError, ExtractorError):
  1010. self._downloader.report_warning(err_msg)
  1011. return {}
  1012. @classmethod
  1013. def extract_id(cls, url):
  1014. mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
  1015. if mobj is None:
  1016. raise ExtractorError(u'Invalid URL: %s' % url)
  1017. video_id = mobj.group(2)
  1018. return video_id
  1019. def _extract_from_m3u8(self, manifest_url, video_id):
  1020. url_map = {}
  1021. def _get_urls(_manifest):
  1022. lines = _manifest.split('\n')
  1023. urls = filter(lambda l: l and not l.startswith('#'),
  1024. lines)
  1025. return urls
  1026. manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest')
  1027. formats_urls = _get_urls(manifest)
  1028. for format_url in formats_urls:
  1029. itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
  1030. url_map[itag] = format_url
  1031. return url_map
  1032. def _extract_annotations(self, video_id):
  1033. url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
  1034. return self._download_webpage(url, video_id, note=u'Searching for annotations.', errnote=u'Unable to download video annotations.')
  1035. def _real_extract(self, url):
  1036. proto = (
  1037. u'http' if self._downloader.params.get('prefer_insecure', False)
  1038. else u'https')
1039. # Extract the original video URL from a redirect URL (e.g. age verification) using the next_url parameter
  1040. mobj = re.search(self._NEXT_URL_RE, url)
  1041. if mobj:
  1042. url = proto + '://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
  1043. video_id = self.extract_id(url)
  1044. # Get video webpage
  1045. url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
  1046. video_webpage = self._download_webpage(url, video_id)
  1047. # Attempt to extract SWF player URL
  1048. mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
  1049. if mobj is not None:
  1050. player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
  1051. else:
  1052. player_url = None
  1053. # Get video info
  1054. self.report_video_info_webpage_download(video_id)
  1055. if re.search(r'player-age-gate-content">', video_webpage) is not None:
  1056. self.report_age_confirmation()
  1057. age_gate = True
1058. # We simulate access to the video from www.youtube.com/v/{video_id};
1059. # this page can be viewed without logging in to YouTube
  1060. data = compat_urllib_parse.urlencode({'video_id': video_id,
  1061. 'el': 'player_embedded',
  1062. 'gl': 'US',
  1063. 'hl': 'en',
  1064. 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
  1065. 'asv': 3,
  1066. 'sts':'1588',
  1067. })
  1068. video_info_url = proto + '://www.youtube.com/get_video_info?' + data
  1069. video_info_webpage = self._download_webpage(video_info_url, video_id,
  1070. note=False,
  1071. errnote='unable to download video info webpage')
  1072. video_info = compat_parse_qs(video_info_webpage)
  1073. else:
  1074. age_gate = False
  1075. for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
  1076. video_info_url = (proto + '://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
  1077. % (video_id, el_type))
  1078. video_info_webpage = self._download_webpage(video_info_url, video_id,
  1079. note=False,
  1080. errnote='unable to download video info webpage')
  1081. video_info = compat_parse_qs(video_info_webpage)
  1082. if 'token' in video_info:
  1083. break
  1084. if 'token' not in video_info:
  1085. if 'reason' in video_info:
  1086. raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0], expected=True)
  1087. else:
  1088. raise ExtractorError(u'"token" parameter not in video info for unknown reason')
  1089. if 'view_count' in video_info:
  1090. view_count = int(video_info['view_count'][0])
  1091. else:
  1092. view_count = None
  1093. # Check for "rental" videos
  1094. if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
  1095. raise ExtractorError(u'"rental" videos not supported')
  1096. # Start extracting information
  1097. self.report_information_extraction(video_id)
  1098. # uploader
  1099. if 'author' not in video_info:
  1100. raise ExtractorError(u'Unable to extract uploader name')
  1101. video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
  1102. # uploader_id
  1103. video_uploader_id = None
  1104. mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
  1105. if mobj is not None:
  1106. video_uploader_id = mobj.group(1)
  1107. else:
  1108. self._downloader.report_warning(u'unable to extract uploader nickname')
  1109. # title
  1110. if 'title' in video_info:
  1111. video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
  1112. else:
  1113. self._downloader.report_warning(u'Unable to extract video title')
  1114. video_title = u'_'
  1115. # thumbnail image
  1116. # We try first to get a high quality image:
  1117. m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
  1118. video_webpage, re.DOTALL)
  1119. if m_thumb is not None:
  1120. video_thumbnail = m_thumb.group(1)
  1121. elif 'thumbnail_url' not in video_info:
  1122. self._downloader.report_warning(u'unable to extract video thumbnail')
  1123. video_thumbnail = None
  1124. else: # don't panic if we can't find it
  1125. video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
  1126. # upload date
  1127. upload_date = None
  1128. mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
  1129. if mobj is not None:
  1130. upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
  1131. upload_date = unified_strdate(upload_date)
  1132. # description
  1133. video_description = get_element_by_id("eow-description", video_webpage)
  1134. if video_description:
  1135. video_description = re.sub(r'''(?x)
  1136. <a\s+
  1137. (?:[a-zA-Z-]+="[^"]+"\s+)*?
  1138. title="([^"]+)"\s+
  1139. (?:[a-zA-Z-]+="[^"]+"\s+)*?
  1140. class="yt-uix-redirect-link"\s*>
  1141. [^<]+
  1142. </a>
  1143. ''', r'\1', video_description)
  1144. video_description = clean_html(video_description)
  1145. else:
  1146. fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
  1147. if fd_mobj:
  1148. video_description = unescapeHTML(fd_mobj.group(1))
  1149. else:
  1150. video_description = u''
  1151. def _extract_count(klass):
  1152. count = self._search_regex(
  1153. r'class="%s">([\d,]+)</span>' % re.escape(klass),
  1154. video_webpage, klass, default=None)
  1155. if count is not None:
  1156. return int(count.replace(',', ''))
  1157. return None
  1158. like_count = _extract_count(u'likes-count')
  1159. dislike_count = _extract_count(u'dislikes-count')
  1160. # subtitles
  1161. video_subtitles = self.extract_subtitles(video_id, video_webpage)
  1162. if self._downloader.params.get('listsubtitles', False):
  1163. self._list_available_subtitles(video_id, video_webpage)
  1164. return
  1165. if 'length_seconds' not in video_info:
  1166. self._downloader.report_warning(u'unable to extract video duration')
  1167. video_duration = None
  1168. else:
  1169. video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))
  1170. # annotations
  1171. video_annotations = None
  1172. if self._downloader.params.get('writeannotations', False):
  1173. video_annotations = self._extract_annotations(video_id)
  1174. # Decide which formats to download
  1175. try:
  1176. mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
  1177. if not mobj:
  1178. raise ValueError('Could not find vevo ID')
  1179. json_code = uppercase_escape(mobj.group(1))
  1180. ytplayer_config = json.loads(json_code)
  1181. args = ytplayer_config['args']
1182. # Easy way to know if the 's' value is in url_encoded_fmt_stream_map;
1183. # these signatures are encrypted
  1184. if 'url_encoded_fmt_stream_map' not in args:
  1185. raise ValueError(u'No stream_map present') # caught below
  1186. re_signature = re.compile(r'[&,]s=')
  1187. m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
  1188. if m_s is not None:
  1189. self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
  1190. video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
  1191. m_s = re_signature.search(args.get('adaptive_fmts', u''))
  1192. if m_s is not None:
  1193. if 'adaptive_fmts' in video_info:
  1194. video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
  1195. else:
  1196. video_info['adaptive_fmts'] = [args['adaptive_fmts']]
  1197. except ValueError:
  1198. pass
  1199. def _map_to_format_list(urlmap):
  1200. formats = []
  1201. for itag, video_real_url in urlmap.items():
  1202. dct = {
  1203. 'format_id': itag,
  1204. 'url': video_real_url,
  1205. 'player_url': player_url,
  1206. }
  1207. if itag in self._formats:
  1208. dct.update(self._formats[itag])
  1209. formats.append(dct)
  1210. return formats
  1211. if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
  1212. self.report_rtmp_download()
  1213. formats = [{
  1214. 'format_id': '_rtmp',
  1215. 'protocol': 'rtmp',
  1216. 'url': video_info['conn'][0],
  1217. 'player_url': player_url,
  1218. }]
  1219. elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
  1220. encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
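# The stream maps are comma-separated lists of URL-encoded query strings, one per format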
  1221. if 'rtmpe%3Dyes' in encoded_url_map:
  1222. raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
  1223. url_map = {}
  1224. for url_data_str in encoded_url_map.split(','):
  1225. url_data = compat_parse_qs(url_data_str)
  1226. if 'itag' in url_data and 'url' in url_data:
  1227. url = url_data['url'][0]
  1228. if 'sig' in url_data:
  1229. url += '&signature=' + url_data['sig'][0]
  1230. elif 's' in url_data:
  1231. encrypted_sig = url_data['s'][0]
  1232. if self._downloader.params.get('verbose'):
  1233. if age_gate:
  1234. if player_url is None:
  1235. player_version = 'unknown'
  1236. else:
  1237. player_version = self._search_regex(
  1238. r'-(.+)\.swf$', player_url,
  1239. u'flash player', fatal=False)
  1240. player_desc = 'flash player %s' % player_version
  1241. else:
  1242. player_version = self._search_regex(
  1243. r'html5player-(.+?)\.js', video_webpage,
  1244. 'html5 player', fatal=False)
  1245. player_desc = u'html5 player %s' % player_version
  1246. parts_sizes = u'.'.join(compat_str(len(part)) for part in encrypted_sig.split('.'))
  1247. self.to_screen(u'encrypted signature length %d (%s), itag %s, %s' %
  1248. (len(encrypted_sig), parts_sizes, url_data['itag'][0], player_desc))
  1249. if not age_gate:
  1250. jsplayer_url_json = self._search_regex(
  1251. r'"assets":.+?"js":\s*("[^"]+")',
  1252. video_webpage, u'JS player URL')
  1253. player_url = json.loads(jsplayer_url_json)
  1254. signature = self._decrypt_signature(
  1255. encrypted_sig, video_id, player_url, age_gate)
  1256. url += '&signature=' + signature
  1257. if 'ratebypass' not in url:
  1258. url += '&ratebypass=yes'
  1259. url_map[url_data['itag'][0]] = url
  1260. formats = _map_to_format_list(url_map)
  1261. elif video_info.get('hlsvp'):
  1262. manifest_url = video_info['hlsvp'][0]
  1263. url_map = self._extract_from_m3u8(manifest_url, video_id)
  1264. formats = _map_to_format_list(url_map)
  1265. else:
  1266. raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
  1267. # Look for the DASH manifest
  1268. if (self._downloader.params.get('youtube_include_dash_manifest', False)):
  1269. try:
  1270. # The DASH manifest used needs to be the one from the original video_webpage.
  1271. # The one found in get_video_info seems to be using different signatures.
  1272. # However, in the case of an age restriction there won't be any embedded dashmpd in the video_webpage.
  1273. # Luckily, it seems, this case uses some kind of default signature (len == 86), so the
  1274. # combination of get_video_info and the _static_decrypt_signature() decryption fallback will work here.
  1275. if age_gate:
  1276. dash_manifest_url = video_info.get('dashmpd')[0]
  1277. else:
  1278. dash_manifest_url = ytplayer_config['args']['dashmpd']
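# The manifest URL may carry an encrypted signature in an /s/<sig> path segment;
# rewrite it as /signature/<decrypted sig> before downloading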
  1279. def decrypt_sig(mobj):
  1280. s = mobj.group(1)
  1281. dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
  1282. return '/signature/%s' % dec_s
  1283. dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
  1284. dash_doc = self._download_xml(
  1285. dash_manifest_url, video_id,
  1286. note=u'Downloading DASH manifest',
  1287. errnote=u'Could not download DASH manifest')
  1288. for r in dash_doc.findall(u'.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
  1289. url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
  1290. if url_el is None:
  1291. continue
  1292. format_id = r.attrib['id']
  1293. video_url = url_el.text
  1294. filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
  1295. f = {
  1296. 'format_id': format_id,
  1297. 'url': video_url,
  1298. 'width': int_or_none(r.attrib.get('width')),
  1299. 'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
  1300. 'asr': int_or_none(r.attrib.get('audioSamplingRate')),
  1301. 'filesize': filesize,
  1302. }
  1303. try:
  1304. existing_format = next(
  1305. fo for fo in formats
  1306. if fo['format_id'] == format_id)
  1307. except StopIteration:
  1308. f.update(self._formats.get(format_id, {}))
  1309. formats.append(f)
  1310. else:
  1311. existing_format.update(f)
  1312. except (ExtractorError, KeyError) as e:
  1313. self.report_warning(u'Skipping DASH manifest: %s' % e, video_id)
  1314. self._sort_formats(formats)
  1315. return {
  1316. 'id': video_id,
  1317. 'uploader': video_uploader,
  1318. 'uploader_id': video_uploader_id,
  1319. 'upload_date': upload_date,
  1320. 'title': video_title,
  1321. 'thumbnail': video_thumbnail,
  1322. 'description': video_description,
  1323. 'subtitles': video_subtitles,
  1324. 'duration': video_duration,
  1325. 'age_limit': 18 if age_gate else 0,
  1326. 'annotations': video_annotations,
  1327. 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
  1328. 'view_count': view_count,
  1329. 'like_count': like_count,
  1330. 'dislike_count': dislike_count,
  1331. 'formats': formats,
  1332. }
  1333. class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
  1334. IE_DESC = u'YouTube.com playlists'
  1335. _VALID_URL = r"""(?x)(?:
  1336. (?:https?://)?
  1337. (?:\w+\.)?
  1338. youtube\.com/
  1339. (?:
  1340. (?:course|view_play_list|my_playlists|artist|playlist|watch)
  1341. \? (?:.*?&)*? (?:p|a|list)=
  1342. | p/
  1343. )
  1344. (
  1345. (?:PL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
  1346. # Top tracks, they can also include dots
  1347. |(?:MC)[\w\.]*
  1348. )
  1349. .*
  1350. |
  1351. ((?:PL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
  1352. )"""
  1353. _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
  1354. _MORE_PAGES_INDICATOR = r'data-link-type="next"'
  1355. _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
  1356. IE_NAME = u'youtube:playlist'
  1357. def _real_initialize(self):
  1358. self._login()
  1359. def _ids_to_results(self, ids):
  1360. return [self.url_result(vid_id, 'Youtube', video_id=vid_id)
  1361. for vid_id in ids]
  1362. def _extract_mix(self, playlist_id):
1363. # Mixes are generated from a single video;
1364. # the id of the playlist is just 'RD' + video_id
        url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
        webpage = self._download_webpage(url, playlist_id, u'Downloading Youtube mix')
        search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
        title_span = (search_title('playlist-title') or
            search_title('title long-title') or search_title('title'))
        title = clean_html(title_span)
        video_re = r'''(?x)data-video-username="(.*?)".*?
                       href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id)
        matches = orderedSet(re.findall(video_re, webpage, flags=re.DOTALL))
        # Some of the videos may have been deleted, their username field is empty
        ids = [video_id for (username, video_id) in matches if username]
        url_results = self._ids_to_results(ids)

        return self.playlist_result(url_results, playlist_id, title)

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        playlist_id = mobj.group(1) or mobj.group(2)

        # Check if it's a video-specific URL
        query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if 'v' in query_dict:
            video_id = query_dict['v'][0]
            if self._downloader.params.get('noplaylist'):
                self.to_screen(u'Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, 'Youtube', video_id=video_id)
            else:
                self.to_screen(u'Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))

        if playlist_id.startswith('RD'):
            # Mixes require a custom extraction process
            return self._extract_mix(playlist_id)
        if playlist_id.startswith('TL'):
            raise ExtractorError(u'For downloading YouTube.com top lists, use '
                u'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)

        url = self._TEMPLATE_URL % playlist_id
        page = self._download_webpage(url, playlist_id)
        more_widget_html = content_html = page
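        # Pagination scheme: the playlist page itself is the first chunk of
        # content; every further chunk comes from the AJAX URL referenced by
        # data-uix-load-more-href, whose JSON reply carries 'content_html'
        # (the next batch of video links) and 'load_more_widget_html' (which
        # either contains the next continuation link or signals the end).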

        # Extract the video ids from the playlist pages
        ids = []

        for page_num in itertools.count(1):
            matches = re.finditer(self._VIDEO_RE, content_html)
            # We remove the duplicates and the link with index 0
            # (it's not the first video of the playlist)
            new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id, 'Downloading page #%s' % page_num)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        playlist_title = self._html_search_regex(
            r'<h1 class="pl-header-title">\s*(.*?)\s*</h1>', page, u'title')

        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_id, playlist_title)


class YoutubeTopListIE(YoutubePlaylistIE):
    IE_NAME = u'youtube:toplist'
    IE_DESC = (u'YouTube.com top lists, "yttoplist:{channel}:{list title}"'
        u' (Example: "yttoplist:music:Top Tracks")')
    _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'
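    # For example, "yttoplist:music:Top Tracks" yields chann='music' and
    # title='Top Tracks'; the list is then located on the corresponding
    # channel page by searching for a link whose href contains the
    # urlencoded title.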

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel = mobj.group('chann')
        title = mobj.group('title')
        query = compat_urllib_parse.urlencode({'title': title})
        playlist_re = 'href="([^"]+?%s.*?)"' % re.escape(query)
        channel_page = self._download_webpage('https://www.youtube.com/%s' % channel, title)
        link = self._html_search_regex(playlist_re, channel_page, u'list')
        url = compat_urlparse.urljoin('https://www.youtube.com/', link)

        video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
        ids = []
        # sometimes the webpage doesn't contain the videos
        # retry until we get them
        for i in itertools.count(0):
            msg = u'Downloading Youtube top list'
            if i > 0:
                msg += ', retry #%d' % i

            webpage = self._download_webpage(url, title, msg)
            ids = orderedSet(re.findall(video_re, webpage))
            if ids:
                break
        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_title=title)


class YoutubeChannelIE(InfoExtractor):
    IE_DESC = u'YouTube.com channels'
    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
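    # For example, page 2 of a (hypothetical) channel id UCxxxxxxxxxxxxxxxxxxxxxx
    # would be fetched from:
    #   https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=2&view=0&sort=da&channel_id=UCxxxxxxxxxxxxxxxxxxxxxx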
    IE_NAME = u'youtube:channel'

    def extract_videos_from_page(self, page):
        ids_in_page = []
        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
            if mobj.group(1) not in ids_in_page:
                ids_in_page.append(mobj.group(1))
        return ids_in_page

    def _real_extract(self, url):
        # Extract channel id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # Download channel page
        channel_id = mobj.group(1)
        video_ids = []
        url = 'https://www.youtube.com/channel/%s/videos' % channel_id
        channel_page = self._download_webpage(url, channel_id)
        autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if autogenerated:
            # The videos are contained in a single page;
            # the ajax pages can't be used, they are empty
            video_ids = self.extract_videos_from_page(channel_page)
        else:
            # Download all channel pages using the json-based channel_ajax query
            for pagenum in itertools.count(1):
                url = self._MORE_PAGES_URL % (pagenum, channel_id)
                page = self._download_json(
                    url, channel_id, note=u'Downloading page #%s' % pagenum,
                    transform_source=uppercase_escape)

                ids_in_page = self.extract_videos_from_page(page['content_html'])
                video_ids.extend(ids_in_page)

                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        url_entries = [self.url_result(video_id, 'Youtube', video_id=video_id)
                       for video_id in video_ids]
        return self.playlist_result(url_entries, channel_id)


class YoutubeUserIE(InfoExtractor):
    IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
    IE_NAME = u'youtube:user'

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with other youtube
        # extractors: this regex is too permissive and would match their URLs.
        other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_ies):
            return False
        else:
            return super(YoutubeUserIE, cls).suitable(url)

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        def download_page(pagenum):
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
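            # Example with hypothetical values: for pagenum=2 the start-index
            # is 101, so the request asks the GData API for uploads 101-150 of
            # this user's feed (page size is _GDATA_PAGE_SIZE = 50).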
            page = self._download_webpage(
                gdata_url, username,
                u'Downloading video ids from %d to %d' % (
                    start_index, start_index + self._GDATA_PAGE_SIZE))

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
            if 'entry' not in response['feed']:
                return

            # Extract video identifiers
            entries = response['feed']['entry']
            for entry in entries:
                title = entry['title']['$t']
                video_id = entry['id']['$t'].split('/')[-1]
                yield {
                    '_type': 'url',
                    'url': video_id,
                    'ie_key': 'Youtube',
                    'id': video_id,
                    'title': title,
                }
        url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)

        return self.playlist_result(url_results, playlist_title=username)


class YoutubeSearchIE(SearchInfoExtractor):
    IE_DESC = u'YouTube.com searches'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _MAX_RESULTS = 1000
    IE_NAME = u'youtube:search'
    _SEARCH_KEY = 'ytsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        video_ids = []
        pagenum = 0
        limit = n
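        # Pages of 50 results are requested until enough ids are collected or
        # the API reports fewer total items. For example, "ytsearch130:foo"
        # (a hypothetical query) fetches start-index 1, 51 and 101, gathers up
        # to 150 ids and is then trimmed back to the requested 130 below.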
        while (50 * pagenum) < limit:
            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50 * pagenum) + 1)
            data_json = self._download_webpage(
                result_url, video_id=u'query "%s"' % query,
                note=u'Downloading page %s' % (pagenum + 1),
                errnote=u'Unable to download API page')
            data = json.loads(data_json)
            api_response = data['data']

            if 'items' not in api_response:
                raise ExtractorError(
                    u'[youtube] No video results', expected=True)

            new_ids = list(video['id'] for video in api_response['items'])
            video_ids += new_ids

            limit = min(n, api_response['totalItems'])
            pagenum += 1

        if len(video_ids) > n:
            video_ids = video_ids[:n]
        videos = [self.url_result(video_id, 'Youtube', video_id=video_id)
                  for video_id in video_ids]
        return self.playlist_result(videos, query)


class YoutubeSearchDateIE(YoutubeSearchIE):
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = u'YouTube.com searches, newest videos first'


class YoutubeSearchURLIE(InfoExtractor):
    IE_DESC = u'YouTube.com search URLs'
    IE_NAME = u'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
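    # Matches result-page URLs such as (illustrative)
    #   https://www.youtube.com/results?search_query=some+terms
    # where the 'query' group captures the urlencoded search terms.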

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        query = compat_urllib_parse.unquote_plus(mobj.group('query'))

        webpage = self._download_webpage(url, query)
        result_code = self._search_regex(
            r'(?s)<ol id="search-results"(.*?)</ol>', webpage, u'result HTML')

        part_codes = re.findall(
            r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
        entries = []
        for part_code in part_codes:
            part_title = self._html_search_regex(
                r'(?s)title="([^"]+)"', part_code, 'item title', fatal=False)
            part_url_snippet = self._html_search_regex(
                r'(?s)href="([^"]+)"', part_code, 'item URL')
            part_url = compat_urlparse.urljoin(
                'https://www.youtube.com/', part_url_snippet)
            entries.append({
                '_type': 'url',
                'url': part_url,
                'title': part_title,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }


class YoutubeShowIE(InfoExtractor):
    IE_DESC = u'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(.*)'
    IE_NAME = u'youtube:show'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        show_name = mobj.group(1)
        webpage = self._download_webpage(url, show_name, u'Downloading show webpage')
        # There's one playlist for each season of the show
        m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
        self.to_screen(u'%s: Found %s seasons' % (show_name, len(m_seasons)))
        return [self.url_result('https://www.youtube.com' + season.group(1), 'YoutubePlaylist') for season in m_seasons]


class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for extractors that fetch info from
    http://www.youtube.com/feed_ajax
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True
    # use action_load_personal_feed instead of action_load_system_feed
    _PERSONAL_FEED = False

    @property
    def _FEED_TEMPLATE(self):
        action = 'action_load_system_feed'
        if self._PERSONAL_FEED:
            action = 'action_load_personal_feed'
        return 'https://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)
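    # For example, a system feed such as the subscriptions feed is paged
    # through URLs like (illustrative)
    #   https://www.youtube.com/feed_ajax?action_load_system_feed=1&feed_name=subscriptions&paging=0
    # while personal feeds (watch_later, history) use action_load_personal_feed.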

    @property
    def IE_NAME(self):
        return u'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        feed_entries = []
        paging = 0
        for i in itertools.count(1):
            info = self._download_webpage(self._FEED_TEMPLATE % paging,
                                          u'%s feed' % self._FEED_NAME,
                                          u'Downloading page %s' % i)
            info = json.loads(info)
            feed_html = info['feed_html']
            m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
            ids = orderedSet(m.group(1) for m in m_ids)
            feed_entries.extend(
                self.url_result(video_id, 'Youtube', video_id=video_id)
                for video_id in ids)
            if info['paging'] is None:
                break
            paging = info['paging']
        return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)


class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = u'Youtube Subscriptions'


class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = u'Youtube Recommended videos'


class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
    _FEED_NAME = 'watch_later'
    _PLAYLIST_TITLE = u'Youtube Watch Later'
    _PERSONAL_FEED = True


class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'Youtube watch history, "ythistory" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PERSONAL_FEED = True
    _PLAYLIST_TITLE = u'Youtube Watch History'


class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = u'youtube:favorites'
    IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, u'favourites playlist id')
        return self.url_result(playlist_id, 'YoutubePlaylist')


class YoutubeTruncatedURLIE(InfoExtractor):
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?[^/]+/watch\?(?:feature=[a-z_]+)?$|
        (?:https?://)?(?:www\.)?youtube\.com/attribution_link\?a=[^&]+$
    '''

    def _real_extract(self, url):
        raise ExtractorError(
            u'Did you forget to quote the URL? Remember that & is a meta '
            u'character in most shells, so you want to put the URL in quotes, '
            u'like youtube-dl '
            u'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            u' or simply youtube-dl BaW_jenozKc .',
            expected=True)