# coding: utf-8

import errno
import io
import itertools
import json
import os.path
import re
import traceback

from .common import InfoExtractor, SearchInfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..utils import (
    compat_chr,
    compat_parse_qs,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
    compat_str,

    clean_html,
    get_cachedir,
    get_element_by_id,
    get_element_by_attribute,
    ExtractorError,
    int_or_none,
    PagedList,
    unescapeHTML,
    unified_strdate,
    orderedSet,
    write_json_file,
    uppercase_escape,
)

class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def _set_language(self):
        return bool(self._download_webpage(
            self._LANG_URL, None,
            note=u'Setting language', errnote='unable to set language',
            fatal=False))

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note=u'Downloading login page',
            errnote=u'unable to fetch login page', fatal=False)
        if login_page is False:
            return

        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
                                  login_page, u'Login GALX parameter')

        # Log in
        login_form_strs = {
            u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            u'Email': username,
            u'GALX': galx,
            u'Passwd': password,

            u'PersistentCookie': u'yes',
            u'_utf8': u'',
            u'bgresponse': u'js_disabled',
            u'checkConnection': u'',
            u'checkedDomains': u'youtube',
            u'dnConn': u'',
            u'pstMsg': u'0',
            u'rmShown': u'1',
            u'secTok': u'',
            u'signIn': u'Sign in',
            u'timeStmp': u'',
            u'service': u'youtube',
            u'uilel': u'3',
            u'hl': u'en_US',
        }

        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')

        req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        login_results = self._download_webpage(
            req, None,
            note=u'Logging in', errnote=u'unable to log in', fatal=False)
        if login_results is False:
            return False

        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
            raise ExtractorError(u'Please use your account password and a two-factor code instead of an application-specific password.', expected=True)

        # Two-Factor
        # TODO add SMS and phone call support - these require making a request and then prompting the user

        if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
            tfa_code = self._get_tfa_info()

            if tfa_code is None:
                self._downloader.report_warning(u'Two-factor authentication required. Provide it with --twofactor <code>')
                self._downloader.report_warning(u'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                return False

            # Unlike the first login form, secTok and timeStmp are both required for the TFA form

            match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning(u'Failed to get secTok - did the page structure change?')
                return False
            secTok = match.group(1)
            match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning(u'Failed to get timeStmp - did the page structure change?')
                return False
            timeStmp = match.group(1)

            tfa_form_strs = {
                u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
                u'smsToken': u'',
                u'smsUserPin': tfa_code,
                u'smsVerifyPin': u'Verify',

                u'PersistentCookie': u'yes',
                u'checkConnection': u'',
                u'checkedDomains': u'youtube',
                u'pstMsg': u'1',
                u'secTok': secTok,
                u'timeStmp': timeStmp,
                u'service': u'youtube',
                u'hl': u'en_US',
            }
            tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
            tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')

            tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
            tfa_results = self._download_webpage(
                tfa_req, None,
                note=u'Submitting TFA code', errnote=u'unable to submit tfa', fatal=False)

            if tfa_results is False:
                return False

            if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
                self._downloader.report_warning(u'Two-factor code expired. Please try again, or use a one-use backup code instead.')
                return False
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
                self._downloader.report_warning(u'unable to log in - did the page structure change?')
                return False
            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
                self._downloader.report_warning(u'Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
                return False

        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning(u'unable to log in: bad username or password')
            return False
        return True

    def _confirm_age(self):
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        req = compat_urllib_request.Request(self._AGE_URL,
                                            compat_urllib_parse.urlencode(age_form).encode('ascii'))

        self._download_webpage(
            req, None,
            note=u'Confirming age', errnote=u'Unable to confirm age')
        return True

    def _real_initialize(self):
        if self._downloader is None:
            return
        if not self._set_language():
            return
        if not self._login():
            return
        self._confirm_age()


class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
    IE_DESC = u'YouTube.com'
    _VALID_URL = r"""(?x)^
                     (
                         (?:https?://|//)?                                    # http(s):// or protocol-independent URL (optional)
                         (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                            (?:www\.)?deturl\.com/www\.youtube\.com/|
                            (?:www\.)?pwnyoutube\.com/|
                            (?:www\.)?yourepeat\.com/|
                            tube\.majestyc\.net/|
                            youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         ))
                         |youtu\.be/                                          # just youtu.be/xxxx
                         |https?://(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                         )
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240},
        '6': {'ext': 'flv', 'width': 450, 'height': 270},
        '13': {'ext': '3gp'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
        '34': {'ext': 'flv', 'width': 640, 'height': 360},
        '35': {'ext': 'flv', 'width': 854, 'height': 480},
        '36': {'ext': '3gp', 'width': 320, 'height': 240},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
        '43': {'ext': 'webm', 'width': 640, 'height': 360},
        '44': {'ext': 'webm', 'width': 854, 'height': 480},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080},

        # 3d videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},

        # Apple HTTP Live Streaming
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},

        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},

        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash webm audio
        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
    }

    IE_NAME = u'youtube'
    _TESTS = [
        {
            u"url": u"http://www.youtube.com/watch?v=BaW_jenozKc",
            u"file": u"BaW_jenozKc.mp4",
            u"info_dict": {
                u"title": u"youtube-dl test video \"'/\\ä↭𝕐",
                u"uploader": u"Philipp Hagemeister",
                u"uploader_id": u"phihag",
                u"upload_date": u"20121002",
                u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .",
                u"categories": [u'Science & Technology'],
                'like_count': int,
                'dislike_count': int,
            }
        },
        {
            u"url": u"http://www.youtube.com/watch?v=UxxajLWwzqY",
            u"file": u"UxxajLWwzqY.mp4",
            u"note": u"Test generic use_cipher_signature video (#897)",
            u"info_dict": {
                u"upload_date": u"20120506",
                u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]",
                u"description": u"md5:fea86fda2d5a5784273df5c7cc994d9f",
                u"uploader": u"Icona Pop",
                u"uploader_id": u"IconaPop"
            }
        },
        {
            u"url": u"https://www.youtube.com/watch?v=07FYdnEawAQ",
            u"file": u"07FYdnEawAQ.mp4",
            u"note": u"Test VEVO video with age protection (#956)",
            u"info_dict": {
                u"upload_date": u"20130703",
                u"title": u"Justin Timberlake - Tunnel Vision (Explicit)",
                u"description": u"md5:64249768eec3bc4276236606ea996373",
                u"uploader": u"justintimberlakeVEVO",
                u"uploader_id": u"justintimberlakeVEVO"
            }
        },
        {
            u"url": u"//www.YouTube.com/watch?v=yZIXLfi8CZQ",
            u"file": u"yZIXLfi8CZQ.mp4",
            u"note": u"Embed-only video (#1746)",
            u"info_dict": {
                u"upload_date": u"20120608",
                u"title": u"Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012",
                u"description": u"md5:09b78bd971f1e3e289601dfba15ca4f7",
                u"uploader": u"SET India",
                u"uploader_id": u"setindia"
            }
        },
        {
            u"url": u"http://www.youtube.com/watch?v=a9LDPn-MO4I",
            u"file": u"a9LDPn-MO4I.m4a",
            u"note": u"256k DASH audio (format 141) via DASH manifest",
            u"info_dict": {
                u"upload_date": "20121002",
                u"uploader_id": "8KVIDEO",
                u"description": "No description available.",
                u"uploader": "8KVIDEO",
                u"title": "UHDTV TEST 8K VIDEO.mp4"
            },
            u"params": {
                u"youtube_include_dash_manifest": True,
                u"format": "141",
            },
        },
        # DASH manifest with encrypted signature
        {
            u'url': u'https://www.youtube.com/watch?v=IB3lcPjvWLA',
            u'info_dict': {
                u'id': u'IB3lcPjvWLA',
                u'ext': u'm4a',
                u'title': u'Afrojack - The Spark ft. Spree Wilson',
                u'description': u'md5:9717375db5a9a3992be4668bbf3bc0a8',
                u'uploader': u'AfrojackVEVO',
                u'uploader_id': u'AfrojackVEVO',
                u'upload_date': u'20131011',
            },
            u"params": {
                u'youtube_include_dash_manifest': True,
                u'format': '141',
            },
        },
    ]

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        if YoutubePlaylistIE.suitable(url): return False
        return re.match(cls._VALID_URL, url) is not None

    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        self._player_cache = {}

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self.to_screen(u'%s: Downloading video info webpage' % video_id)

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self.to_screen(u'%s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report extracted video URL."""
        self.to_screen(u'%s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self.to_screen(u'RTMP download detected')

    def _signature_cache_id(self, example_sig):
        """ Return a string representation of a signature """
        return u'.'.join(compat_str(len(part)) for part in example_sig.split('.'))

    def _extract_signature_function(self, video_id, player_url, example_sig):
        id_m = re.match(
            r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
            player_url)
        if not id_m:
            raise ExtractorError('Cannot identify player %r' % player_url)
        player_type = id_m.group('ext')
        player_id = id_m.group('id')

        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        assert os.path.basename(func_id) == func_id
        cache_dir = get_cachedir(self._downloader.params)

        cache_enabled = cache_dir is not None
        if cache_enabled:
            cache_fn = os.path.join(os.path.expanduser(cache_dir),
                                    u'youtube-sigfuncs',
                                    func_id + '.json')
            try:
                with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
                    cache_spec = json.load(cachef)
                return lambda s: u''.join(s[i] for i in cache_spec)
            except IOError:
                pass  # No cache available
            except ValueError:
                try:
                    file_size = os.path.getsize(cache_fn)
                except (OSError, IOError) as oe:
                    file_size = str(oe)
                self._downloader.report_warning(
                    u'Cache %s failed (%s)' % (cache_fn, file_size))

        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=u'Downloading %s player %s' % (player_type, player_id),
                errnote=u'Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=u'Downloading %s player %s' % (player_type, player_id),
                errnote=u'Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type

        if cache_enabled:
            try:
                test_string = u''.join(map(compat_chr, range(len(example_sig))))
                cache_res = res(test_string)
                cache_spec = [ord(c) for c in cache_res]
                try:
                    os.makedirs(os.path.dirname(cache_fn))
                except OSError as ose:
                    if ose.errno != errno.EEXIST:
                        raise
                write_json_file(cache_spec, cache_fn)
            except Exception:
                tb = traceback.format_exc()
                self._downloader.report_warning(
                    u'Writing cache to %r failed: %s' % (cache_fn, tb))

        return res

    def _print_sig_code(self, func, example_sig):
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                starts = u'' if start == 0 else str(start)
                ends = (u':%d' % (end+step)) if end + step >= 0 else u':'
                steps = u'' if step == 1 else (u':%d' % step)
                return u's[%s%s%s]' % (starts, ends, steps)

            step = None
            start = '(Never used)'  # Quelch pyflakes warnings - start will be
                                    # set as soon as step is set
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield u's[%d]' % prev
            if step is None:
                yield u's[%d]' % i
            else:
                yield _genslice(start, i, step)

        test_string = u''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = u' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = (u'if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                u'    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen(u'Extracted signature function:\n' + code)

    def _parse_sig_js(self, jscode):
        funcname = self._search_regex(
            r'signature=([$a-zA-Z]+)', jscode,
            u'Initial JS player signature function name')

        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        return lambda s: initial_function([s])

    def _parse_sig_swf(self, file_contents):
        swfi = SWFInterpreter(file_contents)
        TARGET_CLASSNAME = u'SignatureDecipher'
        searched_class = swfi.extract_class(TARGET_CLASSNAME)
        initial_function = swfi.extract_function(searched_class, u'decipher')
        return lambda s: initial_function([s])

    def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
        """Turn the encrypted s field into a working signature"""

        if player_url is None:
            raise ExtractorError(u'Cannot decrypt signature without player_url')

        if player_url.startswith(u'//'):
            player_url = u'https:' + player_url
        try:
            player_id = (player_url, self._signature_cache_id(s))
            if player_id not in self._player_cache:
                func = self._extract_signature_function(
                    video_id, player_url, s
                )
                self._player_cache[player_id] = func
            func = self._player_cache[player_id]
            if self._downloader.params.get('youtube_print_sig_code'):
                self._print_sig_code(func, s)
            return func(s)
        except Exception as e:
            tb = traceback.format_exc()
            raise ExtractorError(
                u'Signature extraction failed: ' + tb, cause=e)

    def _get_available_subtitles(self, video_id, webpage):
        try:
            sub_list = self._download_webpage(
                'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
            return {}
        lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)

        sub_lang_list = {}
        for l in lang_list:
            lang = l[1]
            if lang in sub_lang_list:
                continue
            params = compat_urllib_parse.urlencode({
                'lang': lang,
                'v': video_id,
                'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
                'name': unescapeHTML(l[0]).encode('utf-8'),
            })
            url = u'https://www.youtube.com/api/timedtext?' + params
            sub_lang_list[lang] = url
        if not sub_lang_list:
            self._downloader.report_warning(u'video doesn\'t have subtitles')
            return {}
        return sub_lang_list

    def _get_available_automatic_caption(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
           argument to speed up the process."""
        sub_format = self._downloader.params.get('subtitlesformat', 'srt')
        self.to_screen(u'%s: Looking for automatic captions' % video_id)
        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
        err_msg = u'Couldn\'t find automatic captions for %s' % video_id
        if mobj is None:
            self._downloader.report_warning(err_msg)
            return {}
        player_config = json.loads(mobj.group(1))
        try:
            args = player_config[u'args']
            caption_url = args[u'ttsurl']
            timestamp = args[u'timestamp']
            # We get the available subtitles
            list_params = compat_urllib_parse.urlencode({
                'type': 'list',
                'tlangs': 1,
                'asrs': 1,
            })
            list_url = caption_url + '&' + list_params
            caption_list = self._download_xml(list_url, video_id)
            original_lang_node = caption_list.find('track')
            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr':
                self._downloader.report_warning(u'Video doesn\'t have automatic captions')
                return {}
            original_lang = original_lang_node.attrib['lang_code']

            sub_lang_list = {}
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                params = compat_urllib_parse.urlencode({
                    'lang': original_lang,
                    'tlang': sub_lang,
                    'fmt': sub_format,
                    'ts': timestamp,
                    'kind': 'asr',
                })
                sub_lang_list[sub_lang] = caption_url + '&' + params
            return sub_lang_list
        # An extractor error can be raised by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}

    @classmethod
    def extract_id(cls, url):
        mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(2)
        return video_id

    def _extract_from_m3u8(self, manifest_url, video_id):
        url_map = {}

        def _get_urls(_manifest):
            lines = _manifest.split('\n')
            urls = filter(lambda l: l and not l.startswith('#'),
                          lines)
            return urls

        manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest')
        formats_urls = _get_urls(manifest)
        for format_url in formats_urls:
            itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
            url_map[itag] = format_url
        return url_map

    def _extract_annotations(self, video_id):
        url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
        return self._download_webpage(url, video_id, note=u'Searching for annotations.', errnote=u'Unable to download video annotations.')

    def _real_extract(self, url):
        proto = (
            u'http' if self._downloader.params.get('prefer_insecure', False)
            else u'https')

        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj:
            url = proto + '://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
        video_id = self.extract_id(url)

        # Get video webpage
        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
        video_webpage = self._download_webpage(url, video_id)

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None

        # Get video info
        self.report_video_info_webpage_download(video_id)
        if re.search(r'player-age-gate-content">', video_webpage) is not None:
            self.report_age_confirmation()
            age_gate = True
            # We simulate the access to the video from www.youtube.com/v/{video_id}
            # this can be viewed without logging into Youtube
            data = compat_urllib_parse.urlencode({
                'video_id': video_id,
                'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                'sts': self._search_regex(
                    r'"sts"\s*:\s*(\d+)', video_webpage, 'sts'),
            })
            video_info_url = proto + '://www.youtube.com/get_video_info?' + data
            video_info_webpage = self._download_webpage(video_info_url, video_id,
                                    note=False,
                                    errnote='unable to download video info webpage')
            video_info = compat_parse_qs(video_info_webpage)
        else:
            age_gate = False
            for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
                video_info_url = (proto + '://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                    % (video_id, el_type))
                video_info_webpage = self._download_webpage(video_info_url, video_id,
                                        note=False,
                                        errnote='unable to download video info webpage')
                video_info = compat_parse_qs(video_info_webpage)
                if 'token' in video_info:
                    break
        if 'token' not in video_info:
            if 'reason' in video_info:
                raise ExtractorError(
                    u'YouTube said: %s' % video_info['reason'][0],
                    expected=True, video_id=video_id)
            else:
                raise ExtractorError(
                    u'"token" parameter not in video info for unknown reason',
                    video_id=video_id)

        if 'view_count' in video_info:
            view_count = int(video_info['view_count'][0])
        else:
            view_count = None

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            raise ExtractorError(u'"rental" videos not supported')

        # Start extracting information
        self.report_information_extraction(video_id)

        # uploader
        if 'author' not in video_info:
            raise ExtractorError(u'Unable to extract uploader name')
        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])

        # uploader_id
        video_uploader_id = None
        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group(1)
        else:
            self._downloader.report_warning(u'unable to extract uploader nickname')

        # title
        if 'title' in video_info:
            video_title = video_info['title'][0]
        else:
            self._downloader.report_warning(u'Unable to extract video title')
            video_title = u'_'

        # thumbnail image
        # We try first to get a high quality image:
        m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
                            video_webpage, re.DOTALL)
        if m_thumb is not None:
            video_thumbnail = m_thumb.group(1)
        elif 'thumbnail_url' not in video_info:
            self._downloader.report_warning(u'unable to extract video thumbnail')
            video_thumbnail = None
        else:   # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])

        # upload date
        upload_date = None
        mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
        if mobj is None:
            mobj = re.search(
                r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
                video_webpage)
        if mobj is not None:
            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
            upload_date = unified_strdate(upload_date)

        m_cat_container = self._search_regex(
            r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
            video_webpage, 'categories', fatal=False)
        if m_cat_container:
            category = self._html_search_regex(
                r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
                default=None)
            video_categories = None if category is None else [category]
        else:
            video_categories = None

        # description
        video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            video_description = re.sub(r'''(?x)
                <a\s+
                    (?:[a-zA-Z-]+="[^"]+"\s+)*?
                    title="([^"]+)"\s+
                    (?:[a-zA-Z-]+="[^"]+"\s+)*?
                    class="yt-uix-redirect-link"\s*>
                [^<]+
                </a>
            ''', r'\1', video_description)
            video_description = clean_html(video_description)
        else:
            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
            if fd_mobj:
                video_description = unescapeHTML(fd_mobj.group(1))
            else:
                video_description = u''

        def _extract_count(count_name):
            count = self._search_regex(
                r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
                video_webpage, count_name, default=None)
            if count is not None:
                return int(count.replace(',', ''))
            return None
        like_count = _extract_count(u'like')
        dislike_count = _extract_count(u'dislike')

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, video_webpage)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, video_webpage)
            return

        if 'length_seconds' not in video_info:
            self._downloader.report_warning(u'unable to extract video duration')
            video_duration = None
        else:
            video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))

        # annotations
        video_annotations = None
        if self._downloader.params.get('writeannotations', False):
            video_annotations = self._extract_annotations(video_id)

        # Decide which formats to download
        try:
            mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
            if not mobj:
                raise ValueError('Could not find vevo ID')
            json_code = uppercase_escape(mobj.group(1))
            ytplayer_config = json.loads(json_code)
            args = ytplayer_config['args']
            # Easy way to know if the 's' value is in url_encoded_fmt_stream_map
            # these signatures are encrypted
            if 'url_encoded_fmt_stream_map' not in args:
                raise ValueError(u'No stream_map present')  # caught below
            re_signature = re.compile(r'[&,]s=')
            m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
            if m_s is not None:
                self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
                video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
            m_s = re_signature.search(args.get('adaptive_fmts', u''))
            if m_s is not None:
                if 'adaptive_fmts' in video_info:
                    video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
                else:
                    video_info['adaptive_fmts'] = [args['adaptive_fmts']]
        except ValueError:
            pass

        def _map_to_format_list(urlmap):
            formats = []
            for itag, video_real_url in urlmap.items():
                dct = {
                    'format_id': itag,
                    'url': video_real_url,
                    'player_url': player_url,
                }
                if itag in self._formats:
                    dct.update(self._formats[itag])
                formats.append(dct)
            return formats

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            formats = [{
                'format_id': '_rtmp',
                'protocol': 'rtmp',
                'url': video_info['conn'][0],
                'player_url': player_url,
            }]
        elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
            if 'rtmpe%3Dyes' in encoded_url_map:
                raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
            url_map = {}
            for url_data_str in encoded_url_map.split(','):
                url_data = compat_parse_qs(url_data_str)
                if 'itag' not in url_data or 'url' not in url_data:
                    continue
                format_id = url_data['itag'][0]
                url = url_data['url'][0]

                if 'sig' in url_data:
                    url += '&signature=' + url_data['sig'][0]
                elif 's' in url_data:
                    encrypted_sig = url_data['s'][0]

                    if not age_gate:
                        jsplayer_url_json = self._search_regex(
                            r'"assets":.+?"js":\s*("[^"]+")',
                            video_webpage, u'JS player URL')
                        player_url = json.loads(jsplayer_url_json)
                    if player_url is None:
                        player_url_json = self._search_regex(
                            r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
                            video_webpage, u'age gate player URL')
                        player_url = json.loads(player_url_json)

                    if self._downloader.params.get('verbose'):
                        if player_url is None:
                            player_version = 'unknown'
                            player_desc = 'unknown'
                        else:
                            if player_url.endswith('swf'):
                                player_version = self._search_regex(
                                    r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
                                    u'flash player', fatal=False)
                                player_desc = 'flash player %s' % player_version
                            else:
                                player_version = self._search_regex(
                                    r'html5player-([^/]+?)(?:/html5player)?\.js',
                                    player_url,
                                    'html5 player', fatal=False)
                                player_desc = u'html5 player %s' % player_version

                        parts_sizes = self._signature_cache_id(encrypted_sig)
                        self.to_screen(u'{%s} signature length %s, %s' %
                            (format_id, parts_sizes, player_desc))

                    signature = self._decrypt_signature(
                        encrypted_sig, video_id, player_url, age_gate)
                    url += '&signature=' + signature
                if 'ratebypass' not in url:
                    url += '&ratebypass=yes'
                url_map[format_id] = url
            formats = _map_to_format_list(url_map)
        elif video_info.get('hlsvp'):
            manifest_url = video_info['hlsvp'][0]
            url_map = self._extract_from_m3u8(manifest_url, video_id)
            formats = _map_to_format_list(url_map)
        else:
            raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')

        # Look for the DASH manifest
        if (self._downloader.params.get('youtube_include_dash_manifest', False)):
            try:
                # The DASH manifest used needs to be the one from the original video_webpage.
                # The one found in get_video_info seems to be using different signatures.
                # However, in the case of an age restriction there won't be any embedded dashmpd in the video_webpage.
                # Luckily, it seems, this case uses some kind of default signature (len == 86), so the
                # combination of get_video_info and the _static_decrypt_signature() decryption fallback will work here.
                if age_gate:
                    dash_manifest_url = video_info.get('dashmpd')[0]
                else:
                    dash_manifest_url = ytplayer_config['args']['dashmpd']

                def decrypt_sig(mobj):
                    s = mobj.group(1)
                    dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
                    return '/signature/%s' % dec_s

                dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
                dash_doc = self._download_xml(
                    dash_manifest_url, video_id,
                    note=u'Downloading DASH manifest',
                    errnote=u'Could not download DASH manifest')
                for r in dash_doc.findall(u'.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
                    url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
                    if url_el is None:
                        continue
                    format_id = r.attrib['id']
                    video_url = url_el.text
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
                    f = {
                        'format_id': format_id,
                        'url': video_url,
                        'width': int_or_none(r.attrib.get('width')),
                        'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
                        'asr': int_or_none(r.attrib.get('audioSamplingRate')),
                        'filesize': filesize,
                    }
                    try:
                        existing_format = next(
                            fo for fo in formats
                            if fo['format_id'] == format_id)
                    except StopIteration:
                        f.update(self._formats.get(format_id, {}))
                        formats.append(f)
                    else:
                        existing_format.update(f)
            except (ExtractorError, KeyError) as e:
                self.report_warning(u'Skipping DASH manifest: %s' % e, video_id)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'categories': video_categories,
            'subtitles': video_subtitles,
            'duration': video_duration,
            'age_limit': 18 if age_gate else 0,
            'annotations': video_annotations,
            'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'formats': formats,
        }


class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
    IE_DESC = u'YouTube.com playlists'
    _VALID_URL = r"""(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        youtube\.com/
                        (?:
                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
                           \? (?:.*?&)*? (?:p|a|list)=
                        |  p/
                        )
                        (
                            (?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
                            # Top tracks, they can also include dots
                            |(?:MC)[\w\.]*
                        )
                        .*
                     |
                        ((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
    _MORE_PAGES_INDICATOR = r'data-link-type="next"'
    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
    IE_NAME = u'youtube:playlist'

    def _real_initialize(self):
        self._login()

    def _ids_to_results(self, ids):
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _extract_mix(self, playlist_id):
        # The mixes are generated from a single video
        # the id of the playlist is just 'RD' + video_id
        url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
        webpage = self._download_webpage(
            url, playlist_id, u'Downloading Youtube mix')
        search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
        title_span = (
            search_title('playlist-title') or
            search_title('title long-title') or
            search_title('title'))
        title = clean_html(title_span)
        ids = orderedSet(re.findall(
            r'''(?xs)data-video-username=".*?".*?
                       href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
            webpage))
        url_results = self._ids_to_results(ids)

        return self.playlist_result(url_results, playlist_id, title)

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        playlist_id = mobj.group(1) or mobj.group(2)

        # Check if it's a video-specific URL
        query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if 'v' in query_dict:
            video_id = query_dict['v'][0]
            if self._downloader.params.get('noplaylist'):
                self.to_screen(u'Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, 'Youtube', video_id=video_id)
            else:
                self.to_screen(u'Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))

        if playlist_id.startswith('RD'):
            # Mixes require a custom extraction process
            return self._extract_mix(playlist_id)
        if playlist_id.startswith('TL'):
            raise ExtractorError(u'For downloading YouTube.com top lists, use '
                u'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)

        url = self._TEMPLATE_URL % playlist_id
        page = self._download_webpage(url, playlist_id)
        more_widget_html = content_html = page

        # Check if the playlist exists or is private
        if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
            raise ExtractorError(
                u'The playlist doesn\'t exist or is private, use --username or '
                '--netrc to access it.',
                expected=True)

        # Extract the video ids from the playlist pages
        ids = []

        for page_num in itertools.count(1):
            matches = re.finditer(self._VIDEO_RE, content_html)
            # We remove the duplicates and the link with index 0
            # (it's not the first video of the playlist)
            new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        playlist_title = self._html_search_regex(
            r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
            page, u'title')

        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_id, playlist_title)


class YoutubeTopListIE(YoutubePlaylistIE):
    IE_NAME = u'youtube:toplist'
    IE_DESC = (u'YouTube.com top lists, "yttoplist:{channel}:{list title}"'
        u' (Example: "yttoplist:music:Top Tracks")')
    _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel = mobj.group('chann')
        title = mobj.group('title')
        query = compat_urllib_parse.urlencode({'title': title})
        playlist_re = 'href="([^"]+?%s.*?)"' % re.escape(query)
        channel_page = self._download_webpage('https://www.youtube.com/%s' % channel, title)
        link = self._html_search_regex(playlist_re, channel_page, u'list')
        url = compat_urlparse.urljoin('https://www.youtube.com/', link)

        video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
        ids = []
        # sometimes the webpage doesn't contain the videos
        # retry until we get them
        for i in itertools.count(0):
            msg = u'Downloading Youtube mix'
            if i > 0:
                msg += ', retry #%d' % i

            webpage = self._download_webpage(url, title, msg)
            ids = orderedSet(re.findall(video_re, webpage))
            if ids:
                break
        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_title=title)


class YoutubeChannelIE(InfoExtractor):
    IE_DESC = u'YouTube.com channels'
    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
    IE_NAME = u'youtube:channel'

    def extract_videos_from_page(self, page):
        ids_in_page = []
        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
            if mobj.group(1) not in ids_in_page:
                ids_in_page.append(mobj.group(1))
        return ids_in_page

    def _real_extract(self, url):
        # Extract channel id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # Download channel page
        channel_id = mobj.group(1)
        video_ids = []
        url = 'https://www.youtube.com/channel/%s/videos' % channel_id
        channel_page = self._download_webpage(url, channel_id)
        autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if autogenerated:
            # The videos are contained in a single page
            # the ajax pages can't be used, they are empty
            video_ids = self.extract_videos_from_page(channel_page)
        else:
            # Download all channel pages using the json-based channel_ajax query
            for pagenum in itertools.count(1):
                url = self._MORE_PAGES_URL % (pagenum, channel_id)
                page = self._download_json(
                    url, channel_id, note=u'Downloading page #%s' % pagenum,
                    transform_source=uppercase_escape)

                ids_in_page = self.extract_videos_from_page(page['content_html'])
                video_ids.extend(ids_in_page)

                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        url_entries = [self.url_result(video_id, 'Youtube', video_id=video_id)
                       for video_id in video_ids]
        return self.playlist_result(url_entries, channel_id)


class YoutubeUserIE(InfoExtractor):
    IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
    IE_NAME = u'youtube:user'

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor; the regex is too permissive and it would match.
        other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_ies): return False
        else: return super(YoutubeUserIE, cls).suitable(url)

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        def download_page(pagenum):
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(
                gdata_url, username,
                u'Downloading video ids from %d to %d' % (
                    start_index, start_index + self._GDATA_PAGE_SIZE))

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
            if 'entry' not in response['feed']:
                return

            # Extract video identifiers
            entries = response['feed']['entry']
            for entry in entries:
                title = entry['title']['$t']
                video_id = entry['id']['$t'].split('/')[-1]
                yield {
                    '_type': 'url',
                    'url': video_id,
                    'ie_key': 'Youtube',
                    'id': video_id,
                    'title': title,
                }
        url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)

        return self.playlist_result(url_results, playlist_title=username)


class YoutubeSearchIE(SearchInfoExtractor):
    IE_DESC = u'YouTube.com searches'
    _API_URL = u'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _MAX_RESULTS = 1000
    IE_NAME = u'youtube:search'
    _SEARCH_KEY = 'ytsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        video_ids = []
        pagenum = 0
        limit = n
        PAGE_SIZE = 50

        while (PAGE_SIZE * pagenum) < limit:
            result_url = self._API_URL % (
                compat_urllib_parse.quote_plus(query.encode('utf-8')),
                (PAGE_SIZE * pagenum) + 1)
            data_json = self._download_webpage(
                result_url, video_id=u'query "%s"' % query,
                note=u'Downloading page %s' % (pagenum + 1),
                errnote=u'Unable to download API page')
            data = json.loads(data_json)
            api_response = data['data']

            if 'items' not in api_response:
                raise ExtractorError(
                    u'[youtube] No video results', expected=True)

            new_ids = list(video['id'] for video in api_response['items'])
            video_ids += new_ids

            limit = min(n, api_response['totalItems'])
            pagenum += 1

        if len(video_ids) > n:
            video_ids = video_ids[:n]
        videos = [self.url_result(video_id, 'Youtube', video_id=video_id)
                  for video_id in video_ids]
        return self.playlist_result(videos, query)


class YoutubeSearchDateIE(YoutubeSearchIE):
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = u'YouTube.com searches, newest videos first'


class YoutubeSearchURLIE(InfoExtractor):
    IE_DESC = u'YouTube.com search URLs'
    IE_NAME = u'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        query = compat_urllib_parse.unquote_plus(mobj.group('query'))

        webpage = self._download_webpage(url, query)
        result_code = self._search_regex(
            r'(?s)<ol class="item-section"(.*?)</ol>', webpage, u'result HTML')

        part_codes = re.findall(
            r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
        entries = []
        for part_code in part_codes:
            part_title = self._html_search_regex(
                [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
            part_url_snippet = self._html_search_regex(
                r'(?s)href="([^"]+)"', part_code, 'item URL')
            part_url = compat_urlparse.urljoin(
                'https://www.youtube.com/', part_url_snippet)
            entries.append({
                '_type': 'url',
                'url': part_url,
                'title': part_title,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }


class YoutubeShowIE(InfoExtractor):
    IE_DESC = u'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(.*)'
    IE_NAME = u'youtube:show'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        show_name = mobj.group(1)
        webpage = self._download_webpage(url, show_name, u'Downloading show webpage')
        # There's one playlist for each season of the show
        m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
        self.to_screen(u'%s: Found %s seasons' % (show_name, len(m_seasons)))
        return [self.url_result('https://www.youtube.com' + season.group(1), 'YoutubePlaylist') for season in m_seasons]


class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for extractors that fetch info from
    http://www.youtube.com/feed_ajax
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True
    # use action_load_personal_feed instead of action_load_system_feed
    _PERSONAL_FEED = False

    @property
    def _FEED_TEMPLATE(self):
        action = 'action_load_system_feed'
        if self._PERSONAL_FEED:
            action = 'action_load_personal_feed'
        return 'https://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)

    @property
    def IE_NAME(self):
        return u'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        feed_entries = []
        paging = 0
        for i in itertools.count(1):
            info = self._download_json(self._FEED_TEMPLATE % paging,
                                       u'%s feed' % self._FEED_NAME,
                                       u'Downloading page %s' % i)
            feed_html = info.get('feed_html') or info.get('content_html')
            m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
            ids = orderedSet(m.group(1) for m in m_ids)
            feed_entries.extend(
                self.url_result(video_id, 'Youtube', video_id=video_id)
                for video_id in ids)
            mobj = re.search(
                r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
                feed_html)
            if mobj is None:
                break
            paging = mobj.group('paging')
        return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)


class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = u'Youtube Recommended videos'


class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
    _FEED_NAME = 'watch_later'
    _PLAYLIST_TITLE = u'Youtube Watch Later'
    _PERSONAL_FEED = True


class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'Youtube watch history, "ythistory" keyword (requires authentication)'
    _VALID_URL = u'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PERSONAL_FEED = True
    _PLAYLIST_TITLE = u'Youtube Watch History'


class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = u'youtube:favorites'
    IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, u'favourites playlist id')
        return self.url_result(playlist_id, 'YoutubePlaylist')


class YoutubeSubscriptionsIE(YoutubePlaylistIE):
    IE_NAME = u'youtube:subscriptions'
    IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'

    def _real_extract(self, url):
        title = u'Youtube Subscriptions'
        page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)

        # The extraction process is the same as for playlists, but the regex
        # for the video ids doesn't contain an index
        ids = []
        more_widget_html = content_html = page

        for page_num in itertools.count(1):
            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
            new_ids = orderedSet(matches)
            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), title,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        return {
            '_type': 'playlist',
            'title': title,
            'entries': self._ids_to_results(ids),
        }


class YoutubeTruncatedURLIE(InfoExtractor):
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?[^/]+/watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+
        )?$|
        (?:https?://)?(?:www\.)?youtube\.com/attribution_link\?a=[^&]+$
    '''

    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        raise ExtractorError(
            u'Did you forget to quote the URL? Remember that & is a meta '
            u'character in most shells, so you want to put the URL in quotes, '
            u'like youtube-dl '
            u'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            u' or simply youtube-dl BaW_jenozKc .',
            expected=True)