# coding: utf-8

import json
import netrc
import re
import socket

from .common import InfoExtractor, SearchInfoExtractor
from ..utils import (
    compat_http_client,
    compat_parse_qs,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
    compat_str,

    clean_html,
    get_element_by_id,
    ExtractorError,
    unescapeHTML,
    unified_strdate,
)


class YoutubeIE(InfoExtractor):
    """Information extractor for youtube.com."""
    _VALID_URL = r"""^
                     (
                         (?:https?://)?                                       # http(s):// (optional)
                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         )?                                                   # optional -> youtube.com/xxxx is OK
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]+)                                         # here it is! the YouTube video ID
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    _NETRC_MACHINE = 'youtube'
    # Listed in order of quality
    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
    _video_extensions = {
        '13': '3gp',
        '17': 'mp4',
        '18': 'mp4',
        '22': 'mp4',
        '37': 'mp4',
        '38': 'mp4',
        '43': 'webm',
        '44': 'webm',
        '45': 'webm',
        '46': 'webm',
    }
    # Dimensions are listed as height x width
    _video_dimensions = {
        '5': '240x400',
        '6': '???',
        '13': '???',
        '17': '144x176',
        '18': '360x640',
        '22': '720x1280',
        '34': '360x640',
        '35': '480x854',
        '37': '1080x1920',
        '38': '3072x4096',
        '43': '360x640',
        '44': '480x854',
        '45': '720x1280',
        '46': '1080x1920',
    }
    IE_NAME = u'youtube'

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        if YoutubePlaylistIE.suitable(url): return False
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    def report_lang(self):
        """Report attempt to set language."""
        self.to_screen(u'Setting language')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')

    def report_video_webpage_download(self, video_id):
        """Report attempt to download video webpage."""
        self.to_screen(u'%s: Downloading video webpage' % video_id)

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self.to_screen(u'%s: Downloading video info webpage' % video_id)

    def report_video_subtitles_download(self, video_id):
        """Report attempt to check for available video subtitles."""
        self.to_screen(u'%s: Checking available subtitles' % video_id)

    def report_video_subtitles_request(self, video_id, sub_lang, format):
        """Report attempt to download video subtitles for a given language and format."""
        self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))

    def report_video_subtitles_available(self, video_id, sub_lang_list):
        """Report available subtitles."""
        sub_lang = ",".join(list(sub_lang_list.keys()))
        self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self.to_screen(u'%s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available."""
        self.to_screen(u'%s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self.to_screen(u'RTMP download detected')

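    # Note: the hard-coded descrambling below appears to mirror the character
    # shuffle performed by YouTube's player at the time this extractor was
    # written; it only handles the 43.43 signature layout and will need
    # updating if the layout changes.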
    def _decrypt_signature(self, s):
        """Descramble an encrypted signature; both dot-separated subkeys must be 43 characters long."""
        (a, b) = s.split('.')
        if len(a) != 43 or len(b) != 43:
            raise ExtractorError(u'Unable to decrypt signature, subkeys lengths %d.%d not supported; retrying might work' % (len(a), len(b)))
        if self._downloader.params.get('verbose'):
            self.to_screen('encrypted signature length %d.%d' % (len(a), len(b)))
        b = ''.join([b[:8], a[0], b[9:18], b[-4], b[19:39], b[18]])[0:40]
        a = a[-40:]
        s_dec = '.'.join((a, b))[::-1]
        return s_dec

    def _get_available_subtitles(self, video_id):
        self.report_video_subtitles_download(video_id)
        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
        try:
            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'unable to download video subtitles: %s' % compat_str(err), None)
        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
        if not sub_lang_list:
            return (u'video doesn\'t have subtitles', None)
        return sub_lang_list

    def _list_available_subtitles(self, video_id):
        sub_lang_list = self._get_available_subtitles(video_id)
        self.report_video_subtitles_available(video_id, sub_lang_list)

    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
        """
        Return tuple:
        (error_message, sub_lang, sub)
        """
        self.report_video_subtitles_request(video_id, sub_lang, format)
        params = compat_urllib_parse.urlencode({
            'lang': sub_lang,
            'name': sub_name,
            'v': video_id,
            'fmt': format,
        })
        url = 'http://www.youtube.com/api/timedtext?' + params
        try:
            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
        if not sub:
            return (u'Did not fetch video subtitles', None, None)
        return (None, sub_lang, sub)

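    # Automatic (ASR) captions are not listed by the timedtext list endpoint
    # used above, so they are requested through the 'ttsurl' found in the
    # ytplayer.config blob embedded in the watch page.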
    def _request_automatic_caption(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
           argument to speed up the process."""
        sub_lang = self._downloader.params.get('subtitleslang') or 'en'
        sub_format = self._downloader.params.get('subtitlesformat')
        self.to_screen(u'%s: Looking for automatic captions' % video_id)
        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
        if mobj is None:
            return [(err_msg, None, None)]
        player_config = json.loads(mobj.group(1))
        try:
            args = player_config[u'args']
            caption_url = args[u'ttsurl']
            timestamp = args[u'timestamp']
            params = compat_urllib_parse.urlencode({
                'lang': 'en',
                'tlang': sub_lang,
                'fmt': sub_format,
                'ts': timestamp,
                'kind': 'asr',
            })
            subtitles_url = caption_url + '&' + params
            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
            return [(None, sub_lang, sub)]
        except KeyError:
            return [(err_msg, None, None)]

    def _extract_subtitle(self, video_id):
        """
        Return a list with a tuple:
        [(error_message, sub_lang, sub)]
        """
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
        if isinstance(sub_lang_list, tuple):  # There was some error, it didn't get the available subtitles
            return [(sub_lang_list[0], None, None)]
        if self._downloader.params.get('subtitleslang', False):
            sub_lang = self._downloader.params.get('subtitleslang')
        elif 'en' in sub_lang_list:
            sub_lang = 'en'
        else:
            sub_lang = list(sub_lang_list.keys())[0]
        if sub_lang not in sub_lang_list:
            return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
        return [subtitle]

    def _extract_all_subtitles(self, video_id):
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
        if isinstance(sub_lang_list, tuple):  # There was some error, it didn't get the available subtitles
            return [(sub_lang_list[0], None, None)]
        subtitles = []
        for sub_lang in sub_lang_list:
            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
            subtitles.append(subtitle)
        return subtitles

    def _print_formats(self, formats):
        print('Available formats:')
        for x in formats:
            print('%s\t:\t%s\t[%s]' % (x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))

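    # _real_initialize runs once before extraction: it forces the site
    # language to English, optionally logs in with credentials from the
    # command line or ~/.netrc, and confirms the age gate so that
    # age-restricted watch pages can be downloaded.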
    def _real_initialize(self):
        if self._downloader is None:
            return

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
                return

        # Set language
        request = compat_urllib_request.Request(self._LANG_URL)
        try:
            self.report_lang()
            compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
            return

        # No authentication to be performed
        if username is None:
            return

        request = compat_urllib_request.Request(self._LOGIN_URL)
        try:
            login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
            return

        galx = None
        dsh = None
        match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
        if match:
            galx = match.group(1)
        match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
        if match:
            dsh = match.group(1)

        # Log in
        login_form_strs = {
            u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            u'Email': username,
            u'GALX': galx,
            u'Passwd': password,
            u'PersistentCookie': u'yes',
            u'_utf8': u'',
            u'bgresponse': u'js_disabled',
            u'checkConnection': u'',
            u'checkedDomains': u'youtube',
            u'dnConn': u'',
            u'dsh': dsh,
            u'pstMsg': u'0',
            u'rmShown': u'1',
            u'secTok': u'',
            u'signIn': u'Sign in',
            u'timeStmp': u'',
            u'service': u'youtube',
            u'uilel': u'3',
            u'hl': u'en_US',
        }
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
        request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        try:
            self.report_login()
            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
                self._downloader.report_warning(u'unable to log in: bad username or password')
                return
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
            return

        # Confirm age
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
        try:
            self.report_age_confirmation()
            compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))

    def _extract_id(self, url):
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(2)
        return video_id

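    # Extraction proceeds in stages: resolve any next_url redirect, download
    # the watch page, query get_video_info until a 'token' is returned, pull
    # metadata (uploader, title, thumbnail, date, description, subtitles),
    # then build the list of downloadable format URLs.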
    def _real_extract(self, url):
        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj:
            url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
        video_id = self._extract_id(url)

        # Get video webpage
        self.report_video_webpage_download(video_id)
        url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
        request = compat_urllib_request.Request(url)
        try:
            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))

        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None

        # Get video info
        self.report_video_info_webpage_download(video_id)
        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
            video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                              % (video_id, el_type))
            video_info_webpage = self._download_webpage(video_info_url, video_id,
                                                        note=False,
                                                        errnote='unable to download video info webpage')
            video_info = compat_parse_qs(video_info_webpage)
            if 'token' in video_info:
                break
        if 'token' not in video_info:
            if 'reason' in video_info:
                raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0])
            else:
                raise ExtractorError(u'"token" parameter not in video info for unknown reason')

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            raise ExtractorError(u'"rental" videos not supported')

        # Start extracting information
        self.report_information_extraction(video_id)

        # uploader
        if 'author' not in video_info:
            raise ExtractorError(u'Unable to extract uploader name')
        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])

        # uploader_id
        video_uploader_id = None
        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group(1)
        else:
            self._downloader.report_warning(u'unable to extract uploader nickname')

        # title
        if 'title' not in video_info:
            raise ExtractorError(u'Unable to extract video title')
        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])

        # thumbnail image
        if 'thumbnail_url' not in video_info:
            self._downloader.report_warning(u'unable to extract video thumbnail')
            video_thumbnail = ''
        else:  # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])

        # upload date
        upload_date = None
        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
        if mobj is not None:
            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
            upload_date = unified_strdate(upload_date)

        # description
        video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            video_description = clean_html(video_description)
        else:
            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
            if fd_mobj:
                video_description = unescapeHTML(fd_mobj.group(1))
            else:
                video_description = u''

        # subtitles
        video_subtitles = None

        if self._downloader.params.get('writesubtitles', False):
            video_subtitles = self._extract_subtitle(video_id)
            if video_subtitles:
                (sub_error, sub_lang, sub) = video_subtitles[0]
                if sub_error:
                    self._downloader.report_warning(sub_error)

        if self._downloader.params.get('writeautomaticsub', False):
            video_subtitles = self._request_automatic_caption(video_id, video_webpage)
            (sub_error, sub_lang, sub) = video_subtitles[0]
            if sub_error:
                self._downloader.report_warning(sub_error)

        if self._downloader.params.get('allsubtitles', False):
            video_subtitles = self._extract_all_subtitles(video_id)
            for video_subtitle in video_subtitles:
                (sub_error, sub_lang, sub) = video_subtitle
                if sub_error:
                    self._downloader.report_warning(sub_error)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id)
            return

        if 'length_seconds' not in video_info:
            self._downloader.report_warning(u'unable to extract video duration')
            video_duration = ''
        else:
            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])

        # Decide which formats to download
        req_format = self._downloader.params.get('format', None)

        try:
            mobj = re.search(r';ytplayer.config = ({.*?});', video_webpage)
            if not mobj:
                raise ValueError('Could not find vevo ID')
            info = json.loads(mobj.group(1))
            args = info['args']
            # Easy way to know if the 's' value is in url_encoded_fmt_stream_map;
            # these signatures are encrypted
            m_s = re.search(r'[&,]s=', args['url_encoded_fmt_stream_map'])
            if m_s is not None:
                self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
                video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
        except ValueError:
            pass

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            video_url_list = [(None, video_info['conn'][0])]
        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
            url_map = {}
            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
                url_data = compat_parse_qs(url_data_str)
                if 'itag' in url_data and 'url' in url_data:
                    url = url_data['url'][0]
                    if 'sig' in url_data:
                        url += '&signature=' + url_data['sig'][0]
                    elif 's' in url_data:
                        signature = self._decrypt_signature(url_data['s'][0])
                        url += '&signature=' + signature
                    if 'ratebypass' not in url:
                        url += '&ratebypass=yes'
                    url_map[url_data['itag'][0]] = url

            format_limit = self._downloader.params.get('format_limit', None)
            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
            if format_limit is not None and format_limit in available_formats:
                format_list = available_formats[available_formats.index(format_limit):]
            else:
                format_list = available_formats
            existing_formats = [x for x in format_list if x in url_map]
            if len(existing_formats) == 0:
                raise ExtractorError(u'no known formats available for video')
            if self._downloader.params.get('listformats', None):
                self._print_formats(existing_formats)
                return
            if req_format is None or req_format == 'best':
                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])]  # Best quality
            elif req_format == 'worst':
                video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])]  # worst quality
            elif req_format in ('-1', 'all'):
                video_url_list = [(f, url_map[f]) for f in existing_formats]  # All formats
            else:
                # Specific formats. We pick the first in a slash-delimited sequence.
                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
                req_formats = req_format.split('/')
                video_url_list = None
                for rf in req_formats:
                    if rf in url_map:
                        video_url_list = [(rf, url_map[rf])]
                        break
                if video_url_list is None:
                    raise ExtractorError(u'requested format not available')
        else:
            raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')

        results = []
        for format_param, video_real_url in video_url_list:
            # Extension
            video_extension = self._video_extensions.get(format_param, 'flv')

            video_format = '{0} - {1}'.format(format_param if format_param else video_extension,
                                              self._video_dimensions.get(format_param, '???'))

            results.append({
                'id': video_id,
                'url': video_real_url,
                'uploader': video_uploader,
                'uploader_id': video_uploader_id,
                'upload_date': upload_date,
                'title': video_title,
                'ext': video_extension,
                'format': video_format,
                'thumbnail': video_thumbnail,
                'description': video_description,
                'player_url': player_url,
                'subtitles': video_subtitles,
                'duration': video_duration
            })
        return results


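# Playlist entries are read from the GData v2 feed, 50 at a time, and each
# entry is handed back to YoutubeIE as a separate watch URL.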
class YoutubePlaylistIE(InfoExtractor):
    """Information Extractor for YouTube playlists."""

    _VALID_URL = r"""(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        youtube\.com/
                        (?:
                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
                           \? (?:.*?&)*? (?:p|a|list)=
                           |  p/
                        )
                        ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
                        .*
                     |
                        ((?:PL|EC|UU)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json&safeSearch=none'
    _MAX_RESULTS = 50
    IE_NAME = u'youtube:playlist'

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # Download playlist videos from API
        playlist_id = mobj.group(1) or mobj.group(2)
        page_num = 1
        videos = []

        while True:
            url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
            page = self._download_webpage(url, playlist_id, u'Downloading page #%s' % page_num)

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))

            if 'feed' not in response:
                raise ExtractorError(u'Got a malformed response from YouTube API')
            playlist_title = response['feed']['title']['$t']
            if 'entry' not in response['feed']:
                # Number of videos is a multiple of self._MAX_RESULTS
                break

            for entry in response['feed']['entry']:
                index = entry['yt$position']['$t']
                if 'media$group' in entry and 'media$player' in entry['media$group']:
                    videos.append((index, entry['media$group']['media$player']['url']))

            if len(response['feed']['entry']) < self._MAX_RESULTS:
                break
            page_num += 1

        videos = [v[1] for v in sorted(videos)]

        url_results = [self.url_result(url, 'Youtube') for url in videos]
        return [self.playlist_result(url_results, playlist_id, playlist_title)]


class YoutubeChannelIE(InfoExtractor):
    """Information Extractor for YouTube channels."""

    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'http://www.youtube.com/channel_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
    IE_NAME = u'youtube:channel'

    def extract_videos_from_page(self, page):
        ids_in_page = []
        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
            if mobj.group(1) not in ids_in_page:
                ids_in_page.append(mobj.group(1))
        return ids_in_page

    def _real_extract(self, url):
        # Extract channel id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # Download channel page
        channel_id = mobj.group(1)
        video_ids = []
        pagenum = 1

        url = self._TEMPLATE_URL % (channel_id, pagenum)
        page = self._download_webpage(url, channel_id,
                                      u'Downloading page #%s' % pagenum)

        # Extract video identifiers
        ids_in_page = self.extract_videos_from_page(page)
        video_ids.extend(ids_in_page)

        # Download any subsequent channel pages using the json-based channel_ajax query
        if self._MORE_PAGES_INDICATOR in page:
            while True:
                pagenum = pagenum + 1

                url = self._MORE_PAGES_URL % (pagenum, channel_id)
                page = self._download_webpage(url, channel_id,
                                              u'Downloading page #%s' % pagenum)

                page = json.loads(page)

                ids_in_page = self.extract_videos_from_page(page['content_html'])
                video_ids.extend(ids_in_page)

                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
        url_entries = [self.url_result(url, 'Youtube') for url in urls]
        return [self.playlist_result(url_entries, channel_id)]


class YoutubeUserIE(InfoExtractor):
    """Information Extractor for YouTube users."""

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
    IE_NAME = u'youtube:user'

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []
        pagenum = 0

        while True:
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(gdata_url, username,
                                          u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(mobj.group(1))

            video_ids.extend(ids_in_page)

            # A little optimization - if current page is not
            # "full", ie. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.
            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
                break

            pagenum += 1

        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
        url_results = [self.url_result(url, 'Youtube') for url in urls]
        return [self.playlist_result(url_results, playlist_title=username)]


class YoutubeSearchIE(SearchInfoExtractor):
    """Information Extractor for YouTube search queries."""

    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _MAX_RESULTS = 1000
    IE_NAME = u'youtube:search'
    _SEARCH_KEY = 'ytsearch'

    def report_download_page(self, query, pagenum):
        """Report attempt to download search page with given number."""
        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        video_ids = []
        pagenum = 0
        limit = n

        while (50 * pagenum) < limit:
            self.report_download_page(query, pagenum+1)
            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
            request = compat_urllib_request.Request(result_url)
            try:
                data = compat_urllib_request.urlopen(request).read().decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
            api_response = json.loads(data)['data']

            if 'items' not in api_response:
                raise ExtractorError(u'[youtube] No video results')

            new_ids = list(video['id'] for video in api_response['items'])
            video_ids += new_ids

            limit = min(n, api_response['totalItems'])
            pagenum += 1

        if len(video_ids) > n:
            video_ids = video_ids[:n]
        videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
        return self.playlist_result(videos, query)