# encoding: utf-8
from __future__ import unicode_literals

import json
import re
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
    compat_HTTPError,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
    ExtractorError,
    InAdvancePagedList,
    int_or_none,
    RegexNotFoundError,
    std_headers,
    unsmuggle_url,
    urlencode_postdata,
)


class VimeoBaseInfoExtractor(InfoExtractor):
    _NETRC_MACHINE = 'vimeo'
    _LOGIN_REQUIRED = False

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        self.report_login()
        login_url = 'https://vimeo.com/log_in'
        webpage = self._download_webpage(login_url, None, False)
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = urlencode_postdata({
            'email': username,
            'password': password,
            'action': 'login',
            'service': 'vimeo',
            'token': token,
        })
        login_request = compat_urllib_request.Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(login_request, None, False, 'Wrong login info')


class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information extractor for vimeo.com."""

    # _VALID_URL matches Vimeo URLs
    _VALID_URL = r'''(?x)
        https?://
        (?:(?:www|(?P<player>player))\.)?
        vimeo(?P<pro>pro)?\.com/
        (?!channels/[^/?#]+/?(?:$|[?#])|album/)
        (?:.*?/)?
        (?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
        (?:videos?/)?
        (?P<id>[0-9]+)
        /?(?:[?&].*)?(?:[#].*)?$'''
    IE_NAME = 'vimeo'
    _TESTS = [
        {
            'url': 'http://vimeo.com/56015672#at=0',
            'md5': '8879b6cc097e987f02484baf890129e5',
            'info_dict': {
                'id': '56015672',
                'ext': 'mp4',
                "upload_date": "20121220",
                "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "uploader_id": "user7108434",
                "uploader": "Filippo Valsorda",
                "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "duration": 10,
            },
        },
        {
            'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
            'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
            'note': 'Vimeo Pro video (#1197)',
            'info_dict': {
                'id': '68093876',
                'ext': 'mp4',
                'uploader_id': 'openstreetmapus',
                'uploader': 'OpenStreetMap US',
                'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
                'description': 'md5:380943ec71b89736ff4bf27183233d09',
                'duration': 1595,
            },
        },
        {
            'url': 'http://player.vimeo.com/video/54469442',
            'md5': '619b811a4417aa4abe78dc653becf511',
            'note': 'Videos that embed the url in the player page',
            'info_dict': {
                'id': '54469442',
                'ext': 'mp4',
                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
                'uploader': 'The BLN & Business of Software',
                'uploader_id': 'theblnbusinessofsoftware',
                'duration': 3610,
                'description': None,
            },
        },
        {
            'url': 'http://vimeo.com/68375962',
            'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
            'note': 'Video protected with password',
            'info_dict': {
                'id': '68375962',
                'ext': 'mp4',
                'title': 'youtube-dl password protected test video',
                'upload_date': '20130614',
                'uploader_id': 'user18948128',
                'uploader': 'Jaime Marquínez Ferrándiz',
                'duration': 10,
                'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
            },
            'params': {
                'videopassword': 'youtube-dl',
            },
        },
        {
            'url': 'http://vimeo.com/channels/keypeele/75629013',
            'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
            'note': 'Video is freely available via original URL '
                    'and protected with password when accessed via http://vimeo.com/75629013',
            'info_dict': {
                'id': '75629013',
                'ext': 'mp4',
                'title': 'Key & Peele: Terrorist Interrogation',
                'description': 'md5:8678b246399b070816b12313e8b4eb5c',
                'uploader_id': 'atencio',
                'uploader': 'Peter Atencio',
                'duration': 187,
            },
        },
        {
            'url': 'http://vimeo.com/76979871',
            'md5': '3363dd6ffebe3784d56f4132317fd446',
            'note': 'Video with subtitles',
            'info_dict': {
                'id': '76979871',
                'ext': 'mp4',
                'title': 'The New Vimeo Player (You Know, For Videos)',
                'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
                'upload_date': '20131015',
                'uploader_id': 'staff',
                'uploader': 'Vimeo Staff',
                'duration': 62,
            }
        },
        {
            # from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
            'url': 'https://player.vimeo.com/video/98044508',
            'note': 'The js code contains assignments to the same variable as the config',
            'info_dict': {
                'id': '98044508',
                'ext': 'mp4',
                'title': 'Pier Solar OUYA Official Trailer',
                'uploader': 'Tulio Gonçalves',
                'uploader_id': 'user28849593',
            },
        },
    ]

    def _verify_video_password(self, url, video_id, webpage):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = compat_urllib_parse.urlencode({
            'password': password,
            'token': token,
        })
        # I didn't manage to use the password with https
        if url.startswith('https'):
            pass_url = url.replace('https', 'http')
        else:
            pass_url = url
        password_request = compat_urllib_request.Request(pass_url + '/password', data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        password_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(password_request, video_id,
                               'Verifying the password',
                               'Wrong password')

    def _verify_player_video_password(self, url, video_id):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        data = compat_urllib_parse.urlencode({'password': password})
        pass_url = url + '/check-password'
        password_request = compat_urllib_request.Request(pass_url, data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        return self._download_json(
            password_request, video_id,
            'Verifying the password',
            'Wrong password')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        url, data = unsmuggle_url(url)
        headers = std_headers
        if data is not None:
            headers = headers.copy()
            headers.update(data)
        if 'Referer' not in headers:
            headers['Referer'] = url

        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        orig_url = url
        if mobj.group('pro') or mobj.group('player'):
            url = 'http://player.vimeo.com/video/' + video_id

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url, None, headers)
        try:
            webpage = self._download_webpage(request, video_id)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                errmsg = ee.cause.read()
                if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                    raise ExtractorError(
                        'Cannot download embed-only video without embedding '
                        'URL. Please call youtube-dl with the URL of the page '
                        'that embeds this video.',
                        expected=True)
            raise

        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract the fields that are Vimeo-specific.
        self.report_extraction(video_id)

        # Extract the config JSON
        try:
            try:
                config_url = self._html_search_regex(
                    r' data-config-url="(.+?)"', webpage, 'config URL')
                config_json = self._download_webpage(config_url, video_id)
                config = json.loads(config_json)
            except RegexNotFoundError:
                # For pro videos or player.vimeo.com urls
                # we try to find out which variable the config dict is assigned to
                m_variable_name = re.search('(\w)\.video\.id', webpage)
                if m_variable_name is not None:
                    config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
                else:
                    config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                config = self._search_regex(config_re, webpage, 'info section',
                                            flags=re.DOTALL)
                config = json.loads(config)
        except Exception as e:
            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')

            if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                self._verify_video_password(url, video_id, webpage)
                return self._real_extract(url)
            else:
                raise ExtractorError('Unable to extract info section',
                                     cause=e)
        else:
            if config.get('view') == 4:
                config = self._verify_player_video_password(url, video_id)

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None

        # Extract video thumbnail
        video_thumbnail = config["video"].get("thumbnail")
        if video_thumbnail is None:
            video_thumbs = config["video"].get("thumbs")
            if video_thumbs and isinstance(video_thumbs, dict):
                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]

        # Extract video description
        video_description = self._html_search_regex(
            r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
            webpage, 'description', default=None)
        if not video_description:
            video_description = self._html_search_meta(
                'description', webpage, default=None)
        if not video_description and mobj.group('pro'):
            orig_webpage = self._download_webpage(
                orig_url, video_id,
                note='Downloading webpage for description',
                fatal=False)
            if orig_webpage:
                video_description = self._html_search_meta(
                    'description', orig_webpage, default=None)
        if not video_description and not mobj.group('player'):
            self._downloader.report_warning('Cannot find video description')

        # Extract video duration
        video_duration = int_or_none(config["video"].get("duration"))

        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

        try:
            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
        except RegexNotFoundError:
            # This info is only available in vimeo.com/{id} urls
            view_count = None
            like_count = None
            comment_count = None

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
        files = {'hd': [], 'sd': [], 'other': []}
        config_files = config["video"].get("files") or config["request"].get("files")
        for codec_name, codec_extension in codecs:
            for quality in config_files.get(codec_name, []):
                format_id = '-'.join((codec_name, quality)).lower()
                key = quality if quality in files else 'other'
                video_url = None
                if isinstance(config_files[codec_name], dict):
                    file_info = config_files[codec_name][quality]
                    video_url = file_info.get('url')
                else:
                    file_info = {}
                if video_url is None:
                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                        % (video_id, sig, timestamp, quality, codec_name.upper())
                files[key].append({
                    'ext': codec_extension,
                    'url': video_url,
                    'format_id': format_id,
                    'width': file_info.get('width'),
                    'height': file_info.get('height'),
                })
        formats = []
        for key in ('other', 'sd', 'hd'):
            formats += files[key]
        if len(formats) == 0:
            raise ExtractorError('No known codec found')

        subtitles = {}
        text_tracks = config['request'].get('text_tracks')
        if text_tracks:
            for tt in text_tracks:
                subtitles[tt['lang']] = 'http://vimeo.com' + tt['url']

        video_subtitles = self.extract_subtitles(video_id, subtitles)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'duration': video_duration,
            'formats': formats,
            'webpage_url': url,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'subtitles': video_subtitles,
        }


class VimeoChannelIE(InfoExtractor):
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'https?://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
    _TESTS = [{
        'url': 'http://vimeo.com/channels/tributes',
        'info_dict': {
            'title': 'Vimeo Tributes',
        },
        'playlist_mincount': 25,
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/videos/page:%d/' % (base_url, pagenum)

    def _extract_list_title(self, webpage):
        return self._html_search_regex(self._TITLE_RE, webpage, 'list title')

    def _extract_videos(self, list_id, base_url):
        video_ids = []
        for pagenum in itertools.count(1):
            webpage = self._download_webpage(
                self._page_url(base_url, pagenum), list_id,
                'Downloading page %s' % pagenum)
            video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break

        entries = [self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
                   for video_id in video_ids]
        return {'_type': 'playlist',
                'id': list_id,
                'title': self._extract_list_title(webpage),
                'entries': entries,
                }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id, 'http://vimeo.com/channels/%s' % channel_id)


class VimeoUserIE(VimeoChannelIE):
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'https?://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
    _TESTS = [{
        'url': 'http://vimeo.com/nkistudio/videos',
        'info_dict': {
            'title': 'Nki',
        },
        'playlist_mincount': 66,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/%s' % name)


class VimeoAlbumIE(VimeoChannelIE):
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'https?://vimeo\.com/album/(?P<id>\d+)'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
    _TESTS = [{
        'url': 'http://vimeo.com/album/2632481',
        'info_dict': {
            'title': 'Staff Favorites: November 2013',
        },
        'playlist_mincount': 13,
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/page:%d/' % (base_url, pagenum)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        album_id = mobj.group('id')
        return self._extract_videos(album_id, 'http://vimeo.com/album/%s' % album_id)


class VimeoGroupsIE(VimeoAlbumIE):
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'(?:https?://)?vimeo\.com/groups/(?P<name>[^/]+)'
    _TESTS = [{
        'url': 'http://vimeo.com/groups/rolexawards',
        'info_dict': {
            'title': 'Rolex Awards for Enterprise',
        },
        'playlist_mincount': 73,
    }]

    def _extract_list_title(self, webpage):
        return self._og_search_title(webpage)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/groups/%s' % name)


class VimeoReviewIE(InfoExtractor):
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'https?://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'file': '75524534.mp4',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
        }
    }, {
        'note': 'video player needs Referer',
        'url': 'http://vimeo.com/user22258446/review/91613211/13f927e053',
        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
        'info_dict': {
            'id': '91613211',
            'ext': 'mp4',
            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
            'uploader': 'DevWeek Events',
            'duration': 2773,
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        player_url = 'https://player.vimeo.com/player/' + video_id
        return self.url_result(player_url, 'Vimeo', video_id)


class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://vimeo\.com/home/watchlater|:vimeowatchlater'
    _LOGIN_REQUIRED = True
    _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
    _TESTS = [{
        'url': 'http://vimeo.com/home/watchlater',
        'only_matching': True,
    }]

    def _real_initialize(self):
        self._login()

    def _page_url(self, base_url, pagenum):
        url = '%s/page:%d/' % (base_url, pagenum)
        request = compat_urllib_request.Request(url)
        # Set the header to get a partial html page with the ids,
        # the normal page doesn't contain them.
        request.add_header('X-Requested-With', 'XMLHttpRequest')
        return request

    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')


class VimeoLikesIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
    IE_NAME = 'vimeo:likes'
    IE_DESC = 'Vimeo user likes'
    _TEST = {
        'url': 'https://vimeo.com/user755559/likes/',
        'playlist_mincount': 293,
        "info_dict": {
            "description": "See all the videos urza likes",
            "title": 'Videos urza likes',
        },
    }

    def _real_extract(self, url):
        user_id = self._match_id(url)
        webpage = self._download_webpage(url, user_id)
        page_count = self._int(
            self._search_regex(
                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
                    .*?</a></li>\s*<li\s+class="pagination_next">
                ''', webpage, 'page count'),
            'page count', fatal=True)
        PAGE_SIZE = 12
        title = self._html_search_regex(
            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
        description = self._html_search_meta('description', webpage)

        def _get_page(idx):
            page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
                self.http_scheme(), user_id, idx + 1)
            webpage = self._download_webpage(
                page_url, user_id,
                note='Downloading page %d/%d' % (idx + 1, page_count))
            video_list = self._search_regex(
                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
                webpage, 'video content')
            paths = re.findall(
                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
            for path in paths:
                yield {
                    '_type': 'url',
                    'url': compat_urlparse.urljoin(page_url, path),
                }

        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)

        return {
            '_type': 'playlist',
            'id': 'user%s_likes' % user_id,
            'title': title,
            'description': description,
            'entries': pl,
        }
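

# The sketch below is NOT part of this module; it is a minimal, hypothetical
# smoke test showing how the VimeoIE metadata documented in the _TESTS entries
# above can be exercised through youtube-dl's public YoutubeDL API. The chosen
# URL and printed fields are only illustrative assumptions.
if __name__ == '__main__':
    import youtube_dl

    # Quiet, metadata-only run: download=False returns the info dict that
    # VimeoIE._real_extract() builds (title, uploader, duration, formats, ...).
    ydl = youtube_dl.YoutubeDL({'quiet': True})
    info = ydl.extract_info('http://vimeo.com/56015672', download=False)
    print(info.get('title'), info.get('uploader'), info.get('duration'))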