# encoding: utf-8
from __future__ import unicode_literals

import json
import re
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    InAdvancePagedList,
    int_or_none,
    RegexNotFoundError,
    std_headers,
    unsmuggle_url,
    urlencode_postdata,
)


class VimeoBaseInfoExtractor(InfoExtractor):
    _NETRC_MACHINE = 'vimeo'
    _LOGIN_REQUIRED = False

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        self.report_login()
        login_url = 'https://vimeo.com/log_in'
        webpage = self._download_webpage(login_url, None, False)
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = urlencode_postdata({
            'email': username,
            'password': password,
            'action': 'login',
            'service': 'vimeo',
            'token': token,
        })
        login_request = compat_urllib_request.Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(login_request, None, False, 'Wrong login info')


class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information extractor for vimeo.com."""

    # _VALID_URL matches Vimeo URLs
    _VALID_URL = r'''(?x)
        https?://
        (?:(?:www|(?P<player>player))\.)?
        vimeo(?P<pro>pro)?\.com/
        (?!channels/[^/?#]+/?(?:$|[?#])|album/)
        (?:.*?/)?
        (?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
        (?:videos?/)?
        (?P<id>[0-9]+)
        /?(?:[?&].*)?(?:[#].*)?$'''
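    # A few illustrative URL shapes (taken from the pattern and the _TESTS
    # below; not an exhaustive list):
    #   accepted: http://vimeo.com/56015672
    #             http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876
    #             http://player.vimeo.com/video/54469442
    #   rejected: channel listing pages (http://vimeo.com/channels/tributes) and
    #             album URLs, which are handled by the more specific extractors
    #             defined further down in this file.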
    IE_NAME = 'vimeo'
    _TESTS = [
        {
            'url': 'http://vimeo.com/56015672#at=0',
            'md5': '8879b6cc097e987f02484baf890129e5',
            'info_dict': {
                'id': '56015672',
                'ext': 'mp4',
                "upload_date": "20121220",
                "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "uploader_id": "user7108434",
                "uploader": "Filippo Valsorda",
                "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "duration": 10,
            },
        },
        {
            'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
            'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
            'note': 'Vimeo Pro video (#1197)',
            'info_dict': {
                'id': '68093876',
                'ext': 'mp4',
                'uploader_id': 'openstreetmapus',
                'uploader': 'OpenStreetMap US',
                'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
                'description': 'md5:380943ec71b89736ff4bf27183233d09',
                'duration': 1595,
            },
        },
        {
            'url': 'http://player.vimeo.com/video/54469442',
            'md5': '619b811a4417aa4abe78dc653becf511',
            'note': 'Videos that embed the url in the player page',
            'info_dict': {
                'id': '54469442',
                'ext': 'mp4',
                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
                'uploader': 'The BLN & Business of Software',
                'uploader_id': 'theblnbusinessofsoftware',
                'duration': 3610,
                'description': None,
            },
        },
        {
            'url': 'http://vimeo.com/68375962',
            'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
            'note': 'Video protected with password',
            'info_dict': {
                'id': '68375962',
                'ext': 'mp4',
                'title': 'youtube-dl password protected test video',
                'upload_date': '20130614',
                'uploader_id': 'user18948128',
                'uploader': 'Jaime Marquínez Ferrándiz',
                'duration': 10,
                'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
            },
            'params': {
                'videopassword': 'youtube-dl',
            },
        },
        {
            'url': 'http://vimeo.com/channels/keypeele/75629013',
            'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
            'note': 'Video is freely available via original URL '
                    'and protected with password when accessed via http://vimeo.com/75629013',
            'info_dict': {
                'id': '75629013',
                'ext': 'mp4',
                'title': 'Key & Peele: Terrorist Interrogation',
                'description': 'md5:8678b246399b070816b12313e8b4eb5c',
                'uploader_id': 'atencio',
                'uploader': 'Peter Atencio',
                'duration': 187,
            },
        },
        {
            'url': 'http://vimeo.com/76979871',
            'md5': '3363dd6ffebe3784d56f4132317fd446',
            'note': 'Video with subtitles',
            'info_dict': {
                'id': '76979871',
                'ext': 'mp4',
                'title': 'The New Vimeo Player (You Know, For Videos)',
                'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
                'upload_date': '20131015',
                'uploader_id': 'staff',
                'uploader': 'Vimeo Staff',
                'duration': 62,
            }
        },
        {
            # from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
            'url': 'https://player.vimeo.com/video/98044508',
            'note': 'The js code contains assignments to the same variable as the config',
            'info_dict': {
                'id': '98044508',
                'ext': 'mp4',
                'title': 'Pier Solar OUYA Official Trailer',
                'uploader': 'Tulio Gonçalves',
                'uploader_id': 'user28849593',
            },
        },
    ]

    def _verify_video_password(self, url, video_id, webpage):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = compat_urllib_parse.urlencode({
            'password': password,
            'token': token,
        })
        # Password verification doesn't seem to work over https, so fall back to plain http
        if url.startswith('https'):
            pass_url = url.replace('https', 'http')
        else:
            pass_url = url
        password_request = compat_urllib_request.Request(pass_url + '/password', data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        password_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(password_request, video_id,
                               'Verifying the password',
                               'Wrong password')

    def _verify_player_video_password(self, url, video_id):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        data = compat_urllib_parse.urlencode({'password': password})
        pass_url = url + '/check-password'
        password_request = compat_urllib_request.Request(pass_url, data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        return self._download_json(
            password_request, video_id,
            'Verifying the password',
            'Wrong password')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        url, data = unsmuggle_url(url)
        headers = std_headers
        if data is not None:
            headers = headers.copy()
            headers.update(data)
        if 'Referer' not in headers:
            headers['Referer'] = url

        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        orig_url = url
        if mobj.group('pro') or mobj.group('player'):
            url = 'http://player.vimeo.com/video/' + video_id

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url, None, headers)
        try:
            webpage = self._download_webpage(request, video_id)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                errmsg = ee.cause.read()
                if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                    raise ExtractorError(
                        'Cannot download embed-only video without embedding '
                        'URL. Please call youtube-dl with the URL of the page '
                        'that embeds this video.',
                        expected=True)
            raise

        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract those that are Vimeo specific.
        self.report_extraction(video_id)

        # Extract the config JSON
        try:
            try:
                config_url = self._html_search_regex(
                    r' data-config-url="(.+?)"', webpage, 'config URL')
                config_json = self._download_webpage(config_url, video_id)
                config = json.loads(config_json)
            except RegexNotFoundError:
                # For pro videos or player.vimeo.com urls we try to find out
                # which variable the config dict is assigned to
                m_variable_name = re.search('(\w)\.video\.id', webpage)
                if m_variable_name is not None:
                    config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
                else:
                    config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                config = self._search_regex(config_re, webpage, 'info section',
                                            flags=re.DOTALL)
                config = json.loads(config)
        except Exception as e:
            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')

            if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                self._verify_video_password(url, video_id, webpage)
                return self._real_extract(url)
            else:
                raise ExtractorError('Unable to extract info section',
                                     cause=e)
        else:
            if config.get('view') == 4:
                config = self._verify_player_video_password(url, video_id)

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None

        # Extract video thumbnail
        video_thumbnail = config["video"].get("thumbnail")
        if video_thumbnail is None:
            video_thumbs = config["video"].get("thumbs")
            if video_thumbs and isinstance(video_thumbs, dict):
                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]

        # Extract video description
        video_description = self._html_search_regex(
            r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
            webpage, 'description', default=None)
        if not video_description:
            video_description = self._html_search_meta(
                'description', webpage, default=None)
        if not video_description and mobj.group('pro'):
            orig_webpage = self._download_webpage(
                orig_url, video_id,
                note='Downloading webpage for description',
                fatal=False)
            if orig_webpage:
                video_description = self._html_search_meta(
                    'description', orig_webpage, default=None)
        if not video_description and not mobj.group('player'):
            self._downloader.report_warning('Cannot find video description')

        # Extract video duration
        video_duration = int_or_none(config["video"].get("duration"))

        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

        try:
            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
        except RegexNotFoundError:
            # This info is only available in vimeo.com/{id} urls
            view_count = None
            like_count = None
            comment_count = None

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
        files = {'hd': [], 'sd': [], 'other': []}
        config_files = config["video"].get("files") or config["request"].get("files")
        for codec_name, codec_extension in codecs:
            for quality in config_files.get(codec_name, []):
                format_id = '-'.join((codec_name, quality)).lower()
                key = quality if quality in files else 'other'
                video_url = None
                if isinstance(config_files[codec_name], dict):
                    file_info = config_files[codec_name][quality]
                    video_url = file_info.get('url')
                else:
                    file_info = {}
                if video_url is None:
                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                        % (video_id, sig, timestamp, quality, codec_name.upper())
                files[key].append({
                    'ext': codec_extension,
                    'url': video_url,
                    'format_id': format_id,
                    'width': file_info.get('width'),
                    'height': file_info.get('height'),
                })
        formats = []
        for key in ('other', 'sd', 'hd'):
            formats += files[key]
        if len(formats) == 0:
            raise ExtractorError('No known codec found')

        subtitles = {}
        text_tracks = config['request'].get('text_tracks')
        if text_tracks:
            for tt in text_tracks:
                subtitles[tt['lang']] = 'http://vimeo.com' + tt['url']

        video_subtitles = self.extract_subtitles(video_id, subtitles)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'duration': video_duration,
            'formats': formats,
            'webpage_url': url,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'subtitles': video_subtitles,
        }


class VimeoChannelIE(InfoExtractor):
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'https?://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
    _TESTS = [{
        'url': 'http://vimeo.com/channels/tributes',
        'info_dict': {
            'title': 'Vimeo Tributes',
        },
        'playlist_mincount': 25,
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/videos/page:%d/' % (base_url, pagenum)

    def _extract_list_title(self, webpage):
        return self._html_search_regex(self._TITLE_RE, webpage, 'list title')

    def _extract_videos(self, list_id, base_url):
        video_ids = []
        for pagenum in itertools.count(1):
            webpage = self._download_webpage(
                self._page_url(base_url, pagenum), list_id,
                'Downloading page %s' % pagenum)
            video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break

        entries = [self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
                   for video_id in video_ids]
        return {'_type': 'playlist',
                'id': list_id,
                'title': self._extract_list_title(webpage),
                'entries': entries,
                }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id, 'http://vimeo.com/channels/%s' % channel_id)


class VimeoUserIE(VimeoChannelIE):
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'https?://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
    _TESTS = [{
        'url': 'http://vimeo.com/nkistudio/videos',
        'info_dict': {
            'title': 'Nki',
        },
        'playlist_mincount': 66,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/%s' % name)


class VimeoAlbumIE(VimeoChannelIE):
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'https?://vimeo\.com/album/(?P<id>\d+)'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
    _TESTS = [{
        'url': 'http://vimeo.com/album/2632481',
        'info_dict': {
            'title': 'Staff Favorites: November 2013',
        },
        'playlist_mincount': 13,
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/page:%d/' % (base_url, pagenum)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        album_id = mobj.group('id')
        return self._extract_videos(album_id, 'http://vimeo.com/album/%s' % album_id)


class VimeoGroupsIE(VimeoAlbumIE):
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'(?:https?://)?vimeo\.com/groups/(?P<name>[^/]+)'
    _TESTS = [{
        'url': 'http://vimeo.com/groups/rolexawards',
        'info_dict': {
            'title': 'Rolex Awards for Enterprise',
        },
        'playlist_mincount': 73,
    }]

    def _extract_list_title(self, webpage):
        return self._og_search_title(webpage)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/groups/%s' % name)


class VimeoReviewIE(InfoExtractor):
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'https?://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'file': '75524534.mp4',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
        }
    }, {
        'note': 'video player needs Referer',
        'url': 'http://vimeo.com/user22258446/review/91613211/13f927e053',
        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
        'info_dict': {
            'id': '91613211',
            'ext': 'mp4',
            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
            'uploader': 'DevWeek Events',
            'duration': 2773,
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        player_url = 'https://player.vimeo.com/player/' + video_id
        return self.url_result(player_url, 'Vimeo', video_id)


class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://vimeo\.com/home/watchlater|:vimeowatchlater'
    _LOGIN_REQUIRED = True
    _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
    _TESTS = [{
        'url': 'http://vimeo.com/home/watchlater',
        'only_matching': True,
    }]

    def _real_initialize(self):
        self._login()

    def _page_url(self, base_url, pagenum):
        url = '%s/page:%d/' % (base_url, pagenum)
        request = compat_urllib_request.Request(url)
        # Set this header to get a partial HTML page that contains the ids;
        # the normal page doesn't contain them.
        request.add_header('X-Requested-With', 'XMLHttpRequest')
        return request

    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')


class VimeoLikesIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
    IE_NAME = 'vimeo:likes'
    IE_DESC = 'Vimeo user likes'
    _TEST = {
        'url': 'https://vimeo.com/user755559/likes/',
        'playlist_mincount': 293,
        "info_dict": {
            "description": "See all the videos urza likes",
            "title": 'Videos urza likes',
        },
    }

    def _real_extract(self, url):
        user_id = self._match_id(url)
        webpage = self._download_webpage(url, user_id)
        page_count = self._int(
            self._search_regex(
                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
                    .*?</a></li>\s*<li\s+class="pagination_next">
                ''', webpage, 'page count'),
            'page count', fatal=True)
        PAGE_SIZE = 12
        title = self._html_search_regex(
            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
        description = self._html_search_meta('description', webpage)

        def _get_page(idx):
            page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
                self.http_scheme(), user_id, idx + 1)
            webpage = self._download_webpage(
                page_url, user_id,
                note='Downloading page %d/%d' % (idx + 1, page_count))
            video_list = self._search_regex(
                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
                webpage, 'video content')
            paths = re.findall(
                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
            for path in paths:
                yield {
                    '_type': 'url',
                    'url': compat_urlparse.urljoin(page_url, path),
                }

        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)

        return {
            '_type': 'playlist',
            'id': 'user%s_likes' % user_id,
            'title': title,
            'description': description,
            'entries': pl,
        }
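

# An informal usage sketch (comments only, so the module itself is unchanged):
# these extractors are normally driven through the youtube-dl command line
# rather than instantiated directly. The URLs and the password below come from
# the _TESTS entries above; the flags are the ones referenced in this file's
# error messages and IE_DESC strings.
#
#   youtube-dl http://vimeo.com/56015672
#   youtube-dl --video-password youtube-dl http://vimeo.com/68375962
#   youtube-dl --referer <embedding-page-url> <embed-only-player-url>
#   youtube-dl :vimeowatchlater    # requires credentials (see _NETRC_MACHINE)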