You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

170 lines
6.6 KiB

  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import re
  4. from .common import InfoExtractor
  5. from ..compat import compat_str
  6. from ..utils import (
  7. int_or_none,
  8. parse_iso8601,
  9. unescapeHTML,
  10. qualities,
  11. )
  12. class Revision3EmbedIE(InfoExtractor):
  13. IE_NAME = 'revision3:embed'
  14. _VALID_URL = r'(?:revision3:(?:(?P<playlist_type>[^:]+):)?|https?://(?:(?:(?:www|embed)\.)?(?:revision3|animalist)|(?:(?:api|embed)\.)?seekernetwork)\.com/player/embed\?videoId=)(?P<playlist_id>\d+)'
  15. _TEST = {
  16. 'url': 'http://api.seekernetwork.com/player/embed?videoId=67558',
  17. 'md5': '83bcd157cab89ad7318dd7b8c9cf1306',
  18. 'info_dict': {
  19. 'id': '67558',
  20. 'ext': 'mp4',
  21. 'title': 'The Pros & Cons Of Zoos',
  22. 'description': 'Zoos are often depicted as a terrible place for animals to live, but is there any truth to this?',
  23. 'uploader_id': 'dnews',
  24. 'uploader': 'DNews',
  25. }
  26. }
  27. _API_KEY = 'ba9c741bce1b9d8e3defcc22193f3651b8867e62'
  28. def _real_extract(self, url):
  29. mobj = re.match(self._VALID_URL, url)
  30. playlist_id = mobj.group('playlist_id')
  31. playlist_type = mobj.group('playlist_type') or 'video_id'
  32. video_data = self._download_json(
  33. 'http://revision3.com/api/getPlaylist.json', playlist_id, query={
  34. 'api_key': self._API_KEY,
  35. 'codecs': 'h264,vp8,theora',
  36. playlist_type: playlist_id,
  37. })['items'][0]
  38. formats = []
  39. for vcodec, media in video_data['media'].items():
  40. for quality_id, quality in media.items():
  41. if quality_id == 'hls':
  42. formats.extend(self._extract_m3u8_formats(
  43. quality['url'], playlist_id, 'mp4',
  44. 'm3u8_native', m3u8_id='hls', fatal=False))
  45. else:
  46. formats.append({
  47. 'url': quality['url'],
  48. 'format_id': '%s-%s' % (vcodec, quality_id),
  49. 'tbr': int_or_none(quality.get('bitrate')),
  50. 'vcodec': vcodec,
  51. })
  52. self._sort_formats(formats)
  53. return {
  54. 'id': playlist_id,
  55. 'title': unescapeHTML(video_data['title']),
  56. 'description': unescapeHTML(video_data.get('summary')),
  57. 'uploader': video_data.get('show', {}).get('name'),
  58. 'uploader_id': video_data.get('show', {}).get('slug'),
  59. 'duration': int_or_none(video_data.get('duration')),
  60. 'formats': formats,
  61. }
  62. class Revision3IE(InfoExtractor):
  63. IE_NAME = 'revision'
  64. _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:revision3|animalist)\.com)/(?P<id>[^/]+(?:/[^/?#]+)?)'
  65. _TESTS = [{
  66. 'url': 'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016',
  67. 'md5': 'd94a72d85d0a829766de4deb8daaf7df',
  68. 'info_dict': {
  69. 'id': '71089',
  70. 'display_id': 'technobuffalo/5-google-predictions-for-2016',
  71. 'ext': 'webm',
  72. 'title': '5 Google Predictions for 2016',
  73. 'description': 'Google had a great 2015, but it\'s already time to look ahead. Here are our five predictions for 2016.',
  74. 'upload_date': '20151228',
  75. 'timestamp': 1451325600,
  76. 'duration': 187,
  77. 'uploader': 'TechnoBuffalo',
  78. 'uploader_id': 'technobuffalo',
  79. }
  80. }, {
  81. # Show
  82. 'url': 'http://revision3.com/variant',
  83. 'only_matching': True,
  84. }, {
  85. # Tag
  86. 'url': 'http://revision3.com/vr',
  87. 'only_matching': True,
  88. }]
  89. _PAGE_DATA_TEMPLATE = 'http://www.%s/apiProxy/ddn/%s?domain=%s'
  90. def _real_extract(self, url):
  91. domain, display_id = re.match(self._VALID_URL, url).groups()
  92. site = domain.split('.')[0]
  93. page_info = self._download_json(
  94. self._PAGE_DATA_TEMPLATE % (domain, display_id, domain), display_id)
  95. page_data = page_info['data']
  96. page_type = page_data['type']
  97. if page_type in ('episode', 'embed'):
  98. show_data = page_data['show']['data']
  99. page_id = compat_str(page_data['id'])
  100. video_id = compat_str(page_data['video']['data']['id'])
  101. preference = qualities(['mini', 'small', 'medium', 'large'])
  102. thumbnails = [{
  103. 'url': image_url,
  104. 'id': image_id,
  105. 'preference': preference(image_id)
  106. } for image_id, image_url in page_data.get('images', {}).items()]
  107. info = {
  108. 'id': page_id,
  109. 'display_id': display_id,
  110. 'title': unescapeHTML(page_data['name']),
  111. 'description': unescapeHTML(page_data.get('summary')),
  112. 'timestamp': parse_iso8601(page_data.get('publishTime'), ' '),
  113. 'author': page_data.get('author'),
  114. 'uploader': show_data.get('name'),
  115. 'uploader_id': show_data.get('slug'),
  116. 'thumbnails': thumbnails,
  117. 'extractor_key': site,
  118. }
  119. if page_type == 'embed':
  120. info.update({
  121. '_type': 'url_transparent',
  122. 'url': page_data['video']['data']['embed'],
  123. })
  124. return info
  125. info.update({
  126. '_type': 'url_transparent',
  127. 'url': 'revision3:%s' % video_id,
  128. })
  129. return info
  130. else:
  131. list_data = page_info[page_type]['data']
  132. episodes_data = page_info['episodes']['data']
  133. num_episodes = page_info['meta']['totalEpisodes']
  134. processed_episodes = 0
  135. entries = []
  136. page_num = 1
  137. while True:
  138. entries.extend([{
  139. '_type': 'url',
  140. 'url': 'http://%s%s' % (domain, episode['path']),
  141. 'id': compat_str(episode['id']),
  142. 'ie_key': 'Revision3',
  143. 'extractor_key': site,
  144. } for episode in episodes_data])
  145. processed_episodes += len(episodes_data)
  146. if processed_episodes == num_episodes:
  147. break
  148. page_num += 1
  149. episodes_data = self._download_json(self._PAGE_DATA_TEMPLATE % (
  150. domain, display_id + '/' + compat_str(page_num), domain),
  151. display_id)['episodes']['data']
  152. return self.playlist_result(
  153. entries, compat_str(list_data['id']),
  154. list_data.get('name'), list_data.get('summary'))