# coding: utf-8
from __future__ import unicode_literals

import base64
import json
import hashlib
import hmac
import random
import string
import time

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urllib_parse_urlencode,
    compat_urllib_parse,
)
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
)


class VRVBaseIE(InfoExtractor):
    _API_DOMAIN = None
    _API_PARAMS = {}
    _CMS_SIGNING = {}
    _TOKEN = None
    _TOKEN_SECRET = ''
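
    # Every API request is signed with OAuth 1.0a (HMAC-SHA1) as described in
    # RFC 5849; the OAuth token/secret are only set after a successful login.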
    def _call_api(self, path, video_id, note, data=None):
        # https://tools.ietf.org/html/rfc5849#section-3
        base_url = self._API_DOMAIN + '/core/' + path
        query = [
            ('oauth_consumer_key', self._API_PARAMS['oAuthKey']),
            ('oauth_nonce', ''.join([random.choice(string.ascii_letters) for _ in range(32)])),
            ('oauth_signature_method', 'HMAC-SHA1'),
            ('oauth_timestamp', int(time.time())),
        ]
        if self._TOKEN:
            query.append(('oauth_token', self._TOKEN))
        encoded_query = compat_urllib_parse_urlencode(query)
        headers = self.geo_verification_headers()
        if data:
            data = json.dumps(data).encode()
            headers['Content-Type'] = 'application/json'
        base_string = '&'.join([
            'POST' if data else 'GET',
            compat_urllib_parse.quote(base_url, ''),
            compat_urllib_parse.quote(encoded_query, '')])
        oauth_signature = base64.b64encode(hmac.new(
            (self._API_PARAMS['oAuthSecret'] + '&' + self._TOKEN_SECRET).encode('ascii'),
            base_string.encode(), hashlib.sha1).digest()).decode()
        encoded_query += '&oauth_signature=' + compat_urllib_parse.quote(oauth_signature, '')
        try:
            return self._download_json(
                '?'.join([base_url, encoded_query]), video_id,
                note='Downloading %s JSON metadata' % note, headers=headers, data=data)
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                raise ExtractorError(json.loads(e.cause.read().decode())['message'], expected=True)
            raise
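
    # CMS endpoints expect extra signing query parameters, which are fetched
    # once from the 'index' API call and cached on the class.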
    def _call_cms(self, path, video_id, note):
        if not self._CMS_SIGNING:
            self._CMS_SIGNING = self._call_api('index', video_id, 'CMS Signing')['cms_signing']
        return self._download_json(
            self._API_DOMAIN + path, video_id, query=self._CMS_SIGNING,
            note='Downloading %s JSON metadata' % note, headers=self.geo_verification_headers())

    def _get_cms_resource(self, resource_key, video_id):
        return self._call_api(
            'cms_resource', video_id, 'resource path', data={
                'resource_key': resource_key,
            })['__links__']['cms_resource']['href']
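
    # The OAuth consumer key/secret and API domain are scraped from the
    # window.__APP_CONFIG__ blob embedded in the vrv.co start page.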
    def _real_initialize(self):
        webpage = self._download_webpage(
            'https://vrv.co/', None, headers=self.geo_verification_headers())
        self._API_PARAMS = self._parse_json(self._search_regex(
            [
                r'window\.__APP_CONFIG__\s*=\s*({.+?})(?:</script>|;)',
                r'window\.__APP_CONFIG__\s*=\s*({.+})'
            ], webpage, 'app config'), None)['cxApiParams']
        self._API_DOMAIN = self._API_PARAMS.get('apiDomain', 'https://api.vrv.co')


class VRVIE(VRVBaseIE):
    IE_NAME = 'vrv'
    _VALID_URL = r'https?://(?:www\.)?vrv\.co/watch/(?P<id>[A-Z0-9]+)'
    _TESTS = [{
        'url': 'https://vrv.co/watch/GR9PNZ396/Hidden-America-with-Jonah-Ray:BOSTON-WHERE-THE-PAST-IS-THE-PRESENT',
        'info_dict': {
            'id': 'GR9PNZ396',
            'ext': 'mp4',
            'title': 'BOSTON: WHERE THE PAST IS THE PRESENT',
            'description': 'md5:4ec8844ac262ca2df9e67c0983c6b83f',
            'uploader_id': 'seeso',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]
    _NETRC_MACHINE = 'vrv'

    def _real_initialize(self):
        super(VRVIE, self)._real_initialize()
        email, password = self._get_login_info()
        if email is None:
            return
        token_credentials = self._call_api(
            'authenticate/by:credentials', None, 'Token Credentials', data={
                'email': email,
                'password': password,
            })
        self._TOKEN = token_credentials['oauth_token']
        self._TOKEN_SECRET = token_credentials['oauth_token_secret']
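
    # A stream variant is identified by its audio locale and optional hardsub
    # locale; HLS and DASH manifests are expanded into individual formats.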
    def _extract_vrv_formats(self, url, video_id, stream_format, audio_lang, hardsub_lang):
        if not url or stream_format not in ('hls', 'dash'):
            return []
        assert audio_lang or hardsub_lang
        stream_id_list = []
        if audio_lang:
            stream_id_list.append('audio-%s' % audio_lang)
        if hardsub_lang:
            stream_id_list.append('hardsub-%s' % hardsub_lang)
        stream_id = '-'.join(stream_id_list)
        format_id = '%s-%s' % (stream_format, stream_id)
        if stream_format == 'hls':
            adaptive_formats = self._extract_m3u8_formats(
                url, video_id, 'mp4', m3u8_id=format_id,
                note='Downloading %s m3u8 information' % stream_id,
                fatal=False)
        elif stream_format == 'dash':
            adaptive_formats = self._extract_mpd_formats(
                url, video_id, mpd_id=format_id,
                note='Downloading %s MPD information' % stream_id,
                fatal=False)
        if audio_lang:
            for f in adaptive_formats:
                if f.get('acodec') != 'none':
                    f['language'] = audio_lang
        return adaptive_formats

    def _real_extract(self, url):
        video_id = self._match_id(url)

        episode_path = self._get_cms_resource(
            'cms:/episodes/' + video_id, video_id)
        video_data = self._call_cms(episode_path, video_id, 'video')
        title = video_data['title']

        streams_path = video_data['__links__'].get('streams', {}).get('href')
        if not streams_path:
            self.raise_login_required()
        streams_json = self._call_cms(streams_path, video_id, 'streams')

        audio_locale = streams_json.get('audio_locale')
        formats = []
        for stream_type, streams in streams_json.get('streams', {}).items():
            if stream_type in ('adaptive_hls', 'adaptive_dash'):
                for stream in streams.values():
                    formats.extend(self._extract_vrv_formats(
                        stream.get('url'), video_id, stream_type.split('_')[1],
                        audio_locale, stream.get('hardsub_locale')))
        self._sort_formats(formats)

        subtitles = {}
        for subtitle in streams_json.get('subtitles', {}).values():
            subtitle_url = subtitle.get('url')
            if not subtitle_url:
                continue
            subtitles.setdefault(subtitle.get('locale', 'en-US'), []).append({
                'url': subtitle_url,
                'ext': subtitle.get('format', 'ass'),
            })

        thumbnails = []
        for thumbnail in video_data.get('images', {}).get('thumbnails', []):
            thumbnail_url = thumbnail.get('source')
            if not thumbnail_url:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int_or_none(thumbnail.get('width')),
                'height': int_or_none(thumbnail.get('height')),
            })

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'description': video_data.get('description'),
            'duration': float_or_none(video_data.get('duration_ms'), 1000),
            'uploader_id': video_data.get('channel_id'),
            'series': video_data.get('series_title'),
            'season': video_data.get('season_title'),
            'season_number': int_or_none(video_data.get('season_number')),
            'season_id': video_data.get('season_id'),
            'episode': title,
            'episode_number': int_or_none(video_data.get('episode_number')),
            'episode_id': video_data.get('production_episode_id'),
        }


class VRVSeriesIE(VRVBaseIE):
    IE_NAME = 'vrv:series'
    _VALID_URL = r'https?://(?:www\.)?vrv\.co/series/(?P<id>[A-Z0-9]+)'
    _TEST = {
        'url': 'https://vrv.co/series/G68VXG3G6/The-Perfect-Insider',
        'info_dict': {
            'id': 'G68VXG3G6',
        },
        'playlist_mincount': 11,
    }

    def _real_extract(self, url):
        series_id = self._match_id(url)

        seasons_path = self._get_cms_resource(
            'cms:/seasons?series_id=' + series_id, series_id)
        seasons_data = self._call_cms(seasons_path, series_id, 'seasons')

        entries = []
        for season in seasons_data.get('items', []):
            episodes_path = season['__links__']['season/episodes']['href']
            episodes = self._call_cms(episodes_path, series_id, 'episodes')
            for episode in episodes.get('items', []):
                episode_id = episode['id']
                entries.append(self.url_result(
                    'https://vrv.co/watch/' + episode_id,
                    'VRV', episode_id, episode.get('title')))
        return self.playlist_result(entries, series_id)