# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import re
import time
import hmac
import binascii
import hashlib

from .once import OnceIE
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    sanitized_Request,
    unsmuggle_url,
    xpath_with_ns,
    mimetype2ext,
    find_xpath_attr,
)

default_ns = 'http://www.w3.org/2005/SMIL21/Language'
_x = lambda p: xpath_with_ns(p, {'smil': default_ns})
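# _x() builds namespace-qualified XPath expressions for the SMIL documents that
# theplatform serves; for example, _x('.//smil:ref') expands to
# './/{http://www.w3.org/2005/SMIL21/Language}ref', the form ElementTree expects.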


class ThePlatformBaseIE(OnceIE):
    def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
        meta = self._download_xml(smil_url, video_id, note=note, query={'format': 'SMIL'})
        error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src')
        if error_element is not None and error_element.attrib['src'].startswith(
                'http://link.theplatform.com/s/errorFiles/Unavailable.'):
            raise ExtractorError(error_element.attrib['abstract'], expected=True)
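
        # An unavailable video comes back as a SMIL whose <ref> src points at
        # http://link.theplatform.com/s/errorFiles/Unavailable.<ext>; its
        # 'abstract' attribute carries a human-readable reason, which the check
        # above surfaces as the error message (description inferred from the
        # check itself; the exact wording of the abstract varies).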

        smil_formats = self._parse_smil_formats(
            meta, smil_url, video_id, namespace=default_ns,
            # these f4m parameters come from syfy.com; other sites may use
            # different ones, but these also work for nbc.com
            f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
            transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))

        formats = []
        for _format in smil_formats:
            if OnceIE.suitable(_format['url']):
                formats.extend(self._extract_once_formats(_format['url']))
            else:
                formats.append(_format)

        self._sort_formats(formats)

        subtitles = self._parse_smil_subtitles(meta, default_ns)

        return formats, subtitles

    def get_metadata(self, path, video_id):
        info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
        info = self._download_json(info_url, video_id)

        subtitles = {}
        captions = info.get('captions')
        if isinstance(captions, list):
            for caption in captions:
                lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
                subtitles[lang] = [{
                    'ext': mimetype2ext(mime),
                    'url': src,
                }]

        return {
            'title': info['title'],
            'subtitles': subtitles,
            'description': info['description'],
            'thumbnail': info['defaultThumbnailUrl'],
            'duration': int_or_none(info.get('duration'), 1000),
            'timestamp': int_or_none(info.get('pubDate'), 1000) or None,
            'uploader': info.get('billingCode'),
        }
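
    # For reference, the ?format=preview endpoint consumed above returns JSON
    # roughly shaped like the sketch below. Field names are the keys read in
    # get_metadata(); all concrete values are illustrative only.
    #
    #   {
    #     "title": "Blackberry's big, bold Z30",
    #     "description": "...",
    #     "defaultThumbnailUrl": "http://....jpg",
    #     "duration": 247000,           # milliseconds
    #     "pubDate": 1383239700000,     # milliseconds since the epoch
    #     "billingCode": "CBSI-NEW",
    #     "captions": [{"lang": "en", "src": "http://...", "type": "..."}]
    #   }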


class ThePlatformIE(ThePlatformBaseIE):
    _VALID_URL = r'''(?x)
        (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
           (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
         |theplatform:)(?P<id>[^/\?&]+)'''

    _TESTS = [{
        # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
        'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
        'info_dict': {
            'id': 'e9I_cZgTgIPd',
            'ext': 'flv',
            'title': 'Blackberry\'s big, bold Z30',
            'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
            'duration': 247,
            'timestamp': 1383239700,
            'upload_date': '20131031',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
        'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
        'info_dict': {
            'id': '22d_qsQ6MIRT',
            'ext': 'flv',
            'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
            'title': 'Tesla Model S: A second step towards a cleaner motoring future',
            'timestamp': 1426176191,
            'upload_date': '20150312',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
        'info_dict': {
            'id': 'yMBg9E8KFxZD',
            'ext': 'mp4',
            'description': 'md5:644ad9188d655b742f942bf2e06b002d',
            'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
            'uploader': 'EGSM',
        }
    }, {
        'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
        'only_matching': True,
    }, {
        'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
        'md5': 'fb96bb3d85118930a5b055783a3bd992',
        'info_dict': {
            'id': 'tdy_or_siri_150701',
            'ext': 'mp4',
            'title': 'iPhone Siri’s sassy response to a math question has people talking',
            'description': 'md5:a565d1deadd5086f3331d57298ec6333',
            'duration': 83.0,
            'thumbnail': 're:^https?://.*\.jpg$',
            'timestamp': 1435752600,
            'upload_date': '20150701',
            'uploader': 'NBCU-NEWS',
        },
    }, {
        # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1
        # geo-restricted (US), HLS encrypted with AES-128
        'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781',
        'only_matching': True,
    }]
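
    # The _VALID_URL pattern above accepts, for example (IDs taken from the tests;
    # group assignments inferred from the regex):
    #   http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?...
    #       -> provider_id='dJ5BDC', id='e9I_cZgTgIPd'
    #   https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD
    #       -> provider_id='D6x-PC', media='media/', id='yMBg9E8KFxZD'
    #   theplatform:<media id>
    #       -> id only; provider_id falls back to 'dJ5BDC' in _real_extract()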

    @staticmethod
    def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
        flags = '10' if include_qs else '00'
        expiration_date = '%x' % (int(time.time()) + life)

        def str_to_hex(str):
            return binascii.b2a_hex(str.encode('ascii')).decode('ascii')

        def hex_to_str(hex):
            return binascii.a2b_hex(hex)

        relative_path = re.match(r'https?://link.theplatform.com/s/([^?]+)', url).group(1)
        clear_text = hex_to_str(flags + expiration_date + str_to_hex(relative_path))
        checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
        sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
        return '%s&sig=%s' % (url, sig)
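
    # Reading off the code above, the value appended as &sig= is: two hex flag
    # characters, the expiration time as hex epoch seconds, a 40-character hex
    # HMAC-SHA1 (keyed with sig_key) of the binary form of
    # flags + expiration + hex(relative_path), and finally the hex-encoded
    # sig_secret. The key/secret pair is supplied by the calling extractor via
    # smuggled 'sig' data (see _real_extract() below).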

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        provider_id = mobj.group('provider_id')
        video_id = mobj.group('id')

        if not provider_id:
            provider_id = 'dJ5BDC'

        path = provider_id + '/'
        if mobj.group('media'):
            path += mobj.group('media')
        path += video_id

        qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        if 'guid' in qs_dict:
            webpage = self._download_webpage(url, video_id)
            scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
            feed_id = None
            # The feed id is usually defined in the last script, but there is no
            # obvious pattern to the relevant script's filename, so try them one
            # by one.
            for script in reversed(scripts):
                feed_script = self._download_webpage(
                    self._proto_relative_url(script, 'http:'),
                    video_id, 'Downloading feed script')
                feed_id = self._search_regex(
                    r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
                    'default feed id', default=None)
                if feed_id is not None:
                    break
            if feed_id is None:
                raise ExtractorError('Unable to find feed id')
            return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
                provider_id, feed_id, qs_dict['guid'][0]))
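
        # The URL built above has the form
        # http://feed.theplatform.com/f/<provider_id>/<feed_id>?byGuid=<guid>,
        # which is picked up by ThePlatformFeedIE below.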

        if smuggled_data.get('force_smil_url', False):
            smil_url = url
        # Explicitly specified SMIL (see https://github.com/rg3/youtube-dl/issues/7385)
        elif '/guid/' in url:
            headers = {}
            source_url = smuggled_data.get('source_url')
            if source_url:
                headers['Referer'] = source_url
            request = sanitized_Request(url, headers=headers)
            webpage = self._download_webpage(request, video_id)
            smil_url = self._search_regex(
                r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
                webpage, 'smil url', group='url')
            path = self._search_regex(
                r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
            smil_url += ('?' if '?' not in smil_url else '&') + 'formats=m3u,mpeg4'
        elif mobj.group('config'):
            config_url = url + '&form=json'
            config_url = config_url.replace('swf/', 'config/')
            config_url = config_url.replace('onsite/', 'onsite/config/')
            config = self._download_json(config_url, video_id, 'Downloading config')
            if 'releaseUrl' in config:
                release_url = config['releaseUrl']
            else:
                release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
            smil_url = release_url + '&formats=MPEG4&manifest=f4m'
        else:
            smil_url = 'http://link.theplatform.com/s/%s?mbr=true' % path

        sig = smuggled_data.get('sig')
        if sig:
            smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])

        formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)

        ret = self.get_metadata(path, video_id)
        combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': combined_subtitles,
        })

        return ret
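
    # Embedding site extractors can steer the branches above by smuggling extra
    # data into the URL they hand over; a hypothetical caller, for illustration
    # only (the media URL and 'page_url' are just example values):
    #
    #   from ..utils import smuggle_url
    #   return self.url_result(smuggle_url(
    #       'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd?mbr=true',
    #       {'force_smil_url': True, 'source_url': page_url}), 'ThePlatform')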


class ThePlatformFeedIE(ThePlatformBaseIE):
    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&byGuid=%s'
    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*byGuid=(?P<id>[a-zA-Z0-9_]+)'
    _TEST = {
        # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
        'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
        'md5': '6e32495b5073ab414471b615c5ded394',
        'info_dict': {
            'id': 'n_hardball_5biden_140207',
            'ext': 'mp4',
            'title': 'The Biden factor: will Joe run in 2016?',
            'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
            'thumbnail': 're:^https?://.*\.jpg$',
            'upload_date': '20140208',
            'timestamp': 1391824260,
            'duration': 467.0,
            'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
        },
    }
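
    # The form=json feed endpoint queried below returns JSON roughly like the
    # sketch that follows. Field names are the keys read in _real_extract();
    # concrete values are illustrative only.
    #
    #   {
    #     "entries": [{
    #       "media$content": [{"plfile$url": "http://link.theplatform.com/s/...", "plfile$duration": 467.0}],
    #       "media$thumbnails": [{"plfile$url": "http://....jpg", "plfile$width": 640, "plfile$height": 360}],
    #       "media$availableDate": 1391824260000,     # milliseconds since the epoch
    #       "media$categories": [{"media$name": "MSNBC/Issues/Democrats"}]
    #     }]
    #   }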

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        provider_id = mobj.group('provider_id')
        feed_id = mobj.group('feed_id')

        real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, video_id)
        feed = self._download_json(real_url, video_id)
        entry = feed['entries'][0]

        formats = []
        subtitles = {}
        first_video_id = None
        duration = None
        for item in entry['media$content']:
            smil_url = item['plfile$url'] + '&mbr=true'
            cur_video_id = ThePlatformIE._match_id(smil_url)
            if first_video_id is None:
                first_video_id = cur_video_id
                duration = float_or_none(item.get('plfile$duration'))
            cur_formats, cur_subtitles = self._extract_theplatform_smil(
                smil_url, video_id, 'Downloading SMIL data for %s' % cur_video_id)
            formats.extend(cur_formats)
            subtitles = self._merge_subtitles(subtitles, cur_subtitles)
        self._sort_formats(formats)

        thumbnails = [{
            'url': thumbnail['plfile$url'],
            'width': int_or_none(thumbnail.get('plfile$width')),
            'height': int_or_none(thumbnail.get('plfile$height')),
        } for thumbnail in entry.get('media$thumbnails', [])]

        timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
        categories = [item['media$name'] for item in entry.get('media$categories', [])]

        ret = self.get_metadata('%s/%s' % (provider_id, first_video_id), video_id)
        subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'duration': duration,
            'timestamp': timestamp,
            'categories': categories,
        })

        return ret