# (repository-viewer chrome captured by the scrape — not part of the source file:)
# You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
#
# 324 lines
# 13 KiB
#
# 10 years ago
# 10 years ago
  1. # -*- coding: utf-8 -*-
  2. from __future__ import unicode_literals
  3. import re
  4. import time
  5. import hmac
  6. import binascii
  7. import hashlib
  8. from .once import OnceIE
  9. from ..compat import (
  10. compat_parse_qs,
  11. compat_urllib_parse_urlparse,
  12. )
  13. from ..utils import (
  14. ExtractorError,
  15. float_or_none,
  16. int_or_none,
  17. sanitized_Request,
  18. unsmuggle_url,
  19. xpath_with_ns,
  20. mimetype2ext,
  21. find_xpath_attr,
  22. )
  23. default_ns = 'http://www.w3.org/2005/SMIL21/Language'
  24. _x = lambda p: xpath_with_ns(p, {'smil': default_ns})
  25. class ThePlatformBaseIE(OnceIE):
  26. def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
  27. meta = self._download_xml(smil_url, video_id, note=note, query={'format': 'SMIL'})
  28. error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src')
  29. if error_element is not None and error_element.attrib['src'].startswith(
  30. 'http://link.theplatform.com/s/errorFiles/Unavailable.'):
  31. raise ExtractorError(error_element.attrib['abstract'], expected=True)
  32. smil_formats = self._parse_smil_formats(
  33. meta, smil_url, video_id, namespace=default_ns,
  34. # the parameters are from syfy.com, other sites may use others,
  35. # they also work for nbc.com
  36. f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
  37. transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))
  38. formats = []
  39. for _format in smil_formats:
  40. if OnceIE.suitable(_format['url']):
  41. formats.extend(self._extract_once_formats(_format['url']))
  42. else:
  43. formats.append(_format)
  44. subtitles = self._parse_smil_subtitles(meta, default_ns)
  45. return formats, subtitles
  46. def get_metadata(self, path, video_id):
  47. info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
  48. info = self._download_json(info_url, video_id)
  49. subtitles = {}
  50. captions = info.get('captions')
  51. if isinstance(captions, list):
  52. for caption in captions:
  53. lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
  54. subtitles[lang] = [{
  55. 'ext': mimetype2ext(mime),
  56. 'url': src,
  57. }]
  58. return {
  59. 'title': info['title'],
  60. 'subtitles': subtitles,
  61. 'description': info['description'],
  62. 'thumbnail': info['defaultThumbnailUrl'],
  63. 'duration': int_or_none(info.get('duration'), 1000),
  64. 'timestamp': int_or_none(info.get('pubDate'), 1000) or None,
  65. 'uploader': info.get('billingCode'),
  66. }
  67. class ThePlatformIE(ThePlatformBaseIE):
  68. _VALID_URL = r'''(?x)
  69. (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
  70. (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
  71. |theplatform:)(?P<id>[^/\?&]+)'''
  72. _TESTS = [{
  73. # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
  74. 'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
  75. 'info_dict': {
  76. 'id': 'e9I_cZgTgIPd',
  77. 'ext': 'flv',
  78. 'title': 'Blackberry\'s big, bold Z30',
  79. 'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
  80. 'duration': 247,
  81. 'timestamp': 1383239700,
  82. 'upload_date': '20131031',
  83. 'uploader': 'CBSI-NEW',
  84. },
  85. 'params': {
  86. # rtmp download
  87. 'skip_download': True,
  88. },
  89. }, {
  90. # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
  91. 'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
  92. 'info_dict': {
  93. 'id': '22d_qsQ6MIRT',
  94. 'ext': 'flv',
  95. 'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
  96. 'title': 'Tesla Model S: A second step towards a cleaner motoring future',
  97. 'timestamp': 1426176191,
  98. 'upload_date': '20150312',
  99. 'uploader': 'CBSI-NEW',
  100. },
  101. 'params': {
  102. # rtmp download
  103. 'skip_download': True,
  104. }
  105. }, {
  106. 'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
  107. 'info_dict': {
  108. 'id': 'yMBg9E8KFxZD',
  109. 'ext': 'mp4',
  110. 'description': 'md5:644ad9188d655b742f942bf2e06b002d',
  111. 'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
  112. 'uploader': 'EGSM',
  113. }
  114. }, {
  115. 'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
  116. 'only_matching': True,
  117. }, {
  118. 'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
  119. 'md5': 'fb96bb3d85118930a5b055783a3bd992',
  120. 'info_dict': {
  121. 'id': 'tdy_or_siri_150701',
  122. 'ext': 'mp4',
  123. 'title': 'iPhone Siri’s sassy response to a math question has people talking',
  124. 'description': 'md5:a565d1deadd5086f3331d57298ec6333',
  125. 'duration': 83.0,
  126. 'thumbnail': 're:^https?://.*\.jpg$',
  127. 'timestamp': 1435752600,
  128. 'upload_date': '20150701',
  129. 'uploader': 'NBCU-NEWS',
  130. },
  131. }, {
  132. # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1
  133. # geo-restricted (US), HLS encrypted with AES-128
  134. 'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781',
  135. 'only_matching': True,
  136. }]
  137. @staticmethod
  138. def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
  139. flags = '10' if include_qs else '00'
  140. expiration_date = '%x' % (int(time.time()) + life)
  141. def str_to_hex(str):
  142. return binascii.b2a_hex(str.encode('ascii')).decode('ascii')
  143. def hex_to_bytes(hex):
  144. return binascii.a2b_hex(hex.encode('ascii'))
  145. relative_path = re.match(r'https?://link.theplatform.com/s/([^?]+)', url).group(1)
  146. clear_text = hex_to_bytes(flags + expiration_date + str_to_hex(relative_path))
  147. checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
  148. sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
  149. return '%s&sig=%s' % (url, sig)
  150. def _real_extract(self, url):
  151. url, smuggled_data = unsmuggle_url(url, {})
  152. mobj = re.match(self._VALID_URL, url)
  153. provider_id = mobj.group('provider_id')
  154. video_id = mobj.group('id')
  155. if not provider_id:
  156. provider_id = 'dJ5BDC'
  157. path = provider_id + '/'
  158. if mobj.group('media'):
  159. path += mobj.group('media')
  160. path += video_id
  161. qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
  162. if 'guid' in qs_dict:
  163. webpage = self._download_webpage(url, video_id)
  164. scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
  165. feed_id = None
  166. # feed id usually locates in the last script.
  167. # Seems there's no pattern for the interested script filename, so
  168. # I try one by one
  169. for script in reversed(scripts):
  170. feed_script = self._download_webpage(
  171. self._proto_relative_url(script, 'http:'),
  172. video_id, 'Downloading feed script')
  173. feed_id = self._search_regex(
  174. r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
  175. 'default feed id', default=None)
  176. if feed_id is not None:
  177. break
  178. if feed_id is None:
  179. raise ExtractorError('Unable to find feed id')
  180. return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
  181. provider_id, feed_id, qs_dict['guid'][0]))
  182. if smuggled_data.get('force_smil_url', False):
  183. smil_url = url
  184. # Explicitly specified SMIL (see https://github.com/rg3/youtube-dl/issues/7385)
  185. elif '/guid/' in url:
  186. headers = {}
  187. source_url = smuggled_data.get('source_url')
  188. if source_url:
  189. headers['Referer'] = source_url
  190. request = sanitized_Request(url, headers=headers)
  191. webpage = self._download_webpage(request, video_id)
  192. smil_url = self._search_regex(
  193. r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
  194. webpage, 'smil url', group='url')
  195. path = self._search_regex(
  196. r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
  197. smil_url += '?' if '?' not in smil_url else '&' + 'formats=m3u,mpeg4'
  198. elif mobj.group('config'):
  199. config_url = url + '&form=json'
  200. config_url = config_url.replace('swf/', 'config/')
  201. config_url = config_url.replace('onsite/', 'onsite/config/')
  202. config = self._download_json(config_url, video_id, 'Downloading config')
  203. if 'releaseUrl' in config:
  204. release_url = config['releaseUrl']
  205. else:
  206. release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
  207. smil_url = release_url + '&formats=MPEG4&manifest=f4m'
  208. else:
  209. smil_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
  210. sig = smuggled_data.get('sig')
  211. if sig:
  212. smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])
  213. formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)
  214. self._sort_formats(formats)
  215. ret = self.get_metadata(path, video_id)
  216. combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
  217. ret.update({
  218. 'id': video_id,
  219. 'formats': formats,
  220. 'subtitles': combined_subtitles,
  221. })
  222. return ret
  223. class ThePlatformFeedIE(ThePlatformBaseIE):
  224. _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&byGuid=%s'
  225. _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*byGuid=(?P<id>[a-zA-Z0-9_]+)'
  226. _TEST = {
  227. # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
  228. 'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
  229. 'md5': '6e32495b5073ab414471b615c5ded394',
  230. 'info_dict': {
  231. 'id': 'n_hardball_5biden_140207',
  232. 'ext': 'mp4',
  233. 'title': 'The Biden factor: will Joe run in 2016?',
  234. 'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
  235. 'thumbnail': 're:^https?://.*\.jpg$',
  236. 'upload_date': '20140208',
  237. 'timestamp': 1391824260,
  238. 'duration': 467.0,
  239. 'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
  240. 'uploader': 'NBCU-NEWS',
  241. },
  242. }
  243. def _real_extract(self, url):
  244. mobj = re.match(self._VALID_URL, url)
  245. video_id = mobj.group('id')
  246. provider_id = mobj.group('provider_id')
  247. feed_id = mobj.group('feed_id')
  248. real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, video_id)
  249. feed = self._download_json(real_url, video_id)
  250. entry = feed['entries'][0]
  251. formats = []
  252. subtitles = {}
  253. first_video_id = None
  254. duration = None
  255. for item in entry['media$content']:
  256. smil_url = item['plfile$url'] + '&mbr=true'
  257. cur_video_id = ThePlatformIE._match_id(smil_url)
  258. if first_video_id is None:
  259. first_video_id = cur_video_id
  260. duration = float_or_none(item.get('plfile$duration'))
  261. cur_formats, cur_subtitles = self._extract_theplatform_smil(smil_url, video_id, 'Downloading SMIL data for %s' % cur_video_id)
  262. formats.extend(cur_formats)
  263. subtitles = self._merge_subtitles(subtitles, cur_subtitles)
  264. self._sort_formats(formats)
  265. thumbnails = [{
  266. 'url': thumbnail['plfile$url'],
  267. 'width': int_or_none(thumbnail.get('plfile$width')),
  268. 'height': int_or_none(thumbnail.get('plfile$height')),
  269. } for thumbnail in entry.get('media$thumbnails', [])]
  270. timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
  271. categories = [item['media$name'] for item in entry.get('media$categories', [])]
  272. ret = self.get_metadata('%s/%s' % (provider_id, first_video_id), video_id)
  273. subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
  274. ret.update({
  275. 'id': video_id,
  276. 'formats': formats,
  277. 'subtitles': subtitles,
  278. 'thumbnails': thumbnails,
  279. 'duration': duration,
  280. 'timestamp': timestamp,
  281. 'categories': categories,
  282. })
  283. return ret