# coding: utf-8
from __future__ import unicode_literals

import hmac
import hashlib
import base64

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    float_or_none,
    int_or_none,
    js_to_json,
    mimetype2ext,
    parse_iso8601,
    remove_start,
)


class NYTimesBaseIE(InfoExtractor):
    _SECRET = b'pX(2MbU2);4N{7J8)>YwKRJ+/pQ3JkiU2Q^V>mFYv6g6gYvt6v'

    def _extract_video_from_id(self, video_id):
        # Authorization generation algorithm is reverse engineered from `signer` in
        # http://graphics8.nytimes.com/video/vhs/vhs-2.x.min.js
        path = '/svc/video/api/v3/video/' + video_id
        hm = hmac.new(self._SECRET, (path + ':vhs').encode(), hashlib.sha512).hexdigest()

        video_data = self._download_json('http://www.nytimes.com' + path, video_id, 'Downloading video JSON', headers={
            'Authorization': 'NYTV ' + base64.b64encode(hm.encode()).decode(),
            'X-NYTV': 'vhs',
        }, fatal=False)
        if not video_data:
            # Fall back to the older, unauthenticated v2 endpoint
            video_data = self._download_json(
                'http://www.nytimes.com/svc/video/api/v2/video/' + video_id,
                video_id, 'Downloading video JSON')

        title = video_data['headline']

        def get_file_size(file_size):
            if isinstance(file_size, int):
                return file_size
            elif isinstance(file_size, dict):
                return int(file_size.get('value', 0))
            else:
                return None

        urls = []
        formats = []
        for video in video_data.get('renditions', []):
            video_url = video.get('url')
            format_id = video.get('type')
            # Skip thumbnail renditions and duplicate URLs
            if not video_url or format_id == 'thumbs' or video_url in urls:
                continue
            urls.append(video_url)
            ext = mimetype2ext(video.get('mimetype')) or determine_ext(video_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id=format_id or 'hls', fatal=False))
            elif ext == 'mpd':
                continue
            #     formats.extend(self._extract_mpd_formats(
            #         video_url, video_id, format_id or 'dash', fatal=False))
            else:
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'vcodec': video.get('videoencoding') or video.get('video_codec'),
                    'width': int_or_none(video.get('width')),
                    'height': int_or_none(video.get('height')),
                    'filesize': get_file_size(video.get('file_size') or video.get('fileSize')),
                    'tbr': int_or_none(video.get('bitrate'), 1000),
                    'ext': ext,
                })
        self._sort_formats(formats)

        thumbnails = []
        for image in video_data.get('images', []):
            image_url = image.get('url')
            if not image_url:
                continue
            thumbnails.append({
                'url': 'http://www.nytimes.com/' + image_url,
                'width': int_or_none(image.get('width')),
                'height': int_or_none(image.get('height')),
            })

        publication_date = video_data.get('publication_date')
        timestamp = parse_iso8601(publication_date[:-8]) if publication_date else None

        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('summary'),
            'timestamp': timestamp,
            'uploader': video_data.get('byline'),
            'duration': float_or_none(video_data.get('duration'), 1000),
            'formats': formats,
            'thumbnails': thumbnails,
        }


class NYTimesIE(NYTimesBaseIE):
    _VALID_URL = r'https?://(?:(?:www\.)?nytimes\.com/video/(?:[^/]+/)+?|graphics8\.nytimes\.com/bcvideo/\d+(?:\.\d+)?/iframe/embed\.html\?videoId=)(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.nytimes.com/video/opinion/100000002847155/verbatim-what-is-a-photocopier.html?playlistId=100000001150263',
        'md5': 'd665342765db043f7e225cff19df0f2d',
        'info_dict': {
            'id': '100000002847155',
            'ext': 'mov',
            'title': 'Verbatim: What Is a Photocopier?',
            'description': 'md5:93603dada88ddbda9395632fdc5da260',
            'timestamp': 1398631707,
            'upload_date': '20140427',
            'uploader': 'Brett Weiner',
            'duration': 419,
        }
    }, {
        'url': 'http://www.nytimes.com/video/travel/100000003550828/36-hours-in-dubai.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        return self._extract_video_from_id(video_id)


class NYTimesArticleIE(NYTimesBaseIE):
    _VALID_URL = r'https?://(?:www\.)?nytimes\.com/(.(?<!video))*?/(?:[^/]+/)*(?P<id>[^.]+)(?:\.html)?'
    _TESTS = [{
        'url': 'http://www.nytimes.com/2015/04/14/business/owner-of-gravity-payments-a-credit-card-processor-is-setting-a-new-minimum-wage-70000-a-year.html?_r=0',
        'md5': 'e2076d58b4da18e6a001d53fd56db3c9',
        'info_dict': {
            'id': '100000003628438',
            'ext': 'mov',
            'title': 'New Minimum Wage: $70,000 a Year',
            'description': 'Dan Price, C.E.O. of Gravity Payments, surprised his 120-person staff by announcing that he planned over the next three years to raise the salary of every employee to $70,000 a year.',
            'timestamp': 1429033037,
            'upload_date': '20150414',
            'uploader': 'Matthew Williams',
        }
    }, {
        'url': 'http://www.nytimes.com/2016/10/14/podcasts/revelations-from-the-final-weeks.html',
        'md5': 'e0d52040cafb07662acf3c9132db3575',
        'info_dict': {
            'id': '100000004709062',
            'title': 'The Run-Up: ‘He Was Like an Octopus’',
            'ext': 'mp3',
            'description': 'md5:fb5c6b93b12efc51649b4847fe066ee4',
            'series': 'The Run-Up',
            'episode': '‘He Was Like an Octopus’',
            'episode_number': 20,
            'duration': 2130,
        }
    }, {
        'url': 'http://www.nytimes.com/2016/10/16/books/review/inside-the-new-york-times-book-review-the-rise-of-hitler.html',
        'info_dict': {
            'id': '100000004709479',
            'title': 'The Rise of Hitler',
            'ext': 'mp3',
            'description': 'md5:bce877fd9e3444990cb141875fab0028',
            'creator': 'Pamela Paul',
            'duration': 3475,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.nytimes.com/news/minute/2014/03/17/times-minute-whats-next-in-crimea/?_php=true&_type=blogs&_php=true&_type=blogs&_r=1',
        'only_matching': True,
    }]
    def _extract_podcast_from_json(self, json, page_id, webpage):
        # FlexTypes podcast JSON keeps the metadata under data.track and data.podcast
        podcast_audio = self._parse_json(
            json, page_id, transform_source=js_to_json)
        audio_data = podcast_audio['data']
        track = audio_data['track']

        episode_title = track['title']
        video_url = track['source']

        description = track.get('description') or self._html_search_meta(
            ['og:description', 'twitter:description'], webpage)

        podcast_title = audio_data.get('podcast', {}).get('title')
        title = ('%s: %s' % (podcast_title, episode_title)
                 if podcast_title else episode_title)

        episode = audio_data.get('podcast', {}).get('episode') or ''
        episode_number = int_or_none(self._search_regex(
            r'[Ee]pisode\s+(\d+)', episode, 'episode number', default=None))

        return {
            'id': remove_start(podcast_audio.get('target'), 'FT') or page_id,
            'url': video_url,
            'title': title,
            'description': description,
            'creator': track.get('credit'),
            'series': podcast_title,
            'episode': episode_title,
            'episode_number': episode_number,
            'duration': int_or_none(track.get('duration')),
        }
    def _real_extract(self, url):
        page_id = self._match_id(url)

        webpage = self._download_webpage(url, page_id)

        # Prefer an embedded video if the article references one by id
        video_id = self._search_regex(
            r'data-videoid=["\'](\d+)', webpage, 'video id',
            default=None, fatal=False)
        if video_id is not None:
            return self._extract_video_from_id(video_id)

        # Otherwise look for podcast data pushed via NYTD.FlexTypes
        podcast_data = self._search_regex(
            (r'NYTD\.FlexTypes\.push\s*\(\s*({.+?})\s*\)\s*;\s*</script',
             r'NYTD\.FlexTypes\.push\s*\(\s*({.+})\s*\)\s*;'),
            webpage, 'podcast data')
        return self._extract_podcast_from_json(podcast_data, page_id, webpage)