# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    determine_ext,
    dict_get,
    int_or_none,
    str_or_none,
    strip_or_none,
    try_get,
)
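

# Shared base for the SVT extractors below. _extract_video() turns the JSON
# returned by SVT's video APIs (a 'videoReferences' list plus assorted metadata
# fields) into a youtube-dl info dict, picking HLS, HDS, DASH or plain
# progressive formats by file extension, and raises a geo-restriction error
# when no formats are available and the clip is marked as blocked outside Sweden.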
class SVTBaseIE(InfoExtractor):
    _GEO_COUNTRIES = ['SE']

    def _extract_video(self, video_info, video_id):
        is_live = dict_get(video_info, ('live', 'simulcast'), default=False)
        m3u8_protocol = 'm3u8' if is_live else 'm3u8_native'
        formats = []
        for vr in video_info['videoReferences']:
            player_type = vr.get('playerType') or vr.get('format')
            vurl = vr['url']
            ext = determine_ext(vurl)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    vurl, video_id,
                    ext='mp4', entry_protocol=m3u8_protocol,
                    m3u8_id=player_type, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    vurl + '?hdcore=3.3.0', video_id,
                    f4m_id=player_type, fatal=False))
            elif ext == 'mpd':
                if player_type == 'dashhbbtv':
                    formats.extend(self._extract_mpd_formats(
                        vurl, video_id, mpd_id=player_type, fatal=False))
            else:
                formats.append({
                    'format_id': player_type,
                    'url': vurl,
                })
        if not formats and video_info.get('rights', {}).get('geoBlockedSweden'):
            self.raise_geo_restricted(
                'This video is only available in Sweden',
                countries=self._GEO_COUNTRIES)
        self._sort_formats(formats)

        subtitles = {}
        subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences'))
        if isinstance(subtitle_references, list):
            for sr in subtitle_references:
                subtitle_url = sr.get('url')
                subtitle_lang = sr.get('language', 'sv')
                if subtitle_url:
                    if determine_ext(subtitle_url) == 'm3u8':
                        # TODO(yan12125): handle WebVTT in m3u8 manifests
                        continue
                    subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url})

        title = video_info.get('title')

        series = video_info.get('programTitle')
        season_number = int_or_none(video_info.get('season'))
        episode = video_info.get('episodeTitle')
        episode_number = int_or_none(video_info.get('episodeNumber'))

        duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration')))
        age_limit = None
        adult = dict_get(
            video_info, ('inappropriateForChildren', 'blockedForChildren'),
            skip_false_values=False)
        if adult is not None:
            age_limit = 18 if adult else 0

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'duration': duration,
            'age_limit': age_limit,
            'series': series,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'is_live': is_live,
        }
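

# Extractor for legacy svt.se widget embeds (svt.se/wd?widgetId=...&articleId=...).
# _extract_url() is meant to be called from other extractors (e.g. the generic
# extractor) to locate such embeds in arbitrary webpages.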
class SVTIE(SVTBaseIE):
    _VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.svt.se/wd?widgetId=23991&sectionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false',
        'md5': '33e9a5d8f646523ce0868ecfb0eed77d',
        'info_dict': {
            'id': '2900353',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
            'age_limit': 0,
        },
    }

    @staticmethod
    def _extract_url(webpage):
        mobj = re.search(
            r'(?:<iframe src|href)="(?P<url>%s[^"]*)"' % SVTIE._VALID_URL, webpage)
        if mobj:
            return mobj.group('url')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        widget_id = mobj.group('widget_id')
        article_id = mobj.group('id')

        info = self._download_json(
            'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id),
            article_id)

        info_dict = self._extract_video(info['video'], article_id)
        info_dict['title'] = info['context']['title']
        return info_dict
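

# Base class carrying the regex that locates the embedded JSON page state
# (assignments of the form root['__svtplay'] = {...};) on svtplay.se pages.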
class SVTPlayBaseIE(SVTBaseIE):
    _SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n'
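

# Extractor for svtplay.se and oppetarkiv.se video pages, as well as the
# internal 'svt:<id>' scheme. Metadata is taken from the embedded page state
# when present; otherwise the video id found in the page is resolved through
# the videoplayer-api endpoint.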
class SVTPlayIE(SVTPlayBaseIE):
    IE_DESC = 'SVT Play and Öppet arkiv'
    _VALID_URL = r'''(?x)
                    (?:
                        svt:(?P<svt_id>[^/?#&]+)|
                        https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+)
                    )
                    '''
    _TESTS = [{
        'url': 'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2',
        'md5': '2b6704fe4a28801e1a098bbf3c5ac611',
        'info_dict': {
            'id': '5996901',
            'ext': 'mp4',
            'title': 'Flygplan till Haile Selassie',
            'duration': 3527,
            'thumbnail': r're:^https?://.*[\.-]jpg$',
            'age_limit': 0,
            'subtitles': {
                'sv': [{
                    'ext': 'wsrt',
                }]
            },
        },
    }, {
        # geo restricted to Sweden
        'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten',
        'only_matching': True,
    }, {
        'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg',
        'only_matching': True,
    }, {
        'url': 'https://www.svtplay.se/kanaler/svt1',
        'only_matching': True,
    }, {
        'url': 'svt:1376446-003A',
        'only_matching': True,
    }, {
        'url': 'svt:14278044',
        'only_matching': True,
    }]

    def _adjust_title(self, info):
        if info['is_live']:
            info['title'] = self._live_title(info['title'])

    def _extract_by_video_id(self, video_id, webpage=None):
        data = self._download_json(
            'https://api.svt.se/videoplayer-api/video/%s' % video_id,
            video_id, headers=self.geo_verification_headers())
        info_dict = self._extract_video(data, video_id)
        if not info_dict.get('title'):
            title = dict_get(info_dict, ('episode', 'series'))
            if not title and webpage:
                title = re.sub(
                    r'\s*\|\s*.+?$', '', self._og_search_title(webpage))
            if not title:
                title = video_id
            info_dict['title'] = title
        self._adjust_title(info_dict)
        return info_dict

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id, svt_id = mobj.group('id', 'svt_id')

        if svt_id:
            return self._extract_by_video_id(svt_id)

        webpage = self._download_webpage(url, video_id)

        data = self._parse_json(
            self._search_regex(
                self._SVTPLAY_RE, webpage, 'embedded data', default='{}',
                group='json'),
            video_id, fatal=False)

        thumbnail = self._og_search_thumbnail(webpage)

        if data:
            video_info = try_get(
                data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'],
                dict)
            if video_info:
                info_dict = self._extract_video(video_info, video_id)
                info_dict.update({
                    'title': data['context']['dispatcher']['stores']['MetaStore']['title'],
                    'thumbnail': thumbnail,
                })
                self._adjust_title(info_dict)
                return info_dict

        svt_id = self._search_regex(
            r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
            webpage, 'video id')

        return self._extract_by_video_id(svt_id, webpage)
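

# Playlist extractor for svtplay.se series pages. Episode ids are fetched from
# SVT's contento GraphQL endpoint; a ?tab=<season> parameter in the URL limits
# the playlist to a single season.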
class SVTSeriesIE(SVTPlayBaseIE):
    _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)(?:.+?\btab=(?P<season_slug>[^&#]+))?'
    _TESTS = [{
        'url': 'https://www.svtplay.se/rederiet',
        'info_dict': {
            'id': '14445680',
            'title': 'Rederiet',
            'description': 'md5:d9fdfff17f5d8f73468176ecd2836039',
        },
        'playlist_mincount': 318,
    }, {
        'url': 'https://www.svtplay.se/rederiet?tab=season-2-14445680',
        'info_dict': {
            'id': 'season-2-14445680',
            'title': 'Rederiet - Säsong 2',
            'description': 'md5:d9fdfff17f5d8f73468176ecd2836039',
        },
        'playlist_mincount': 12,
    }]

    @classmethod
    def suitable(cls, url):
        return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url)

    def _real_extract(self, url):
        series_slug, season_id = re.match(self._VALID_URL, url).groups()

        series = self._download_json(
            'https://api.svt.se/contento/graphql', series_slug,
            'Downloading series page', query={
                'query': '''{
  listablesBySlug(slugs: ["%s"]) {
    associatedContent(include: [productionPeriod, season]) {
      items {
        item {
          ... on Episode {
            videoSvtId
          }
        }
      }
      id
      name
    }
    id
    longDescription
    name
    shortDescription
  }
}''' % series_slug,
            })['data']['listablesBySlug'][0]

        season_name = None

        entries = []
        for season in series['associatedContent']:
            if not isinstance(season, dict):
                continue
            if season_id:
                if season.get('id') != season_id:
                    continue
                season_name = season.get('name')
            items = season.get('items')
            if not isinstance(items, list):
                continue
            for item in items:
                video = item.get('item') or {}
                content_id = video.get('videoSvtId')
                if not content_id or not isinstance(content_id, compat_str):
                    continue
                entries.append(self.url_result(
                    'svt:' + content_id, SVTPlayIE.ie_key(), content_id))

        title = series.get('name')
        season_name = season_name or season_id
        if title and season_name:
            title = '%s - %s' % (title, season_name)
        elif season_id:
            title = season_id

        return self.playlist_result(
            entries, season_id or series.get('id'), title,
            dict_get(series, ('longDescription', 'shortDescription')))
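

# Playlist extractor for svt.se article pages (news, sport, weather). Videos
# are collected from the article's 'media' and 'structuredBody' fields and
# delegated to SVTPlayIE via the svt: scheme.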
class SVTPageIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?svt\.se/(?P<path>(?:[^/]+/)*(?P<id>[^/?&#]+))'
    _TESTS = [{
        'url': 'https://www.svt.se/sport/ishockey/bakom-masken-lehners-kamp-mot-mental-ohalsa',
        'info_dict': {
            'id': '25298267',
            'title': 'Bakom masken – Lehners kamp mot mental ohälsa',
        },
        'playlist_count': 4,
    }, {
        'url': 'https://www.svt.se/nyheter/utrikes/svenska-andrea-ar-en-mil-fran-branderna-i-kalifornien',
        'info_dict': {
            'id': '24243746',
            'title': 'Svenska Andrea redo att fly sitt hem i Kalifornien',
        },
        'playlist_count': 2,
    }, {
        # only programTitle
        'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun',
        'info_dict': {
            'id': '8439V2K',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
            'age_limit': 0,
        },
    }, {
        'url': 'https://www.svt.se/nyheter/lokalt/vast/svt-testar-tar-nagon-upp-skrapet-1',
        'only_matching': True,
    }, {
        'url': 'https://www.svt.se/vader/manadskronikor/maj2018',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        return False if SVTIE.suitable(url) else super(SVTPageIE, cls).suitable(url)

    def _real_extract(self, url):
        path, display_id = re.match(self._VALID_URL, url).groups()

        article = self._download_json(
            'https://api.svt.se/nss-api/page/' + path, display_id,
            query={'q': 'articles'})['articles']['content'][0]

        entries = []

        def _process_content(content):
            if content.get('_type') in ('VIDEOCLIP', 'VIDEOEPISODE'):
                video_id = compat_str(content['image']['svtId'])
                entries.append(self.url_result(
                    'svt:' + video_id, SVTPlayIE.ie_key(), video_id))

        for media in article.get('media', []):
            _process_content(media)

        for obj in article.get('structuredBody', []):
            _process_content(obj.get('content') or {})

        return self.playlist_result(
            entries, str_or_none(article.get('id')),
            strip_or_none(article.get('title')))