You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

172 lines
7.2 KiB

  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import re
  4. from .common import InfoExtractor
  5. from ..compat import compat_str
  6. from ..utils import (
  7. xpath_text,
  8. int_or_none,
  9. determine_ext,
  10. parse_duration,
  11. xpath_attr,
  12. update_url_query,
  13. ExtractorError,
  14. )
  15. class TurnerBaseIE(InfoExtractor):
  16. def _extract_timestamp(self, video_data):
  17. return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
  18. def _extract_cvp_info(self, data_src, video_id, path_data={}):
  19. video_data = self._download_xml(data_src, video_id)
  20. video_id = video_data.attrib['id']
  21. title = xpath_text(video_data, 'headline', fatal=True)
  22. content_id = xpath_text(video_data, 'contentId') or video_id
  23. # rtmp_src = xpath_text(video_data, 'akamai/src')
  24. # if rtmp_src:
  25. # splited_rtmp_src = rtmp_src.split(',')
  26. # if len(splited_rtmp_src) == 2:
  27. # rtmp_src = splited_rtmp_src[1]
  28. # aifp = xpath_text(video_data, 'akamai/aifp', default='')
  29. tokens = {}
  30. urls = []
  31. formats = []
  32. rex = re.compile(
  33. r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
  34. # Possible formats locations: files/file, files/groupFiles/files
  35. # and maybe others
  36. for video_file in video_data.findall('.//file'):
  37. video_url = video_file.text.strip()
  38. if not video_url:
  39. continue
  40. ext = determine_ext(video_url)
  41. if video_url.startswith('/mp4:protected/'):
  42. continue
  43. # TODO Correct extraction for these files
  44. # protected_path_data = path_data.get('protected')
  45. # if not protected_path_data or not rtmp_src:
  46. # continue
  47. # protected_path = self._search_regex(
  48. # r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
  49. # auth = self._download_webpage(
  50. # protected_path_data['tokenizer_src'], query={
  51. # 'path': protected_path,
  52. # 'videoId': content_id,
  53. # 'aifp': aifp,
  54. # })
  55. # token = xpath_text(auth, 'token')
  56. # if not token:
  57. # continue
  58. # video_url = rtmp_src + video_url + '?' + token
  59. elif video_url.startswith('/secure/'):
  60. secure_path_data = path_data.get('secure')
  61. if not secure_path_data:
  62. continue
  63. video_url = secure_path_data['media_src'] + video_url
  64. secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
  65. token = tokens.get(secure_path)
  66. if not token:
  67. auth = self._download_xml(
  68. secure_path_data['tokenizer_src'], video_id, query={
  69. 'path': secure_path,
  70. 'videoId': content_id,
  71. })
  72. error_msg = xpath_text(auth, 'error/msg')
  73. if error_msg:
  74. raise ExtractorError(error_msg, expected=True)
  75. token = xpath_text(auth, 'token')
  76. if not token:
  77. continue
  78. tokens[secure_path] = token
  79. video_url = video_url + '?hdnea=' + token
  80. elif not re.match('https?://', video_url):
  81. base_path_data = path_data.get(ext, path_data.get('default', {}))
  82. media_src = base_path_data.get('media_src')
  83. if not media_src:
  84. continue
  85. video_url = media_src + video_url
  86. if video_url in urls:
  87. continue
  88. urls.append(video_url)
  89. format_id = video_file.get('bitrate')
  90. if ext == 'smil':
  91. formats.extend(self._extract_smil_formats(
  92. video_url, video_id, fatal=False))
  93. elif ext == 'm3u8':
  94. formats.extend(self._extract_m3u8_formats(
  95. video_url, video_id, 'mp4',
  96. m3u8_id=format_id or 'hls', fatal=False))
  97. elif ext == 'f4m':
  98. formats.extend(self._extract_f4m_formats(
  99. update_url_query(video_url, {'hdcore': '3.7.0'}),
  100. video_id, f4m_id=format_id or 'hds', fatal=False))
  101. else:
  102. f = {
  103. 'format_id': format_id,
  104. 'url': video_url,
  105. 'ext': ext,
  106. }
  107. mobj = rex.search(format_id + video_url)
  108. if mobj:
  109. f.update({
  110. 'width': int(mobj.group('width')),
  111. 'height': int(mobj.group('height')),
  112. 'tbr': int_or_none(mobj.group('bitrate')),
  113. })
  114. elif isinstance(format_id, compat_str):
  115. if format_id.isdigit():
  116. f['tbr'] = int(format_id)
  117. else:
  118. mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
  119. if mobj:
  120. if mobj.group(1) == 'audio':
  121. f.update({
  122. 'vcodec': 'none',
  123. 'ext': 'm4a',
  124. })
  125. else:
  126. f['tbr'] = int(mobj.group(1))
  127. formats.append(f)
  128. self._sort_formats(formats)
  129. subtitles = {}
  130. for source in video_data.findall('closedCaptions/source'):
  131. for track in source.findall('track'):
  132. track_url = track.get('url')
  133. if not isinstance(track_url, compat_str) or track_url.endswith('/big'):
  134. continue
  135. lang = track.get('lang') or track.get('label') or 'en'
  136. subtitles.setdefault(lang, []).append({
  137. 'url': track_url,
  138. 'ext': {
  139. 'scc': 'scc',
  140. 'webvtt': 'vtt',
  141. 'smptett': 'tt',
  142. }.get(source.get('format'))
  143. })
  144. thumbnails = [{
  145. 'id': image.get('cut'),
  146. 'url': image.text,
  147. 'width': int_or_none(image.get('width')),
  148. 'height': int_or_none(image.get('height')),
  149. } for image in video_data.findall('images/image')]
  150. return {
  151. 'id': video_id,
  152. 'title': title,
  153. 'formats': formats,
  154. 'subtitles': subtitles,
  155. 'thumbnails': thumbnails,
  156. 'description': xpath_text(video_data, 'description'),
  157. 'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
  158. 'timestamp': self._extract_timestamp(video_data),
  159. 'upload_date': xpath_attr(video_data, 'metas', 'version'),
  160. 'series': xpath_text(video_data, 'showTitle'),
  161. 'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
  162. 'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
  163. }