import re
import json
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    find_xpath_attr,
    unified_strdate,
)


class ArteTvIE(InfoExtractor):
    """
    There are two sources of video in arte.tv: videos.arte.tv and
    www.arte.tv/guide; the extraction process is different for each one.
    The videos expire in 7 days, so we can't add tests.
    """
    _EMISSION_URL = r'(?:http://)?www\.arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
    _VIDEOS_URL = r'(?:http://)?videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
    _LIVEWEB_URL = r'(?:http://)?liveweb\.arte\.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
    _LIVE_URL = r'index-[0-9]+\.html$'

    IE_NAME = u'arte.tv'
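
    # No single _VALID_URL covers all of the supported pages, so suitable()
    # below checks each of the URL patterns explicitly.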
    @classmethod
    def suitable(cls, url):
        return any(re.match(regex, url) for regex in (cls._EMISSION_URL, cls._VIDEOS_URL, cls._LIVEWEB_URL))

    # TODO implement Live Stream
    # from ..utils import compat_urllib_parse
    # def extractLiveStream(self, url):
    #     video_lang = url.split('/')[-4]
    #     info = self.grep_webpage(
    #         url,
    #         r'src="(.*?/videothek_js.*?\.js)',
    #         0,
    #         [
    #             (1, 'url', u'Invalid URL: %s' % url)
    #         ]
    #     )
    #     http_host = url.split('/')[2]
    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
    #     info = self.grep_webpage(
    #         next_url,
    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
    #             '(http://.*?\.swf).*?' +
    #             '(rtmp://.*?)\'',
    #         re.DOTALL,
    #         [
    #             (1, 'path', u'could not extract video path: %s' % url),
    #             (2, 'player', u'could not extract video player: %s' % url),
    #             (3, 'url', u'could not extract video url: %s' % url)
    #         ]
    #     )
    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))
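
    # Dispatch on which URL pattern matched; each source has its own
    # _extract_* helper. Live streams are not yet supported.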
    def _real_extract(self, url):
        mobj = re.match(self._EMISSION_URL, url)
        if mobj is not None:
            lang = mobj.group('lang')
            # This is not a real id, it can be for example AJT for the news
            # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
            video_id = mobj.group('id')
            return self._extract_emission(url, video_id, lang)

        mobj = re.match(self._VIDEOS_URL, url)
        if mobj is not None:
            video_id = mobj.group('id')
            lang = mobj.group('lang')
            return self._extract_video(url, video_id, lang)

        mobj = re.match(self._LIVEWEB_URL, url)
        if mobj is not None:
            name = mobj.group('name')
            lang = mobj.group('lang')
            return self._extract_liveweb(url, name, lang)

        if re.search(self._LIVE_URL, url) is not None:
            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
            # self.extractLiveStream(url)
            # return

    def _extract_emission(self, url, video_id, lang):
        """Extract from www.arte.tv/guide"""
        webpage = self._download_webpage(url, video_id)
        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')

        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
        self.report_extraction(video_id)
        info = json.loads(json_info)
        player_info = info['videoJsonPlayer']

        info_dict = {
            'id': player_info['VID'],
            'title': player_info['VTI'],
            'description': player_info.get('VDE'),
            'upload_date': unified_strdate(player_info['VDA'].split(' ')[0]),
            'thumbnail': player_info['programImage'],
            'ext': 'flv',
        }
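
        # 'VSR' maps internal format keys to format dicts; the fields used
        # below are 'versionCode', 'height', 'mediaType', 'url' and 'streamer'.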
        formats = player_info['VSR'].values()

        def _match_lang(f):
            # Return true if that format is in the language of the url
            if lang == 'fr':
                l = 'F'
            elif lang == 'de':
                l = 'A'
            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
            return any(re.match(r, f['versionCode']) for r in regexes)
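        # The patterns above accept e.g. 'VF'/'VOF' (or 'VA'/'VOA' for German)
        # as well as any version carrying subtitles in the language of the URL
        # (the '-STF'/'-STA' suffix).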
        # Some formats may not be in the same language as the url
        formats = filter(_match_lang, formats)
        # We order the formats by quality
        formats = sorted(formats, key=lambda f: int(f['height']))
        # Prefer videos without subtitles in the same language
        formats = sorted(formats, key=lambda f: re.match(r'VO(F|A)-STM\1', f['versionCode']) is None)
        # Pick the best quality
        format_info = formats[-1]
        if format_info['mediaType'] == u'rtmp':
            info_dict['url'] = format_info['streamer']
            info_dict['play_path'] = 'mp4:' + format_info['url']
        else:
            info_dict['url'] = format_info['url']

        return info_dict

    def _extract_video(self, url, video_id, lang):
        """Extract from videos.arte.tv"""
        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
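        # The ',view,asPlayerXml.xml' delegate is expected to return a small
        # XML document whose <video lang="..."> node has a 'ref' attribute
        # pointing at the per-language config XML used below.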
        ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
        ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
        config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
        config_xml_url = config_node.attrib['ref']
        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')

        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))

        def _key(m):
            quality = m.group('quality')
            if quality == 'hd':
                return 2
            else:
                return 1
        # We pick the best quality
        video_urls = sorted(video_urls, key=_key)
        video_url = video_urls[-1].group('url')

        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
                                            config_xml, 'thumbnail')
        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'url': video_url,
            'ext': 'flv',
        }

    def _extract_liveweb(self, url, name, lang):
        """Extract from http://liveweb.arte.tv/"""
        webpage = self._download_webpage(url, name)
        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
        config_xml = self._download_webpage('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
                                            video_id, u'Downloading information')
        config_doc = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
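        # The event XML contains an <event> node whose <video> child carries
        # <urlHd> and <urlSd> stream URLs; prefer HD and fall back to SD.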
        event_doc = config_doc.find('event')
        video_doc = event_doc.find('video')
        url_node = video_doc.find('urlHd')
        if url_node is None:
            url_node = video_doc.find('urlSd')

        return {
            'id': video_id,
            'title': event_doc.find('name%s' % lang.capitalize()).text,
            'url': url_node.text.replace('MP4', 'mp4'),
            'ext': 'flv',
            'thumbnail': self._og_search_thumbnail(webpage),
        }