# encoding: utf-8
import re
import xml.etree.ElementTree
import json

from .common import InfoExtractor
from ..utils import (
    compat_urlparse,
)
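

# Base class shared by the France Télévisions extractors below: it fetches the
# getInfosOeuvre.php XML config for a diffusion ID and turns the f4m manifest
# URL it contains into an HLS URL (manifest.f4m -> index_2_av.m3u8, /z/ -> /i/).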
class FranceTVBaseInfoExtractor(InfoExtractor):
    def _extract_video(self, video_id):
        xml_desc = self._download_webpage(
            'http://www.francetvinfo.fr/appftv/webservices/video/'
            'getInfosOeuvre.php?id-diffusion='
            + video_id, video_id, 'Downloading XML config')
        info = xml.etree.ElementTree.fromstring(xml_desc.encode('utf-8'))

        manifest_url = info.find('videos/video/url').text
        video_url = manifest_url.replace('manifest.f4m', 'index_2_av.m3u8')
        video_url = video_url.replace('/z/', '/i/')
        thumbnail_path = info.find('image').text

        return {
            'id': video_id,
            'ext': 'mp4',
            'url': video_url,
            'title': info.find('titre').text,
            'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path),
            'description': info.find('synopsis').text,
        }
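

# pluzz.francetv.fr: the numeric diffusion ID is read from the data-diffusion
# attribute of the video page, then resolved through the shared helper above.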
class PluzzIE(FranceTVBaseInfoExtractor):
    IE_NAME = u'pluzz.francetv.fr'
    _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html'

    # Can't use tests, videos expire in 7 days

    def _real_extract(self, url):
        title = re.match(self._VALID_URL, url).group(1)
        webpage = self._download_webpage(url, title)
        video_id = self._search_regex(
            r'data-diffusion="(\d+)"', webpage, 'ID')
        return self._extract_video(video_id)
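

# francetvinfo.fr replay pages: the video ID appears as an id-video= parameter
# in the page markup.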
class FranceTvInfoIE(FranceTVBaseInfoExtractor):
    IE_NAME = u'francetvinfo.fr'
    _VALID_URL = r'https?://www\.francetvinfo\.fr/replay.*/(?P<title>.+)\.html'

    _TEST = {
        u'url': u'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
        u'file': u'84981923.mp4',
        u'info_dict': {
            u'title': u'Soir 3',
        },
        u'params': {
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        page_title = mobj.group('title')
        webpage = self._download_webpage(url, page_title)
        video_id = self._search_regex(r'id-video=(\d+?)"', webpage, u'video id')
        return self._extract_video(video_id)
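

# france2.fr: /emissions/.../videos/<id> URLs carry the video ID directly;
# /emission/<key> pages are scraped for the videos.francetv.fr player link instead.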
class France2IE(FranceTVBaseInfoExtractor):
    IE_NAME = u'france2.fr'
    _VALID_URL = r'''(?x)https?://www\.france2\.fr/
        (?:
            emissions/.*?/videos/(?P<id>\d+)
        |   emission/(?P<key>[^/?]+)
        )'''

    _TEST = {
        u'url': u'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
        u'file': u'75540104.mp4',
        u'info_dict': {
            u'title': u'13h15, le samedi...',
            u'description': u'md5:2e5b58ba7a2d3692b35c792be081a03d',
        },
        u'params': {
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj.group('key'):
            webpage = self._download_webpage(url, mobj.group('key'))
            video_id = self._html_search_regex(
                r'''(?x)<div\s+class="video-player">\s*
                    <a\s+href="http://videos\.francetv\.fr/video/([0-9]+)"\s+
                    class="francetv-video-player">''',
                webpage, u'video ID')
        else:
            video_id = mobj.group('id')
        return self._extract_video(video_id)
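

# generation-quoi.france2.fr portraits are hosted on Dailymotion: the portrait's
# JSON metadata supplies the Dailymotion video ID, and extraction is delegated
# to the Dailymotion extractor via url_result().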
class GenerationQuoiIE(InfoExtractor):
    IE_NAME = u'france2.fr:generation-quoi'
    _VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<name>.*)(\?|$)'

    _TEST = {
        u'url': u'http://generation-quoi.france2.fr/portrait/garde-a-vous',
        u'file': u'k7FJX8VBcvvLmX4wA5Q.mp4',
        u'info_dict': {
            u'title': u'Génération Quoi - Garde à Vous',
            u'uploader': u'Génération Quoi',
        },
        u'params': {
            # It uses Dailymotion
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        info_url = compat_urlparse.urljoin(url, '/medias/video/%s.json' % name)
        info_json = self._download_webpage(info_url, name)
        info = json.loads(info_json)
        return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
                               ie='Dailymotion')