You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

167 lines
6.5 KiB

  1. import re
  2. import json
  3. import itertools
  4. from .common import InfoExtractor
  5. from .subtitles import SubtitlesInfoExtractor
  6. from ..utils import (
  7. compat_urllib_request,
  8. compat_str,
  9. get_element_by_attribute,
  10. get_element_by_id,
  11. ExtractorError,
  12. )
  13. class DailymotionIE(SubtitlesInfoExtractor):
  14. """Information Extractor for Dailymotion"""
  15. _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
  16. IE_NAME = u'dailymotion'
  17. _TEST = {
  18. u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
  19. u'file': u'x33vw9.mp4',
  20. u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
  21. u'info_dict': {
  22. u"uploader": u"Amphora Alex and Van .",
  23. u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
  24. }
  25. }
  26. def _real_extract(self, url):
  27. # Extract id and simplified title from URL
  28. mobj = re.match(self._VALID_URL, url)
  29. video_id = mobj.group(1).split('_')[0].split('?')[0]
  30. video_extension = 'mp4'
  31. url = 'http://www.dailymotion.com/video/%s' % video_id
  32. # Retrieve video webpage to extract further information
  33. request = compat_urllib_request.Request(url)
  34. request.add_header('Cookie', 'family_filter=off')
  35. webpage = self._download_webpage(request, video_id)
  36. # Extract URL, uploader and title from webpage
  37. self.report_extraction(video_id)
  38. video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
  39. # Looking for official user
  40. r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
  41. webpage, 'video uploader')
  42. video_upload_date = None
  43. mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
  44. if mobj is not None:
  45. video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
  46. embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
  47. embed_page = self._download_webpage(embed_url, video_id,
  48. u'Downloading embed page')
  49. info = self._search_regex(r'var info = ({.*?}),$', embed_page,
  50. 'video info', flags=re.MULTILINE)
  51. info = json.loads(info)
  52. if info.get('error') is not None:
  53. msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
  54. raise ExtractorError(msg, expected=True)
  55. # TODO: support choosing qualities
  56. for key in ['stream_h264_hd1080_url','stream_h264_hd_url',
  57. 'stream_h264_hq_url','stream_h264_url',
  58. 'stream_h264_ld_url']:
  59. if info.get(key):#key in info and info[key]:
  60. max_quality = key
  61. self.to_screen(u'Using %s' % key)
  62. break
  63. else:
  64. raise ExtractorError(u'Unable to extract video URL')
  65. video_url = info[max_quality]
  66. # subtitles
  67. video_subtitles = self.extract_subtitles(video_id)
  68. if self._downloader.params.get('listsubtitles', False):
  69. self._list_available_subtitles(video_id)
  70. return
  71. return [{
  72. 'id': video_id,
  73. 'url': video_url,
  74. 'uploader': video_uploader,
  75. 'upload_date': video_upload_date,
  76. 'title': self._og_search_title(webpage),
  77. 'ext': video_extension,
  78. 'subtitles': video_subtitles,
  79. 'thumbnail': info['thumbnail_url']
  80. }]
  81. def _get_available_subtitles(self, video_id):
  82. try:
  83. sub_list = self._download_webpage(
  84. 'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
  85. video_id, note=False)
  86. except ExtractorError as err:
  87. self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
  88. return {}
  89. info = json.loads(sub_list)
  90. if (info['total'] > 0):
  91. sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
  92. return sub_lang_list
  93. self._downloader.report_warning(u'video doesn\'t have subtitles')
  94. return {}
  95. class DailymotionPlaylistIE(InfoExtractor):
  96. IE_NAME = u'dailymotion:playlist'
  97. _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
  98. _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'
  99. _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
  100. def _extract_entries(self, id):
  101. video_ids = []
  102. for pagenum in itertools.count(1):
  103. webpage = self._download_webpage(self._PAGE_TEMPLATE % (id, pagenum),
  104. id, u'Downloading page %s' % pagenum)
  105. playlist_el = get_element_by_attribute(u'class', u'video_list', webpage)
  106. video_ids.extend(re.findall(r'data-id="(.+?)" data-ext-id', playlist_el))
  107. if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
  108. break
  109. return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
  110. for video_id in video_ids]
  111. def _real_extract(self, url):
  112. mobj = re.match(self._VALID_URL, url)
  113. playlist_id = mobj.group('id')
  114. webpage = self._download_webpage(url, playlist_id)
  115. return {'_type': 'playlist',
  116. 'id': playlist_id,
  117. 'title': get_element_by_id(u'playlist_name', webpage),
  118. 'entries': self._extract_entries(playlist_id),
  119. }
  120. class DailymotionUserIE(DailymotionPlaylistIE):
  121. IE_NAME = u'dailymotion:user'
  122. _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
  123. _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/user/.+?".*?>.*?</a>.*?</div>'
  124. _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
  125. def _real_extract(self, url):
  126. mobj = re.match(self._VALID_URL, url)
  127. user = mobj.group('user')
  128. webpage = self._download_webpage(url, user)
  129. full_user = self._html_search_regex(
  130. r'<a class="label" href="/%s".*?>(.*?)</' % re.escape(user),
  131. webpage, u'user', flags=re.DOTALL)
  132. return {
  133. '_type': 'playlist',
  134. 'id': user,
  135. 'title': full_user,
  136. 'entries': self._extract_entries(user),
  137. }