import re
import json
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor

from ..utils import (
    compat_urllib_request,
    compat_str,
    get_element_by_attribute,
    get_element_by_id,
    orderedSet,
    str_to_int,
    int_or_none,
    ExtractorError,
)


class DailymotionBaseInfoExtractor(InfoExtractor):
    @staticmethod
    def _build_request(url):
        """Build a request with the family filter disabled"""
        request = compat_urllib_request.Request(url)
        # add_header() replaces an existing header of the same name, so both
        # cookies must be sent in a single Cookie header.
        request.add_header('Cookie', 'family_filter=off; ff=off')
        return request


class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
    IE_NAME = u'dailymotion'

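    # Keys of the embed page's "info" JSON mapped to youtube-dl format ids,
    # ordered from lowest to highest quality.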
    _FORMATS = [
        (u'stream_h264_ld_url', u'ld'),
        (u'stream_h264_url', u'standard'),
        (u'stream_h264_hq_url', u'hq'),
        (u'stream_h264_hd_url', u'hd'),
        (u'stream_h264_hd1080_url', u'hd1080'),
    ]

    _TESTS = [
        {
            u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
            u'file': u'x33vw9.mp4',
            u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
            u'info_dict': {
                u"uploader": u"Amphora Alex and Van .",
                u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
            }
        },
        # Vevo video
        {
            u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
            u'file': u'USUV71301934.mp4',
            u'info_dict': {
                u'title': u'Roar (Official)',
                u'uploader': u'Katy Perry',
                u'upload_date': u'20130905',
            },
            u'params': {
                u'skip_download': True,
            },
            u'skip': u'VEVO is only available in some countries',
        },
        # age-restricted video
        {
            u'url': u'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
            u'file': u'xyh2zz.mp4',
            u'md5': u'0d667a7b9cebecc3c89ee93099c4159d',
            u'info_dict': {
                u'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
                u'uploader': 'HotWaves1012',
                u'age_limit': 18,
            }
        }
    ]

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')

        url = 'http://www.dailymotion.com/video/%s' % video_id

        # Retrieve video webpage to extract further information
        request = self._build_request(url)
        webpage = self._download_webpage(request, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)

        # It may just embed a vevo video:
        m_vevo = re.search(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
            webpage)
        if m_vevo is not None:
            vevo_id = m_vevo.group('id')
            self.to_screen(u'Vevo video detected: %s' % vevo_id)
            return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')

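        # An RTA (Restricted To Adults) label on the page, when present, is
        # translated into an age limit.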
        age_limit = self._rta_search(webpage)

        video_upload_date = None
        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)

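        # The embed page carries a JSON "info" object with the direct stream URLs.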
        embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
        embed_page = self._download_webpage(embed_url, video_id,
                                            u'Downloading embed page')
        info = self._search_regex(r'var info = ({.*?}),$', embed_page,
                                  'video info', flags=re.MULTILINE)
        info = json.loads(info)

        if info.get('error') is not None:
            msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
            raise ExtractorError(msg, expected=True)

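        # Build the format list from the known stream keys; the resolution is
        # parsed out of the URL when it is encoded there (e.g. "H264-1280x720").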
        formats = []
        for (key, format_id) in self._FORMATS:
            video_url = info.get(key)
            if video_url is not None:
                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
                if m_size is not None:
                    width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
                else:
                    width, height = None, None
                formats.append({
                    'url': video_url,
                    'ext': 'mp4',
                    'format_id': format_id,
                    'width': width,
                    'height': height,
                })
        if not formats:
            raise ExtractorError(u'Unable to extract video URL')

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, webpage)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, webpage)
            return

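        # Scrape the view count from the page markup and normalize it to an integer.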
        view_count = self._search_regex(
            r'video_views_count[^>]+>\s+([\d\.,]+)', webpage, u'view count', fatal=False)
        if view_count is not None:
            view_count = str_to_int(view_count)

        return {
            'id': video_id,
            'formats': formats,
            'uploader': info['owner_screenname'],
            'upload_date': video_upload_date,
            'title': self._og_search_title(webpage),
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
            'view_count': view_count,
        }

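    # Called by the subtitle machinery in SubtitlesInfoExtractor: queries the
    # public Dailymotion API for the available subtitle tracks.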
    def _get_available_subtitles(self, video_id, webpage):
        try:
            sub_list = self._download_webpage(
                'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
            return {}
        info = json.loads(sub_list)
        if info['total'] > 0:
            sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
            return sub_lang_list
        self._downloader.report_warning(u'video doesn\'t have subtitles')
        return {}


class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    IE_NAME = u'dailymotion:playlist'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
    _MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
    _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'

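    # Walk the paginated playlist pages, collecting video ids until the
    # "next page" arrow disappears from the pagination markup.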
    def _extract_entries(self, id):
        video_ids = []
        for pagenum in itertools.count(1):
            request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
            webpage = self._download_webpage(request,
                                             id, u'Downloading page %s' % pagenum)

            video_ids.extend(re.findall(r'data-id="(.+?)"', webpage))

            if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                break
        return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                for video_id in orderedSet(video_ids)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        webpage = self._download_webpage(url, playlist_id)

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': get_element_by_id(u'playlist_name', webpage),
            'entries': self._extract_entries(playlist_id),
        }


class DailymotionUserIE(DailymotionPlaylistIE):
    IE_NAME = u'dailymotion:user'
    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
    _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'

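    # Page walking is inherited from DailymotionPlaylistIE; only the page
    # template and the playlist title (the user's display name) differ.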
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        webpage = self._download_webpage(url, user)

        full_user = self._html_search_regex(
            r'<a class="label" href="/%s".*?>(.*?)</' % re.escape(user),
            webpage, u'user', flags=re.DOTALL)

        return {
            '_type': 'playlist',
            'id': user,
            'title': full_user,
            'entries': self._extract_entries(user),
        }