import re
import json
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor

from ..utils import (
    compat_urllib_request,
    compat_str,
    get_element_by_attribute,
    get_element_by_id,
    orderedSet,
    str_to_int,
    ExtractorError,
)


class DailymotionBaseInfoExtractor(InfoExtractor):
    @staticmethod
    def _build_request(url):
        """Build a request with the family filter disabled"""
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'family_filter=off')
        request.add_header('Cookie', 'ff=off')
        return request


class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
    IE_NAME = u'dailymotion'

    # Map of info keys (as exposed on the embed page) to format labels,
    # ordered from lowest to highest quality.
    _FORMATS = [
        (u'stream_h264_ld_url', u'ld'),
        (u'stream_h264_url', u'standard'),
        (u'stream_h264_hq_url', u'hq'),
        (u'stream_h264_hd_url', u'hd'),
        (u'stream_h264_hd1080_url', u'hd1080'),
    ]

    _TESTS = [
        {
            u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
            u'file': u'x33vw9.mp4',
            u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
            u'info_dict': {
                u"uploader": u"Amphora Alex and Van .",
                u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
            }
        },
        # Vevo video
        {
            u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
            u'file': u'USUV71301934.mp4',
            u'info_dict': {
                u'title': u'Roar (Official)',
                u'uploader': u'Katy Perry',
                u'upload_date': u'20130905',
            },
            u'params': {
                u'skip_download': True,
            },
            u'skip': u'VEVO is only available in some countries',
        },
        # age-restricted video
        {
            u'url': u'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
            u'file': u'xyh2zz.mp4',
            u'md5': u'0d667a7b9cebecc3c89ee93099c4159d',
            u'info_dict': {
                u'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
                u'uploader': 'HotWaves1012',
                u'age_limit': 18,
            }
        }
    ]

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        url = 'http://www.dailymotion.com/video/%s' % video_id

        # Retrieve video webpage to extract further information
        request = self._build_request(url)
        webpage = self._download_webpage(request, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)

        # It may just embed a vevo video:
        m_vevo = re.search(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
            webpage)
        if m_vevo is not None:
            vevo_id = m_vevo.group('id')
            self.to_screen(u'Vevo video detected: %s' % vevo_id)
            return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')

        age_limit = self._rta_search(webpage)
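
        # The watch page shows the upload date as DD-MM-YYYY; rearrange it
        # into the YYYYMMDD form youtube-dl uses for upload_date.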
        video_upload_date = None
        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)

        embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
        embed_page = self._download_webpage(embed_url, video_id,
                                            u'Downloading embed page')
        info = self._search_regex(r'var info = ({.*?}),$', embed_page,
                                  'video info', flags=re.MULTILINE)
        info = json.loads(info)
        if info.get('error') is not None:
            msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
            raise ExtractorError(msg, expected=True)
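
        # Each _FORMATS entry maps a key of the embed-page info object to a
        # progressive MP4 URL; width/height are parsed from the URL when the
        # 'H264-<width>x<height>' marker is present.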
        formats = []
        for (key, format_id) in self._FORMATS:
            video_url = info.get(key)
            if video_url is not None:
                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
                if m_size is not None:
                    width, height = m_size.group(1), m_size.group(2)
                else:
                    width, height = None, None
                formats.append({
                    'url': video_url,
                    'ext': 'mp4',
                    'format_id': format_id,
                    'width': width,
                    'height': height,
                })
        if not formats:
            raise ExtractorError(u'Unable to extract video URL')

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, webpage)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, webpage)
            return

        view_count = self._search_regex(
            r'video_views_count[^>]+>\s+([\d\.,]+)', webpage, u'view count', fatal=False)
        if view_count is not None:
            view_count = str_to_int(view_count)

        return {
            'id': video_id,
            'formats': formats,
            'uploader': info['owner_screenname'],
            'upload_date': video_upload_date,
            'title': self._og_search_title(webpage),
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
            'view_count': view_count,
        }
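
    # Subtitle languages are listed by Dailymotion's public REST API rather
    # than scraped from the watch page.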
    def _get_available_subtitles(self, video_id, webpage):
        try:
            sub_list = self._download_webpage(
                'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
            return {}
        info = json.loads(sub_list)
        if info['total'] > 0:
            sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
            return sub_lang_list
        self._downloader.report_warning(u'video doesn\'t have subtitles')
        return {}


class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    IE_NAME = u'dailymotion:playlist'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
    _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'
    _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
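
    # Playlist pages are fetched one at a time; iteration stops once the
    # "next" link matched by _MORE_PAGES_INDICATOR no longer appears.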
    def _extract_entries(self, id):
        video_ids = []
        for pagenum in itertools.count(1):
            request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
            webpage = self._download_webpage(request,
                                             id, u'Downloading page %s' % pagenum)

            playlist_el = get_element_by_attribute(u'class', u'row video_list', webpage)
            video_ids.extend(re.findall(r'data-id="(.+?)"', playlist_el))

            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break
        return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                for video_id in orderedSet(video_ids)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        webpage = self._download_webpage(url, playlist_id)

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': get_element_by_id(u'playlist_name', webpage),
            'entries': self._extract_entries(playlist_id),
        }


class DailymotionUserIE(DailymotionPlaylistIE):
    IE_NAME = u'dailymotion:user'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
    _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/user/.+?".*?>.*?</a>.*?</div>'
    _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        webpage = self._download_webpage(url, user)

        full_user = self._html_search_regex(
            r'<a class="label" href="/%s".*?>(.*?)</' % re.escape(user),
            webpage, u'user', flags=re.DOTALL)

        return {
            '_type': 'playlist',
            'id': user,
            'title': full_user,
            'entries': self._extract_entries(user),
        }