# encoding: utf-8
from __future__ import unicode_literals

import re
import json
import datetime

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_duration,
    parse_iso8601,
    xpath_text,
    determine_ext,
)


class NiconicoIE(InfoExtractor):
    IE_NAME = 'niconico'
    IE_DESC = 'ニコニコ動画'

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'd1a75c0823e2f629128c43e1212760f9',
        'info_dict': {
            'id': 'sm22312215',
            'ext': 'mp4',
            'title': 'Big Buck Bunny',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': 1385182762,
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
            'duration': 33,
        },
    }, {
        # Files downloaded with and without credentials differ, so omit
        # the md5 field
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
        'info_dict': {
            'id': 'nm14296458',
            'ext': 'swf',
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'uploader': 'りょうた',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
            'duration': 209,
        },
    }, {
        # video exists but is marked as "deleted"
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm10000',
        'info_dict': {
            'id': 'sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'upload_date': '20071224',
            'timestamp': 1198527840,  # the timestamp field has a different value when logged in
            'duration': 304,
        },
    }, {
        'url': 'http://www.nicovideo.jp/watch/so22543406',
        'info_dict': {
            'id': '1388129933',
            'ext': 'mp4',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
            'uploader_id': '312',
        },
    }]

    _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'
    # Determine whether the downloader used authentication to download video
    _AUTHENTICATED = False

    def _real_initialize(self):
        self._login()

    def _login(self):
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if not username:
            return True

        # Log in
        login_form_strs = {
            'mail': username,
            'password': password,
        }
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
        request = compat_urllib_request.Request(
            'https://secure.nicovideo.jp/secure/login', login_data)
        login_results = self._download_webpage(
            request, None, note='Logging in', errnote='Unable to log in')
        if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return False
        # Successful login
        self._AUTHENTICATED = True
        return True

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Get video webpage. We are not actually interested in it for normal
        # cases, but need the cookies in order to be able to download the
        # info webpage
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
        if video_id.startswith('so'):
            video_id = self._match_id(handle.geturl())

        video_info = self._download_xml(
            'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
            note='Downloading video info page')
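
        # Note: the getthumbinfo response is an XML document; the metadata
        # fields used below (title, description, movie_type, thumbnail_url,
        # first_retrieve, length, view_counter, comment_num, ch_*/user_*)
        # are pulled out of it with xpath_text()/find().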

        if self._AUTHENTICATED:
            # Get flv info
            flv_info_webpage = self._download_webpage(
                'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
                video_id, 'Downloading flv info')
        else:
            # Get external player info
            ext_player_info = self._download_webpage(
                'http://ext.nicovideo.jp/thumb_watch/' + video_id, video_id)
            thumb_play_key = self._search_regex(
                r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')

            # Get flv info
            flv_info_data = compat_urllib_parse.urlencode({
                'k': thumb_play_key,
                'v': video_id
            })
            flv_info_request = compat_urllib_request.Request(
                'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
                {'Content-Type': 'application/x-www-form-urlencoded'})
            flv_info_webpage = self._download_webpage(
                flv_info_request, video_id,
                note='Downloading flv info', errnote='Unable to download flv info')
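
        # Both endpoints answer with a URL-encoded form body (key=value pairs
        # such as 'url' and, for removed videos, 'deleted'), so it is parsed
        # like a query string.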
        flv_info = compat_urlparse.parse_qs(flv_info_webpage)
        if 'url' not in flv_info:
            if 'deleted' in flv_info:
                raise ExtractorError('The video has been deleted.',
                                     expected=True)
            else:
                raise ExtractorError('Unable to find video URL')

        video_real_url = flv_info['url'][0]

        # Start extracting information
        title = xpath_text(video_info, './/title')
        if not title:
            title = self._og_search_title(webpage, default=None)
        if not title:
            title = self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title')

        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})

        extension = xpath_text(video_info, './/movie_type')
        if not extension:
            extension = determine_ext(video_real_url)
        video_format = extension.upper()

        thumbnail = (
            xpath_text(video_info, './/thumbnail_url') or
            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
            video_detail.get('thumbnail'))

        description = xpath_text(video_info, './/description')

        timestamp = parse_iso8601(xpath_text(video_info, './/first_retrieve'))
        if not timestamp:
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            if match:
                timestamp = parse_iso8601(match.replace('+', ':00+'))
        if not timestamp and video_detail.get('postedAt'):
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))
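        # (The fallbacks above assume 'datePublished' carries an ISO 8601-like
        # value without a seconds field, so ':00' is spliced in before the
        # offset, and that 'postedAt' is apparently a 'YYYY/MM/DD hh:mm:ss'
        # local time in JST, hence the fixed +9h offset.)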

        view_count = int_or_none(xpath_text(video_info, './/view_counter'))
        if not view_count:
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
            if match:
                view_count = int_or_none(match.replace(',', ''))
        view_count = view_count or video_detail.get('viewCount')

        comment_count = int_or_none(xpath_text(video_info, './/comment_num'))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
            if match:
                comment_count = int_or_none(match.replace(',', ''))
        comment_count = comment_count or video_detail.get('commentCount')

        duration = (parse_duration(
            xpath_text(video_info, './/length') or
            self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None)) or
            video_detail.get('length'))

        webpage_url = xpath_text(video_info, './/watch_url') or url

        if video_info.find('.//ch_id') is not None:
            uploader_id = video_info.find('.//ch_id').text
            uploader = video_info.find('.//ch_name').text
        elif video_info.find('.//user_id') is not None:
            uploader_id = video_info.find('.//user_id').text
            uploader = video_info.find('.//user_nickname').text
        else:
            uploader_id = uploader = None

        return {
            'id': video_id,
            'url': video_real_url,
            'title': title,
            'ext': extension,
            'format': video_format,
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
        }


class NiconicoPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.nicovideo.jp/mylist/27411728',
        'info_dict': {
            'id': '27411728',
            'title': 'AKB48のオールナイトニッポン',
        },
        'playlist_mincount': 225,
    }

    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)

        entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
                                          webpage, 'entries')
        entries = json.loads(entries_json)
        entries = [{
            '_type': 'url',
            'ie_key': NiconicoIE.ie_key(),
            'url': ('http://www.nicovideo.jp/watch/%s' %
                    entry['item_data']['video_id']),
        } for entry in entries]

        return {
            '_type': 'playlist',
            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
            'id': list_id,
            'entries': entries,
        }
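

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the extractor): these classes
# are normally driven through youtube_dl's public YoutubeDL API rather than
# instantiated directly, assuming the youtube_dl package is importable. The
# URL is the first test URL from _TESTS above.
#
#     import youtube_dl
#
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'http://www.nicovideo.jp/watch/sm22312215', download=False)
#         print(info['title'], info['ext'])
# ---------------------------------------------------------------------------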