You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

287 lines
11 KiB

  1. # encoding: utf-8
  2. from __future__ import unicode_literals
  3. import re
  4. import json
  5. import datetime
  6. from .common import InfoExtractor
  7. from ..compat import (
  8. compat_urllib_parse,
  9. compat_urllib_request,
  10. compat_urlparse,
  11. )
  12. from ..utils import (
  13. ExtractorError,
  14. int_or_none,
  15. parse_duration,
  16. parse_iso8601,
  17. xpath_text,
  18. determine_ext,
  19. )
  20. class NiconicoIE(InfoExtractor):
  21. IE_NAME = 'niconico'
  22. IE_DESC = 'ニコニコ動画'
  23. _TESTS = [{
  24. 'url': 'http://www.nicovideo.jp/watch/sm22312215',
  25. 'md5': 'd1a75c0823e2f629128c43e1212760f9',
  26. 'info_dict': {
  27. 'id': 'sm22312215',
  28. 'ext': 'mp4',
  29. 'title': 'Big Buck Bunny',
  30. 'uploader': 'takuya0301',
  31. 'uploader_id': '2698420',
  32. 'upload_date': '20131123',
  33. 'timestamp': 1385182762,
  34. 'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
  35. 'duration': 33,
  36. },
  37. }, {
  38. # File downloaded with and without credentials are different, so omit
  39. # the md5 field
  40. 'url': 'http://www.nicovideo.jp/watch/nm14296458',
  41. 'info_dict': {
  42. 'id': 'nm14296458',
  43. 'ext': 'swf',
  44. 'title': '【鏡音リン】Dance on media【オリジナル】take2!',
  45. 'description': 'md5:689f066d74610b3b22e0f1739add0f58',
  46. 'uploader': 'りょうた',
  47. 'uploader_id': '18822557',
  48. 'upload_date': '20110429',
  49. 'timestamp': 1304065916,
  50. 'duration': 209,
  51. },
  52. }, {
  53. # 'video exists but is marked as "deleted"
  54. # md5 is unstable
  55. 'url': 'http://www.nicovideo.jp/watch/sm10000',
  56. 'info_dict': {
  57. 'id': 'sm10000',
  58. 'ext': 'unknown_video',
  59. 'description': 'deleted',
  60. 'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
  61. 'upload_date': '20071224',
  62. 'timestamp': 1198527840, # timestamp field has different value if logged in
  63. 'duration': 304,
  64. },
  65. }, {
  66. 'url': 'http://www.nicovideo.jp/watch/so22543406',
  67. 'info_dict': {
  68. 'id': '1388129933',
  69. 'ext': 'mp4',
  70. 'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
  71. 'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
  72. 'timestamp': 1388851200,
  73. 'upload_date': '20140104',
  74. 'uploader': 'アニメロチャンネル',
  75. 'uploader_id': '312',
  76. }
  77. }]
  78. _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
  79. _NETRC_MACHINE = 'niconico'
  80. # Determine whether the downloader used authentication to download video
  81. _AUTHENTICATED = False
  82. def _real_initialize(self):
  83. self._login()
  84. def _login(self):
  85. (username, password) = self._get_login_info()
  86. # No authentication to be performed
  87. if not username:
  88. return True
  89. # Log in
  90. login_form_strs = {
  91. 'mail': username,
  92. 'password': password,
  93. }
  94. # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
  95. # chokes on unicode
  96. login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
  97. login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
  98. request = compat_urllib_request.Request(
  99. 'https://secure.nicovideo.jp/secure/login', login_data)
  100. login_results = self._download_webpage(
  101. request, None, note='Logging in', errnote='Unable to log in')
  102. if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
  103. self._downloader.report_warning('unable to log in: bad username or password')
  104. return False
  105. # Successful login
  106. self._AUTHENTICATED = True
  107. return True
  108. def _real_extract(self, url):
  109. video_id = self._match_id(url)
  110. # Get video webpage. We are not actually interested in it for normal
  111. # cases, but need the cookies in order to be able to download the
  112. # info webpage
  113. webpage, handle = self._download_webpage_handle(
  114. 'http://www.nicovideo.jp/watch/' + video_id, video_id)
  115. if video_id.startswith('so'):
  116. video_id = self._match_id(handle.geturl())
  117. video_info = self._download_xml(
  118. 'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
  119. note='Downloading video info page')
  120. if self._AUTHENTICATED:
  121. # Get flv info
  122. flv_info_webpage = self._download_webpage(
  123. 'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
  124. video_id, 'Downloading flv info')
  125. else:
  126. # Get external player info
  127. ext_player_info = self._download_webpage(
  128. 'http://ext.nicovideo.jp/thumb_watch/' + video_id, video_id)
  129. thumb_play_key = self._search_regex(
  130. r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')
  131. # Get flv info
  132. flv_info_data = compat_urllib_parse.urlencode({
  133. 'k': thumb_play_key,
  134. 'v': video_id
  135. })
  136. flv_info_request = compat_urllib_request.Request(
  137. 'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
  138. {'Content-Type': 'application/x-www-form-urlencoded'})
  139. flv_info_webpage = self._download_webpage(
  140. flv_info_request, video_id,
  141. note='Downloading flv info', errnote='Unable to download flv info')
  142. flv_info = compat_urlparse.parse_qs(flv_info_webpage)
  143. if 'url' not in flv_info:
  144. if 'deleted' in flv_info:
  145. raise ExtractorError('The video has been deleted.',
  146. expected=True)
  147. else:
  148. raise ExtractorError('Unable to find video URL')
  149. video_real_url = flv_info['url'][0]
  150. # Start extracting information
  151. title = xpath_text(video_info, './/title')
  152. if not title:
  153. title = self._og_search_title(webpage, default=None)
  154. if not title:
  155. title = self._html_search_regex(
  156. r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
  157. webpage, 'video title')
  158. watch_api_data_string = self._html_search_regex(
  159. r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
  160. webpage, 'watch api data', default=None)
  161. watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
  162. video_detail = watch_api_data.get('videoDetail', {})
  163. extension = xpath_text(video_info, './/movie_type')
  164. if not extension:
  165. extension = determine_ext(video_real_url)
  166. thumbnail = (
  167. xpath_text(video_info, './/thumbnail_url') or
  168. self._html_search_meta('image', webpage, 'thumbnail', default=None) or
  169. video_detail.get('thumbnail'))
  170. description = xpath_text(video_info, './/description')
  171. timestamp = parse_iso8601(xpath_text(video_info, './/first_retrieve'))
  172. if not timestamp:
  173. match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
  174. if match:
  175. timestamp = parse_iso8601(match.replace('+', ':00+'))
  176. if not timestamp and video_detail.get('postedAt'):
  177. timestamp = parse_iso8601(
  178. video_detail['postedAt'].replace('/', '-'),
  179. delimiter=' ', timezone=datetime.timedelta(hours=9))
  180. view_count = int_or_none(xpath_text(video_info, './/view_counter'))
  181. if not view_count:
  182. match = self._html_search_regex(
  183. r'>Views: <strong[^>]*>([^<]+)</strong>',
  184. webpage, 'view count', default=None)
  185. if match:
  186. view_count = int_or_none(match.replace(',', ''))
  187. view_count = view_count or video_detail.get('viewCount')
  188. comment_count = int_or_none(xpath_text(video_info, './/comment_num'))
  189. if not comment_count:
  190. match = self._html_search_regex(
  191. r'>Comments: <strong[^>]*>([^<]+)</strong>',
  192. webpage, 'comment count', default=None)
  193. if match:
  194. comment_count = int_or_none(match.replace(',', ''))
  195. comment_count = comment_count or video_detail.get('commentCount')
  196. duration = (parse_duration(
  197. xpath_text(video_info, './/length') or
  198. self._html_search_meta(
  199. 'video:duration', webpage, 'video duration', default=None)) or
  200. video_detail.get('length'))
  201. webpage_url = xpath_text(video_info, './/watch_url') or url
  202. if video_info.find('.//ch_id') is not None:
  203. uploader_id = video_info.find('.//ch_id').text
  204. uploader = video_info.find('.//ch_name').text
  205. elif video_info.find('.//user_id') is not None:
  206. uploader_id = video_info.find('.//user_id').text
  207. uploader = video_info.find('.//user_nickname').text
  208. else:
  209. uploader_id = uploader = None
  210. return {
  211. 'id': video_id,
  212. 'url': video_real_url,
  213. 'title': title,
  214. 'ext': extension,
  215. 'format_id': 'economy' if video_real_url.endswith('low') else 'normal',
  216. 'thumbnail': thumbnail,
  217. 'description': description,
  218. 'uploader': uploader,
  219. 'timestamp': timestamp,
  220. 'uploader_id': uploader_id,
  221. 'view_count': view_count,
  222. 'comment_count': comment_count,
  223. 'duration': duration,
  224. 'webpage_url': webpage_url,
  225. }
  226. class NiconicoPlaylistIE(InfoExtractor):
  227. _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'
  228. _TEST = {
  229. 'url': 'http://www.nicovideo.jp/mylist/27411728',
  230. 'info_dict': {
  231. 'id': '27411728',
  232. 'title': 'AKB48のオールナイトニッポン',
  233. },
  234. 'playlist_mincount': 225,
  235. }
  236. def _real_extract(self, url):
  237. list_id = self._match_id(url)
  238. webpage = self._download_webpage(url, list_id)
  239. entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
  240. webpage, 'entries')
  241. entries = json.loads(entries_json)
  242. entries = [{
  243. '_type': 'url',
  244. 'ie_key': NiconicoIE.ie_key(),
  245. 'url': ('http://www.nicovideo.jp/watch/%s' %
  246. entry['item_data']['video_id']),
  247. } for entry in entries]
  248. return {
  249. '_type': 'playlist',
  250. 'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
  251. 'id': list_id,
  252. 'entries': entries,
  253. }