# coding: utf-8
from __future__ import unicode_literals

import itertools
import re
import random

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    parse_iso8601,
)
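

# Base class shared by all Twitch extractors below: common URL/API constants,
# optional account login, Kraken API error handling, and a helper that bumps
# the 'Source' quality to the top of the format list.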
class TwitchBaseIE(InfoExtractor):
    _VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'

    _API_BASE = 'https://api.twitch.tv'
    _USHER_BASE = 'http://usher.twitch.tv'
    _LOGIN_URL = 'https://secure.twitch.tv/user/login'
    _LOGIN_POST_URL = 'https://secure-login.twitch.tv/login'
    _NETRC_MACHINE = 'twitch'

    def _handle_error(self, response):
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
                expected=True)

    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
        headers = {
            'Referer': 'http://api.twitch.tv/crossdomain/receiver.html?v=2',
            'X-Requested-With': 'XMLHttpRequest',
        }
        for cookie in self._downloader.cookiejar:
            if cookie.name == 'api_token':
                headers['Twitch-Api-Token'] = cookie.value
        request = compat_urllib_request.Request(url, headers=headers)
        response = super(TwitchBaseIE, self)._download_json(request, video_id, note)
        self._handle_error(response)
        return response

    def _real_initialize(self):
        self._login()

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        authenticity_token = self._search_regex(
            r'<input name="authenticity_token" type="hidden" value="([^"]+)"',
            login_page, 'authenticity token')

        login_form = {
            'utf8': '✓'.encode('utf-8'),
            'authenticity_token': authenticity_token,
            'redirect_on_login': '',
            'embed_form': 'false',
            'mp_source_action': 'login-button',
            'follow': '',
            'login': username,
            'password': password,
        }

        request = compat_urllib_request.Request(
            self._LOGIN_POST_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Referer', self._LOGIN_URL)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        m = re.search(
            r"id=([\"'])login_error_message\1[^>]*>(?P<msg>[^<]+)", response)
        if m:
            raise ExtractorError(
                'Unable to login: %s' % m.group('msg').strip(), expected=True)

    def _prefer_source(self, formats):
        try:
            source = next(f for f in formats if f['format_id'] == 'Source')
            source['preference'] = 10
        except StopIteration:
            pass  # No Source stream present
        self._sort_formats(formats)
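

# Shared logic for single Twitch items (videos, chapters, VODs): downloads the
# item metadata from the Kraken API and, for chunked archives, builds a
# playlist with one entry per part.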
class TwitchItemBaseIE(TwitchBaseIE):
    def _download_info(self, item, item_id):
        return self._extract_info(self._download_json(
            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
            'Downloading %s info JSON' % self._ITEM_TYPE))

    def _extract_media(self, item_id):
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        response = self._download_json(
            '%s/api/videos/%s%s' % (self._API_BASE, self._ITEM_SHORTCUT, item_id), item_id,
            'Downloading %s playlist JSON' % self._ITEM_TYPE)
        entries = []
        chunks = response['chunks']
        qualities = list(chunks.keys())
        # Each quality maps to a parallel list of chunk URLs; zip them so every
        # playlist entry (part) carries one format per available quality.
        for num, fragment in enumerate(zip(*chunks.values()), start=1):
            formats = []
            for fmt_num, fragment_fmt in enumerate(fragment):
                format_id = qualities[fmt_num]
                fmt = {
                    'url': fragment_fmt['url'],
                    'format_id': format_id,
                    'quality': 1 if format_id == 'live' else 0,
                }
                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
                if m:
                    fmt['height'] = int(m.group('height'))
                formats.append(fmt)
            self._sort_formats(formats)
            entry = dict(info)
            entry['id'] = '%s_%d' % (entry['id'], num)
            entry['title'] = '%s part %d' % (entry['title'], num)
            entry['formats'] = formats
            entries.append(entry)
        return self.playlist_result(entries, info['id'], info['title'])

    def _extract_info(self, info):
        return {
            'id': info['_id'],
            'title': info['title'],
            'description': info['description'],
            'duration': info['length'],
            'thumbnail': info['preview'],
            'uploader': info['channel']['display_name'],
            'uploader_id': info['channel']['name'],
            'timestamp': parse_iso8601(info['recorded_at']),
            'view_count': info['views'],
        }

    def _real_extract(self, url):
        return self._extract_media(self._match_id(url))


class TwitchVideoIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:video'
    _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'video'
    _ITEM_SHORTCUT = 'a'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        'playlist_mincount': 12,
    }


class TwitchChapterIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:chapter'
    _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'chapter'
    _ITEM_SHORTCUT = 'c'

    _TESTS = [{
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
        'only_matching': True,
    }]
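

# VODs are served over HLS: request an access token for the item, then build
# the m3u8 manifest URL on the usher endpoint.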
class TwitchVodIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:vod'
    _VALID_URL = r'%s/[^/]+/v/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'vod'
    _ITEM_SHORTCUT = 'v'

    _TEST = {
        'url': 'http://www.twitch.tv/ksptv/v/3622000',
        'info_dict': {
            'id': 'v3622000',
            'ext': 'mp4',
            'title': '''KSPTV: Squadcast: "Everyone's on vacation so here's Dahud" Edition!''',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 6951,
            'timestamp': 1419028564,
            'upload_date': '20141219',
            'uploader': 'KSPTV',
            'uploader_id': 'ksptv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        item_id = self._match_id(url)
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        access_token = self._download_json(
            '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
            'Downloading %s access token' % self._ITEM_TYPE)
        formats = self._extract_m3u8_formats(
            '%s/vod/%s?nauth=%s&nauthsig=%s'
            % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
            item_id, 'mp4')
        self._prefer_source(formats)
        info['formats'] = formats
        return info
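

# Base class for channel playlists: pages through the Kraken video list
# _PAGE_LIMIT entries at a time until an empty page is returned.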
class TwitchPlaylistBaseIE(TwitchBaseIE):
    _PLAYLIST_URL = '%s/kraken/channels/%%s/videos/?offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PAGE_LIMIT = 100

    def _extract_playlist(self, channel_id):
        info = self._download_json(
            '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
            channel_id, 'Downloading channel info JSON')
        channel_name = info.get('display_name') or info.get('name')
        entries = []
        offset = 0
        limit = self._PAGE_LIMIT
        for counter in itertools.count(1):
            response = self._download_json(
                self._PLAYLIST_URL % (channel_id, offset, limit),
                channel_id, 'Downloading %s videos JSON page %d' % (self._PLAYLIST_TYPE, counter))
            page_entries = self._extract_playlist_page(response)
            if not page_entries:
                break
            entries.extend(page_entries)
            offset += limit
        return self.playlist_result(
            [self.url_result(entry) for entry in set(entries)],
            channel_id, channel_name)

    def _extract_playlist_page(self, response):
        videos = response.get('videos')
        return [video['url'] for video in videos] if videos else []

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))


class TwitchProfileIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:profile'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_TYPE = 'profile'

    _TEST = {
        'url': 'http://www.twitch.tv/vanillatv/profile',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }


class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:past_broadcasts'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = TwitchPlaylistBaseIE._PLAYLIST_URL + '&broadcasts=true'
    _PLAYLIST_TYPE = 'past broadcasts'

    _TEST = {
        'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 54,
    }


class TwitchBookmarksIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:bookmarks'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/bookmarks/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = '%s/api/bookmark/?user=%%s&offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PLAYLIST_TYPE = 'bookmarks'

    _TEST = {
        'url': 'http://www.twitch.tv/ognos/profile/bookmarks',
        'info_dict': {
            'id': 'ognos',
            'title': 'Ognos',
        },
        'playlist_mincount': 3,
    }

    def _extract_playlist_page(self, response):
        entries = []
        for bookmark in response.get('bookmarks', []):
            video = bookmark.get('video')
            if not video:
                continue
            entries.append(video['url'])
        return entries
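

# Live streams: if the channel is offline, fall back to profile extraction;
# otherwise request a channel access token and extract HLS formats from usher.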
class TwitchStreamIE(TwitchBaseIE):
    IE_NAME = 'twitch:stream'
    _VALID_URL = r'%s/(?P<id>[^/]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE

    _TEST = {
        'url': 'http://www.twitch.tv/shroomztv',
        'info_dict': {
            'id': '12772022048',
            'display_id': 'shroomztv',
            'ext': 'mp4',
            'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
            'is_live': True,
            'timestamp': 1421928037,
            'upload_date': '20150122',
            'uploader': 'ShroomzTV',
            'uploader_id': 'shroomztv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        stream = self._download_json(
            '%s/kraken/streams/%s' % (self._API_BASE, channel_id), channel_id,
            'Downloading stream JSON').get('stream')

        # Fallback on profile extraction if stream is offline
        if not stream:
            return self.url_result(
                'http://www.twitch.tv/%s/profile' % channel_id,
                'TwitchProfile', channel_id)

        access_token = self._download_json(
            '%s/api/channels/%s/access_token' % (self._API_BASE, channel_id), channel_id,
            'Downloading channel access token')

        query = {
            'allow_source': 'true',
            'p': random.randint(1000000, 10000000),
            'player': 'twitchweb',
            'segment_preference': '4',
            'sig': access_token['sig'].encode('utf-8'),
            'token': access_token['token'].encode('utf-8'),
        }

        formats = self._extract_m3u8_formats(
            '%s/api/channel/hls/%s.m3u8?%s'
            % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)),
            channel_id, 'mp4')
        self._prefer_source(formats)

        view_count = stream.get('viewers')
        timestamp = parse_iso8601(stream.get('created_at'))

        channel = stream['channel']
        title = self._live_title(channel.get('display_name') or channel.get('name'))
        description = channel.get('status')

        thumbnails = []
        for thumbnail_key, thumbnail_url in stream['preview'].items():
            m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
            if not m:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int(m.group('width')),
                'height': int(m.group('height')),
            })

        return {
            'id': compat_str(stream['_id']),
            'display_id': channel_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'uploader': channel.get('display_name'),
            'uploader_id': channel.get('name'),
            'timestamp': timestamp,
            'view_count': view_count,
            'formats': formats,
            'is_live': True,
        }