You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

527 lines
18 KiB

10 years ago
  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import itertools
  4. import re
  5. import random
  6. from .common import InfoExtractor
  7. from ..compat import (
  8. compat_HTTPError,
  9. compat_parse_qs,
  10. compat_str,
  11. compat_urllib_parse_urlencode,
  12. compat_urllib_parse_urlparse,
  13. compat_urlparse,
  14. )
  15. from ..utils import (
  16. clean_html,
  17. ExtractorError,
  18. int_or_none,
  19. js_to_json,
  20. orderedSet,
  21. parse_duration,
  22. parse_iso8601,
  23. urlencode_postdata,
  24. )
  25. class TwitchBaseIE(InfoExtractor):
  26. _VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'
  27. _API_BASE = 'https://api.twitch.tv'
  28. _USHER_BASE = 'https://usher.ttvnw.net'
  29. _LOGIN_URL = 'http://www.twitch.tv/login'
  30. _NETRC_MACHINE = 'twitch'
  31. def _handle_error(self, response):
  32. if not isinstance(response, dict):
  33. return
  34. error = response.get('error')
  35. if error:
  36. raise ExtractorError(
  37. '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
  38. expected=True)
  39. def _call_api(self, path, item_id, note):
  40. headers = {
  41. 'Referer': 'http://api.twitch.tv/crossdomain/receiver.html?v=2',
  42. 'X-Requested-With': 'XMLHttpRequest',
  43. }
  44. for cookie in self._downloader.cookiejar:
  45. if cookie.name == 'api_token':
  46. headers['Twitch-Api-Token'] = cookie.value
  47. response = self._download_json(
  48. '%s/%s' % (self._API_BASE, path), item_id, note)
  49. self._handle_error(response)
  50. return response
  51. def _real_initialize(self):
  52. self._login()
  53. def _login(self):
  54. (username, password) = self._get_login_info()
  55. if username is None:
  56. return
  57. def fail(message):
  58. raise ExtractorError(
  59. 'Unable to login. Twitch said: %s' % message, expected=True)
  60. login_page, handle = self._download_webpage_handle(
  61. self._LOGIN_URL, None, 'Downloading login page')
  62. # Some TOR nodes and public proxies are blocked completely
  63. if 'blacklist_message' in login_page:
  64. fail(clean_html(login_page))
  65. login_form = self._hidden_inputs(login_page)
  66. login_form.update({
  67. 'username': username,
  68. 'password': password,
  69. })
  70. redirect_url = handle.geturl()
  71. post_url = self._search_regex(
  72. r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
  73. 'post url', default=redirect_url, group='url')
  74. if not post_url.startswith('http'):
  75. post_url = compat_urlparse.urljoin(redirect_url, post_url)
  76. headers = {'Referer': redirect_url}
  77. try:
  78. response = self._download_json(
  79. post_url, None, 'Logging in as %s' % username,
  80. data=urlencode_postdata(login_form),
  81. headers=headers)
  82. except ExtractorError as e:
  83. if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
  84. response = self._parse_json(
  85. e.cause.read().decode('utf-8'), None)
  86. fail(response['message'])
  87. raise
  88. if response.get('redirect'):
  89. self._download_webpage(
  90. response['redirect'], None, 'Downloading login redirect page',
  91. headers=headers)
  92. def _prefer_source(self, formats):
  93. try:
  94. source = next(f for f in formats if f['format_id'] == 'Source')
  95. source['preference'] = 10
  96. except StopIteration:
  97. pass # No Source stream present
  98. self._sort_formats(formats)
  99. class TwitchItemBaseIE(TwitchBaseIE):
  100. def _download_info(self, item, item_id):
  101. return self._extract_info(self._call_api(
  102. 'kraken/videos/%s%s' % (item, item_id), item_id,
  103. 'Downloading %s info JSON' % self._ITEM_TYPE))
  104. def _extract_media(self, item_id):
  105. info = self._download_info(self._ITEM_SHORTCUT, item_id)
  106. response = self._call_api(
  107. 'api/videos/%s%s' % (self._ITEM_SHORTCUT, item_id), item_id,
  108. 'Downloading %s playlist JSON' % self._ITEM_TYPE)
  109. entries = []
  110. chunks = response['chunks']
  111. qualities = list(chunks.keys())
  112. for num, fragment in enumerate(zip(*chunks.values()), start=1):
  113. formats = []
  114. for fmt_num, fragment_fmt in enumerate(fragment):
  115. format_id = qualities[fmt_num]
  116. fmt = {
  117. 'url': fragment_fmt['url'],
  118. 'format_id': format_id,
  119. 'quality': 1 if format_id == 'live' else 0,
  120. }
  121. m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
  122. if m:
  123. fmt['height'] = int(m.group('height'))
  124. formats.append(fmt)
  125. self._sort_formats(formats)
  126. entry = dict(info)
  127. entry['id'] = '%s_%d' % (entry['id'], num)
  128. entry['title'] = '%s part %d' % (entry['title'], num)
  129. entry['formats'] = formats
  130. entries.append(entry)
  131. return self.playlist_result(entries, info['id'], info['title'])
  132. def _extract_info(self, info):
  133. return {
  134. 'id': info['_id'],
  135. 'title': info.get('title') or 'Untitled Broadcast',
  136. 'description': info.get('description'),
  137. 'duration': int_or_none(info.get('length')),
  138. 'thumbnail': info.get('preview'),
  139. 'uploader': info.get('channel', {}).get('display_name'),
  140. 'uploader_id': info.get('channel', {}).get('name'),
  141. 'timestamp': parse_iso8601(info.get('recorded_at')),
  142. 'view_count': int_or_none(info.get('views')),
  143. }
  144. def _real_extract(self, url):
  145. return self._extract_media(self._match_id(url))
class TwitchVideoIE(TwitchItemBaseIE):
    """Legacy archived broadcast URLs (twitch.tv/<channel>/b/<id>)."""
    IE_NAME = 'twitch:video'
    _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'video'
    # Id prefix used by the Twitch API for this item type.
    _ITEM_SHORTCUT = 'a'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        'playlist_mincount': 12,
        'skip': 'HTTP Error 404: Not Found',
    }
class TwitchChapterIE(TwitchItemBaseIE):
    """Legacy chapter (highlight) URLs (twitch.tv/<channel>/c/<id>)."""
    IE_NAME = 'twitch:chapter'
    _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'chapter'
    # Id prefix used by the Twitch API for this item type.
    _ITEM_SHORTCUT = 'c'

    _TESTS = [{
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
        'skip': 'HTTP Error 404: Not Found',
    }, {
        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
        'only_matching': True,
    }]
  177. class TwitchVodIE(TwitchItemBaseIE):
  178. IE_NAME = 'twitch:vod'
  179. _VALID_URL = r'%s/[^/]+/v/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
  180. _ITEM_TYPE = 'vod'
  181. _ITEM_SHORTCUT = 'v'
  182. _TESTS = [{
  183. 'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
  184. 'info_dict': {
  185. 'id': 'v6528877',
  186. 'ext': 'mp4',
  187. 'title': 'LCK Summer Split - Week 6 Day 1',
  188. 'thumbnail': 're:^https?://.*\.jpg$',
  189. 'duration': 17208,
  190. 'timestamp': 1435131709,
  191. 'upload_date': '20150624',
  192. 'uploader': 'Riot Games',
  193. 'uploader_id': 'riotgames',
  194. 'view_count': int,
  195. 'start_time': 310,
  196. },
  197. 'params': {
  198. # m3u8 download
  199. 'skip_download': True,
  200. },
  201. }, {
  202. # Untitled broadcast (title is None)
  203. 'url': 'http://www.twitch.tv/belkao_o/v/11230755',
  204. 'info_dict': {
  205. 'id': 'v11230755',
  206. 'ext': 'mp4',
  207. 'title': 'Untitled Broadcast',
  208. 'thumbnail': 're:^https?://.*\.jpg$',
  209. 'duration': 1638,
  210. 'timestamp': 1439746708,
  211. 'upload_date': '20150816',
  212. 'uploader': 'BelkAO_o',
  213. 'uploader_id': 'belkao_o',
  214. 'view_count': int,
  215. },
  216. 'params': {
  217. # m3u8 download
  218. 'skip_download': True,
  219. },
  220. }]
  221. def _real_extract(self, url):
  222. item_id = self._match_id(url)
  223. info = self._download_info(self._ITEM_SHORTCUT, item_id)
  224. access_token = self._call_api(
  225. 'api/vods/%s/access_token' % item_id, item_id,
  226. 'Downloading %s access token' % self._ITEM_TYPE)
  227. formats = self._extract_m3u8_formats(
  228. '%s/vod/%s?%s' % (
  229. self._USHER_BASE, item_id,
  230. compat_urllib_parse_urlencode({
  231. 'allow_source': 'true',
  232. 'allow_audio_only': 'true',
  233. 'allow_spectre': 'true',
  234. 'player': 'twitchweb',
  235. 'nauth': access_token['token'],
  236. 'nauthsig': access_token['sig'],
  237. })),
  238. item_id, 'mp4', entry_protocol='m3u8_native')
  239. self._prefer_source(formats)
  240. info['formats'] = formats
  241. parsed_url = compat_urllib_parse_urlparse(url)
  242. query = compat_parse_qs(parsed_url.query)
  243. if 't' in query:
  244. info['start_time'] = parse_duration(query['t'][0])
  245. return info
class TwitchPlaylistBaseIE(TwitchBaseIE):
    """Base class for paged channel video playlists (profile, past broadcasts)."""
    # Endpoint template filled with (channel_id, offset, limit).
    _PLAYLIST_PATH = 'kraken/channels/%s/videos/?offset=%d&limit=%d'
    _PAGE_LIMIT = 100

    def _extract_playlist(self, channel_id):
        """Page through a channel's video list and return a playlist result."""
        info = self._call_api(
            'kraken/channels/%s' % channel_id,
            channel_id, 'Downloading channel info JSON')
        channel_name = info.get('display_name') or info.get('name')
        entries = []
        offset = 0
        limit = self._PAGE_LIMIT
        broken_paging_detected = False
        counter_override = None  # replaces the page counter in the log note
        for counter in itertools.count(1):
            response = self._call_api(
                self._PLAYLIST_PATH % (channel_id, offset, limit),
                channel_id,
                'Downloading %s videos JSON page %s'
                % (self._PLAYLIST_TYPE, counter_override or counter))
            page_entries = self._extract_playlist_page(response)
            if not page_entries:
                break
            total = int_or_none(response.get('_total'))
            # Since the beginning of March 2016 twitch's paging mechanism
            # is completely broken on the twitch side. It simply ignores
            # a limit and returns the whole offset number of videos.
            # Working around by just requesting all videos at once.
            # Upd: pagination bug was fixed by twitch on 15.03.2016.
            if not broken_paging_detected and total and len(page_entries) > limit:
                self.report_warning(
                    'Twitch pagination is broken on twitch side, requesting all videos at once',
                    channel_id)
                broken_paging_detected = True
                # Requesting with offset == total makes the server return
                # everything in the next (final) request.
                offset = total
                counter_override = '(all at once)'
                continue
            entries.extend(page_entries)
            # `or` binds looser than `and`: stop after the all-at-once request
            # or once the cumulative page covered all known videos.
            if broken_paging_detected or total and len(page_entries) >= total:
                break
            offset += limit
        # orderedSet deduplicates while preserving first-seen order.
        return self.playlist_result(
            [self.url_result(entry) for entry in orderedSet(entries)],
            channel_id, channel_name)

    def _extract_playlist_page(self, response):
        # Return the video URLs on this page; empty list ends the paging loop.
        videos = response.get('videos')
        return [video['url'] for video in videos] if videos else []

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))
class TwitchProfileIE(TwitchPlaylistBaseIE):
    """All videos on a channel's profile page (twitch.tv/<channel>/profile)."""
    IE_NAME = 'twitch:profile'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_TYPE = 'profile'

    _TEST = {
        'url': 'http://www.twitch.tv/vanillatv/profile',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }
class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
    """A channel's past broadcasts (profile/past_broadcasts)."""
    IE_NAME = 'twitch:past_broadcasts'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    # Same endpoint as the base playlist, restricted to broadcasts.
    _PLAYLIST_PATH = TwitchPlaylistBaseIE._PLAYLIST_PATH + '&broadcasts=true'
    _PLAYLIST_TYPE = 'past broadcasts'

    _TEST = {
        'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 54,
    }
  319. class TwitchStreamIE(TwitchBaseIE):
  320. IE_NAME = 'twitch:stream'
  321. _VALID_URL = r'%s/(?P<id>[^/#?]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
  322. _TESTS = [{
  323. 'url': 'http://www.twitch.tv/shroomztv',
  324. 'info_dict': {
  325. 'id': '12772022048',
  326. 'display_id': 'shroomztv',
  327. 'ext': 'mp4',
  328. 'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
  329. 'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
  330. 'is_live': True,
  331. 'timestamp': 1421928037,
  332. 'upload_date': '20150122',
  333. 'uploader': 'ShroomzTV',
  334. 'uploader_id': 'shroomztv',
  335. 'view_count': int,
  336. },
  337. 'params': {
  338. # m3u8 download
  339. 'skip_download': True,
  340. },
  341. }, {
  342. 'url': 'http://www.twitch.tv/miracle_doto#profile-0',
  343. 'only_matching': True,
  344. }]
  345. def _real_extract(self, url):
  346. channel_id = self._match_id(url)
  347. stream = self._call_api(
  348. 'kraken/streams/%s' % channel_id, channel_id,
  349. 'Downloading stream JSON').get('stream')
  350. # Fallback on profile extraction if stream is offline
  351. if not stream:
  352. return self.url_result(
  353. 'http://www.twitch.tv/%s/profile' % channel_id,
  354. 'TwitchProfile', channel_id)
  355. # Channel name may be typed if different case than the original channel name
  356. # (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON) that will lead to constructing
  357. # an invalid m3u8 URL. Working around by use of original channel name from stream
  358. # JSON and fallback to lowercase if it's not available.
  359. channel_id = stream.get('channel', {}).get('name') or channel_id.lower()
  360. access_token = self._call_api(
  361. 'api/channels/%s/access_token' % channel_id, channel_id,
  362. 'Downloading channel access token')
  363. query = {
  364. 'allow_source': 'true',
  365. 'allow_audio_only': 'true',
  366. 'p': random.randint(1000000, 10000000),
  367. 'player': 'twitchweb',
  368. 'segment_preference': '4',
  369. 'sig': access_token['sig'].encode('utf-8'),
  370. 'token': access_token['token'].encode('utf-8'),
  371. }
  372. formats = self._extract_m3u8_formats(
  373. '%s/api/channel/hls/%s.m3u8?%s'
  374. % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
  375. channel_id, 'mp4')
  376. self._prefer_source(formats)
  377. view_count = stream.get('viewers')
  378. timestamp = parse_iso8601(stream.get('created_at'))
  379. channel = stream['channel']
  380. title = self._live_title(channel.get('display_name') or channel.get('name'))
  381. description = channel.get('status')
  382. thumbnails = []
  383. for thumbnail_key, thumbnail_url in stream['preview'].items():
  384. m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
  385. if not m:
  386. continue
  387. thumbnails.append({
  388. 'url': thumbnail_url,
  389. 'width': int(m.group('width')),
  390. 'height': int(m.group('height')),
  391. })
  392. return {
  393. 'id': compat_str(stream['_id']),
  394. 'display_id': channel_id,
  395. 'title': title,
  396. 'description': description,
  397. 'thumbnails': thumbnails,
  398. 'uploader': channel.get('display_name'),
  399. 'uploader_id': channel.get('name'),
  400. 'timestamp': timestamp,
  401. 'view_count': view_count,
  402. 'formats': formats,
  403. 'is_live': True,
  404. }
  405. class TwitchClipsIE(InfoExtractor):
  406. IE_NAME = 'twitch:clips'
  407. _VALID_URL = r'https?://clips\.twitch\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
  408. _TESTS = [{
  409. 'url': 'https://clips.twitch.tv/ea/AggressiveCobraPoooound',
  410. 'md5': '761769e1eafce0ffebfb4089cb3847cd',
  411. 'info_dict': {
  412. 'id': 'AggressiveCobraPoooound',
  413. 'ext': 'mp4',
  414. 'title': 'EA Play 2016 Live from the Novo Theatre',
  415. 'thumbnail': 're:^https?://.*\.jpg',
  416. 'creator': 'EA',
  417. 'uploader': 'stereotype_',
  418. 'uploader_id': 'stereotype_',
  419. },
  420. }, {
  421. # multiple formats
  422. 'url': 'https://clips.twitch.tv/rflegendary/UninterestedBeeDAESuppy',
  423. 'only_matching': True,
  424. }]
  425. def _real_extract(self, url):
  426. video_id = self._match_id(url)
  427. webpage = self._download_webpage(url, video_id)
  428. clip = self._parse_json(
  429. self._search_regex(
  430. r'(?s)clipInfo\s*=\s*({.+?});', webpage, 'clip info'),
  431. video_id, transform_source=js_to_json)
  432. title = clip.get('channel_title') or self._og_search_title(webpage)
  433. formats = [{
  434. 'url': option['source'],
  435. 'format_id': option.get('quality'),
  436. 'height': int_or_none(option.get('quality')),
  437. } for option in clip.get('quality_options', []) if option.get('source')]
  438. if not formats:
  439. formats = [{
  440. 'url': clip['clip_video_url'],
  441. }]
  442. self._sort_formats(formats)
  443. return {
  444. 'id': video_id,
  445. 'title': title,
  446. 'thumbnail': self._og_search_thumbnail(webpage),
  447. 'creator': clip.get('broadcaster_display_name') or clip.get('broadcaster_login'),
  448. 'uploader': clip.get('curator_login'),
  449. 'uploader_id': clip.get('curator_display_name'),
  450. 'formats': formats,
  451. }