from __future__ import unicode_literals

import itertools
import json
import os
import re

from .common import InfoExtractor
from ..utils import (
    compat_str,
    ExtractorError,
    formatSeconds,
)


class JustinTVIE(InfoExtractor):
    """Information extractor for justin.tv and twitch.tv"""
    # TODO: One broadcast may be split into multiple videos. The key
    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
    # starts at 1 and increases. Can we treat all parts as one video?
    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
        (?:
            (?P<channelid>[^/]+)|
            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
        )
        /?(?:\#.*)?$
        """
    _JUSTIN_PAGE_LIMIT = 100
    IE_NAME = 'justin.tv'
    IE_DESC = 'justin.tv and twitch.tv'

    _TEST = {
        'url': 'http://www.twitch.tv/thegamedevhub/b/296128360',
        'md5': 'ecaa8a790c22a40770901460af191c9a',
        'info_dict': {
            'id': '296128360',
            'ext': 'flv',
            'upload_date': '20110927',
            'uploader_id': 25114803,
            'uploader': 'thegamedevhub',
            'title': 'Beginner Series - Scripting With Python Pt.1',
        },
    }

    # Returns (number of items on the page, list of *valid* items);
    # clips without a downloadable video_file_url are skipped.
    def _parse_page(self, url, video_id, counter):
        info_json = self._download_webpage(
            url, video_id,
            'Downloading video info JSON on page %d' % counter,
            'Unable to download video info JSON %d' % counter)

        response = json.loads(info_json)
        if not isinstance(response, list):
            error_text = response.get('error', 'unknown error')
            raise ExtractorError('Justin.tv API: %s' % error_text)
        info = []
        for clip in response:
            video_url = clip['video_file_url']
            if video_url:
                video_extension = os.path.splitext(video_url)[1][1:]
                video_date = re.sub('-', '', clip['start_time'][:10])
                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
                video_id = clip['id']
                video_title = clip.get('title', video_id)
                info.append({
                    'id': compat_str(video_id),
                    'url': video_url,
                    'title': video_title,
                    'uploader': clip.get('channel_name', video_uploader_id),
                    'uploader_id': video_uploader_id,
                    'upload_date': video_date,
                    'ext': video_extension,
                })
        return (len(response), info)
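
    # _VALID_URL accepts three URL shapes: a bare /<channel> page (a paged
    # archive listing), a /<channel>/b/<videoid> broadcast, and a
    # /<channel>/c/<chapterid> chapter; _real_extract branches on whichever
    # named group matched.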
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        api_base = 'http://api.justin.tv'
        paged = False
        if mobj.group('channelid'):
            paged = True
            video_id = mobj.group('channelid')
            api = api_base + '/channel/archives/%s.json' % video_id
        elif mobj.group('chapterid'):
            chapter_id = mobj.group('chapterid')

            webpage = self._download_webpage(url, chapter_id)
            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
            if not m:
                raise ExtractorError('Cannot find archive of a chapter')
            archive_id = m.group(1)
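
            # The chapter page embeds an archive id as PP.archive_id; it is
            # matched against the <archive> entries returned by the
            # by_chapter listing below to pick the right video file.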
            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
            doc = self._download_xml(
                api, chapter_id,
                note='Downloading chapter information',
                errnote='Chapter information download failed')
            for a in doc.findall('.//archive'):
                if archive_id == a.find('./id').text:
                    break
            else:
                raise ExtractorError('Could not find chapter in chapter information')

            video_url = a.find('./video_file_url').text
            video_ext = video_url.rpartition('.')[2] or 'flv'
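
            # Richer metadata (title, thumbnail, description, channel names)
            # is taken from the Twitch Kraken video endpoint rather than the
            # XML archive listing.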
            chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
            chapter_info = self._download_json(
                chapter_api_url, 'c' + chapter_id,
                note='Downloading chapter metadata',
                errnote='Download of chapter metadata failed')

            bracket_start = int(doc.find('.//bracket_start').text)
            bracket_end = int(doc.find('.//bracket_end').text)

            # TODO determine start (and probably fix up file)
            #   youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
            #video_url += '?start=' + TODO:start_timestamp
            # bracket_start is 13290, but we want 51670615
            self._downloader.report_warning(
                'Chapter detected, but we can just download the whole file. '
                'Chapter starts at %s and ends at %s' % (
                    formatSeconds(bracket_start), formatSeconds(bracket_end)))

            info = {
                'id': 'c' + chapter_id,
                'url': video_url,
                'ext': video_ext,
                'title': chapter_info['title'],
                'thumbnail': chapter_info['preview'],
                'description': chapter_info['description'],
                'uploader': chapter_info['channel']['display_name'],
                'uploader_id': chapter_info['channel']['name'],
            }
            return info
        else:
            video_id = mobj.group('videoid')
            api = api_base + '/broadcast/by_archive/%s.json' % video_id
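
        # For the non-paged broadcast case the loop below runs once; for
        # channel archives it keeps requesting pages of _JUSTIN_PAGE_LIMIT
        # clips until a short page signals the end of the listing.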
        entries = []
        offset = 0
        limit = self._JUSTIN_PAGE_LIMIT
        for counter in itertools.count(1):
            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
            page_count, page_info = self._parse_page(
                page_url, video_id, counter)
            entries.extend(page_info)
            if not paged or page_count != limit:
                break
            offset += limit
        return {
            '_type': 'playlist',
            'id': video_id,
            'entries': entries,
        }