# encoding: utf-8
from __future__ import unicode_literals

import re
import json
import base64
import zlib

from hashlib import sha1
from math import pow, sqrt, floor

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    compat_urllib_parse,
    compat_urllib_request,
    bytes_to_intlist,
    intlist_to_bytes,
    unified_strdate,
    clean_html,
)
from ..aes import (
    aes_cbc_decrypt,
    inc,
)


class CrunchyrollIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
    _TEST = {
        'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
        #'md5': 'b1639fd6ddfaa43788c85f6d1dddd412',
        'info_dict': {
            'id': '645513',
            'ext': 'flv',
            'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
            'description': 'md5:2d17137920c64f2f49981a7797d275ef',
            'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
            'uploader': 'Yomiuri Telecasting Corporation (YTV)',
            'upload_date': '20131013',
        },
        'params': {
            # rtmp
            'skip_download': True,
        },
    }
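
    # Map the resolutions advertised on the watch page to Crunchyroll's internal
    # (video_encode_quality, video_format) identifiers used by the stream-info request.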
    _FORMAT_IDS = {
        '360': ('60', '106'),
        '480': ('61', '106'),
        '720': ('62', '106'),
        '1080': ('80', '108'),
    }
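
    # Subtitle scripts are delivered base64-encoded, AES-CBC encrypted and
    # zlib-compressed; the key is derived from the subtitle script id below.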
    def _decrypt_subtitles(self, data, iv, id):
        data = bytes_to_intlist(data)
        iv = bytes_to_intlist(iv)
        id = int(id)
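
        # Generate a Fibonacci-style byte sequence (each value reduced modulo
        # `modulo` and offset by 33) that serves as the key-derivation prefix.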
        def obfuscate_key_aux(count, modulo, start):
            output = list(start)
            for _ in range(count):
                output.append(output[-1] + output[-2])
            # cut off start values
            output = output[2:]
            output = list(map(lambda x: x % modulo + 33, output))
            return output
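
        # Mix the subtitle id with a magic constant, hash it together with the
        # generated prefix and zero-pad the SHA-1 digest to a 256-bit AES key.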
        def obfuscate_key(key):
            num1 = int(floor(pow(2, 25) * sqrt(6.9)))
            num2 = (num1 ^ key) << 5
            num3 = key ^ num1
            num4 = num3 ^ (num3 >> 3) ^ num2
            prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
            shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
            # Extend 160 Bit hash to 256 Bit
            return shaHash + [0] * 12

        key = obfuscate_key(id)

        class Counter:
            __value = iv

            def next_value(self):
                temp = self.__value
                self.__value = inc(self.__value)
                return temp

        decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
        return zlib.decompress(decrypted_data)
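
    # Convert Crunchyroll's XML subtitle events (start/end/text attributes)
    # into numbered SRT cues.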
    def _convert_subtitles_to_srt(self, subtitles):
        output = ''

        for i, (start, end, text) in enumerate(re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles), 1):
            start = start.replace('.', ',')
            end = end.replace('.', ',')
            text = clean_html(text)
            text = text.replace('\\N', '\n')
            if not text:
                continue
            output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
        return output

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        if mobj.group('prefix') == 'm':
            mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage')
            webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url')
        else:
            webpage_url = 'http://www.' + mobj.group('url')

        webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
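        # Trailer-only pages carry a notice instead of the full episode; surface its text as the error.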
        note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='')
        if note_m:
            raise ExtractorError(note_m)

        mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage)
        if mobj:
            msg = json.loads(mobj.group('msg'))
            if msg.get('type') == 'error':
                raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)

        video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
        video_title = re.sub(r' {2,}', ' ', video_title)
        video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
        if not video_description:
            video_description = None
        video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
        if video_upload_date:
            video_upload_date = unified_strdate(video_upload_date)
        video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL)
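
        # The player config URL points to an XML document that carries the media id
        # and the episode thumbnail.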
        playerdata_url = compat_urllib_parse.unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
        playerdata_req = compat_urllib_request.Request(playerdata_url)
        playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
        playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')

        stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, 'stream_id')
        video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, 'thumbnail', fatal=False)
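
        # Each ?pNNN=1 flag on the watch page advertises an available resolution;
        # request the RTMP stream info for every one of them.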
        formats = []
        for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
            stream_quality, stream_format = self._FORMAT_IDS[fmt]
            video_format = fmt + 'p'
            streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
            # urlencode doesn't work!
            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
            streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
            streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for ' + video_format)
            video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
            video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
            formats.append({
                'url': video_url,
                'play_path': video_play_path,
                'ext': 'flv',
                'format': video_format,
                'format_id': video_format,
            })
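
        # Subtitle scripts are listed as ?ssid=... links; fetch, decrypt and convert each to SRT.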
        subtitles = {}
        for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
                                              video_id, note='Downloading subtitles for ' + sub_name)
            id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
            iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
            data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
            if not id or not iv or not data:
                continue
            id = int(id)
            iv = base64.b64decode(iv)
            data = base64.b64decode(data)
            subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
            lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
            if not lang_code:
                continue
            subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)

        return {
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'thumbnail': video_thumbnail,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'subtitles': subtitles,
            'formats': formats,
        }