You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

201 lines
6.7 KiB

  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import time
  4. import hashlib
  5. import re
  6. from .common import InfoExtractor
  7. from ..utils import (
  8. ExtractorError,
  9. unescapeHTML,
  10. unified_strdate,
  11. urljoin,
  12. )
  13. class DouyuTVIE(InfoExtractor):
  14. IE_DESC = '斗鱼'
  15. _VALID_URL = r'https?://(?:www\.)?douyu(?:tv)?\.com/(?:[^/]+/)*(?P<id>[A-Za-z0-9]+)'
  16. _TESTS = [{
  17. 'url': 'http://www.douyutv.com/iseven',
  18. 'info_dict': {
  19. 'id': '17732',
  20. 'display_id': 'iseven',
  21. 'ext': 'flv',
  22. 'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
  23. 'description': r're:.*m7show@163\.com.*',
  24. 'thumbnail': r're:^https?://.*\.jpg$',
  25. 'uploader': '7师傅',
  26. 'is_live': True,
  27. },
  28. 'params': {
  29. 'skip_download': True,
  30. },
  31. }, {
  32. 'url': 'http://www.douyutv.com/85982',
  33. 'info_dict': {
  34. 'id': '85982',
  35. 'display_id': '85982',
  36. 'ext': 'flv',
  37. 'title': 're:^小漠从零单排记!——CSOL2躲猫猫 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
  38. 'description': 'md5:746a2f7a253966a06755a912f0acc0d2',
  39. 'thumbnail': r're:^https?://.*\.jpg$',
  40. 'uploader': 'douyu小漠',
  41. 'is_live': True,
  42. },
  43. 'params': {
  44. 'skip_download': True,
  45. },
  46. 'skip': 'Room not found',
  47. }, {
  48. 'url': 'http://www.douyutv.com/17732',
  49. 'info_dict': {
  50. 'id': '17732',
  51. 'display_id': '17732',
  52. 'ext': 'flv',
  53. 'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
  54. 'description': r're:.*m7show@163\.com.*',
  55. 'thumbnail': r're:^https?://.*\.jpg$',
  56. 'uploader': '7师傅',
  57. 'is_live': True,
  58. },
  59. 'params': {
  60. 'skip_download': True,
  61. },
  62. }, {
  63. 'url': 'http://www.douyu.com/xiaocang',
  64. 'only_matching': True,
  65. }, {
  66. # \"room_id\"
  67. 'url': 'http://www.douyu.com/t/lpl',
  68. 'only_matching': True,
  69. }]
  70. def _real_extract(self, url):
  71. video_id = self._match_id(url)
  72. if video_id.isdigit():
  73. room_id = video_id
  74. else:
  75. page = self._download_webpage(url, video_id)
  76. room_id = self._html_search_regex(
  77. r'"room_id\\?"\s*:\s*(\d+),', page, 'room id')
  78. # Grab metadata from mobile API
  79. room = self._download_json(
  80. 'http://m.douyu.com/html5/live?roomId=%s' % room_id, video_id,
  81. note='Downloading room info')['data']
  82. # 1 = live, 2 = offline
  83. if room.get('show_status') == '2':
  84. raise ExtractorError('Live stream is offline', expected=True)
  85. # Grab the URL from PC client API
  86. # The m3u8 url from mobile API requires re-authentication every 5 minutes
  87. tt = int(time.time())
  88. signContent = 'lapi/live/thirdPart/getPlay/%s?aid=pcclient&rate=0&time=%d9TUk5fjjUjg9qIMH3sdnh' % (room_id, tt)
  89. sign = hashlib.md5(signContent.encode('ascii')).hexdigest()
  90. video_url = self._download_json(
  91. 'http://coapi.douyucdn.cn/lapi/live/thirdPart/getPlay/' + room_id,
  92. video_id, note='Downloading video URL info',
  93. query={'rate': 0}, headers={
  94. 'auth': sign,
  95. 'time': str(tt),
  96. 'aid': 'pcclient'
  97. })['data']['live_url']
  98. title = self._live_title(unescapeHTML(room['room_name']))
  99. description = room.get('show_details')
  100. thumbnail = room.get('room_src')
  101. uploader = room.get('nickname')
  102. return {
  103. 'id': room_id,
  104. 'display_id': video_id,
  105. 'url': video_url,
  106. 'title': title,
  107. 'description': description,
  108. 'thumbnail': thumbnail,
  109. 'uploader': uploader,
  110. 'is_live': True,
  111. }
  112. class DouyuShowIE(InfoExtractor):
  113. _VALID_URL = r'https?://v(?:mobile)?\.douyu\.com/show/(?P<id>[0-9a-zA-Z]+)'
  114. _TESTS = [{
  115. 'url': 'https://v.douyu.com/show/rjNBdvnVXNzvE2yw',
  116. 'md5': '0c2cfd068ee2afe657801269b2d86214',
  117. 'info_dict': {
  118. 'id': 'rjNBdvnVXNzvE2yw',
  119. 'ext': 'mp4',
  120. 'title': '陈一发儿:砒霜 我有个室友系列!04-01 22点场',
  121. 'duration': 7150.08,
  122. 'thumbnail': r're:^https?://.*\.jpg$',
  123. 'uploader': '陈一发儿',
  124. 'uploader_id': 'XrZwYelr5wbK',
  125. 'uploader_url': 'https://v.douyu.com/author/XrZwYelr5wbK',
  126. 'upload_date': '20170402',
  127. },
  128. }, {
  129. 'url': 'https://vmobile.douyu.com/show/rjNBdvnVXNzvE2yw',
  130. 'only_matching': True,
  131. }]
  132. def _real_extract(self, url):
  133. url = url.replace('vmobile.', 'v.')
  134. video_id = self._match_id(url)
  135. webpage = self._download_webpage(url, video_id)
  136. room_info = self._parse_json(self._search_regex(
  137. r'var\s+\$ROOM\s*=\s*({.+});', webpage, 'room info'), video_id)
  138. video_info = None
  139. for trial in range(5):
  140. # Sometimes Douyu rejects our request. Let's try it more times
  141. try:
  142. video_info = self._download_json(
  143. 'https://vmobile.douyu.com/video/getInfo', video_id,
  144. query={'vid': video_id},
  145. headers={
  146. 'Referer': url,
  147. 'x-requested-with': 'XMLHttpRequest',
  148. })
  149. break
  150. except ExtractorError:
  151. self._sleep(1, video_id)
  152. if not video_info:
  153. raise ExtractorError('Can\'t fetch video info')
  154. formats = self._extract_m3u8_formats(
  155. video_info['data']['video_url'], video_id,
  156. entry_protocol='m3u8_native', ext='mp4')
  157. upload_date = unified_strdate(self._html_search_regex(
  158. r'<em>上传时间:</em><span>([^<]+)</span>', webpage,
  159. 'upload date', fatal=False))
  160. uploader = uploader_id = uploader_url = None
  161. mobj = re.search(
  162. r'(?m)<a[^>]+href="/author/([0-9a-zA-Z]+)".+?<strong[^>]+title="([^"]+)"',
  163. webpage)
  164. if mobj:
  165. uploader_id, uploader = mobj.groups()
  166. uploader_url = urljoin(url, '/author/' + uploader_id)
  167. return {
  168. 'id': video_id,
  169. 'title': room_info['name'],
  170. 'formats': formats,
  171. 'duration': room_info.get('duration'),
  172. 'thumbnail': room_info.get('pic'),
  173. 'upload_date': upload_date,
  174. 'uploader': uploader,
  175. 'uploader_id': uploader_id,
  176. 'uploader_url': uploader_url,
  177. }