# coding: utf-8
from __future__ import unicode_literals

import random
import time
import re

from .common import InfoExtractor
from ..utils import (
    strip_jsonp,
    unescapeHTML,
    js_to_json,
)
from ..compat import compat_urllib_request
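
# QQ Music (y.qq.com) extractors: single songs (QQMusicIE) plus
# playlist-style extractors for singer pages, albums and toplists, all of
# which resolve their entries to per-song QQMusicIE results.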


class QQMusicIE(InfoExtractor):
    IE_NAME = 'qqmusic'
    _VALID_URL = r'http://y.qq.com/#type=song&mid=(?P<id>[0-9A-Za-z]+)'
    _TESTS = [{
        'url': 'http://y.qq.com/#type=song&mid=004295Et37taLD',
        'md5': 'bed90b6db2a7a7a7e11bc585f471f63a',
        'info_dict': {
            'id': '004295Et37taLD',
            'ext': 'm4a',
            'title': '可惜没如果',
            'upload_date': '20141227',
            'creator': '林俊杰',
            'description': 'md5:d327722d0361576fde558f1ac68a7065',
        }
    }]

    # Reference: m_r_GetRUin() in top_player.js
    # http://imgcache.gtimg.cn/music/portal_v3/y/top_player.js
    @staticmethod
    def m_r_get_ruin():
        curMs = int(time.time() * 1000) % 1000
        return int(round(random.random() * 2147483647) * curMs % 1E10)
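
    # Extraction flow, as implemented below: download the GBK-encoded song
    # detail page, scrape metadata from its inline JavaScript, fetch a vkey
    # for a pseudo-random guid, then build the final M4A URL from mid, vkey
    # and guid.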
    def _real_extract(self, url):
        mid = self._match_id(url)

        detail_info_page = self._download_webpage(
            'http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songmid=%s&play=0' % mid,
            mid, note='Download song detail info',
            errnote='Unable to get song detail info', encoding='gbk')

        song_name = self._html_search_regex(
            r"songname:\s*'([^']+)'", detail_info_page, 'song name')

        publish_time = self._html_search_regex(
            r'发行时间:(\d{4}-\d{2}-\d{2})', detail_info_page,
            'publish time', default=None)
        if publish_time:
            publish_time = publish_time.replace('-', '')

        singer = self._html_search_regex(
            r"singer:\s*'([^']+)", detail_info_page, 'singer', default=None)

        lrc_content = self._html_search_regex(
            r'<div class="content" id="lrc_content"[^<>]*>([^<>]+)</div>',
            detail_info_page, 'LRC lyrics', default=None)
        if lrc_content:
            lrc_content = lrc_content.replace('\\n', '\n')

        guid = self.m_r_get_ruin()

        vkey = self._download_json(
            'http://base.music.qq.com/fcgi-bin/fcg_musicexpress.fcg?json=3&guid=%s' % guid,
            mid, note='Retrieve vkey', errnote='Unable to get vkey',
            transform_source=strip_jsonp)['key']
        song_url = 'http://cc.stream.qqmusic.qq.com/C200%s.m4a?vkey=%s&guid=%s&fromtag=0' % (mid, vkey, guid)

        return {
            'id': mid,
            'url': song_url,
            'title': song_name,
            'upload_date': publish_time,
            'creator': singer,
            'description': lrc_content,
        }


class QQPlaylistBaseIE(InfoExtractor):
    @staticmethod
    def qq_static_url(category, mid):
        return 'http://y.qq.com/y/static/%s/%s/%s/%s.html' % (category, mid[-2], mid[-1], mid)
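
    # Song rows in the static pages are HTML-escaped, pipe-separated
    # records inside class="data" elements; the song mid is the fifth
    # field from the end.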
    @classmethod
    def get_entries_from_page(cls, page):
        entries = []

        for item in re.findall(r'class="data"[^<>]*>([^<>]+)</', page):
            song_mid = unescapeHTML(item).split('|')[-5]
            entries.append(cls.url_result(
                'http://y.qq.com/#type=song&mid=' + song_mid, 'QQMusic',
                song_mid))

        return entries


class QQMusicSingerIE(QQPlaylistBaseIE):
    IE_NAME = 'qqmusic:singer'
    _VALID_URL = r'http://y.qq.com/#type=singer&mid=(?P<id>[0-9A-Za-z]+)'
    _TEST = {
        'url': 'http://y.qq.com/#type=singer&mid=001BLpXF2DyJe2',
        'info_dict': {
            'id': '001BLpXF2DyJe2',
            'title': '林俊杰',
            'description': 'md5:2a222d89ba4455a3af19940c0481bb78',
        },
        'playlist_count': 12,
    }
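
    # The singer's song list comes from a static HTML page; the longer
    # textual description, when a numeric singerid is present, is fetched
    # separately from an XML endpoint with a Referer header set.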
    def _real_extract(self, url):
        mid = self._match_id(url)

        singer_page = self._download_webpage(
            self.qq_static_url('singer', mid), mid, 'Download singer page')

        entries = self.get_entries_from_page(singer_page)

        singer_name = self._html_search_regex(
            r"singername\s*:\s*'([^']+)'", singer_page, 'singer name',
            default=None)

        singer_id = self._html_search_regex(
            r"singerid\s*:\s*'([0-9]+)'", singer_page, 'singer id',
            default=None)

        singer_desc = None

        if singer_id:
            req = compat_urllib_request.Request(
                'http://s.plcloud.music.qq.com/fcgi-bin/fcg_get_singer_desc.fcg?utf8=1&outCharset=utf-8&format=xml&singerid=%s' % singer_id)
            req.add_header(
                'Referer', 'http://s.plcloud.music.qq.com/xhr_proxy_utf8.html')
            singer_desc_page = self._download_xml(
                req, mid, 'Download singer description XML')

            singer_desc = singer_desc_page.find('./data/info/desc').text

        return self.playlist_result(entries, mid, singer_name, singer_desc)


class QQMusicAlbumIE(QQPlaylistBaseIE):
    IE_NAME = 'qqmusic:album'
    _VALID_URL = r'http://y.qq.com/#type=album&mid=(?P<id>[0-9A-Za-z]+)'
    _TEST = {
        'url': 'http://y.qq.com/#type=album&mid=000gXCTb2AhRR1&play=0',
        'info_dict': {
            'id': '000gXCTb2AhRR1',
            'title': '我们都是这样长大的',
            'description': 'md5:d216c55a2d4b3537fe4415b8767d74d6',
        },
        'playlist_count': 4,
    }

    def _real_extract(self, url):
        mid = self._match_id(url)

        album_page = self._download_webpage(
            self.qq_static_url('album', mid), mid, 'Download album page')

        entries = self.get_entries_from_page(album_page)

        album_name = self._html_search_regex(
            r"albumname\s*:\s*'([^']+)',", album_page, 'album name',
            default=None)

        album_detail = self._html_search_regex(
            r'<div class="album_detail close_detail">\s*<p>((?:[^<>]+(?:<br />)?)+)</p>',
            album_page, 'album details', default=None)

        return self.playlist_result(entries, mid, album_name, album_detail)


class QQMusicToplistIE(QQPlaylistBaseIE):
    IE_NAME = 'qqmusic:toplist'
    _VALID_URL = r'http://y\.qq\.com/#type=toplist&p=(?P<id>(top|global)_[0-9]+)'
    _TESTS = [{
        'url': 'http://y.qq.com/#type=toplist&p=global_12',
        'info_dict': {
            'id': 'global_12',
            'title': 'itunes榜',
        },
        'playlist_count': 10,
    }, {
        'url': 'http://y.qq.com/#type=toplist&p=top_6',
        'info_dict': {
            'id': 'top_6',
            'title': 'QQ音乐巅峰榜·欧美',
        },
        'playlist_count': 100,
    }, {
        'url': 'http://y.qq.com/#type=toplist&p=global_5',
        'info_dict': {
            'id': 'global_5',
            'title': '韩国mnet排行榜',
        },
        'playlist_count': 50,
    }]
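
    # Toplist data is served as JSONP of the form
    # MusicJsonCallback({...})/* trailing comment */; strip the wrapper and
    # let js_to_json normalize the remaining JavaScript object literal.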
    @staticmethod
    def strip_qq_jsonp(code):
        return js_to_json(re.sub(r'^MusicJsonCallback\((.*?)\)/\*.+?\*/$', r'\1', code))

    def _real_extract(self, url):
        list_id = self._match_id(url)

        list_type, num_id = list_id.split('_')

        list_page = self._download_webpage(
            'http://y.qq.com/y/static/toplist/index/%s.html' % list_id,
            list_id, 'Download toplist page')

        entries = []
        if list_type == 'top':
            jsonp_url = 'http://y.qq.com/y/static/toplist/json/top/%s/1.js' % num_id
        else:
            jsonp_url = 'http://y.qq.com/y/static/toplist/json/global/%s/1_1.js' % num_id

        toplist_json = self._download_json(
            jsonp_url, list_id, note='Retrieve toplist json',
            errnote='Unable to get toplist json',
            transform_source=self.strip_qq_jsonp)

        for song in toplist_json['l']:
            # Each entry's 's' value is a pipe-separated record; index 20
            # holds the song mid.
            s = song['s']
            song_mid = s.split('|')[20]
            entries.append(self.url_result(
                'http://y.qq.com/#type=song&mid=' + song_mid, 'QQMusic',
                song_mid))

        list_name = self._html_search_regex(
            r'<h2 id="top_name">([^\']+)</h2>', list_page, 'top list name',
            default=None)

        return self.playlist_result(entries, list_id, list_name)