You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

149 lines
5.6 KiB

  1. from __future__ import unicode_literals
  2. import re
  3. import itertools
  4. from .common import InfoExtractor
  5. from ..compat import (
  6. compat_urllib_parse,
  7. )
  8. from ..utils import (
  9. ExtractorError,
  10. HEADRequest,
  11. str_to_int,
  12. )
class MixcloudIE(InfoExtractor):
    """Extractor for mixcloud.com cloudcast (mix) pages.

    The page only embeds a short preview file; the full-length file lives
    at a parallel path on one of several numbered "stream" servers, so
    extraction rewrites the preview URL into a template and probes
    candidate servers with HEAD requests until one responds.
    """

    # Group 1: uploader slug, group 2: cloudcast slug.
    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([^/]+)/([^/]+)'
    IE_NAME = 'mixcloud'

    _TESTS = [{
        'url': 'http://www.mixcloud.com/dholbach/cryptkeeper/',
        'info_dict': {
            'id': 'dholbach-cryptkeeper',
            'ext': 'mp3',
            'title': 'Cryptkeeper',
            'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
            'uploader': 'Daniel Holbach',
            'uploader_id': 'dholbach',
            'thumbnail': 're:https?://.*\.jpg',
            'view_count': int,
            'like_count': int,
        },
    }, {
        'url': 'http://www.mixcloud.com/gillespeterson/caribou-7-inch-vinyl-mix-chat/',
        'info_dict': {
            'id': 'gillespeterson-caribou-7-inch-vinyl-mix-chat',
            'ext': 'mp3',
            'title': 'Caribou 7 inch Vinyl Mix & Chat',
            'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
            'uploader': 'Gilles Peterson Worldwide',
            'uploader_id': 'gillespeterson',
            'thumbnail': 're:https?://.*/images/',
            'view_count': int,
            'like_count': int,
        },
    }]

    def _get_url(self, track_id, template_url, server_number):
        """Probe stream servers and return the first URL that answers.

        template_url must contain a single '%d' placeholder for the server
        number.  Servers are tried in the order produced by
        server_numbers(), i.e. starting near server_number and fanning
        outwards.  Returns the working URL, or None if no server within
        the boundaries responds.
        """
        boundaries = (1, 30)  # inclusive range of server numbers to try
        for nr in server_numbers(server_number, boundaries):
            url = template_url % nr
            try:
                # We only want to know if the request succeeds;
                # don't download the whole file.
                self._request_webpage(
                    HEADRequest(url), track_id,
                    'Checking URL %d/%d ...' % (nr, boundaries[-1]))
                return url
            except ExtractorError:
                # This server doesn't have the file; try the next one.
                pass
        return None

    def _real_extract(self, url):
        """Return the info dict for the cloudcast at *url*."""
        mobj = re.match(self._VALID_URL, url)
        uploader = mobj.group(1)
        cloudcast_name = mobj.group(2)
        # e.g. 'dholbach-cryptkeeper'; unquote in case the slugs were URL-encoded.
        track_id = compat_urllib_parse.unquote('-'.join((uploader, cloudcast_name)))

        webpage = self._download_webpage(url, track_id)

        preview_url = self._search_regex(
            r'\s(?:data-preview-url|m-preview)="([^"]+)"', webpage, 'preview url')
        # The full-length file mirrors the preview path under /c/originals/.
        song_url = preview_url.replace('/previews/', '/c/originals/')
        # Numeric server id embedded in the URL, e.g. 'stream7'.
        server_number = int(self._search_regex(r'stream(\d+)', song_url, 'server number'))
        # Turn the concrete server number into a '%d' placeholder so
        # _get_url can substitute other candidate servers.
        template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)
        final_song_url = self._get_url(track_id, template_url, server_number)
        if final_song_url is None:
            # No server had the mp3; fall back to the m4a variant, which is
            # hosted under a different path.
            self.to_screen('Trying with m4a extension')
            template_url = template_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')
            final_song_url = self._get_url(track_id, template_url, server_number)
            if final_song_url is None:
                raise ExtractorError('Unable to extract track url')

        # The metadata attributes sit on the same m-play-on-spacebar element;
        # PREFIX anchors each attribute regex to that element.
        PREFIX = (
            r'm-play-on-spacebar[^>]+'
            r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+')
        title = self._html_search_regex(
            PREFIX + r'm-title="([^"]+)"', webpage, 'title')
        thumbnail = self._proto_relative_url(self._html_search_regex(
            PREFIX + r'm-thumbnail-url="([^"]+)"', webpage, 'thumbnail',
            fatal=False))
        uploader = self._html_search_regex(
            PREFIX + r'm-owner-name="([^"]+)"',
            webpage, 'uploader', fatal=False)
        uploader_id = self._search_regex(
            r'\s+"profile": "([^"]+)",', webpage, 'uploader id', fatal=False)
        description = self._og_search_description(webpage)
        like_count = str_to_int(self._search_regex(
            r'\bbutton-favorite\b.+m-ajax-toggle-count="([^"]+)"',
            webpage, 'like count', fatal=False))
        view_count = str_to_int(self._search_regex(
            [r'<meta itemprop="interactionCount" content="UserPlays:([0-9]+)"',
             r'/listeners/?">([0-9,.]+)</a>'],
            webpage, 'play count', fatal=False))

        return {
            'id': track_id,
            'title': title,
            'url': final_song_url,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'like_count': like_count,
        }
  107. def server_numbers(first, boundaries):
  108. """ Server numbers to try in descending order of probable availability.
  109. Starting from first (i.e. the number of the server hosting the preview file)
  110. and going further and further up to the higher boundary and down to the
  111. lower one in an alternating fashion. Namely:
  112. server_numbers(2, (1, 5))
  113. # Where the preview server is 2, min number is 1 and max is 5.
  114. # Yields: 2, 3, 1, 4, 5
  115. Why not random numbers or increasing sequences? Since from what I've seen,
  116. full length files seem to be hosted on servers whose number is closer to
  117. that of the preview; to be confirmed.
  118. """
  119. zip_longest = getattr(itertools, 'zip_longest', None)
  120. if zip_longest is None:
  121. # python 2.x
  122. zip_longest = itertools.izip_longest
  123. if len(boundaries) != 2:
  124. raise ValueError("boundaries should be a two-element tuple")
  125. min, max = boundaries
  126. highs = range(first + 1, max + 1)
  127. lows = range(first - 1, min - 1, -1)
  128. rest = filter(
  129. None, itertools.chain.from_iterable(zip_longest(highs, lows)))
  130. yield first
  131. for n in rest:
  132. yield n