# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import ExtractorError


class TuneInIE(InfoExtractor):
    _VALID_URL = r'''(?x)https?://(?:www\.)?
    (?:
        tunein\.com/
        (?:
            radio/.*?-s|
            station/.*?StationId\=
        )(?P<id>[0-9]+)
        |tun\.in/(?P<redirect_id>[A-Za-z0-9]+)
    )
    '''
    _API_URL_TEMPLATE = 'http://tunein.com/tuner/tune/?stationId={0:}&tuneType=Station'

    _INFO_DICT = {
        'id': '34682',
        'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
        'ext': 'AAC',
        'thumbnail': 're:^https?://.*\.png$',
        'location': 'Tacoma, WA',
    }
    _TESTS = [
        {
            'url': 'http://tunein.com/radio/Jazz24-885-s34682/',
            'info_dict': _INFO_DICT,
            'params': {
                'skip_download': True,  # live stream
            },
        },
        {  # test redirection
            'url': 'http://tun.in/ser7s',
            'info_dict': _INFO_DICT,
            'params': {
                'skip_download': True,  # live stream
            },
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        redirect_id = mobj.group('redirect_id')
        if redirect_id:
            # The server doesn't support HEAD requests
            urlh = self._request_webpage(
                url, redirect_id, note='Downloading redirect page')
            url = urlh.geturl()
            self.to_screen('Following redirect: %s' % url)
            mobj = re.match(self._VALID_URL, url)

        station_id = mobj.group('id')

        station_info = self._download_json(
            self._API_URL_TEMPLATE.format(station_id),
            station_id, note='Downloading station JSON')

        title = station_info['Title']
        thumbnail = station_info.get('Logo')
        location = station_info.get('Location')
        streams_url = station_info.get('StreamUrl')
        if not streams_url:
            raise ExtractorError('No downloadable streams found',
                                 expected=True)

        stream_data = self._download_webpage(
            streams_url, station_id, note='Downloading stream data')
        # The stream list is served as a JSONP-style callback; strip the
        # wrapping parentheses before parsing the JSON payload.
        streams = json.loads(self._search_regex(
            r'\((.*)\);', stream_data, 'stream info'))['Streams']

        is_live = None
        formats = []
        for stream in streams:
            if stream.get('Type') == 'Live':
                is_live = True
            formats.append({
                'abr': stream.get('Bandwidth'),
                'ext': stream.get('MediaType'),
                'acodec': stream.get('MediaType'),
                'vcodec': 'none',
                'url': stream.get('Url'),
                # Sometimes streams with the highest quality do not exist
                'preference': stream.get('Reliability'),
            })
        self._sort_formats(formats)

        return {
            'id': station_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'location': location,
            'is_live': is_live,
        }
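
A minimal usage sketch, not part of the original file: assuming youtube-dl is installed and this extractor is registered under youtube_dl.extractor as usual, the station metadata can be fetched through the public YoutubeDL API (the URL is the first one from _TESTS above).

import youtube_dl

# Metadata-only run: extract_info(download=False) resolves the station and
# its stream formats without downloading the live stream itself.
with youtube_dl.YoutubeDL() as ydl:
    info = ydl.extract_info(
        'http://tunein.com/radio/Jazz24-885-s34682/', download=False)
    print(info['id'], info['title'], info.get('is_live'))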