import json
import re

from .common import InfoExtractor
from ..utils import (
    compat_urlparse,
    get_meta_content,
)


class UstreamIE(InfoExtractor):
    _VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'
    IE_NAME = u'ustream'
    _TEST = {
        u'url': u'http://www.ustream.tv/recorded/20274954',
        u'file': u'20274954.flv',
        u'md5': u'088f151799e8f572f84eb62f17d73e5c',
        u'info_dict': {
            u"uploader": u"Young Americans for Liberty",
            u"title": u"Young Americans for Liberty February 7, 2012 2:28 AM",
        },
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        # Recorded videos are served as plain FLV files from the CDN.
        video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
        webpage = self._download_webpage(url, video_id)

        self.report_extraction(video_id)

        # Metadata is scraped straight from the recording page markup.
        video_title = self._html_search_regex(
            r'data-title="(?P<title>.+)"', webpage, u'title')
        uploader = self._html_search_regex(
            r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
            webpage, u'uploader', fatal=False, flags=re.DOTALL)
        thumbnail = self._html_search_regex(
            r'<link rel="image_src" href="(?P<thumb>.*?)"',
            webpage, u'thumbnail', fatal=False)

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
            'title': video_title,
            'uploader': uploader,
            'thumbnail': thumbnail,
        }
        return info


class UstreamChannelIE(InfoExtractor):
    _VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
    IE_NAME = u'ustream:channel'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        slug = m.group('slug')
        webpage = self._download_webpage(url, slug)
        channel_id = get_meta_content('ustream:channel_id', webpage)

        # Page through the channel's AJAX video listing; each JSON reply
        # carries an HTML fragment with the video IDs plus the next page URL.
        BASE = 'http://www.ustream.tv'
        next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
        video_ids = []
        while next_url:
            reply = json.loads(self._download_webpage(
                compat_urlparse.urljoin(BASE, next_url), channel_id))
            video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
            next_url = reply['nextUrl']

        # Hand each recorded video back to UstreamIE as a playlist entry.
        urls = ['http://www.ustream.tv/recorded/' + vid for vid in video_ids]
        url_entries = [self.url_result(eurl, 'Ustream') for eurl in urls]
        return self.playlist_result(url_entries, channel_id)