You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

242 lines
8.6 KiB

11 years ago
  1. from __future__ import unicode_literals
  2. import re
  3. import json
  4. from .common import InfoExtractor
  5. from ..utils import (
  6. compat_str,
  7. compat_urllib_parse_urlparse,
  8. compat_urlparse,
  9. ExtractorError,
  10. find_xpath_attr,
  11. int_or_none,
  12. orderedSet,
  13. xpath_with_ns,
  14. )
  15. class LivestreamIE(InfoExtractor):
  16. IE_NAME = 'livestream'
  17. _VALID_URL = r'http://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$'
  18. _TESTS = [{
  19. 'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
  20. 'md5': '53274c76ba7754fb0e8d072716f2292b',
  21. 'info_dict': {
  22. 'id': '4719370',
  23. 'ext': 'mp4',
  24. 'title': 'Live from Webster Hall NYC',
  25. 'upload_date': '20121012',
  26. 'like_count': int,
  27. 'view_count': int,
  28. 'thumbnail': 're:^http://.*\.jpg$'
  29. }
  30. }, {
  31. 'url': 'http://new.livestream.com/tedx/cityenglish',
  32. 'info_dict': {
  33. 'title': 'TEDCity2.0 (English)',
  34. },
  35. 'playlist_mincount': 4,
  36. }]
  37. def _parse_smil(self, video_id, smil_url):
  38. formats = []
  39. _SWITCH_XPATH = (
  40. './/{http://www.w3.org/2001/SMIL20/Language}body/'
  41. '{http://www.w3.org/2001/SMIL20/Language}switch')
  42. smil_doc = self._download_xml(
  43. smil_url, video_id,
  44. note='Downloading SMIL information',
  45. errnote='Unable to download SMIL information',
  46. fatal=False)
  47. if smil_doc is False: # Download failed
  48. return formats
  49. title_node = find_xpath_attr(
  50. smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta',
  51. 'name', 'title')
  52. if title_node is None:
  53. self.report_warning('Cannot find SMIL id')
  54. switch_node = smil_doc.find(_SWITCH_XPATH)
  55. else:
  56. title_id = title_node.attrib['content']
  57. switch_node = find_xpath_attr(
  58. smil_doc, _SWITCH_XPATH, 'id', title_id)
  59. if switch_node is None:
  60. raise ExtractorError('Cannot find switch node')
  61. video_nodes = switch_node.findall(
  62. '{http://www.w3.org/2001/SMIL20/Language}video')
  63. for vn in video_nodes:
  64. tbr = int_or_none(vn.attrib.get('system-bitrate'))
  65. furl = (
  66. 'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145' %
  67. (vn.attrib['src']))
  68. if 'clipBegin' in vn.attrib:
  69. furl += '&ssek=' + vn.attrib['clipBegin']
  70. formats.append({
  71. 'url': furl,
  72. 'format_id': 'smil_%d' % tbr,
  73. 'ext': 'flv',
  74. 'tbr': tbr,
  75. 'preference': -1000,
  76. })
  77. return formats
  78. def _extract_video_info(self, video_data):
  79. video_id = compat_str(video_data['id'])
  80. FORMAT_KEYS = (
  81. ('sd', 'progressive_url'),
  82. ('hd', 'progressive_url_hd'),
  83. )
  84. formats = [{
  85. 'format_id': format_id,
  86. 'url': video_data[key],
  87. 'quality': i + 1,
  88. } for i, (format_id, key) in enumerate(FORMAT_KEYS)
  89. if video_data.get(key)]
  90. smil_url = video_data.get('smil_url')
  91. if smil_url:
  92. formats.extend(self._parse_smil(video_id, smil_url))
  93. self._sort_formats(formats)
  94. return {
  95. 'id': video_id,
  96. 'formats': formats,
  97. 'title': video_data['caption'],
  98. 'thumbnail': video_data.get('thumbnail_url'),
  99. 'upload_date': video_data['updated_at'].replace('-', '')[:8],
  100. 'like_count': video_data.get('likes', {}).get('total'),
  101. 'view_count': video_data.get('views'),
  102. }
  103. def _real_extract(self, url):
  104. mobj = re.match(self._VALID_URL, url)
  105. video_id = mobj.group('id')
  106. event_name = mobj.group('event_name')
  107. webpage = self._download_webpage(url, video_id or event_name)
  108. og_video = self._og_search_video_url(
  109. webpage, 'player url', fatal=False, default=None)
  110. if og_video is not None:
  111. query_str = compat_urllib_parse_urlparse(og_video).query
  112. query = compat_urlparse.parse_qs(query_str)
  113. if 'play_url' in query:
  114. api_url = query['play_url'][0].replace('.smil', '')
  115. info = json.loads(self._download_webpage(
  116. api_url, video_id, 'Downloading video info'))
  117. return self._extract_video_info(info)
  118. config_json = self._search_regex(
  119. r'window.config = ({.*?});', webpage, 'window config')
  120. info = json.loads(config_json)['event']
  121. def is_relevant(vdata, vid):
  122. result = vdata['type'] == 'video'
  123. if video_id is not None:
  124. result = result and compat_str(vdata['data']['id']) == vid
  125. return result
  126. videos = [self._extract_video_info(video_data['data'])
  127. for video_data in info['feed']['data']
  128. if is_relevant(video_data, video_id)]
  129. if video_id is None:
  130. # This is an event page:
  131. return self.playlist_result(videos, info['id'], info['full_name'])
  132. else:
  133. if not videos:
  134. raise ExtractorError('Cannot find video %s' % video_id)
  135. return videos[0]
  136. # The original version of Livestream uses a different system
  137. class LivestreamOriginalIE(InfoExtractor):
  138. IE_NAME = 'livestream:original'
  139. _VALID_URL = r'''(?x)https?://www\.livestream\.com/
  140. (?P<user>[^/]+)/(?P<type>video|folder)
  141. (?:\?.*?Id=|/)(?P<id>.*?)(&|$)
  142. '''
  143. _TESTS = [{
  144. 'url': 'http://www.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
  145. 'info_dict': {
  146. 'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
  147. 'ext': 'flv',
  148. 'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
  149. },
  150. 'params': {
  151. # rtmp
  152. 'skip_download': True,
  153. },
  154. }, {
  155. 'url': 'https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
  156. 'info_dict': {
  157. 'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
  158. },
  159. 'playlist_mincount': 4,
  160. }]
  161. def _extract_video(self, user, video_id):
  162. api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id)
  163. info = self._download_xml(api_url, video_id)
  164. item = info.find('channel').find('item')
  165. ns = {'media': 'http://search.yahoo.com/mrss'}
  166. thumbnail_url = item.find(xpath_with_ns('media:thumbnail', ns)).attrib['url']
  167. # Remove the extension and number from the path (like 1.jpg)
  168. path = self._search_regex(r'(user-files/.+)_.*?\.jpg$', thumbnail_url, 'path')
  169. return {
  170. 'id': video_id,
  171. 'title': item.find('title').text,
  172. 'url': 'rtmp://extondemand.livestream.com/ondemand',
  173. 'play_path': 'trans/dv15/mogulus-{0}'.format(path),
  174. 'player_url': 'http://static.livestream.com/chromelessPlayer/v21/playerapi.swf?hash=5uetk&v=0803&classid=D27CDB6E-AE6D-11cf-96B8-444553540000&jsEnabled=false&wmode=opaque',
  175. 'ext': 'flv',
  176. 'thumbnail': thumbnail_url,
  177. }
  178. def _extract_folder(self, url, folder_id):
  179. webpage = self._download_webpage(url, folder_id)
  180. paths = orderedSet(re.findall(
  181. r'''(?x)(?:
  182. <li\s+class="folder">\s*<a\s+href="|
  183. <a\s+href="(?=https?://livestre\.am/)
  184. )([^"]+)"''', webpage))
  185. return {
  186. '_type': 'playlist',
  187. 'id': folder_id,
  188. 'entries': [{
  189. '_type': 'url',
  190. 'url': compat_urlparse.urljoin(url, p),
  191. } for p in paths],
  192. }
  193. def _real_extract(self, url):
  194. mobj = re.match(self._VALID_URL, url)
  195. id = mobj.group('id')
  196. user = mobj.group('user')
  197. url_type = mobj.group('type')
  198. if url_type == 'folder':
  199. return self._extract_folder(url, id)
  200. else:
  201. return self._extract_video(user, id)
  202. # The server doesn't support HEAD request, the generic extractor can't detect
  203. # the redirection
  204. class LivestreamShortenerIE(InfoExtractor):
  205. IE_NAME = 'livestream:shortener'
  206. IE_DESC = False # Do not list
  207. _VALID_URL = r'https?://livestre\.am/(?P<id>.+)'
  208. def _real_extract(self, url):
  209. mobj = re.match(self._VALID_URL, url)
  210. id = mobj.group('id')
  211. webpage = self._download_webpage(url, id)
  212. return {
  213. '_type': 'url',
  214. 'url': self._og_search_url(webpage),
  215. }