You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

172 lines
6.4 KiB

11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
  1. from __future__ import unicode_literals
  2. import itertools
  3. import json
  4. import re
  5. from .common import InfoExtractor, SearchInfoExtractor
  6. from ..utils import (
  7. compat_urllib_parse,
  8. compat_urlparse,
  9. clean_html,
  10. int_or_none,
  11. )
  12. class YahooIE(InfoExtractor):
  13. IE_DESC = 'Yahoo screen and movies'
  14. _VALID_URL = r'https?://(?:screen|movies)\.yahoo\.com/.*?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html'
  15. _TESTS = [
  16. {
  17. 'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
  18. 'md5': '4962b075c08be8690a922ee026d05e69',
  19. 'info_dict': {
  20. 'id': '214727115',
  21. 'ext': 'mp4',
  22. 'title': 'Julian Smith & Travis Legg Watch Julian Smith',
  23. 'description': 'Julian and Travis watch Julian Smith',
  24. },
  25. },
  26. {
  27. 'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
  28. 'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
  29. 'info_dict': {
  30. 'id': '103000935',
  31. 'ext': 'mp4',
  32. 'title': 'Codefellas - The Cougar Lies with Spanish Moss',
  33. 'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
  34. },
  35. },
  36. {
  37. 'url': 'https://movies.yahoo.com/video/world-loves-spider-man-190819223.html',
  38. 'md5': '410b7104aa9893b765bc22787a22f3d9',
  39. 'info_dict': {
  40. 'id': '516ed8e2-2c4f-339f-a211-7a8b49d30845',
  41. 'ext': 'mp4',
  42. 'title': 'The World Loves Spider-Man',
  43. 'description': '''People all over the world are celebrating the release of \"The Amazing Spider-Man 2.\" We're taking a look at the enthusiastic response Spider-Man has received from viewers all over the world.''',
  44. }
  45. }
  46. ]
  47. def _real_extract(self, url):
  48. mobj = re.match(self._VALID_URL, url)
  49. video_id = mobj.group('id')
  50. webpage = self._download_webpage(url, video_id)
  51. items_json = self._search_regex(
  52. r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
  53. default=None)
  54. if items_json is None:
  55. long_id = self._search_regex(
  56. r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
  57. webpage, 'content ID')
  58. video_id = long_id
  59. else:
  60. items = json.loads(items_json)
  61. info = items['mediaItems']['query']['results']['mediaObj'][0]
  62. # The 'meta' field is not always in the video webpage, we request it
  63. # from another page
  64. long_id = info['id']
  65. return self._get_info(long_id, video_id)
  66. def _get_info(self, long_id, video_id):
  67. query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
  68. ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="US"'
  69. ' AND protocol="http"' % long_id)
  70. data = compat_urllib_parse.urlencode({
  71. 'q': query,
  72. 'env': 'prod',
  73. 'format': 'json',
  74. })
  75. query_result = self._download_json(
  76. 'http://video.query.yahoo.com/v1/public/yql?' + data,
  77. video_id, 'Downloading video info')
  78. info = query_result['query']['results']['mediaObj'][0]
  79. meta = info['meta']
  80. formats = []
  81. for s in info['streams']:
  82. format_info = {
  83. 'width': int_or_none(s.get('width')),
  84. 'height': int_or_none(s.get('height')),
  85. 'tbr': int_or_none(s.get('bitrate')),
  86. }
  87. host = s['host']
  88. path = s['path']
  89. if host.startswith('rtmp'):
  90. format_info.update({
  91. 'url': host,
  92. 'play_path': path,
  93. 'ext': 'flv',
  94. })
  95. else:
  96. format_url = compat_urlparse.urljoin(host, path)
  97. format_info['url'] = format_url
  98. formats.append(format_info)
  99. self._sort_formats(formats)
  100. return {
  101. 'id': video_id,
  102. 'title': meta['title'],
  103. 'formats': formats,
  104. 'description': clean_html(meta['description']),
  105. 'thumbnail': meta['thumbnail'],
  106. }
  107. class YahooNewsIE(YahooIE):
  108. IE_NAME = 'yahoo:news'
  109. _VALID_URL = r'http://news\.yahoo\.com/video/.*?-(?P<id>\d*?)\.html'
  110. _TESTS = [{
  111. 'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
  112. 'md5': '67010fdf3a08d290e060a4dd96baa07b',
  113. 'info_dict': {
  114. 'id': '104538833',
  115. 'ext': 'mp4',
  116. 'title': 'China Moses Is Crazy About the Blues',
  117. 'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
  118. },
  119. }]
  120. def _real_extract(self, url):
  121. mobj = re.match(self._VALID_URL, url)
  122. video_id = mobj.group('id')
  123. webpage = self._download_webpage(url, video_id)
  124. long_id = self._search_regex(r'contentId: \'(.+?)\',', webpage, 'long id')
  125. return self._get_info(long_id, video_id)
  126. class YahooSearchIE(SearchInfoExtractor):
  127. IE_DESC = 'Yahoo screen search'
  128. _MAX_RESULTS = 1000
  129. IE_NAME = 'screen.yahoo:search'
  130. _SEARCH_KEY = 'yvsearch'
  131. def _get_n_results(self, query, n):
  132. """Get a specified number of results for a query"""
  133. entries = []
  134. for pagenum in itertools.count(0):
  135. result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
  136. info = self._download_json(result_url, query,
  137. note='Downloading results page '+str(pagenum+1))
  138. m = info['m']
  139. results = info['results']
  140. for (i, r) in enumerate(results):
  141. if (pagenum * 30) + i >= n:
  142. break
  143. mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
  144. e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
  145. entries.append(e)
  146. if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)):
  147. break
  148. return {
  149. '_type': 'playlist',
  150. 'id': query,
  151. 'entries': entries,
  152. }