# coding: utf-8
from __future__ import unicode_literals

import json
import random
import re

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_str,
)
from ..utils import (
    js_to_json,
    strip_jsonp,
    urlencode_postdata,
)


class WeiboIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?weibo\.com/[0-9]+/(?P<id>[a-zA-Z0-9]+)'
    _TEST = {
        'url': 'https://weibo.com/6275294458/Fp6RGfbff?type=comment',
        'info_dict': {
            'id': 'Fp6RGfbff',
            'ext': 'mp4',
            'title': 'You should have servants to massage you,... 来自Hosico_猫 - 微博',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # follow redirects so the final URL can be used as the Referer for
        # the genvisitor request
        webpage, urlh = self._download_webpage_handle(url, video_id)
        visitor_url = urlh.geturl()
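
        # first visits are redirected to passport.weibo.com; run Weibo's
        # visitor handshake there before the actual page can be fetched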
        if 'passport.weibo.com' in visitor_url:
            # first visit
            visitor_data = self._download_json(
                'https://passport.weibo.com/visitor/genvisitor', video_id,
                note='Generating first-visit data',
                transform_source=strip_jsonp,
                headers={'Referer': visitor_url},
                data=urlencode_postdata({
                    'cb': 'gen_callback',
                    'fp': json.dumps({
                        'os': '2',
                        'browser': 'Gecko57,0,0,0',
                        'fonts': 'undefined',
                        'screenInfo': '1440*900*24',
                        'plugins': '',
                    }),
                }))
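
            # the response carries a visitor token (tid) and a confidence
            # value; both are echoed back to the incarnate endpoint so that
            # the revisit below is served the actual page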
            tid = visitor_data['data']['tid']
            cnfd = '%03d' % visitor_data['data']['confidence']

            self._download_webpage(
                'https://passport.weibo.com/visitor/visitor', video_id,
                note='Running first-visit callback',
                query={
                    'a': 'incarnate',
                    't': tid,
                    'w': 2,
                    'c': cnfd,
                    'cb': 'cross_domain',
                    'from': 'weibo',
                    '_rand': random.random(),
                })

            webpage = self._download_webpage(
                url, video_id, note='Revisiting webpage')

        title = self._html_search_regex(
            r'<title>(.+?)</title>', webpage, 'title')
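
        # the available sources are embedded in a video-sources attribute as
        # a URL-encoded query string keyed by resolution height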
        video_formats = compat_parse_qs(self._search_regex(
            r'video-sources=\\\"(.+?)\"', webpage, 'video_sources'))

        formats = []
        supported_resolutions = (480, 720)
        for res in supported_resolutions:
            vid_urls = video_formats.get(compat_str(res))
            if not vid_urls or not isinstance(vid_urls, list):
                continue

            vid_url = vid_urls[0]
            formats.append({
                'url': vid_url,
                'height': res,
            })

        self._sort_formats(formats)

        uploader = self._og_search_property(
            'nick-name', webpage, 'uploader', default=None)

        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'formats': formats
        }


class WeiboMobileIE(InfoExtractor):
    _VALID_URL = r'https?://m\.weibo\.cn/status/(?P<id>[0-9]+)(\?.+)?'
    _TEST = {
        'url': 'https://m.weibo.cn/status/4189191225395228?wm=3333_2001&sourcetype=weixin&featurecode=newtitle&from=singlemessage&isappinstalled=0',
        'info_dict': {
            'id': '4189191225395228',
            'ext': 'mp4',
            'title': '午睡当然是要甜甜蜜蜜的啦',
            'uploader': '柴犬柴犬'
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            url, video_id, note='Visiting webpage')
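
        # the mobile page embeds the status as JSON in a $render_data
        # JavaScript variable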
        weibo_info = self._parse_json(self._search_regex(
            r'var\s+\$render_data\s*=\s*\[({.*})\]\[0\]\s*\|\|\s*{};',
            webpage, 'js_code', flags=re.DOTALL),
            video_id, transform_source=js_to_json)

        status_data = weibo_info.get('status', {})
        page_info = status_data.get('page_info')
        title = status_data['status_title']
        uploader = status_data.get('user', {}).get('screen_name')
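
        # the direct stream URL lives under page_info -> media_info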
        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'url': page_info['media_info']['stream_url']
        }