# coding: utf-8
import datetime
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class GooglePlusIE(InfoExtractor):
    IE_DESC = u'Google Plus'
    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
    IE_NAME = u'plus.google'
    _TEST = {
        u"url": u"https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
        u"file": u"ZButuJc6CtH.flv",
        u"info_dict": {
            u"upload_date": u"20120613",
            u"uploader": u"井上ヨシマサ",
            u"title": u"嘆きの天使 降臨"
        }
    }

    def _real_extract(self, url):
        # Extract the video id from the URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        post_url = mobj.group(0)
        video_id = mobj.group(1)

        video_extension = 'flv'

        # Step 1: retrieve the post webpage to extract further information
        webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')

        self.report_extraction(video_id)

        # Extract the upload date
        upload_date = self._html_search_regex(
            ['title="Timestamp">(.*?)</a>', r'<a.+?class="g-M.+?>(.+?)</a>'],
            webpage, u'upload date', fatal=False)
        if upload_date:
            # Convert the time string to a format suitable for the filename
            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
            upload_date = upload_date.strftime('%Y%m%d')

        # Extract the uploader
        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
                                           webpage, u'uploader', fatal=False)

        # Extract the title: use the first line of the page description
        video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
                                              webpage, u'title', default=u'NA')

        # Step 2: simulate clicking the image box to reach the video page
        DOMAIN = 'https://plus.google.com/'
        video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
                                        webpage, u'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page

        webpage = self._download_webpage(video_page, video_id, u'Downloading video page')

        # Extract video links of all sizes from the video page
        pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
        mobj = re.findall(pattern, webpage)
        if len(mobj) == 0:
            raise ExtractorError(u'Unable to extract video links')

        # Sort the links and keep the last entry of the sort, which is
        # intended to be the highest-resolution variant
        links = sorted(mobj)
        video_url = links[-1]
        # Keep only the URL; the resolution part of the tuple is no longer needed
        video_url = video_url[-1]
        # Decode escaped \u0026-style sequences
        try:
            video_url = video_url.decode("unicode_escape")
        except AttributeError:  # Python 3
            video_url = bytes(video_url, 'ascii').decode('unicode-escape')

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': uploader,
            'upload_date': upload_date,
            'title': video_title,
            'ext': video_extension,
        }]
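
As a rough sketch of how an InfoExtractor like this is driven in practice: youtube-dl matches the URL against each extractor's _VALID_URL and then calls _real_extract() through its YoutubeDL front end. The snippet below assumes a youtube_dl installation that still ships GooglePlusIE and a reachable post URL (the one from _TEST); since Google+ has been shut down, treat it as illustrative only, not a working recipe.

    import youtube_dl

    # Ask for metadata only; the matching extractor is selected automatically
    # because the URL fits GooglePlusIE's _VALID_URL pattern.
    ydl = youtube_dl.YoutubeDL({'skip_download': True})
    result = ydl.extract_info(
        'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
        download=False)

    # Older extractors such as this one return a list of info dicts, which
    # YoutubeDL wraps in a playlist-like result; normalise before reading fields.
    entries = result.get('entries', [result]) if isinstance(result, dict) else result
    for entry in entries:
        print(entry.get('id'), entry.get('upload_date'), entry.get('title'))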