# coding: utf-8

import datetime
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class GooglePlusIE(InfoExtractor):
    """Information extractor for plus.google.com."""

    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
    IE_NAME = u'plus.google'
    _TEST = {
        u"url": u"https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
        u"file": u"ZButuJc6CtH.flv",
        u"info_dict": {
            u"upload_date": u"20120613",
            u"uploader": u"井上ヨシマサ",
            u"title": u"嘆きの天使 降臨"
        }
    }

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        post_url = mobj.group(0)
        video_id = mobj.group(1)

        video_extension = 'flv'

        # Step 1, Retrieve post webpage to extract further information
        webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')

        self.report_extraction(video_id)

        # Extract upload date
        upload_date = self._html_search_regex('title="Timestamp">(.*?)</a>',
            webpage, u'upload date', fatal=False)
        if upload_date:
            # Convert timestring to a format suitable for filename
            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
            upload_date = upload_date.strftime('%Y%m%d')

        # Extract uploader
        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
            webpage, u'uploader', fatal=False)

        # Extract title
        # Get the first line for title
        video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
            webpage, 'title', default=u'NA')

        # Step 2, Simulate clicking the image box to launch video
        DOMAIN = 'https://plus.google.com'
        video_page = self._search_regex(r'<a href="((?:%s)?/photos/.*?)"' % re.escape(DOMAIN),
            webpage, u'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page

        webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
        # Extract video links of all sizes from the video page
        pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
        mobj = re.findall(pattern, webpage)
        if len(mobj) == 0:
            raise ExtractorError(u'Unable to extract video links')

        # Sort by resolution; the captured value is a string, so compare numerically
        links = sorted(mobj, key=lambda link: int(link[0]))

        # Take the last entry of the sorted list, i.e. the highest resolution
        video_url = links[-1]
        # Only keep the URL; the resolution part of the tuple is no longer needed
        video_url = video_url[-1]
        # Decode escaped \u0026-style sequences in the URL
        try:
            video_url = video_url.decode("unicode_escape")
        except AttributeError:  # Python 3
            video_url = bytes(video_url, 'ascii').decode('unicode-escape')

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': uploader,
            'upload_date': upload_date,
            'title': video_title,
            'ext': video_extension,
        }]
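

# Usage sketch (not part of the original file; an assumption about how this
# extractor is normally driven): the module is loaded by youtube-dl's extractor
# registry and cannot be run directly because of its relative imports. From a
# separate script, in an environment where the youtube_dl package is installed,
# it could be exercised through the public YoutubeDL API roughly like this
# (URL taken from _TEST above):
#
#     import youtube_dl
#
#     with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#         info = ydl.extract_info(
#             'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
#             download=False)
#         print(info.get('title'), info.get('url'))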