You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

92 lines
3.1 KiB

  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import datetime
  4. import re
  5. from .common import InfoExtractor
  6. from ..utils import (
  7. ExtractorError,
  8. )
  9. class GooglePlusIE(InfoExtractor):
  10. IE_DESC = 'Google Plus'
  11. _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
  12. IE_NAME = 'plus.google'
  13. _TEST = {
  14. 'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
  15. 'info_dict': {
  16. 'id': 'ZButuJc6CtH',
  17. 'ext': 'flv',
  18. 'upload_date': '20120613',
  19. 'uploader': '井上ヨシマサ',
  20. 'title': '嘆きの天使 降臨',
  21. }
  22. }
  23. def _real_extract(self, url):
  24. # Extract id from URL
  25. mobj = re.match(self._VALID_URL, url)
  26. video_id = mobj.group('id')
  27. # Step 1, Retrieve post webpage to extract further information
  28. webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
  29. self.report_extraction(video_id)
  30. # Extract update date
  31. upload_date = self._html_search_regex(
  32. r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
  33. ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
  34. webpage, 'upload date', fatal=False, flags=re.VERBOSE)
  35. if upload_date:
  36. # Convert timestring to a format suitable for filename
  37. upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
  38. upload_date = upload_date.strftime('%Y%m%d')
  39. # Extract uploader
  40. uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
  41. webpage, 'uploader', fatal=False)
  42. # Extract title
  43. # Get the first line for title
  44. video_title = self._og_search_description(webpage).splitlines()[0]
  45. # Step 2, Simulate clicking the image box to launch video
  46. DOMAIN = 'https://plus.google.com/'
  47. video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
  48. webpage, 'video page URL')
  49. if not video_page.startswith(DOMAIN):
  50. video_page = DOMAIN + video_page
  51. webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
  52. # Extract video links all sizes
  53. pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
  54. mobj = re.findall(pattern, webpage)
  55. if len(mobj) == 0:
  56. raise ExtractorError('Unable to extract video links')
  57. # Sort in resolution
  58. links = sorted(mobj)
  59. # Choose the lowest of the sort, i.e. highest resolution
  60. video_url = links[-1]
  61. # Only get the url. The resolution part in the tuple has no use anymore
  62. video_url = video_url[-1]
  63. # Treat escaped \u0026 style hex
  64. try:
  65. video_url = video_url.decode("unicode_escape")
  66. except AttributeError: # Python 3
  67. video_url = bytes(video_url, 'ascii').decode('unicode-escape')
  68. return {
  69. 'id': video_id,
  70. 'url': video_url,
  71. 'uploader': uploader,
  72. 'upload_date': upload_date,
  73. 'title': video_title,
  74. 'ext': 'flv',
  75. }