from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    float_or_none,
)
class RedditIE(InfoExtractor):
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '0a070c53eba7ec4534d95a5a1259e253',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        formats = self._extract_m3u8_formats(
            'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)

        formats.extend(self._extract_mpd_formats(
            'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,
            mpd_id='dash', fatal=False))

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_id,
            'formats': formats,
        }
class RedditRIE(InfoExtractor):
    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        url, video_id = mobj.group('url', 'id')

        # Appending /.json to a reddit comments URL returns the post metadata as JSON
        data = self._download_json(
            url + '/.json', video_id)[0]['data']['children'][0]['data']

        video_url = data['url']

        # Avoid recursing into the same reddit URL
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

        # over_18 is a tri-state value: True, False, or absent
        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnail': data.get('thumbnail'),
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }