You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

97 lines
3.5 KiB

10 years ago
10 years ago
10 years ago
10 years ago
  1. from __future__ import unicode_literals
  2. import re
  3. from .common import InfoExtractor
  4. from ..utils import (
  5. int_or_none,
  6. js_to_json,
  7. mimetype2ext,
  8. ExtractorError,
  9. )
  10. class ImgurIE(InfoExtractor):
  11. _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?P<id>[a-zA-Z0-9]+)(?:\.mp4|\.gifv)?'
  12. _TESTS = [{
  13. 'url': 'https://i.imgur.com/A61SaA1.gifv',
  14. 'info_dict': {
  15. 'id': 'A61SaA1',
  16. 'ext': 'mp4',
  17. 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
  18. 'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
  19. },
  20. }, {
  21. 'url': 'https://imgur.com/A61SaA1',
  22. 'info_dict': {
  23. 'id': 'A61SaA1',
  24. 'ext': 'mp4',
  25. 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
  26. 'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
  27. },
  28. }]
  29. def _real_extract(self, url):
  30. video_id = self._match_id(url)
  31. webpage = self._download_webpage(url, video_id)
  32. width = int_or_none(self._search_regex(
  33. r'<param name="width" value="([0-9]+)"',
  34. webpage, 'width', fatal=False))
  35. height = int_or_none(self._search_regex(
  36. r'<param name="height" value="([0-9]+)"',
  37. webpage, 'height', fatal=False))
  38. video_elements = self._search_regex(
  39. r'(?s)<div class="video-elements">(.*?)</div>',
  40. webpage, 'video elements', default=None)
  41. if not video_elements:
  42. raise ExtractorError(
  43. 'No sources found for video %s. Maybe an image?' % video_id,
  44. expected=True)
  45. formats = []
  46. for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
  47. formats.append({
  48. 'format_id': m.group('type').partition('/')[2],
  49. 'url': self._proto_relative_url(m.group('src')),
  50. 'ext': mimetype2ext(m.group('type')),
  51. 'acodec': 'none',
  52. 'width': width,
  53. 'height': height,
  54. 'http_headers': {
  55. 'User-Agent': 'youtube-dl (like wget)',
  56. },
  57. })
  58. gif_json = self._search_regex(
  59. r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
  60. webpage, 'GIF code', fatal=False)
  61. if gif_json:
  62. gifd = self._parse_json(
  63. gif_json, video_id, transform_source=js_to_json)
  64. formats.append({
  65. 'format_id': 'gif',
  66. 'preference': -10,
  67. 'width': width,
  68. 'height': height,
  69. 'ext': 'gif',
  70. 'acodec': 'none',
  71. 'vcodec': 'gif',
  72. 'container': 'gif',
  73. 'url': self._proto_relative_url(gifd['gifUrl']),
  74. 'filesize': gifd.get('size'),
  75. 'http_headers': {
  76. 'User-Agent': 'youtube-dl (like wget)',
  77. },
  78. })
  79. self._sort_formats(formats)
  80. return {
  81. 'id': video_id,
  82. 'formats': formats,
  83. 'description': self._og_search_description(webpage),
  84. 'title': self._og_search_title(webpage),
  85. }