# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    parse_filesize,
    str_to_int,
)


class SnotrIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?snotr\.com/video/(?P<id>\d+)/([\w]+)'
    _TESTS = [{
        'url': 'http://www.snotr.com/video/13708/Drone_flying_through_fireworks',
        'info_dict': {
            'id': '13708',
            'ext': 'mp4',
            'title': 'Drone flying through fireworks!',
            'duration': 248,
            'filesize_approx': 40700000,
            'description': 'A drone flying through Fourth of July Fireworks',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'expected_warnings': ['description'],
    }, {
        'url': 'http://www.snotr.com/video/530/David_Letteman_-_George_W_Bush_Top_10',
        'info_dict': {
            'id': '530',
            'ext': 'mp4',
            'title': 'David Letteman - George W. Bush Top 10',
            'duration': 126,
            'filesize_approx': 8500000,
            'description': 'The top 10 George W. Bush moments, brought to you by David Letterman!',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        # The numeric ID embedded in the URL identifies the video
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)

        # Formats come from the HTML5 <video> element on the page
        info_dict = self._parse_html5_media_entries(
            url, webpage, video_id, m3u8_entry_protocol='m3u8_native')[0]

        # View count, duration and approximate file size are scraped from the
        # page's "Views:", "Length:" and "Filesize:" labels
        view_count = str_to_int(self._html_search_regex(
            r'<p[^>]*>\s*<strong[^>]*>Views:</strong>\s*<span[^>]*>([\d,\.]+)',
            webpage, 'view count', fatal=False))

        duration = parse_duration(self._html_search_regex(
            r'<p[^>]*>\s*<strong[^>]*>Length:</strong>\s*<span[^>]*>([\d:]+)',
            webpage, 'duration', fatal=False))

        filesize_approx = parse_filesize(self._html_search_regex(
            r'<p[^>]*>\s*<strong[^>]*>Filesize:</strong>\s*<span[^>]*>([^<]+)',
            webpage, 'filesize', fatal=False))

        info_dict.update({
            'id': video_id,
            'description': description,
            'title': title,
            'view_count': view_count,
            'duration': duration,
            'filesize_approx': filesize_approx,
        })

        return info_dict
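
For quick manual testing, a minimal usage sketch (not part of snotr.py) might look like the following; it assumes youtube-dl is installed and importable as youtube_dl, and reuses one of the test URLs above with the standard YoutubeDL Python API:

# Hypothetical usage sketch: extract metadata only via the SnotrIE extractor.
from __future__ import unicode_literals

import youtube_dl

ydl_opts = {'skip_download': True}  # metadata only, no media download
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info(
        'http://www.snotr.com/video/13708/Drone_flying_through_fireworks',
        download=False)
    print(info.get('title'), info.get('duration'), info.get('view_count'))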