from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_request,
)
from ..utils import (
    parse_duration,
    parse_iso8601,
    str_to_int,
)


class FourTubeIE(InfoExtractor):
    IE_NAME = '4tube'
    _VALID_URL = r'https?://(?:www\.)?4tube\.com/videos/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black',
        'md5': '6516c8ac63b03de06bc8eac14362db4f',
        'info_dict': {
            'id': '209733',
            'ext': 'mp4',
            'title': 'Hot Babe Holly Michaels gets her ass stuffed by black',
            'uploader': 'WCP Club',
            'uploader_id': 'wcp-club',
            'upload_date': '20131031',
            'timestamp': 1383263892,
            'duration': 583,
            'view_count': int,
            'like_count': int,
            'categories': list,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
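        # Basic metadata lives in the page's meta tags and the uploader avatar link.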
        title = self._html_search_meta('name', webpage)
        timestamp = parse_iso8601(self._html_search_meta(
            'uploadDate', webpage))
        thumbnail = self._html_search_meta('thumbnailUrl', webpage)
        uploader_id = self._html_search_regex(
            r'<a class="img-avatar" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">',
            webpage, 'uploader id')
        uploader = self._html_search_regex(
            r'<a class="img-avatar" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">',
            webpage, 'uploader')
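        # Categories are optional; extraction continues without them.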
        categories_html = self._search_regex(
            r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="list">(.*?)</ul>',
            webpage, 'categories', fatal=False)
        categories = None
        if categories_html:
            categories = [
                c.strip() for c in re.findall(
                    r'(?s)<li><a.*?>(.*?)</a>', categories_html)]
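        # View and like counts are exposed as schema.org interactionCount meta tags.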
        view_count = str_to_int(self._search_regex(
            r'<meta itemprop="interactionCount" content="UserPlays:([0-9,]+)">',
            webpage, 'view count', fatal=False))
        like_count = str_to_int(self._search_regex(
            r'<meta itemprop="interactionCount" content="UserLikes:([0-9,]+)">',
            webpage, 'like count', fatal=False))
        duration = parse_duration(self._html_search_meta('duration', webpage))
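        # The player bootstrap call carries the media id and the list of
        # available quality labels as its arguments.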
        params_js = self._search_regex(
            r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
            webpage, 'initialization parameters'
        )
        params = self._parse_json('[%s]' % params_js, video_id)
        media_id = params[0]
        sources = ['%s' % p for p in params[2]]
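        # POST to the token server, which returns a direct URL ("token") for
        # each requested quality.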
        token_url = 'http://tkn.4tube.com/{0}/desktop/{1}'.format(
            media_id, '+'.join(sources))
        headers = {
            b'Content-Type': b'application/x-www-form-urlencoded',
            b'Origin': b'http://www.4tube.com',
        }
        token_req = compat_urllib_request.Request(token_url, b'{}', headers)
        tokens = self._download_json(token_req, video_id)
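        # One format per advertised quality; the numeric label doubles as the
        # vertical resolution.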
        formats = [{
            'url': tokens[format]['token'],
            'format_id': format + 'p',
            'resolution': format + 'p',
            'quality': int(format),
        } for format in sources]

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'categories': categories,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'timestamp': timestamp,
            'like_count': like_count,
            'view_count': view_count,
            'duration': duration,
            'age_limit': 18,
        }