from __future__ import unicode_literals

import os
import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    str_to_int,
)
from ..aes import (
    aes_decrypt_text
)


class PornHubIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=(?P<id>[0-9a-f]+)'
    _TEST = {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '882f488fa1f0026f023f33576004a2ed',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'uploader': 'Babes',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'age_limit': 18,
        },
    }

    def _extract_count(self, pattern, webpage, name):
        # Counts are rendered with separators (e.g. "1,234");
        # str_to_int normalizes them to plain integers.
        count = self._html_search_regex(
            pattern, webpage, '%s count' % name, fatal=False)
        if count:
            count = str_to_int(count)
        return count

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Set the age-verification cookie up front so the page is served
        # without the age gate.
        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        error_msg = self._html_search_regex(
            r'(?s)<div class="userMessageSection[^"]*".*?>(.*?)</div>',
            webpage, 'error message', default=None)
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

        video_title = self._html_search_regex(
            r'<h1 [^>]+>([^<]+)', webpage, 'title')
        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)
        thumbnail = self._html_search_regex(
            r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
        if thumbnail:
            thumbnail = compat_urllib_parse.unquote(thumbnail)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All comments \(<var class="videoCommentCount">([\d,\.]+)</var>',
            webpage, 'comment')

        video_urls = list(map(compat_urllib_parse.unquote, re.findall(
            r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
        if webpage.find('"encrypted":true') != -1:
            # Encrypted video URLs are AES-decrypted using the video title
            # as the key.
            password = compat_urllib_parse.unquote_plus(self._html_search_regex(
                r'"video_title":"([^"]+)', webpage, 'password'))
            video_urls = list(map(
                lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'),
                video_urls))

        formats = []
        for video_url in video_urls:
            path = compat_urllib_parse_urlparse(video_url).path
            extension = os.path.splitext(path)[1][1:]
            # The format label (e.g. '480P-600K') is encoded in the URL path;
            # parse height and total bitrate out of it when possible.
            format = path.split('/')[5].split('_')[:2]
            format = '-'.join(format)
            m = re.match(r'^(?P<height>[0-9]+)P-(?P<tbr>[0-9]+)K$', format)
            if m is None:
                height = None
                tbr = None
            else:
                height = int(m.group('height'))
                tbr = int(m.group('tbr'))
            formats.append({
                'url': video_url,
                'ext': extension,
                'format': format,
                'format_id': format,
                'tbr': tbr,
                'height': height,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'uploader': video_uploader,
            'title': video_title,
            'thumbnail': thumbnail,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
        }