# coding: utf-8
from __future__ import unicode_literals

import itertools
# import os
import re

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    # compat_urllib_parse_unquote,
    # compat_urllib_parse_unquote_plus,
    # compat_urllib_parse_urlparse,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    js_to_json,
    orderedSet,
    # sanitized_Request,
    str_to_int,
)
# from ..aes import (
#     aes_decrypt_text
# )


class PornHubIE(InfoExtractor):
    IE_DESC = 'PornHub and Thumbzilla'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)|
                            (?:www\.)?thumbzilla\.com/video/
                        )
                        (?P<id>[\da-z]+)
                    '''
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '1e19b41231a02eba417839222ac9d58e',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'uploader': 'Babes',
            'duration': 361,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
    }, {
        # non-ASCII title
        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
        'info_dict': {
            'id': '1331683002',
            'ext': 'mp4',
            'title': '重庆婷婷女王足交',
            'uploader': 'cj397186295',
            'duration': 1753,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        # removed at the request of cam4.com
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }, {
        # removed at the request of the copyright owner
        'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
        'only_matching': True,
    }, {
        # removed by uploader
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
        'only_matching': True,
    }, {
        # private video
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
        'only_matching': True,
    }, {
        'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
        'only_matching': True,
    }]
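
    # Used by the generic extractor to discover PornHub players embedded
    # in third-party pages via <iframe> tags.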
    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/[\da-z]+)',
            webpage)

    def _extract_count(self, pattern, webpage, name):
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))
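
    # Overall flow: the desktop ('pc') page supplies metadata and error
    # messages, while the 'tv' page supplies the direct video URL.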
    def _real_extract(self, url):
        video_id = self._match_id(url)

        def dl_webpage(platform):
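            # age_verified=1 skips the age gate; platform picks which site
            # layout ('pc' or 'tv') the server renders.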
            return self._download_webpage(
                'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id,
                video_id, headers={
                    'Cookie': 'age_verified=1; platform=%s' % platform,
                })

        webpage = dl_webpage('pc')
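
        # Unavailable videos (removed, private, etc.) are reported in a
        # message <div> rather than via an HTTP error code.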
        error_msg = self._html_search_regex(
            r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
            webpage, 'error message', default=None, group='error')
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)
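
        # The TV layout embeds a plain <video> tag with a direct src, which
        # is simpler to scrape than the script-driven desktop player.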
        tv_webpage = dl_webpage('tv')

        video_url = self._search_regex(
            r'<video[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//.+?)\1', tv_webpage,
            'video url', group='url')

        title = self._search_regex(
            r'<h1>([^>]+)</h1>', tv_webpage, 'title', default=None)

        # video_title from flashvars contains whitespace instead of non-ASCII (see
        # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
        # on that anymore.
        title = title or self._html_search_meta(
            'twitter:title', webpage, default=None) or self._search_regex(
            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
            webpage, 'title', group='title')
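
        # flashvars_<id> is a JS object literal carrying player metadata
        # such as the thumbnail URL and the duration.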
        flashvars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
            video_id)
        if flashvars:
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
        else:
            # Don't clobber the title extracted above when flashvars is absent.
            thumbnail = duration = None

        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
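
        # Legacy flashvars-based format extraction, disabled (kept in a
        # string literal); the commented-out imports at the top belong to it.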
  154. """
  155. video_variables = {}
  156. for video_variablename, quote, video_variable in re.findall(
  157. r'(player_quality_[0-9]{3,4}p\w+)\s*=\s*(["\'])(.+?)\2;', webpage):
  158. video_variables[video_variablename] = video_variable
  159. video_urls = []
  160. for encoded_video_url in re.findall(
  161. r'player_quality_[0-9]{3,4}p\s*=(.+?);', webpage):
  162. for varname, varval in video_variables.items():
  163. encoded_video_url = encoded_video_url.replace(varname, varval)
  164. video_urls.append(re.sub(r'[\s+]', '', encoded_video_url))
  165. if webpage.find('"encrypted":true') != -1:
  166. password = compat_urllib_parse_unquote_plus(
  167. self._search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
  168. video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
  169. formats = []
  170. for video_url in video_urls:
  171. path = compat_urllib_parse_urlparse(video_url).path
  172. extension = os.path.splitext(path)[1][1:]
  173. format = path.split('/')[5].split('_')[:2]
  174. format = '-'.join(format)
  175. m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
  176. if m is None:
  177. height = None
  178. tbr = None
  179. else:
  180. height = int(m.group('height'))
  181. tbr = int(m.group('tbr'))
  182. formats.append({
  183. 'url': video_url,
  184. 'ext': extension,
  185. 'format': format,
  186. 'format_id': format,
  187. 'tbr': tbr,
  188. 'height': height,
  189. })
  190. self._sort_formats(formats)
  191. """
        page_params = self._parse_json(self._search_regex(
            r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
            webpage, 'page parameters', group='data', default='{}'),
            video_id, transform_source=js_to_json, fatal=False)

        tags = categories = None
        if page_params:
            tags = page_params.get('tags', '').split(',')
            categories = page_params.get('categories', '').split(',')

        return {
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            # 'formats': formats,
            'age_limit': 18,
            'tags': tags,
            'categories': categories,
        }


class PornHubPlaylistBaseIE(InfoExtractor):
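    # orderedSet deduplicates repeated links while preserving their
    # on-page order.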
    def _extract_entries(self, webpage):
        return [
            self.url_result(
                'http://www.pornhub.com/%s' % video_url,
                PornHubIE.ie_key(), video_title=title)
            for video_url, title in orderedSet(re.findall(
                r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
                webpage))
        ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        # Only process container div with main playlist content skipping
        # drop-down menu that uses similar pattern for videos (see
        # https://github.com/rg3/youtube-dl/issues/11594).
        container = self._search_regex(
            r'(?s)(<div[^>]+class=["\']container.+)', webpage,
            'container', default=webpage)

        entries = self._extract_entries(container)

        playlist = self._parse_json(
            self._search_regex(
                r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
            playlist_id)

        return self.playlist_result(
            entries, playlist_id, playlist.get('title'), playlist.get('description'))


class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/4667351',
        'info_dict': {
            'id': '4667351',
            'title': 'Nataly Hot',
        },
        'playlist_mincount': 2,
    }]


class PornHubUserVideosIE(PornHubPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/users/(?P<id>[^/]+)/videos'
    _TESTS = [{
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'info_dict': {
            'id': 'zoe_ph',
        },
        'playlist_mincount': 171,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }]
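
    # Profile pages are paginated: keep requesting ?page=N until a page
    # 404s or yields no further entries.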
    def _real_extract(self, url):
        user_id = self._match_id(url)

        entries = []
        for page_num in itertools.count(1):
            try:
                webpage = self._download_webpage(
                    url, user_id, 'Downloading page %d' % page_num,
                    query={'page': page_num})
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                    break
                # Re-raise anything other than a 404 instead of silently
                # swallowing it (and leaving webpage unbound below).
                raise
            page_entries = self._extract_entries(webpage)
            if not page_entries:
                break
            entries.extend(page_entries)

        return self.playlist_result(entries, user_id)