# coding: utf-8
from __future__ import unicode_literals

import functools
import itertools
import operator
import re

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_str,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    js_to_json,
    orderedSet,
    remove_quotes,
    str_to_int,
    url_or_none,
)


class PornHubIE(InfoExtractor):
    IE_DESC = 'PornHub and Thumbzilla'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[^/]+\.)?pornhub\.com/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
                            (?:www\.)?thumbzilla\.com/video/
                        )
                        (?P<id>[\da-z]+)
                    '''
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '1e19b41231a02eba417839222ac9d58e',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'uploader': 'Babes',
            'upload_date': '20130628',
            'duration': 361,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
    }, {
        # non-ASCII title
        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
        'info_dict': {
            'id': '1331683002',
            'ext': 'mp4',
            'title': '重庆婷婷女王足交',
            'uploader': 'Unknown',
            'upload_date': '20150213',
            'duration': 1753,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # subtitles
        'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5af5fef7c2aa7',
        'info_dict': {
            'id': 'ph5af5fef7c2aa7',
            'ext': 'mp4',
            'title': 'BFFS - Cute Teen Girls Share Cock On the Floor',
            'uploader': 'BFFs',
            'duration': 622,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
            'subtitles': {
                'en': [{
                    'ext': 'srt',
                }],
            },
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        # removed at the request of cam4.com
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }, {
        # removed at the request of the copyright owner
        'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
        'only_matching': True,
    }, {
        # removed by uploader
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
        'only_matching': True,
    }, {
        # private video
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
        'only_matching': True,
    }, {
        'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/[\da-z]+)',
            webpage)

    def _extract_count(self, pattern, webpage, name):
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        video_id = self._match_id(url)

        self._set_cookie('pornhub.com', 'age_verified', '1')
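
        # The 'platform' cookie controls which site variant the server
        # renders; the desktop ('pc') page is fetched first, and the 'tv'
        # variant is used further below only as a fallback source for the
        # media URL.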
        def dl_webpage(platform):
            self._set_cookie('pornhub.com', 'platform', platform)
            return self._download_webpage(
                'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id,
                video_id, 'Downloading %s webpage' % platform)

        webpage = dl_webpage('pc')

        error_msg = self._html_search_regex(
            r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
            webpage, 'error message', default=None, group='error')
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

        # video_title from flashvars contains whitespace instead of non-ASCII
        # characters (see http://www.pornhub.com/view_video.php?viewkey=1331683002),
        # so it is not relied upon anymore.
        title = self._html_search_meta(
            'twitter:title', webpage, default=None) or self._search_regex(
            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
            webpage, 'title', group='title')

        video_urls = []
        video_urls_set = set()

        subtitles = {}
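
        # The flashvars_<id> JSON object embedded in the page is the primary
        # source of media URLs (mediaDefinitions), thumbnail, duration and
        # closed captions.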
        flashvars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
            video_id)
        if flashvars:
            subtitle_url = url_or_none(flashvars.get('closedCaptionsFile'))
            if subtitle_url:
                subtitles.setdefault('en', []).append({
                    'url': subtitle_url,
                    'ext': 'srt',
                })
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
            media_definitions = flashvars.get('mediaDefinitions')
            if isinstance(media_definitions, list):
                for definition in media_definitions:
                    if not isinstance(definition, dict):
                        continue
                    video_url = definition.get('videoUrl')
                    if not video_url or not isinstance(video_url, compat_str):
                        continue
                    if video_url in video_urls_set:
                        continue
                    video_urls_set.add(video_url)
                    video_urls.append(
                        (video_url, int_or_none(definition.get('quality'))))
        else:
            thumbnail, duration = [None] * 2

        if not video_urls:
            tv_webpage = dl_webpage('tv')
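
            # The TV page builds the final media URL at runtime from a chain
            # of obfuscated JS string assignments that ends in a 'mediastring'
            # variable; those assignments are parsed here rather than executed.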
            assignments = self._search_regex(
                r'(var.+?mediastring.+?)</script>', tv_webpage,
                'encoded url').split(';')

            js_vars = {}
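
            # parse_js_value handles the small JS subset used here: it strips
            # /* ... */ comments, resolves '+' concatenation recursively, and
            # substitutes previously assigned variables, falling back to
            # unquoting plain string literals.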
            def parse_js_value(inp):
                inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
                if '+' in inp:
                    inps = inp.split('+')
                    return functools.reduce(
                        operator.concat, map(parse_js_value, inps))
                inp = inp.strip()
                if inp in js_vars:
                    return js_vars[inp]
                return remove_quotes(inp)

            for assn in assignments:
                assn = assn.strip()
                if not assn:
                    continue
                assn = re.sub(r'var\s+', '', assn)
                vname, value = assn.split('=', 1)
                js_vars[vname] = parse_js_value(value)

            video_url = js_vars['mediastring']
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)
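
        # Direct download links exposed on the page are picked up as
        # additional formats if they are not already known.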
        for mobj in re.finditer(
                r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
                webpage):
            video_url = mobj.group('url')
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        upload_date = None
        formats = []
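        # upload_date is derived from the /YYYYMM/DD/ path segment of the
        # first media URL that contains one; height and tbr are read from the
        # '<height>P_<tbr>K' pattern when present.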
        for video_url, height in video_urls:
            if not upload_date:
                upload_date = self._search_regex(
                    r'/(\d{6}/\d{2})/', video_url, 'upload date', default=None)
                if upload_date:
                    upload_date = upload_date.replace('/', '')
            tbr = None
            mobj = re.search(r'(?P<height>\d+)[pP]?_(?P<tbr>\d+)[kK]', video_url)
            if mobj:
                if not height:
                    height = int(mobj.group('height'))
                tbr = int(mobj.group('tbr'))
            formats.append({
                'url': video_url,
                'format_id': '%dp' % height if height else None,
                'height': height,
                'tbr': tbr,
            })
        self._sort_formats(formats)

        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')

        page_params = self._parse_json(self._search_regex(
            r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
            webpage, 'page parameters', group='data', default='{}'),
            video_id, transform_source=js_to_json, fatal=False)
        tags = categories = None
        if page_params:
            tags = page_params.get('tags', '').split(',')
            categories = page_params.get('categories', '').split(',')

        return {
            'id': video_id,
            'uploader': video_uploader,
            'upload_date': upload_date,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
            'tags': tags,
            'categories': categories,
            'subtitles': subtitles,
        }


class PornHubPlaylistBaseIE(InfoExtractor):
    def _extract_entries(self, webpage):
        # Only process the container div with the main playlist content,
        # skipping the drop-down menu that uses a similar pattern for videos
        # (see https://github.com/rg3/youtube-dl/issues/11594).
        container = self._search_regex(
            r'(?s)(<div[^>]+class=["\']container.+)', webpage,
            'container', default=webpage)

        return [
            self.url_result(
                'http://www.pornhub.com/%s' % video_url,
                PornHubIE.ie_key(), video_title=title)
            for video_url, title in orderedSet(re.findall(
                r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
                container))
        ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        entries = self._extract_entries(webpage)

        playlist = self._parse_json(
            self._search_regex(
                r'(?:playlistObject|PLAYLIST_VIEW)\s*=\s*({.+?});', webpage,
                'playlist', default='{}'),
            playlist_id, fatal=False)

        title = playlist.get('title') or self._search_regex(
            r'>Videos\s+in\s+(.+?)\s+[Pp]laylist<', webpage, 'title', fatal=False)

        return self.playlist_result(
            entries, playlist_id, title, playlist.get('description'))


class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    _VALID_URL = r'https?://(?:[^/]+\.)?pornhub\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/4667351',
        'info_dict': {
            'id': '4667351',
            'title': 'Nataly Hot',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://de.pornhub.com/playlist/4667351',
        'only_matching': True,
    }]


class PornHubUserVideosIE(PornHubPlaylistBaseIE):
    _VALID_URL = r'https?://(?:[^/]+\.)?pornhub\.com/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
    _TESTS = [{
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'info_dict': {
            'id': 'zoe_ph',
        },
        'playlist_mincount': 171,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }, {
        # default sorting as Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos',
        'info_dict': {
            'id': 'povd',
        },
        'playlist_mincount': 293,
    }, {
        # Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
        'only_matching': True,
    }, {
        # Most Recent Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
        'only_matching': True,
    }, {
        # Most Viewed Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/model/jayndrea/videos/upload',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)

        entries = []
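        # Paginate through the listing until the site returns 404 or a page
        # with no entries.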
        for page_num in itertools.count(1):
            try:
                webpage = self._download_webpage(
                    url, user_id, 'Downloading page %d' % page_num,
                    query={'page': page_num})
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                    break
                raise
            page_entries = self._extract_entries(webpage)
            if not page_entries:
                break
            entries.extend(page_entries)

        return self.playlist_result(entries, user_id)