from __future__ import unicode_literals

import itertools
import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    clean_html,
    determine_ext,
    dict_get,
    extract_attributes,
    ExtractorError,
    int_or_none,
    parse_duration,
    try_get,
    unified_strdate,
    url_or_none,
)
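
# Extractors for xHamster: XHamsterIE handles single video pages,
# XHamsterEmbedIE handles /xembed.php embed pages, and XHamsterUserIE yields
# playlists of a user's uploaded videos.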


class XHamsterIE(InfoExtractor):
    _DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster[27]\.com)'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:.+?\.)?%s/
                        (?:
                            movies/(?P<id>\d+)/(?P<display_id>[^/]*)\.html|
                            videos/(?P<display_id_2>[^/]*)-(?P<id_2>\d+)
                        )
                    ''' % _DOMAINS
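    # Two URL layouts are accepted: the legacy /movies/<id>/<slug>.html pages
    # and the newer /videos/<slug>-<id> pages (see the tests below).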
    _TESTS = [{
        'url': 'https://xhamster.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'md5': '98b4687efb1ffd331c4197854dc09e8f',
        'info_dict': {
            'id': '1509445',
            'display_id': 'femaleagent-shy-beauty-takes-the-bait',
            'ext': 'mp4',
            'title': 'FemaleAgent Shy beauty takes the bait',
            'timestamp': 1350194821,
            'upload_date': '20121014',
            'uploader': 'Ruseful2011',
            'duration': 893,
            'age_limit': 18,
        },
    }, {
        'url': 'https://xhamster.com/videos/britney-spears-sexy-booty-2221348?hd=',
        'info_dict': {
            'id': '2221348',
            'display_id': 'britney-spears-sexy-booty',
            'ext': 'mp4',
            'title': 'Britney Spears Sexy Booty',
            'timestamp': 1379123460,
            'upload_date': '20130914',
            'uploader': 'jojo747400',
            'duration': 200,
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # empty seo, unavailable via new URL schema
        'url': 'http://xhamster.com/movies/5667973/.html',
        'info_dict': {
            'id': '5667973',
            'ext': 'mp4',
            'title': '....',
            'timestamp': 1454948101,
            'upload_date': '20160208',
            'uploader': 'parejafree',
            'duration': 72,
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # mobile site
        'url': 'https://m.xhamster.com/videos/cute-teen-jacqueline-solo-masturbation-8559111',
        'only_matching': True,
    }, {
        'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html',
        'only_matching': True,
    }, {
        # This video is visible for marcoalfa123456's friends only
        'url': 'https://it.xhamster.com/movies/7263980/la_mia_vicina.html',
        'only_matching': True,
    }, {
        # new URL schema
        'url': 'https://pt.xhamster.com/videos/euro-pedal-pumping-7937821',
        'only_matching': True,
    }, {
        'url': 'https://xhamster.one/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'only_matching': True,
    }, {
        'url': 'https://xhamster.desi/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'only_matching': True,
    }, {
        'url': 'https://xhamster2.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'only_matching': True,
    }, {
        'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
        'only_matching': True,
    }, {
        'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id') or mobj.group('id_2')
        display_id = mobj.group('display_id') or mobj.group('display_id_2')

        desktop_url = re.sub(r'^(https?://(?:.+?\.)?)m\.', r'\1', url)
        webpage, urlh = self._download_webpage_handle(desktop_url, video_id)

        error = self._html_search_regex(
            r'<div[^>]+id=["\']videoClosed["\'][^>]*>(.+?)</div>',
            webpage, 'error', default=None)
        if error:
            raise ExtractorError(error, expected=True)

        age_limit = self._rta_search(webpage)
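
        # Quality labels are expected to look like '240p'/'720p'; get_height
        # extracts the numeric height and returns None for anything that does
        # not match.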
        def get_height(s):
            return int_or_none(self._search_regex(
                r'^(\d+)[pP]', s, 'height', default=None))

        initials = self._parse_json(
            self._search_regex(
                r'window\.initials\s*=\s*({.+?})\s*;\s*\n', webpage, 'initials',
                default='{}'),
            video_id, fatal=False)
        if initials:
            video = initials['videoModel']
            title = video['title']

            formats = []
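            # video['sources'] appears to map a format family ('mp4',
            # 'download', ...) to {quality label: item}; 'download' items are
            # dicts carrying 'link' and 'size', other items are plain URL
            # strings.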
            for format_id, formats_dict in video['sources'].items():
                if not isinstance(formats_dict, dict):
                    continue
                for quality, format_item in formats_dict.items():
                    if format_id == 'download':
                        # Download link takes some time to be generated,
                        # skipping for now
                        continue
                    if isinstance(format_item, dict):
                        format_url = format_item.get('link')
                        filesize = int_or_none(
                            format_item.get('size'), invscale=1000000)
                    else:
                        format_url = format_item
                        filesize = None
                    format_url = url_or_none(format_url)
                    if not format_url:
                        continue
                    formats.append({
                        'format_id': '%s-%s' % (format_id, quality),
                        'url': format_url,
                        'ext': determine_ext(format_url, 'mp4'),
                        'height': get_height(quality),
                        'filesize': filesize,
                        'http_headers': {
                            'Referer': urlh.geturl(),
                        },
                    })
            self._sort_formats(formats)

            categories_list = video.get('categories')
            if isinstance(categories_list, list):
                categories = []
                for c in categories_list:
                    if not isinstance(c, dict):
                        continue
                    c_name = c.get('name')
                    if isinstance(c_name, compat_str):
                        categories.append(c_name)
            else:
                categories = None

            return {
                'id': video_id,
                'display_id': display_id,
                'title': title,
                'description': video.get('description'),
                'timestamp': int_or_none(video.get('created')),
                'uploader': try_get(
                    video, lambda x: x['author']['name'], compat_str),
                'thumbnail': video.get('thumbURL'),
                'duration': int_or_none(video.get('duration')),
                'view_count': int_or_none(video.get('views')),
                'like_count': int_or_none(try_get(
                    video, lambda x: x['rating']['likes'], int)),
                'dislike_count': int_or_none(try_get(
                    video, lambda x: x['rating']['dislikes'], int)),
                'comment_count': int_or_none(video.get('comments')),
                'age_limit': age_limit,
                'categories': categories,
                'formats': formats,
            }

        # Old layout fallback

        title = self._html_search_regex(
            [r'<h1[^>]*>([^<]+)</h1>',
             r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"',
             r'<title[^>]*>(.+?)(?:,\s*[^,]*?\s*Porn\s*[^,]*?:\s*xHamster[^<]*| - xHamster\.com)</title>'],
            webpage, 'title')

        formats = []
        format_urls = set()

        sources = self._parse_json(
            self._search_regex(
                r'sources\s*:\s*({.+?})\s*,?\s*\n', webpage, 'sources',
                default='{}'),
            video_id, fatal=False)
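        # Old-layout pages appear to expose sources as a flat
        # {quality label: URL} mapping.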
        for format_id, format_url in sources.items():
            format_url = url_or_none(format_url)
            if not format_url:
                continue
            if format_url in format_urls:
                continue
            format_urls.add(format_url)
            formats.append({
                'format_id': format_id,
                'url': format_url,
                'height': get_height(format_id),
            })

        video_url = self._search_regex(
            [r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
             r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
             r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
            webpage, 'video url', group='mp4', default=None)
        if video_url and video_url not in format_urls:
            formats.append({
                'url': video_url,
            })

        self._sort_formats(formats)

        # Only a few videos have a description
        mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
        description = mobj.group(1) if mobj else None

        upload_date = unified_strdate(self._search_regex(
            r'hint=["\'](\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}',
            webpage, 'upload date', fatal=False))

        uploader = self._html_search_regex(
            r'<span[^>]+itemprop=["\']author[^>]+><a[^>]+><span[^>]+>([^<]+)',
            webpage, 'uploader', default='anonymous')

        thumbnail = self._search_regex(
            [r'''["']thumbUrl["']\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
             r'''<video[^>]+"poster"=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
            webpage, 'thumbnail', fatal=False, group='thumbnail')

        duration = parse_duration(self._search_regex(
            [r'<[^<]+\bitemprop=["\']duration["\'][^<]+\bcontent=["\'](.+?)["\']',
             r'Runtime:\s*</span>\s*([\d:]+)'], webpage,
            'duration', fatal=False))

        view_count = int_or_none(self._search_regex(
            r'content=["\']User(?:View|Play)s:(\d+)',
            webpage, 'view count', fatal=False))

        mobj = re.search(r'hint=[\'"](?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes', webpage)
        (like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)

        mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage)
        comment_count = mobj.group('commentcount') if mobj else 0

        categories_html = self._search_regex(
            r'(?s)<table.+?(<span>Categories:.+?)</table>', webpage,
            'categories', default=None)
        categories = [clean_html(category) for category in re.findall(
            r'<a[^>]+>(.+?)</a>', categories_html)] if categories_html else None

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'upload_date': upload_date,
            'uploader': uploader,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': int_or_none(like_count),
            'dislike_count': int_or_none(dislike_count),
            'comment_count': int_or_none(comment_count),
            'age_limit': age_limit,
            'categories': categories,
            'formats': formats,
        }


class XHamsterEmbedIE(InfoExtractor):
    _VALID_URL = r'https?://(?:.+?\.)?%s/xembed\.php\?video=(?P<id>\d+)' % XHamsterIE._DOMAINS
    _TEST = {
        'url': 'http://xhamster.com/xembed.php?video=3328539',
        'info_dict': {
            'id': '3328539',
            'ext': 'mp4',
            'title': 'Pen Masturbation',
            'timestamp': 1406581861,
            'upload_date': '20140728',
            'uploader': 'ManyakisArt',
            'duration': 5,
            'age_limit': 18,
        }
    }
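
    # Used by the generic extractor to discover xHamster <iframe> embeds on
    # third-party pages.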
    @staticmethod
    def _extract_urls(webpage):
        return [url for _, url in re.findall(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1',
            webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        video_url = self._search_regex(
            r'href="(https?://xhamster\.com/(?:movies/{0}/[^"]*\.html|videos/[^/]*-{0})[^"]*)"'.format(video_id),
            webpage, 'xhamster url', default=None)

        if not video_url:
            vars = self._parse_json(
                self._search_regex(r'vars\s*:\s*({.+?})\s*,\s*\n', webpage, 'vars'),
                video_id)
            video_url = dict_get(vars, ('downloadLink', 'homepageLink', 'commentsLink', 'shareUrl'))

        return self.url_result(video_url, 'XHamster')


class XHamsterUserIE(InfoExtractor):
    _VALID_URL = r'https?://(?:.+?\.)?%s/users/(?P<id>[^/?#&]+)' % XHamsterIE._DOMAINS
    _TESTS = [{
        # Paginated user profile
        'url': 'https://xhamster.com/users/netvideogirls/videos',
        'info_dict': {
            'id': 'netvideogirls',
        },
        'playlist_mincount': 267,
    }, {
        # Non-paginated user profile
        'url': 'https://xhamster.com/users/firatkaan/videos',
        'info_dict': {
            'id': 'firatkaan',
        },
        'playlist_mincount': 1,
    }]
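
    # Profile listings are paginated: start at .../users/<id>/videos/1 and
    # keep following the data-page="next" link until there is none.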
    def _entries(self, user_id):
        next_page_url = 'https://xhamster.com/users/%s/videos/1' % user_id
        for pagenum in itertools.count(1):
            page = self._download_webpage(
                next_page_url, user_id, 'Downloading page %s' % pagenum)
            for video_tag in re.findall(
                    r'(<a[^>]+class=["\'].*?\bvideo-thumb__image-container[^>]+>)',
                    page):
                video = extract_attributes(video_tag)
                video_url = url_or_none(video.get('href'))
                if not video_url or not XHamsterIE.suitable(video_url):
                    continue
                video_id = XHamsterIE._match_id(video_url)
                yield self.url_result(
                    video_url, ie=XHamsterIE.ie_key(), video_id=video_id)
            mobj = re.search(r'<a[^>]+data-page=["\']next[^>]+>', page)
            if not mobj:
                break
            next_page = extract_attributes(mobj.group(0))
            next_page_url = url_or_none(next_page.get('href'))
            if not next_page_url:
                break

    def _real_extract(self, url):
        user_id = self._match_id(url)
        return self.playlist_result(self._entries(user_id), user_id)
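
# Minimal usage sketch (a hedged example, not part of the extractor classes):
# these extractors are normally driven through the public YoutubeDL API from a
# youtube-dl installation rather than instantiated directly, e.g.
#
#     import youtube_dl
#
#     with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#         info = ydl.extract_info(
#             'https://xhamster.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
#             download=False)
#         print(info['id'], info['title'], info['duration'])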