# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .kaltura import KalturaIE
from ..utils import (
    extract_attributes,
    remove_end,
    urlencode_postdata,
)


class AsianCrushIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?asiancrush\.com/video/(?:[^/]+/)?0+(?P<id>\d+)v\b'
    _TESTS = [{
        'url': 'https://www.asiancrush.com/video/012869v/women-who-flirt/',
        'md5': 'c3b740e48d0ba002a42c0b72857beae6',
        'info_dict': {
            'id': '1_y4tmjm5r',
            'ext': 'mp4',
            'title': 'Women Who Flirt',
            'description': 'md5:3db14e9186197857e7063522cb89a805',
            'timestamp': 1496936429,
            'upload_date': '20170608',
            'uploader_id': 'craig@crifkin.com',
        },
    }, {
        'url': 'https://www.asiancrush.com/video/she-was-pretty/011886v-pretty-episode-3/',
        'only_matching': True,
    }]
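
    # Video pages are backed by Kaltura: the extractor asks the site's
    # WordPress admin-ajax endpoint for the Kaltura partner/entry ids and
    # delegates the actual extraction to KalturaIE.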
    def _real_extract(self, url):
        video_id = self._match_id(url)

        data = self._download_json(
            'https://www.asiancrush.com/wp-admin/admin-ajax.php', video_id,
            data=urlencode_postdata({
                'postid': video_id,
                'action': 'get_channel_kaltura_vars',
            }))

        entry_id = data['entry_id']

        return self.url_result(
            'kaltura:%s:%s' % (data['partner_id'], entry_id),
            ie=KalturaIE.ie_key(), video_id=entry_id,
            video_title=data.get('vid_label'))


class AsianCrushPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?asiancrush\.com/series/0+(?P<id>\d+)s\b'
    _TEST = {
        'url': 'https://www.asiancrush.com/series/012481s/scholar-walks-night/',
        'info_dict': {
            'id': '12481',
            'title': 'Scholar Who Walks the Night',
            'description': 'md5:7addd7c5132a09fd4741152d96cce886',
        },
        'playlist_count': 20,
    }
    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        entries = []
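
        # Collect entries from anchors whose href matches the single-video
        # URL pattern, keeping only those marked with the 'clearfix' class.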
        for mobj in re.finditer(
                r'<a[^>]+href=(["\'])(?P<url>%s.*?)\1[^>]*>' % AsianCrushIE._VALID_URL,
                webpage):
            attrs = extract_attributes(mobj.group(0))
            if attrs.get('class') == 'clearfix':
                entries.append(self.url_result(
                    mobj.group('url'), ie=AsianCrushIE.ie_key()))
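
        # Series title: try the movieTitle heading, then og:title,
        # twitter:title and finally the <title> tag, stripping the
        # ' | AsianCrush' suffix.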
        title = remove_end(
            self._html_search_regex(
                r'(?s)<h1\b[^>]+\bid=["\']movieTitle[^>]+>(.+?)</h1>', webpage,
                'title', default=None) or self._og_search_title(
                webpage, default=None) or self._html_search_meta(
                'twitter:title', webpage, 'title',
                default=None) or self._search_regex(
                r'<title>([^<]+)</title>', webpage, 'title', fatal=False),
            ' | AsianCrush')

        description = self._og_search_description(
            webpage, default=None) or self._html_search_meta(
            'twitter:description', webpage, 'description', fatal=False)

        return self.playlist_result(entries, playlist_id, title, description)