@@ -1,9 +1,14 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    strip_or_none,
    unescapeHTML,
    urlencode_postdata,
)
@@ -14,19 +19,17 @@ class RoosterTeethIE(InfoExtractor):
    _NETRC_MACHINE = 'roosterteeth'
    _TESTS = [{
        'url': 'http://roosterteeth.com/episode/million-dollars-but-season-2-million-dollars-but-the-game-announcement',
        'md5': 'e2bd7764732d785ef797700a2489f212',
        'info_dict': {
            'id': '26576',
            'display_id': 'million-dollars-but-season-2-million-dollars-but-the-game-announcement',
            'ext': 'mp4',
            'title': 'Million Dollars, But... The Game Announcement',
            'title': 'Million Dollars, But...: Million Dollars, But... The Game Announcement',
            'description': 'md5:0cc3b21986d54ed815f5faeccd9a9ca5',
            'thumbnail': 're:^https?://.*\.png$',
            'description': 'Introducing Million Dollars, But... The Game! Available for pre-order now at www.MDBGame.com ',
            'creator': 'Rooster Teeth',
            'series': 'Million Dollars, But...',
            'episode': 'Million Dollars, But... The Game Announcement',
            'episode_id': '26576',
        },
        'params': {
            'skip_download': True,  # m3u8 downloads
            'comment_count': int,
        },
    }, {
        'url': 'http://achievementhunter.roosterteeth.com/episode/off-topic-the-achievement-hunter-podcast-2016-i-didn-t-think-it-would-pass-31',
@@ -40,73 +43,106 @@ class RoosterTeethIE(InfoExtractor):
    }, {
        'url': 'http://theknow.roosterteeth.com/episode/the-know-game-news-season-1-boring-steam-sales-are-better',
        'only_matching': True,
    }, {
        # only available for FIRST members
        'url': 'http://roosterteeth.com/episode/rt-docs-the-world-s-greatest-head-massage-the-world-s-greatest-head-massage-an-asmr-journey-part-one',
        'only_matching': True,
    }]

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None or password is None:
            return False
        if username is None:
            return

        # token is required to authenticate request
        login_page = self._download_webpage(self._LOGIN_URL, None, 'Getting login token', 'Unable to get login token')
        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='Unable to download login page')

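        # The hidden inputs from the login page (the login token mentioned above) are posted back together with the credentials.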
        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'username': username,
            'password': password,
        })
        login_payload = urlencode_postdata(login_form)

        # required for proper responses
        login_headers = {
            'Referer': self._LOGIN_URL,
        }

        login_request = self._download_webpage(
            self._LOGIN_URL, None,
            note='Logging in as %s' % username,
            data=login_payload,
            headers=login_headers)

        if 'Authentication failed' in login_request:
            raise ExtractorError(
                'Login failed (invalid username/password)', expected=True)
            data=urlencode_postdata(login_form),
            headers={
                'Referer': self._LOGIN_URL,
            })

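        # A successful login is detected by a logout link or "Sign Out" text
        # in the response; otherwise the alert box is scraped for an error message.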
        if not any(re.search(p, login_request) for p in (
                r'href=["\']https?://(?:www\.)?roosterteeth\.com/logout"',
                r'>Sign Out<')):
            error = self._html_search_regex(
                r'(?s)<div[^>]+class=(["\']).*?\balert-danger\b.*?\1[^>]*>(?:\s*<button[^>]*>.*?</button>)?(?P<error>.+?)</div>',
                login_request, 'alert', default=None, group='error')
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        match_id = self._match_id(url)
        webpage = self._download_webpage(url, match_id)
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        episode_id = self._html_search_regex(r"commentControls\('#comment-([0-9]+)'\)", webpage, 'episode id', match_id, False)
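        # The episode title is read from the player's videoTitle variable, falling back to the <title> tag.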
        episode = strip_or_none(unescapeHTML(self._search_regex(
            (r'videoTitle\s*=\s*(["\'])(?P<title>(?:(?!\1).)+)\1',
             r'<title>(?P<title>[^<]+)</title>'), webpage, 'title',
            default=None, group='title')))

        self.report_extraction(episode_id)
        title = strip_or_none(self._og_search_title(
            webpage, default=None)) or episode

        title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, 'episode title', self._og_search_title(webpage), False)
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._og_search_description(webpage)
        creator = self._html_search_regex(r'<h3>Latest (.+) Gear</h3>', webpage, 'site', 'Rooster Teeth', False)
        series = self._html_search_regex(r'<h2>More ([^<]+)</h2>', webpage, 'series', fatal=False)
        episode = self._html_search_regex(r'<title>([^<]+)</title>', webpage, 'episode title', fatal=False)
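        # The HLS manifest URL is embedded in the page's player config as a file: '...m3u8' entry.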
        m3u8_url = self._search_regex(
            r'file\s*:\s*(["\'])(?P<url>http.+?\.m3u8.*?)\1',
            webpage, 'm3u8 url', default=None, group='url')

        if '<div class="non-sponsor">' in webpage:
            self.raise_login_required('%s is only available for FIRST members' % title)
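        # No manifest in the page usually means the episode is gated (FIRST members only or not yet live).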
        if not m3u8_url:
            if re.search(r'<div[^>]+class=["\']non-sponsor', webpage):
                self.raise_login_required(
                    '%s is only available for FIRST members' % display_id)

        if '<div class="golive-gate">' in webpage:
            self.raise_login_required('%s is not available yet' % title)
            if re.search(r'<div[^>]+class=["\']golive-gate', webpage):
                self.raise_login_required('%s is not available yet' % display_id)

        formats = self._extract_m3u8_formats(self._html_search_regex(r"file: '(.+?)m3u8'", webpage, 'm3u8 url') + 'm3u8', episode_id, ext='mp4')
            raise ExtractorError('Unable to extract m3u8 URL')

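        # Expand the HLS master playlist into the individual available formats.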
        formats = self._extract_m3u8_formats(
            m3u8_url, display_id, ext='mp4',
            entry_protocol='m3u8_native', m3u8_id='hls')
        self._sort_formats(formats)

        description = strip_or_none(self._og_search_description(webpage))
        thumbnail = self._proto_relative_url(self._og_search_thumbnail(webpage))

        series = self._search_regex(
            (r'<h2>More ([^<]+)</h2>', r'<a[^>]+>See All ([^<]+) Videos<'),
            webpage, 'series', fatal=False)

        comment_count = int_or_none(self._search_regex(
            r'>Comments \((\d+)\)<', webpage,
            'comment count', fatal=False))

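        # The numeric video id comes from the episode container element; the display id is used as a fallback.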
        video_id = self._search_regex(
            (r'containerId\s*=\s*["\']episode-(\d+)\1',
             r'<div[^<]+id=["\']episode-(\d+)'), webpage,
            'video id', default=display_id)

        return {
            'id': episode_id,
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
            'creator': creator,
            'thumbnail': thumbnail,
            'series': series,
            'episode': episode,
            'episode_id': episode_id,
            'comment_count': comment_count,
            'formats': formats,
        }