@@ -1,3 +1,4 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import itertools
@@ -5,6 +6,8 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
     ExtractorError,
     parse_iso8601,
 )
@@ -24,6 +27,7 @@ class TwitchIE(InfoExtractor):
     """
     _PAGE_LIMIT = 100
     _API_BASE = 'https://api.twitch.tv'
+    _LOGIN_URL = 'https://secure.twitch.tv/user/login'
     _TESTS = [{
         'url': 'http://www.twitch.tv/riotgames/b/577357806',
         'info_dict': {
@@ -109,6 +113,44 @@ class TwitchIE(InfoExtractor):
             'view_count': info['views'],
         }
 
+    def _real_initialize(self):
+        self._login()
+
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+
+        login_page = self._download_webpage(
+            self._LOGIN_URL, None, 'Downloading login page')
+
+        authenticity_token = self._search_regex(
+            r'<input name="authenticity_token" type="hidden" value="([^"]+)"',
+            login_page, 'authenticity token')
+
+        login_form = {
+            'utf8': '✓'.encode('utf-8'),
+            'authenticity_token': authenticity_token,
+            'redirect_on_login': '',
+            'embed_form': 'false',
+            'mp_source_action': '',
+            'follow': '',
+            'user[login]': username,
+            'user[password]': password,
+        }
+
+        request = compat_urllib_request.Request(
+            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+        request.add_header('Referer', self._LOGIN_URL)
+        response = self._download_webpage(
+            request, None, 'Logging in as %s' % username)
+
+        m = re.search(
+            r"id=([\"'])login_error_message\1[^>]*>(?P<msg>[^<]+)", response)
+        if m:
+            raise ExtractorError(
+                'Unable to login: %s' % m.group('msg').strip(), expected=True)
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj.group('chapterid'):