Browse Source

Merge branch 'ping-soompi'

totalwebcasting
Sergey M. 10 years ago
parent
commit
bba5bfc890
3 changed files with 163 additions and 17 deletions
  1. +4
    -0
      youtube_dl/extractor/__init__.py
  2. +13
    -17
      youtube_dl/extractor/crunchyroll.py
  3. +146
    -0
      youtube_dl/extractor/soompi.py

+ 4
- 0
youtube_dl/extractor/__init__.py View File

@@ -480,6 +480,10 @@ from .smotri import (
)
from .snotr import SnotrIE
from .sohu import SohuIE
from .soompi import (
SoompiIE,
SoompiShowIE,
)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,


+ 13
- 17
youtube_dl/extractor/crunchyroll.py View File

@ -76,8 +76,8 @@ class CrunchyrollIE(InfoExtractor):
self._login()
def _decrypt_subtitles(self, data, iv, id):
data = bytes_to_intlist(data)
iv = bytes_to_intlist(iv)
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
id = int(id)
def obfuscate_key_aux(count, modulo, start):
@ -179,6 +179,16 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
return output
def _extract_subtitles(self, subtitle):
    """Parse a decrypted subtitle XML document and return it rendered in
    both supported formats (srt and ass)."""
    sub_root = xml.etree.ElementTree.fromstring(subtitle)
    return [
        {'ext': ext, 'data': convert(sub_root)}
        for ext, convert in (
            ('srt', self._convert_subtitles_to_srt),
            ('ass', self._convert_subtitles_to_ass),
        )
    ]
def _get_subtitles(self, video_id, webpage):
subtitles = {}
for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
@ -190,25 +200,11 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
if not id or not iv or not data:
continue
id = int(id)
iv = base64.b64decode(iv)
data = base64.b64decode(data)
subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
if not lang_code:
continue
sub_root = xml.etree.ElementTree.fromstring(subtitle)
subtitles[lang_code] = [
{
'ext': 'srt',
'data': self._convert_subtitles_to_srt(sub_root),
},
{
'ext': 'ass',
'data': self._convert_subtitles_to_ass(sub_root),
},
]
subtitles[lang_code] = self._extract_subtitles(subtitle)
return subtitles
def _real_extract(self, url):


+ 146
- 0
youtube_dl/extractor/soompi.py View File

@@ -0,0 +1,146 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from .crunchyroll import CrunchyrollIE
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
remove_start,
xpath_text,
)
class SoompiBaseIE(InfoExtractor):
    """Shared helpers for the Soompi TV extractors."""

    def _get_episodes(self, webpage, episode_filter=None):
        """Return the episode dicts embedded in *webpage* as the ``VIDEOS``
        JSON array, optionally narrowed by *episode_filter* (a predicate).

        With no filter, falsy entries are dropped (mirrors ``filter(None, …)``).
        """
        raw_json = self._search_regex(
            r'VIDEOS\s*=\s*(\[.+?\]);', webpage, 'episodes JSON')
        all_episodes = self._parse_json(raw_json, None)
        predicate = episode_filter if episode_filter is not None else bool
        return [episode for episode in all_episodes if predicate(episode)]
class SoompiIE(SoompiBaseIE, CrunchyrollIE):
    """Extractor for a single episode on tv.soompi.com.

    Inherits the subtitle decryption/conversion helpers
    (``_decrypt_subtitles``, ``_extract_subtitles``) from CrunchyrollIE,
    since Soompi serves subtitles in the same encrypted format.
    """
    IE_NAME = 'soompi'
    _VALID_URL = r'https?://tv\.soompi\.com/(?:en/)?watch/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://tv.soompi.com/en/watch/29235',
        'info_dict': {
            'id': '29235',
            'ext': 'mp4',
            'title': 'Episode 1096',
            'description': '2015-05-20'
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _get_episode(self, webpage, video_id):
        # The page embeds metadata for several episodes; pick the matching one.
        return self._get_episodes(webpage, lambda x: x['id'] == video_id)[0]

    def _get_subtitles(self, config, video_id):
        """Decrypt and convert every subtitle listed in the config XML.

        Returns a dict mapping language code to a list of subtitle dicts
        (srt and ass variants), as produced by ``_extract_subtitles``.
        """
        # Map subtitle id -> language title (used as the language code).
        sub_langs = {}
        for subtitle in config.findall('./{default}preload/subtitles/subtitle'):
            sub_langs[subtitle.attrib['id']] = subtitle.attrib['title']

        subtitles = {}
        for s in config.findall('./{default}preload/subtitle'):
            lang_code = sub_langs.get(s.attrib['id'])
            if not lang_code:
                continue
            sub_id = s.get('id')
            data = xpath_text(s, './data', 'data')
            iv = xpath_text(s, './iv', 'iv')
            # Fix: the original tested the builtin `id` (always truthy)
            # instead of `sub_id`, so entries with a missing id were never
            # skipped and would crash later in _decrypt_subtitles.
            if not sub_id or not iv or not data:
                continue
            subtitle = self._decrypt_subtitles(data, iv, sub_id).decode('utf-8')
            subtitles[lang_code] = self._extract_subtitles(subtitle)
        return subtitles

    def _real_extract(self, url):
        video_id = self._match_id(url)

        try:
            webpage = self._download_webpage(
                url, video_id, 'Downloading episode page')
        except ExtractorError as ee:
            # Blocked (e.g. geo-restricted) episodes come back as a 403
            # whose body carries a human-readable message; surface it.
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                webpage = ee.cause.read()
                block_message = self._html_search_regex(
                    r'(?s)<div class="block-message">(.+?)</div>', webpage,
                    'block message', default=None)
                if block_message:
                    raise ExtractorError(block_message, expected=True)
            raise

        formats = []
        config = None
        for format_id in re.findall(r'\?quality=([0-9a-zA-Z]+)', webpage):
            # One config XML per advertised quality; each yields an m3u8.
            config = self._download_xml(
                'http://tv.soompi.com/en/show/_/%s-config.xml?mode=hls&quality=%s' % (video_id, format_id),
                video_id, 'Downloading %s XML' % format_id)
            m3u8_url = xpath_text(
                config, './{default}preload/stream_info/file',
                '%s m3u8 URL' % format_id)
            if not m3u8_url:
                continue
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', m3u8_id=format_id))
        self._sort_formats(formats)

        episode = self._get_episode(webpage, video_id)

        title = episode['name']
        description = episode.get('description')
        duration = int_or_none(episode.get('duration'))

        thumbnails = [{
            'id': thumbnail_id,
            'url': thumbnail_url,
        } for thumbnail_id, thumbnail_url in episode.get('img_url', {}).items()]

        # Uses the last downloaded config (subtitles are quality-independent).
        subtitles = self.extract_subtitles(config, video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles
        }
class SoompiShowIE(SoompiBaseIE):
    """Playlist extractor for a full show page on tv.soompi.com."""
    IE_NAME = 'soompi:show'
    _VALID_URL = r'https?://tv\.soompi\.com/en/shows/(?P<id>[0-9a-zA-Z\-_]+)'
    _TESTS = [{
        'url': 'http://tv.soompi.com/en/shows/liar-game',
        'info_dict': {
            'id': 'liar-game',
            'title': 'Liar Game',
            'description': 'md5:52c02bce0c1a622a95823591d0589b66',
        },
        'playlist_count': 14,
    }]

    def _real_extract(self, url):
        show_id = self._match_id(url)

        webpage = self._download_webpage(
            url, show_id, 'Downloading show page')

        # Page titles carry a "SoompiTV | " prefix not wanted in the playlist title.
        title = remove_start(self._og_search_title(webpage), 'SoompiTV | ')
        description = self._og_search_description(webpage)

        entries = []
        for episode in self._get_episodes(webpage):
            episode_url = 'http://tv.soompi.com/en/watch/%s' % episode['id']
            entries.append(self.url_result(episode_url, 'Soompi'))

        return self.playlist_result(entries, show_id, title, description)

Loading…
Cancel
Save