# encoding: utf-8

from __future__ import unicode_literals

import os
import re

from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
    compat_xml_parse_error,

    ExtractorError,
    HEADRequest,
    parse_xml,
    smuggle_url,
    unescapeHTML,
    unified_strdate,
    url_basename,
)
from .brightcove import BrightcoveIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .smotri import SmotriIE
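

# Last-resort extractor: _VALID_URL below matches any URL, so this IE only
# handles pages that no site-specific extractor has claimed.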
class GenericIE(InfoExtractor):
    IE_DESC = 'Generic downloader that works on some sites'
    _VALID_URL = r'.*'
    IE_NAME = 'generic'
    _TESTS = [
        {
            'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
            'file': '13601338388002.mp4',
            'md5': '6e15c93721d7ec9e9ca3fdbf07982cfd',
            'info_dict': {
                'uploader': 'www.hodiho.fr',
                'title': 'R\u00e9gis plante sa Jeep',
            }
        },
        # bandcamp page with custom domain
        {
            'add_ie': ['Bandcamp'],
            'url': 'http://bronyrock.com/track/the-pony-mash',
            'file': '3235767654.mp3',
            'info_dict': {
                'title': 'The Pony Mash',
                'uploader': 'M_Pallante',
            },
            'skip': 'There is a limit of 200 free downloads / month for the test song',
        },
        # embedded brightcove video
        # it also tests brightcove videos that need to set the 'Referer' in the
        # http requests
        {
            'add_ie': ['Brightcove'],
            'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
            'info_dict': {
                'id': '2765128793001',
                'ext': 'mp4',
                'title': 'Le cours de bourse : l’analyse technique',
                'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
                'uploader': 'BFM BUSINESS',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            # https://github.com/rg3/youtube-dl/issues/2253
            'url': 'http://bcove.me/i6nfkrc3',
            'file': '3101154703001.mp4',
            'md5': '0ba9446db037002366bab3b3eb30c88c',
            'info_dict': {
                'title': 'Still no power',
                'uploader': 'thestar.com',
                'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
            },
            'add_ie': ['Brightcove'],
        },
        {
            'url': 'http://www.championat.com/video/football/v/87/87499.html',
            'md5': 'fb973ecf6e4a78a67453647444222983',
            'info_dict': {
                'id': '3414141473001',
                'ext': 'mp4',
                'title': 'Видео. Удаление Дзагоева (ЦСКА)',
                'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
                'uploader': 'Championat',
            },
        },
        # Direct link to a video
        {
            'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
            'md5': '67d406c2bcb6af27fa886f31aa934bbe',
            'info_dict': {
                'id': 'trailer',
                'ext': 'mp4',
                'title': 'trailer',
                'upload_date': '20100513',
            }
        },
        # ooyala video
        {
            'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
            'md5': '5644c6ca5d5782c1d0d350dad9bd840c',
            'info_dict': {
                'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
                'ext': 'mp4',
                'title': '2cc213299525360.mov',  # that's what we get
            },
        },
        # google redirect
        {
            'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
            'info_dict': {
                'id': 'cmQHVoWB5FY',
                'ext': 'mp4',
                'upload_date': '20130224',
                'uploader_id': 'TheVerge',
                'description': 'Chris Ziegler takes a look at the Alcatel OneTouch Fire and the ZTE Open; two of the first Firefox OS handsets to be officially announced.',
                'uploader': 'The Verge',
                'title': 'First Firefox OS phones side-by-side',
            },
            'params': {
                'skip_download': False,
            }
        },
        # embed.ly video
        {
            'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
            'info_dict': {
                'id': '9ODmcdjQcHQ',
                'ext': 'mp4',
                'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
                'upload_date': '20140225',
                'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
                'uploader': 'Tested',
                'uploader_id': 'testedcom',
            },
            # No need to test YoutubeIE here
            'params': {
                'skip_download': True,
            },
        },
        # funnyordie embed
        {
            'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
            'md5': '7cf780be104d40fea7bae52eed4a470e',
            'info_dict': {
                'id': '18e820ec3f',
                'ext': 'mp4',
                'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
                'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
            },
        },
        # RUTV embed
        {
            'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
            'info_dict': {
                'id': '776940',
                'ext': 'mp4',
                'title': 'Охотское море стало целиком российским',
                'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        # Embedded TED video
        {
            'url': 'http://en.support.wordpress.com/videos/ted-talks/',
            'md5': 'deeeabcc1085eb2ba205474e7235a3d5',
            'info_dict': {
                'id': '981',
                'ext': 'mp4',
                'title': 'My web playroom',
                'uploader': 'Ze Frank',
                'description': 'md5:ddb2a40ecd6b6a147e400e535874947b',
            }
        },
        # nowvideo embed hidden behind percent encoding
        {
            'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
            'md5': '2baf4ddd70f697d94b1c18cf796d5107',
            'info_dict': {
                'id': '06e53103ca9aa',
                'ext': 'flv',
                'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
                'description': 'No description',
            },
        },
        # arte embed
        {
            'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
            'md5': '7653032cbb25bf6c80d80f217055fa43',
            'info_dict': {
                'id': '048195-004_PLUS7-F',
                'ext': 'flv',
                'title': 'X:enius',
                'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
                'upload_date': '20140320',
            },
            'params': {
                'skip_download': 'Requires rtmpdump'
            }
        },
        # smotri embed
        {
            'url': 'http://rbctv.rbc.ru/archive/news/562949990879132.shtml',
            'md5': 'ec40048448e9284c9a1de77bb188108b',
            'info_dict': {
                'id': 'v27008541fad',
                'ext': 'mp4',
                'title': 'Крым и Севастополь вошли в состав России',
                'description': 'md5:fae01b61f68984c7bd2fa741e11c3175',
                'duration': 900,
                'upload_date': '20140318',
                'uploader': 'rbctv_2012_4',
                'uploader_id': 'rbctv_2012_4',
            },
        },
    ]
    def report_download_webpage(self, video_id):
        """Report webpage download."""
        if not self._downloader.params.get('test', False):
            self._downloader.report_warning('Falling back on generic information extractor.')
        super(GenericIE, self).report_download_webpage(video_id)

    def report_following_redirect(self, new_url):
        """Report information extraction."""
        self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
    def _send_head(self, url):
        """Check if it is a redirect, like url shorteners, in case return the new url."""

        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
            """
            Subclass the HTTPRedirectHandler to make it use our
            HEADRequest also on the redirected URL
            """
            def redirect_request(self, req, fp, code, msg, headers, newurl):
                if code in (301, 302, 303, 307):
                    newurl = newurl.replace(' ', '%20')
                    newheaders = dict((k, v) for k, v in req.headers.items()
                                      if k.lower() not in ("content-length", "content-type"))
                    try:
                        # This function was deprecated in python 3.3 and removed in 3.4
                        origin_req_host = req.get_origin_req_host()
                    except AttributeError:
                        origin_req_host = req.origin_req_host
                    return HEADRequest(newurl,
                                       headers=newheaders,
                                       origin_req_host=origin_req_host,
                                       unverifiable=True)
                else:
                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """
            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                newheaders = dict((k, v) for k, v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                                      headers=newheaders,
                                                                      origin_req_host=req.get_origin_req_host(),
                                                                      unverifiable=True))
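
        # A fresh OpenerDirector is assembled below so that the two custom
        # handlers above take part in the HEAD request alongside the stock
        # HTTP/HTTPS and error handlers.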
        # Build our opener
        opener = compat_urllib_request.OpenerDirector()
        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                        HTTPMethodFallback, HEADRedirectHandler,
                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
            opener.add_handler(handler())

        response = opener.open(HEADRequest(url))
        if response is None:
            raise ExtractorError('Invalid URL protocol')
        return response
    def _extract_rss(self, url, video_id, doc):
        playlist_title = doc.find('./channel/title').text
        playlist_desc_el = doc.find('./channel/description')
        playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text

        entries = [{
            '_type': 'url',
            'url': e.find('link').text,
            'title': e.find('title').text,
        } for e in doc.findall('./channel/item')]

        return {
            '_type': 'playlist',
            'id': url,
            'title': playlist_title,
            'description': playlist_desc,
            'entries': entries,
        }
    def _real_extract(self, url):
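        # Overall flow: (1) handle scheme-less URLs via --default-search,
        # (2) issue a HEAD request to follow shortener redirects and catch
        # direct links to media files, (3) download the page and look for
        # known embeds (Brightcove, Vimeo, YouTube, ...), (4) fall back to
        # generic heuristics such as JW Player flashvars, Open Graph og:video
        # and HTML5 <video> tags.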
        parsed_url = compat_urlparse.urlparse(url)
        if not parsed_url.scheme:
            default_search = self._downloader.params.get('default_search')
            if default_search is None:
                default_search = 'auto_warning'

            if default_search in ('auto', 'auto_warning'):
                if '/' in url:
                    self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
                    return self.url_result('http://' + url)
                else:
                    if default_search == 'auto_warning':
                        self._downloader.report_warning(
                            'Falling back to youtube search for %s . Set --default-search to "auto" to suppress this warning.' % url)
                    return self.url_result('ytsearch:' + url)
            else:
                assert ':' in default_search
                return self.url_result(default_search + url)

        video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0]

        self.to_screen('%s: Requesting header' % video_id)
        try:
            response = self._send_head(url)

            # Check for redirect
            new_url = response.geturl()
            if url != new_url:
                self.report_following_redirect(new_url)
                return self.url_result(new_url)

            # Check for direct link to a video
            content_type = response.headers.get('Content-Type', '')
            m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
            if m:
                upload_date = response.headers.get('Last-Modified')
                if upload_date:
                    upload_date = unified_strdate(upload_date)
                return {
                    'id': video_id,
                    'title': os.path.splitext(url_basename(url))[0],
                    'formats': [{
                        'format_id': m.group('format_id'),
                        'url': url,
                        'vcodec': 'none' if m.group('type') == 'audio' else None
                    }],
                    'upload_date': upload_date,
                }

        except compat_urllib_error.HTTPError:
            # This may be a stupid server that doesn't like HEAD, our UA, or so
            pass
        try:
            webpage = self._download_webpage(url, video_id)
        except ValueError:
            # since this is the last-resort InfoExtractor, if
            # this error is thrown, it'll be thrown here
            raise ExtractorError('Failed to download URL: %s' % url)

        self.report_extraction(video_id)

        # Is it an RSS feed?
        try:
            doc = parse_xml(webpage)
            if doc.tag == 'rss':
                return self._extract_rss(url, video_id, doc)
        except compat_xml_parse_error:
            pass

        # Sometimes embedded video player is hidden behind percent encoding
        # (e.g. https://github.com/rg3/youtube-dl/issues/2448)
        # Unescaping the whole page allows to handle those cases in a generic way
        webpage = compat_urllib_parse.unquote(webpage)
        # it's tempting to parse this further, but you would
        # have to take into account all the variations like
        #   Video Title - Site Name
        #   Site Name | Video Title
        #   Video Title - Tagline | Site Name
        # and so on and so forth; it's just not practical
        video_title = self._html_search_regex(
            r'(?s)<title>(.*?)</title>', webpage, 'video title',
            default='video')

        # video uploader is domain name
        video_uploader = self._search_regex(
            r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
        # Look for BrightCove:
        bc_urls = BrightcoveIE._extract_brightcove_urls(webpage)
        if bc_urls:
            self.to_screen('Brightcove video detected.')
            entries = [{
                '_type': 'url',
                'url': smuggle_url(bc_url, {'Referer': url}),
                'ie_key': 'Brightcove'
            } for bc_url in bc_urls]

            return {
                '_type': 'playlist',
                'title': video_title,
                'id': video_id,
                'entries': entries,
            }

        # Look for embedded (iframe) Vimeo player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/.+?)\1', webpage)
        if mobj:
            player_url = unescapeHTML(mobj.group('url'))
            surl = smuggle_url(player_url, {'Referer': url})
            return self.url_result(surl, 'Vimeo')

        # Look for embedded (swf embed) Vimeo player
        mobj = re.search(
            r'<embed[^>]+?src="(https?://(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
        if mobj:
            return self.url_result(mobj.group(1), 'Vimeo')

        # Look for embedded YouTube player
        matches = re.findall(r'''(?x)
            (?:<iframe[^>]+?src=|embedSWF\(\s*)
            (["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
            (?:embed|v)/.+?)
            \1''', webpage)
        if matches:
            urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube')
                     for tuppl in matches]
            return self.playlist_result(
                urlrs, playlist_id=video_id, playlist_title=video_title)

        # Look for embedded Dailymotion player
        matches = re.findall(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage)
        if matches:
            urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Dailymotion')
                     for tuppl in matches]
            return self.playlist_result(
                urlrs, playlist_id=video_id, playlist_title=video_title)

        # Look for embedded Wistia player
        match = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
        if match:
            return {
                '_type': 'url_transparent',
                'url': unescapeHTML(match.group('url')),
                'ie_key': 'Wistia',
                'uploader': video_uploader,
                'title': video_title,
                'id': video_id,
            }

        # Look for embedded blip.tv player
        mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
        if mobj:
            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage)
        if mobj:
            return self.url_result(mobj.group(1), 'BlipTV')

        # Look for Bandcamp pages with custom domain
        mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
        if mobj is not None:
            burl = unescapeHTML(mobj.group(1))
            # Don't set the extractor because it can be a track url or an album
            return self.url_result(burl)

        # Look for embedded Vevo player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'))

        # Look for Ooyala videos
        mobj = (re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
                re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage))
        if mobj is not None:
            return OoyalaIE._build_url_result(mobj.group('ec'))

        # Look for Aparat videos
        mobj = re.search(r'<iframe src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
        if mobj is not None:
            return self.url_result(mobj.group(1), 'Aparat')

        # Look for MPORA videos
        mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
        if mobj is not None:
            return self.url_result(mobj.group(1), 'Mpora')

        # Look for embedded NovaMov player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?novamov\.com/embed\.php.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'NovaMov')

        # Look for embedded NowVideo player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?nowvideo\.(?:ch|sx|eu)/embed\.php.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'NowVideo')

        # Look for embedded Facebook player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>https://www\.facebook\.com/video/embed.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'Facebook')

        # Look for embedded VK player
        mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'VK')

        # Look for embedded Huffington Post player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'HuffPost')

        # Look for embed.ly
        mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'))
        mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
        if mobj is not None:
            return self.url_result(compat_urllib_parse.unquote(mobj.group('url')))

        # Look for funnyordie embed
        matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
        if matches:
            urlrs = [self.url_result(unescapeHTML(eurl), 'FunnyOrDie')
                     for eurl in matches]
            return self.playlist_result(
                urlrs, playlist_id=video_id, playlist_title=video_title)

        # Look for embedded RUTV player
        rutv_url = RUTVIE._extract_url(webpage)
        if rutv_url:
            return self.url_result(rutv_url, 'RUTV')

        # Look for embedded TED player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>http://embed\.ted\.com/.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'TED')

        # Look for embedded arte.tv player
        mobj = re.search(
            r'<script [^>]*?src="(?P<url>http://www\.arte\.tv/playerv2/embed[^"]+)"',
            webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'ArteTVEmbed')

        # Look for embedded smotri.com player
        smotri_url = SmotriIE._extract_url(webpage)
        if smotri_url:
            return self.url_result(smotri_url, 'Smotri')
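
        # Everything below is a chain of progressively more generic heuristics:
        # each step only runs while mobj is still None, so the first pattern
        # that matches wins.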
        # Start with something easy: JW Player in SWFObject
        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Look for gorilla-vid style embedding
            mobj = re.search(r'(?s)(?:jw_plugins|JWPlayerOptions).*?file\s*:\s*["\'](.*?)["\']', webpage)
        if mobj is None:
            # Broaden the search a little bit
            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit: JWPlayer JS loader
            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
        if mobj is None:
            # Try to find twitter cards info
            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
        if mobj is None:
            # We look for Open Graph info:
            # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
            # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
            if m_video_type is not None:
                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
        if mobj is None:
            # HTML5 video
            mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
        if mobj is None:
            mobj = re.search(
                r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
                r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
                webpage)
            if mobj:
                new_url = mobj.group(1)
                self.report_following_redirect(new_url)
                return {
                    '_type': 'url',
                    'url': new_url,
                }

        if mobj is None:
            raise ExtractorError('Unsupported URL: %s' % url)
        # It's possible that one of the regexes
        # matched, but returned an empty group:
        if mobj.group(1) is None:
            raise ExtractorError('Did not find a valid video URL at %s' % url)

        video_url = mobj.group(1)
        video_url = compat_urlparse.urljoin(url, video_url)
        video_id = compat_urllib_parse.unquote(os.path.basename(video_url))

        # Sometimes, jwplayer extraction will result in a YouTube URL
        if YoutubeIE.suitable(video_url):
            return self.url_result(video_url, 'Youtube')

        # here's a fun little line of code for you:
        video_id = os.path.splitext(video_id)[0]

        return {
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'title': video_title,
        }