from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urlparse,
    compat_str,
)
from ..utils import (
    parse_duration,
    js_to_json,
    parse_iso8601,
)


class ViideaIE(InfoExtractor):
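    # URLs name a lecture by slug, optionally behind a /lecture/ prefix; an
    # optional /video/<part> suffix selects one part of a multi-part lecture
    # (captured as the 'part' group and used as explicit_part_id below).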
    _VALID_URL = r'''(?x)https?://(?:www\.)?(?:
            videolectures\.net|
            flexilearn\.viidea\.net|
            presentations\.ocwconsortium\.org|
            video\.travel-zoom\.si|
            video\.pomp-forum\.si|
            tv\.nil\.si|
            video\.hekovnik.com|
            video\.szko\.si|
            kpk\.viidea\.com|
            inside\.viidea\.net|
            video\.kiberpipa\.org|
            bvvideo\.si|
            kongres\.viidea\.net|
            edemokracija\.viidea\.com
        )(?:/lecture)?/(?P<id>[^/]+)(?:/video/(?P<part>\d+))?/*(?:[#?].*)?$'''

    _TESTS = [{
        'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/',
        'info_dict': {
            'id': '20171',
            'display_id': 'promogram_igor_mekjavic_eng',
            'ext': 'mp4',
            'title': 'Automatics, robotics and biocybernetics',
            'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
            'thumbnail': r're:http://.*\.jpg',
            'timestamp': 1372349289,
            'upload_date': '20130627',
            'duration': 565,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # video with invalid direct format links (HTTP 403)
        'url': 'http://videolectures.net/russir2010_filippova_nlp/',
        'info_dict': {
            'id': '14891',
            'display_id': 'russir2010_filippova_nlp',
            'ext': 'flv',
            'title': 'NLP at Google',
            'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3',
            'thumbnail': r're:http://.*\.jpg',
            'timestamp': 1284375600,
            'upload_date': '20100913',
            'duration': 5352,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        # event playlist
        'url': 'http://videolectures.net/deeplearning2015_montreal/',
        'info_dict': {
            'id': '23181',
            'title': 'Deep Learning Summer School, Montreal 2015',
            'description': 'md5:0533a85e4bd918df52a01f0e1ebe87b7',
            'thumbnail': r're:http://.*\.jpg',
            'timestamp': 1438560000,
        },
        'playlist_count': 30,
    }, {
        # multi part lecture
        'url': 'http://videolectures.net/mlss09uk_bishop_ibi/',
        'info_dict': {
            'id': '9737',
            'display_id': 'mlss09uk_bishop_ibi',
            'title': 'Introduction To Bayesian Inference',
            'thumbnail': r're:http://.*\.jpg',
            'timestamp': 1251622800,
        },
        'playlist': [{
            'info_dict': {
                'id': '9737_part1',
                'display_id': 'mlss09uk_bishop_ibi_part1',
                'ext': 'wmv',
                'title': 'Introduction To Bayesian Inference (Part 1)',
                'thumbnail': r're:http://.*\.jpg',
                'duration': 4622,
                'timestamp': 1251622800,
                'upload_date': '20090830',
            },
        }, {
            'info_dict': {
                'id': '9737_part2',
                'display_id': 'mlss09uk_bishop_ibi_part2',
                'ext': 'wmv',
                'title': 'Introduction To Bayesian Inference (Part 2)',
                'thumbnail': r're:http://.*\.jpg',
                'duration': 5641,
                'timestamp': 1251622800,
                'upload_date': '20090830',
            },
        }],
        'playlist_count': 2,
    }]

    def _real_extract(self, url):
        lecture_slug, explicit_part_id = re.match(self._VALID_URL, url).groups()

        webpage = self._download_webpage(url, lecture_slug)
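
        # The player configuration is embedded in the page as a JavaScript
        # object literal ('cfg: {...}'); convert it to JSON before parsing.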
        cfg = self._parse_json(self._search_regex(
            [r'cfg\s*:\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*:\s*\(?\s*function',
             r'cfg\s*:\s*({[^}]+})'],
            webpage, 'cfg'), lecture_slug, js_to_json)

        lecture_id = compat_str(cfg['obj_id'])

        base_url = self._proto_relative_url(cfg['livepipe'], 'http:')
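
        # Lecture metadata (title, timestamp, description, type) comes from
        # the site API rooted at cfg['livepipe'].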
        lecture_data = self._download_json(
            '%s/site/api/lecture/%s?format=json' % (base_url, lecture_id),
            lecture_id)['lecture'][0]

        lecture_info = {
            'id': lecture_id,
            'display_id': lecture_slug,
            'title': lecture_data['title'],
            'timestamp': parse_iso8601(lecture_data.get('time')),
            'description': lecture_data.get('description_wiki'),
            'thumbnail': lecture_data.get('thumb'),
        }

        playlist_entries = []

        lecture_type = lecture_data.get('type')
        parts = [compat_str(video) for video in cfg.get('videos', [])]
        if parts:
            multipart = len(parts) > 1
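
            # Each part has its own SMIL manifest listing the available
            # formats; parts of a multi-part lecture get '_part<N>' suffixed
            # ids, display ids and titles.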
            def extract_part(part_id):
                smil_url = '%s/%s/video/%s/smil.xml' % (base_url, lecture_slug, part_id)
                smil = self._download_smil(smil_url, lecture_id)
                info = self._parse_smil(smil, smil_url, lecture_id)
                self._sort_formats(info['formats'])
                info['id'] = lecture_id if not multipart else '%s_part%s' % (lecture_id, part_id)
                info['display_id'] = lecture_slug if not multipart else '%s_part%s' % (lecture_slug, part_id)
                if multipart:
                    info['title'] += ' (Part %s)' % part_id
                switch = smil.find('.//switch')
                if switch is not None:
                    info['duration'] = parse_duration(switch.attrib.get('dur'))
                item_info = lecture_info.copy()
                item_info.update(info)
                return item_info
            if explicit_part_id or not multipart:
                result = extract_part(explicit_part_id or parts[0])
            else:
                result = {
                    '_type': 'multi_video',
                    'entries': [extract_part(part) for part in parts],
                }
                result.update(lecture_info)

            # Immediately return explicitly requested part or non event item
            if explicit_part_id or lecture_type != 'evt':
                return result

            playlist_entries.append(result)

        # It's probably a playlist
        if not parts or lecture_type == 'evt':
            playlist_webpage = self._download_webpage(
                '%s/site/ajax/drilldown/?id=%s' % (base_url, lecture_id), lecture_id)
            entries = [
                self.url_result(compat_urlparse.urljoin(url, video_url), 'Viidea')
                for _, video_url in re.findall(
                    r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)]
            playlist_entries.extend(entries)

        playlist = self.playlist_result(playlist_entries, lecture_id)
        playlist.update(lecture_info)
        return playlist
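

# A minimal usage sketch (not part of the extractor itself): the class is not
# meant to be instantiated directly; a separate script would go through
# youtube-dl's public API, which selects this extractor by matching
# _VALID_URL. The URL below is taken from _TESTS above.
#
#     from youtube_dl import YoutubeDL
#
#     with YoutubeDL() as ydl:
#         info = ydl.extract_info(
#             'http://videolectures.net/promogram_igor_mekjavic_eng/',
#             download=False)
#         print(info['id'], info['title'])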