#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Ricardo Garcia Gonzalez
# Author: Danny Colligan
# Author: Benjamin Johnson
# License: Public domain code
import htmlentitydefs
import httplib
import locale
import math
import netrc
import os
import os.path
import re
import socket
import string
import subprocess
import sys
import time
import urllib
import urllib2

# parse_qs was moved from the cgi module to the urlparse module recently.
try:
    from urlparse import parse_qs
except ImportError:
    from cgi import parse_qs

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100723 Firefox/3.6.8',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
}

simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')

def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    def yield_preferredencoding():
        try:
            pref = locale.getpreferredencoding()
            u'TEST'.encode(pref)
        except:
            pref = 'UTF-8'
        while True:
            yield pref
    return yield_preferredencoding().next()

def htmlentity_transform(matchobj):
    """Transforms an HTML entity to a Unicode character.

    This function receives a match object and is intended to be used with
    the re.sub() function.
    """
    entity = matchobj.group(1)

    # Known non-numeric HTML entity
    if entity in htmlentitydefs.name2codepoint:
        return unichr(htmlentitydefs.name2codepoint[entity])

    # Numeric character reference; accept hexadecimal digits after an
    # "x" prefix as well as plain decimal references
    mobj = re.match(ur'(?u)#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith(u'x'):
            base = 16
            numstr = u'0%s' % numstr
        else:
            base = 10
        return unichr(long(numstr, base))

    # Unknown entity in name, return its literal representation
    return (u'&%s;' % entity)
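
# A quick illustration of the intended use with re.sub(), per the docstring
# above (the input string is a made-up example):
#
#   re.sub(ur'(?u)&(.+?);', htmlentity_transform, u'Tom &amp; Jerry &#33;')
#   # -> u'Tom & Jerry !'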

def sanitize_title(utitle):
    """Sanitizes a video title so it could be used as part of a filename."""
    utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
    return utitle.replace(unicode(os.sep), u'%')

def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == u'-':
            return (sys.stdout, filename)
        stream = open(filename, open_mode)
        return (stream, filename)
    except (IOError, OSError), err:
        # In case of error, try to remove win32 forbidden chars
        filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)

        # An exception here should be caught in the caller
        stream = open(filename, open_mode)
        return (stream, filename)
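
# For instance, if the filesystem rejects '?' in names, a call such as
# sanitize_open(u'clip?.flv', 'wb') retries and returns a stream for
# u'clip#.flv' (a hypothetical filename, shown only to illustrate the rewrite).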

class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """
    pass

class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass

class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """
    pass

class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass

class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """
    # Both in bytes
    downloaded = None
    expected = None

    def __init__(self, downloaded, expected):
        self.downloaded = downloaded
        self.expected = expected

class FileDownloader(object):
    """File Downloader class.

    File downloader objects are responsible for downloading the actual
    video file and writing it to disk if the user has requested it, among
    some other tasks. In most cases there should be one per program. Given
    a video URL, the downloader doesn't know how to extract all the needed
    information (that is the task of the InfoExtractors), so it has to
    pass the URL to one of them.

    For this, file downloader objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the file downloader hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    asks the FileDownloader to process the video information, possibly
    downloading the video.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The FileDownloader also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:      Username for authentication purposes.
    password:      Password for authentication purposes.
    usenetrc:      Use netrc for authentication instead.
    quiet:         Do not print messages to stdout.
    forceurl:      Force printing final URL.
    forcetitle:    Force printing title.
    simulate:      Do not download the video files.
    format:        Video format code.
    format_limit:  Highest quality format to try.
    outtmpl:       Template for output names.
    ignoreerrors:  Do not stop on download errors.
    ratelimit:     Download speed limit, in bytes/sec.
    nooverwrites:  Prevent overwriting files.
    retries:       Number of times to retry for HTTP error 503.
    continuedl:    Try to continue downloads if possible.
    noprogress:    Do not print the progress bar.
    """

    params = None
    _ies = []
    _pps = []
    _download_retcode = None
    _num_downloads = None

    def __init__(self, params):
        """Create a FileDownloader object with the given options."""
        self._ies = []
        self._pps = []
        self._download_retcode = 0
        self._num_downloads = 0
        self.params = params

    @staticmethod
    def pmkdir(filename):
        """Create directory components in filename. Similar to Unix "mkdir -p"."""
        components = filename.split(os.sep)
        aggregate = [os.sep.join(components[0:x]) for x in xrange(1, len(components))]
        aggregate = ['%s%s' % (x, os.sep) for x in aggregate] # Finish names with separator
        for dir in aggregate:
            if not os.path.exists(dir):
                os.mkdir(dir)

    @staticmethod
    def format_bytes(bytes):
        if bytes is None:
            return 'N/A'
        if type(bytes) is str:
            bytes = float(bytes)
        if bytes == 0.0:
            exponent = 0
        else:
            exponent = long(math.log(bytes, 1024.0))
        suffix = 'bkMGTPEZY'[exponent]
        converted = float(bytes) / float(1024**exponent)
        return '%.2f%s' % (converted, suffix)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        if data_len is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))

    @staticmethod
    def calc_eta(start, now, total, current):
        if total is None:
            return '--:--'
        dif = now - start
        if current == 0 or dif < 0.001: # One millisecond
            return '--:--'
        rate = float(current) / dif
        eta = long((float(total) - float(current)) / rate)
        (eta_mins, eta_secs) = divmod(eta, 60)
        if eta_mins > 99:
            return '--:--'
        return '%02d:%02d' % (eta_mins, eta_secs)

    @staticmethod
    def calc_speed(start, now, bytes):
        dif = now - start
        if bytes == 0 or dif < 0.001: # One millisecond
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return long(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return long(new_max)
        if rate < new_min:
            return long(new_min)
        return long(rate)
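
    # best_block_size() adapts the next read size to the measured transfer
    # rate: e.g. if the last 1024-byte read took 0.5s (2048 bytes/s), the
    # next block is 2048 bytes, clamped between half and double the previous
    # block (and never above the 4 MB cap). The numbers here are a worked
    # example only, computed from the definition above.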

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into a long integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return long(round(number * multiplier))
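
    # Worked example for the two byte helpers, computed from the definitions
    # above: parse_bytes('10.5M') -> long(10.5 * 1024**2) = 11010048, and
    # format_bytes(11010048) -> '10.50M'.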

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
        ie.set_downloader(self)

    def add_post_processor(self, pp):
        """Add a PostProcessor object to the end of the chain."""
        self._pps.append(pp)
        pp.set_downloader(self)

    def to_stdout(self, message, skip_eol=False, ignore_encoding_errors=False):
        """Print message to stdout if not in quiet mode."""
        try:
            if not self.params.get('quiet', False):
                print (u'%s%s' % (message, [u'\n', u''][skip_eol])).encode(preferredencoding()),
                sys.stdout.flush()
        except (UnicodeEncodeError), err:
            if not ignore_encoding_errors:
                raise

    def to_stderr(self, message):
        """Print message to stderr."""
        print >>sys.stderr, message.encode(preferredencoding())

    def fixed_template(self):
        """Checks if the output template is fixed."""
        return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)

    def trouble(self, message=None):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or not
        when errors are found, after printing the message.
        """
        if message is not None:
            self.to_stderr(message)
        if not self.params.get('ignoreerrors', False):
            raise DownloadError(message)
        self._download_retcode = 1

    def slow_down(self, start_time, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            time.sleep((byte_counter - rate_limit * elapsed) / rate_limit)
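
    # Example of the sleep computation above: with ratelimit=100000 bytes/s,
    # after 1000000 bytes in 5s (200000 bytes/s) the call sleeps
    # (1000000 - 100000*5)/100000 = 5s, so the 10-second average drops back
    # to the configured limit.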

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_stdout(u'[download] Destination: %s' % filename, ignore_encoding_errors=True)

    def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
        """Report download progress."""
        if self.params.get('noprogress', False):
            return
        self.to_stdout(u'\r[download] %s of %s at %s ETA %s' %
                (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_stdout(u'[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 503."""
        self.to_stdout(u'[download] Got HTTP error 503. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_stdout(u'[download] %s has already been downloaded' % file_name)
        except (UnicodeEncodeError), err:
            self.to_stdout(u'[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_stdout(u'[download] Unable to resume')

    def report_finish(self):
        """Report download finished."""
        if self.params.get('noprogress', False):
            self.to_stdout(u'[download] Download completed')
        else:
            self.to_stdout(u'')

    def increment_downloads(self):
        """Increment the ordinal that assigns a number to each file."""
        self._num_downloads += 1

    def process_info(self, info_dict):
        """Process a single dictionary returned by an InfoExtractor."""
        # Do nothing else if in simulate mode
        if self.params.get('simulate', False):
            # Forced printings
            if self.params.get('forcetitle', False):
                print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
            if self.params.get('forceurl', False):
                print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
            if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
                print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
            if self.params.get('forcedescription', False) and 'description' in info_dict:
                print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
            return

        try:
            template_dict = dict(info_dict)
            template_dict['epoch'] = unicode(long(time.time()))
            template_dict['ord'] = unicode('%05d' % self._num_downloads)
            filename = self.params['outtmpl'] % template_dict
        except (ValueError, KeyError), err:
            self.trouble(u'ERROR: invalid system charset or erroneous output template')
            return
        if self.params.get('nooverwrites', False) and os.path.exists(filename):
            self.to_stderr(u'WARNING: file exists: %s; skipping' % filename)
            return

        try:
            self.pmkdir(filename)
        except (OSError, IOError), err:
            self.trouble(u'ERROR: unable to create directories: %s' % str(err))
            return

        try:
            success = self._do_download(filename, info_dict['url'].encode('utf-8'), info_dict.get('player_url', None))
        except (OSError, IOError), err:
            raise UnavailableVideoError
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self.trouble(u'ERROR: unable to download video data: %s' % str(err))
            return
        except (ContentTooShortError, ), err:
            self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
            return

        if success:
            try:
                self.post_process(filename, info_dict)
            except (PostProcessingError), err:
                self.trouble(u'ERROR: postprocessing: %s' % str(err))
                return

    def download(self, url_list):
        """Download a given list of URLs."""
        if len(url_list) > 1 and self.fixed_template():
            raise SameFileError(self.params['outtmpl'])

        for url in url_list:
            suitable_found = False
            for ie in self._ies:
                # Go to next InfoExtractor if not suitable
                if not ie.suitable(url):
                    continue
                # Suitable InfoExtractor found
                suitable_found = True
                # Extract information from URL and process it
                ie.extract(url)
                # Suitable InfoExtractor had been found; go to next URL
                break
            if not suitable_found:
                self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
        return self._download_retcode

    def post_process(self, filename, ie_info):
        """Run the postprocessing chain on the given file."""
        info = dict(ie_info)
        info['filepath'] = filename
        for pp in self._pps:
            info = pp.run(info)
            if info is None:
                break

    def _download_with_rtmpdump(self, filename, url, player_url):
        self.report_destination(filename)

        # Check for rtmpdump first
        try:
            subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', filename]
        retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
        while retval == 2 or retval == 1:
            prevsize = os.path.getsize(filename)
            self.to_stdout(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
            time.sleep(5.0) # This seems to be needed
            retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
            cursize = os.path.getsize(filename)
            if prevsize == cursize and retval == 1:
                break
        if retval == 0:
            self.to_stdout(u'\r[rtmpdump] %s bytes' % os.path.getsize(filename))
            return True
        else:
            self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
            return False

    def _do_download(self, filename, url, player_url):
        # Attempt to download using rtmpdump
        if url.startswith('rtmp'):
            return self._download_with_rtmpdump(filename, url, player_url)

        stream = None
        open_mode = 'wb'
        basic_request = urllib2.Request(url, None, std_headers)
        request = urllib2.Request(url, None, std_headers)

        # Establish possible resume length
        if os.path.isfile(filename):
            resume_len = os.path.getsize(filename)
        else:
            resume_len = 0

        # Request parameters in case of being able to resume
        if self.params.get('continuedl', False) and resume_len != 0:
            self.report_resuming_byte(resume_len)
            request.add_header('Range', 'bytes=%d-' % resume_len)
            open_mode = 'ab'

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                data = urllib2.urlopen(request)
                break
            except (urllib2.HTTPError, ), err:
                if err.code != 503 and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = urllib2.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (urllib2.HTTPError, ), err:
                        if err.code != 503:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                            (resume_len - 100 < long(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation of the above condition: in issue #175 it was
                            # revealed that YouTube sometimes adds or removes a few bytes
                            # from the end of the file, changing the file size slightly
                            # and causing problems for some users. So I decided to
                            # implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than
                            # 100 bytes from the one on the hard drive.
                            self.report_file_already_downloaded(filename)
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            open_mode = 'wb'
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.trouble(u'ERROR: giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)
        data_len_str = self.format_bytes(data_len)
        byte_counter = 0
        block_size = 1024
        start = time.time()
        while True:
            # Download and write
            before = time.time()
            data_block = data.read(block_size)
            after = time.time()
            data_block_len = len(data_block)
            if data_block_len == 0:
                break
            byte_counter += data_block_len

            # Open file just in time
            if stream is None:
                try:
                    (stream, filename) = sanitize_open(filename, open_mode)
                    self.report_destination(filename)
                except (OSError, IOError), err:
                    self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError), err:
                self.trouble(u'\nERROR: unable to write data: %s' % str(err))
                return False
            block_size = self.best_block_size(after - before, data_block_len)

            # Progress message
            percent_str = self.calc_percent(byte_counter, data_len)
            eta_str = self.calc_eta(start, time.time(), data_len, byte_counter)
            speed_str = self.calc_speed(start, time.time(), byte_counter)
            self.report_progress(percent_str, data_len_str, speed_str, eta_str)

            # Apply rate limit
            self.slow_down(start, byte_counter)

        self.report_finish()
        if data_len is not None and str(byte_counter) != data_len:
            raise ContentTooShortError(byte_counter, long(data_len))
        return True

class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information from the video (or videos) the URL refers to. This
    information includes the real video URL, the video title and simplified
    title, author and others. The information is stored in a dictionary
    which is then passed to the FileDownloader. The FileDownloader
    processes this information, possibly downloading the video to the file
    system, among other possible outcomes. The dictionaries must include
    the following fields:

    id:         Video identifier.
    url:        Final video URL.
    uploader:   Nickname of the video uploader.
    title:      Literal title.
    stitle:     Simplified title.
    ext:        Video filename extension.
    format:     Video format.
    player_url: SWF Player URL (may be None).

    The following fields are optional. Their primary purpose is to allow
    youtube-dl to serve as the backend for a video search function, such
    as the one in youtube2mp3. They are only used when their respective
    forced printing functions are called:

    thumbnail:   Full URL to a video thumbnail image.
    description: One-line video description.

    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods, as well as the suitable() static method.
    Probably, they should also be instantiated and added to the main
    downloader.
    """

    _ready = False
    _downloader = None

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @staticmethod
    def suitable(url):
        """Receives a URL and returns True if suitable for this IE."""
        return False

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        self.initialize()
        return self._real_extract(url)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

class YoutubeIE(InfoExtractor):
    """Information extractor for youtube.com."""

    _VALID_URL = r'^((?:http://)?(?:youtu\.be/|(?:\w+\.)?youtube\.com/(?:(?:v/)|(?:(?:watch(?:_popup)?(?:\.php)?)?[\?#](?:.+&)?v=))))?([0-9A-Za-z_-]+)(?(1).+)?$'
    _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _LOGIN_URL = 'http://www.youtube.com/signup?next=/&gl=US&hl=en'
    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NETRC_MACHINE = 'youtube'
    # Listed in order of quality
    _available_formats = ['38', '37', '22', '45', '35', '34', '43', '18', '6', '5', '17', '13']
    _video_extensions = {
        '13': '3gp',
        '17': 'mp4',
        '18': 'mp4',
        '22': 'mp4',
        '37': 'mp4',
        '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
        '43': 'webm',
        '45': 'webm',
    }

    @staticmethod
    def suitable(url):
        return (re.match(YoutubeIE._VALID_URL, url) is not None)

    def report_lang(self):
        """Report attempt to set language."""
        self._downloader.to_stdout(u'[youtube] Setting language')

    def report_login(self):
        """Report attempt to log in."""
        self._downloader.to_stdout(u'[youtube] Logging in')

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self._downloader.to_stdout(u'[youtube] Confirming age')

    def report_video_webpage_download(self, video_id):
        """Report attempt to download video webpage."""
        self._downloader.to_stdout(u'[youtube] %s: Downloading video webpage' % video_id)

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self._downloader.to_stdout(u'[youtube] %s: Downloading video info webpage' % video_id)

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self._downloader.to_stdout(u'[youtube] %s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available."""
        self._downloader.to_stdout(u'[youtube] %s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self._downloader.to_stdout(u'[youtube] RTMP download detected')

    def _real_initialize(self):
        if self._downloader is None:
            return

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError), err:
                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
                return

        # Set language
        request = urllib2.Request(self._LANG_URL, None, std_headers)
        try:
            self.report_lang()
            urllib2.urlopen(request).read()
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
            return

        # No authentication to be performed
        if username is None:
            return

        # Log in
        login_form = {
            'current_form': 'loginForm',
            'next': '/',
            'action_login': 'Log In',
            'username': username,
            'password': password,
        }
        request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form), std_headers)
        try:
            self.report_login()
            login_results = urllib2.urlopen(request).read()
            if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
                self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
                return
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
            return

        # Confirm age
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form), std_headers)
        try:
            self.report_age_confirmation()
            age_results = urllib2.urlopen(request).read()
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
            return

    def _real_extract(self, url):
        # Extract video id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group(2)

        # Get video webpage
        self.report_video_webpage_download(video_id)
        request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en' % video_id, None, std_headers)
        try:
            video_webpage = urllib2.urlopen(request).read()
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
            return

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*"(http://.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = mobj.group(1)
        else:
            player_url = None

        # Get video info
        self.report_video_info_webpage_download(video_id)
        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
            video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                    % (video_id, el_type))
            request = urllib2.Request(video_info_url, None, std_headers)
            try:
                video_info_webpage = urllib2.urlopen(request).read()
                video_info = parse_qs(video_info_webpage)
                if 'token' in video_info:
                    break
            except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
                return
        if 'token' not in video_info:
            if 'reason' in video_info:
                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
            else:
                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
            return

        # Start extracting information
        self.report_information_extraction(video_id)

        # uploader
        if 'author' not in video_info:
            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
            return
        video_uploader = urllib.unquote_plus(video_info['author'][0])

        # title
        if 'title' not in video_info:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = urllib.unquote_plus(video_info['title'][0])
        video_title = video_title.decode('utf-8')
        video_title = sanitize_title(video_title)

        # simplified title
        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
        simple_title = simple_title.strip(ur'_')

        # thumbnail image
        if 'thumbnail_url' not in video_info:
            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
            video_thumbnail = ''
        else:   # don't panic if we can't find it
            video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])

        # description
        video_description = 'No description available.'
        if self._downloader.params.get('forcedescription', False):
            mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
            if mobj is not None:
                video_description = mobj.group(1)

        # token
        video_token = urllib.unquote_plus(video_info['token'][0])

        # Decide which formats to download
        requested_format = self._downloader.params.get('format', None)
        get_video_template = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=&ps=&asv=&fmt=%%s' % (video_id, video_token)

        if 'fmt_url_map' in video_info:
            url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
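
            # Hypothetical illustration of the fmt_url_map value the line
            # above parses: a comma-separated list of 'format|url' pairs,
            #   '22|http://example.com/hd,34|http://example.com/sd'
            # becomes {'22': 'http://example.com/hd', '34': 'http://example.com/sd'}.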

            format_limit = self._downloader.params.get('format_limit', None)
            if format_limit is not None and format_limit in self._available_formats:
                format_list = self._available_formats[self._available_formats.index(format_limit):]
            else:
                format_list = self._available_formats
            existing_formats = [x for x in format_list if x in url_map]
            if len(existing_formats) == 0:
                self._downloader.trouble(u'ERROR: no known formats available for video')
                return
            if requested_format is None:
                video_url_list = [(existing_formats[0], get_video_template % existing_formats[0])] # Best quality
            elif requested_format == '-1':
                video_url_list = [(f, get_video_template % f) for f in existing_formats] # All formats
            else:
                video_url_list = [(requested_format, get_video_template % requested_format)] # Specific format

        elif 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            video_url_list = [(None, video_info['conn'][0])]

        else:
            self._downloader.trouble(u'ERROR: no fmt_url_map or conn information found in video info')
            return

        for format_param, video_real_url in video_url_list:
            # At this point we have a new video
            self._downloader.increment_downloads()

            # Extension
            video_extension = self._video_extensions.get(format_param, 'flv')

            # Find the video URL in fmt_url_map or conn parameters
            try:
                # Process video information
                self._downloader.process_info({
                    'id': video_id.decode('utf-8'),
                    'url': video_real_url.decode('utf-8'),
                    'uploader': video_uploader.decode('utf-8'),
                    'title': video_title,
                    'stitle': simple_title,
                    'ext': video_extension.decode('utf-8'),
                    'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
                    'thumbnail': video_thumbnail.decode('utf-8'),
                    'description': video_description.decode('utf-8'),
                    'player_url': player_url,
                })
            except UnavailableVideoError, err:
                self._downloader.trouble(u'ERROR: unable to download video (format may not be available)')

class MetacafeIE(InfoExtractor):
    """Information Extractor for metacafe.com."""

    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
    _youtube_ie = None

    def __init__(self, youtube_ie, downloader=None):
        InfoExtractor.__init__(self, downloader)
        self._youtube_ie = youtube_ie

    @staticmethod
    def suitable(url):
        return (re.match(MetacafeIE._VALID_URL, url) is not None)

    def report_disclaimer(self):
        """Report disclaimer retrieval."""
        self._downloader.to_stdout(u'[metacafe] Retrieving disclaimer')

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self._downloader.to_stdout(u'[metacafe] Confirming age')

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_stdout(u'[metacafe] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_stdout(u'[metacafe] %s: Extracting information' % video_id)

    def _real_initialize(self):
        # Retrieve disclaimer
        request = urllib2.Request(self._DISCLAIMER, None, std_headers)
        try:
            self.report_disclaimer()
            disclaimer = urllib2.urlopen(request).read()
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
            return

        # Confirm age
        disclaimer_form = {
            'filters': '0',
            'submit': "Continue - I'm over 18",
        }
        request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form), std_headers)
        try:
            self.report_age_confirmation()
            disclaimer = urllib2.urlopen(request).read()
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
            return

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group(1)

        # Check if video comes from YouTube
        mobj2 = re.match(r'^yt-(.*)$', video_id)
        if mobj2 is not None:
            self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
            return

        # At this point we have a new video
        self._downloader.increment_downloads()

        simple_title = mobj.group(2).decode('utf-8')
        video_extension = 'flv'

        # Retrieve video webpage to extract further information
        request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
        try:
            self.report_download_webpage(video_id)
            webpage = urllib2.urlopen(request).read()
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
            return

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
        if mobj is not None:
            mediaURL = urllib.unquote(mobj.group(1))

            # Extract gdaKey if available
            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
            if mobj is None:
                video_url = mediaURL
            else:
                gdaKey = mobj.group(1)
                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
        else:
            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
            if mobj is None:
                self._downloader.trouble(u'ERROR: unable to extract media URL')
                return
            vardict = parse_qs(mobj.group(1))
            if 'mediaData' not in vardict:
                self._downloader.trouble(u'ERROR: unable to extract media URL')
                return
            mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
            if mobj is None:
                self._downloader.trouble(u'ERROR: unable to extract media URL')
                return
            video_url = '%s?__gda__=%s' % (mobj.group(1).replace('\\/', '/'), mobj.group(2))

        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')
        video_title = sanitize_title(video_title)

        mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
            return
        video_uploader = mobj.group(1)

        try:
            # Process video information
            self._downloader.process_info({
                'id': video_id.decode('utf-8'),
                'url': video_url.decode('utf-8'),
                'uploader': video_uploader.decode('utf-8'),
                'title': video_title,
                'stitle': simple_title,
                'ext': video_extension.decode('utf-8'),
                'format': u'NA',
                'player_url': None,
            })
        except UnavailableVideoError:
            self._downloader.trouble(u'ERROR: unable to download video')

class DailymotionIE(InfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    @staticmethod
    def suitable(url):
        return (re.match(DailymotionIE._VALID_URL, url) is not None)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_stdout(u'[dailymotion] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_stdout(u'[dailymotion] %s: Extracting information' % video_id)

    def _real_initialize(self):
        return

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        # At this point we have a new video
        self._downloader.increment_downloads()
        video_id = mobj.group(1)
        simple_title = mobj.group(2).decode('utf-8')
        video_extension = 'flv'

        # Retrieve video webpage to extract further information
        request = urllib2.Request(url)
        try:
            self.report_download_webpage(video_id)
            webpage = urllib2.urlopen(request).read()
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
            return

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'(?i)addVariable\(\"video\"\s*,\s*\"([^\"]*)\"\)', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract media URL')
            return
        mediaURL = urllib.unquote(mobj.group(1))

        # if needed add http://www.dailymotion.com/ if relative URL

        video_url = mediaURL

        # '<meta\s+name="title"\s+content="Dailymotion\s*[:\-]\s*(.*?)"\s*\/\s*>'
        mobj = re.search(r'(?im)<title>Dailymotion\s*[\-:]\s*(.+?)</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')
        video_title = sanitize_title(video_title)

        mobj = re.search(r'(?im)<div class="dmco_html owner">.*?<a class="name" href="/.+?">(.+?)</a></div>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
            return
        video_uploader = mobj.group(1)

        try:
            # Process video information
            self._downloader.process_info({
                'id': video_id.decode('utf-8'),
                'url': video_url.decode('utf-8'),
                'uploader': video_uploader.decode('utf-8'),
                'title': video_title,
                'stitle': simple_title,
                'ext': video_extension.decode('utf-8'),
                'format': u'NA',
                'player_url': None,
            })
        except UnavailableVideoError:
            self._downloader.trouble(u'ERROR: unable to download video')

class GoogleIE(InfoExtractor):
    """Information extractor for video.google.com."""

    _VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    @staticmethod
    def suitable(url):
        return (re.match(GoogleIE._VALID_URL, url) is not None)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_stdout(u'[video.google] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_stdout(u'[video.google] %s: Extracting information' % video_id)

    def _real_initialize(self):
        return

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        # At this point we have a new video
        self._downloader.increment_downloads()
        video_id = mobj.group(1)
        video_extension = 'mp4'

        # Retrieve video webpage to extract further information
        request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
        try:
            self.report_download_webpage(video_id)
            webpage = urllib2.urlopen(request).read()
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
            return

        # Extract URL, uploader, and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r"download_url:'([^']+)'", webpage)
        if mobj is None:
            video_extension = 'flv'
            mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract media URL')
            return
        mediaURL = urllib.unquote(mobj.group(1))
        mediaURL = mediaURL.replace('\\x3d', '\x3d')
        mediaURL = mediaURL.replace('\\x26', '\x26')

        video_url = mediaURL

        mobj = re.search(r'<title>(.*)</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')
        video_title = sanitize_title(video_title)
        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

        # Extract video description
        mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video description')
            return
        video_description = mobj.group(1).decode('utf-8')
        if not video_description:
            video_description = 'No description available.'

        # Extract video thumbnail
        if self._downloader.params.get('forcethumbnail', False):
            request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
            try:
                webpage = urllib2.urlopen(request).read()
            except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
                return
            mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
            if mobj is None:
                self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
                return
            video_thumbnail = mobj.group(1)
        else:   # we need something to pass to process_info
            video_thumbnail = ''

        try:
            # Process video information
            self._downloader.process_info({
                'id': video_id.decode('utf-8'),
                'url': video_url.decode('utf-8'),
                'uploader': u'NA',
                'title': video_title,
                'stitle': simple_title,
                'ext': video_extension.decode('utf-8'),
                'format': u'NA',
                'player_url': None,
            })
        except UnavailableVideoError:
            self._downloader.trouble(u'ERROR: unable to download video')

class PhotobucketIE(InfoExtractor):
    """Information extractor for photobucket.com."""

    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    @staticmethod
    def suitable(url):
        return (re.match(PhotobucketIE._VALID_URL, url) is not None)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_stdout(u'[photobucket] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_stdout(u'[photobucket] %s: Extracting information' % video_id)

    def _real_initialize(self):
        return

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        # At this point we have a new video
        self._downloader.increment_downloads()
        video_id = mobj.group(1)
        video_extension = 'flv'

        # Retrieve video webpage to extract further information
        request = urllib2.Request(url)
        try:
            self.report_download_webpage(video_id)
            webpage = urllib2.urlopen(request).read()
        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
            return

        # Extract URL, uploader, and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract media URL')
            return
        mediaURL = urllib.unquote(mobj.group(1))

        video_url = mediaURL

        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')
        video_title = sanitize_title(video_title)
        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

        video_uploader = mobj.group(2).decode('utf-8')

        try:
            # Process video information
            self._downloader.process_info({
                'id': video_id.decode('utf-8'),
                'url': video_url.decode('utf-8'),
                'uploader': video_uploader,
                'title': video_title,
                'stitle': simple_title,
                'ext': video_extension.decode('utf-8'),
                'format': u'NA',
                'player_url': None,
            })
        except UnavailableVideoError:
            self._downloader.trouble(u'ERROR: unable to download video')
  1158. class YahooIE(InfoExtractor):
  1159. """Information extractor for video.yahoo.com."""
  1160. # _VALID_URL matches all Yahoo! Video URLs
  1161. # _VPAGE_URL matches only the extractable '/watch/' URLs
  1162. _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
  1163. _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
  1164. def __init__(self, downloader=None):
  1165. InfoExtractor.__init__(self, downloader)
  1166. @staticmethod
  1167. def suitable(url):
  1168. return (re.match(YahooIE._VALID_URL, url) is not None)
  1169. def report_download_webpage(self, video_id):
  1170. """Report webpage download."""
  1171. self._downloader.to_stdout(u'[video.yahoo] %s: Downloading webpage' % video_id)
  1172. def report_extraction(self, video_id):
  1173. """Report information extraction."""
  1174. self._downloader.to_stdout(u'[video.yahoo] %s: Extracting information' % video_id)
  1175. def _real_initialize(self):
  1176. return
  1177. def _real_extract(self, url, new_video=True):
  1178. # Extract ID from URL
  1179. mobj = re.match(self._VALID_URL, url)
  1180. if mobj is None:
  1181. self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
  1182. return
  1183. # At this point we have a new video
  1184. self._downloader.increment_downloads()
  1185. video_id = mobj.group(2)
  1186. video_extension = 'flv'
  1187. # Rewrite valid but non-extractable URLs as
  1188. # extractable English language /watch/ URLs
  1189. if re.match(self._VPAGE_URL, url) is None:
  1190. request = urllib2.Request(url)
  1191. try:
  1192. webpage = urllib2.urlopen(request).read()
  1193. except (urllib2.URLError, httplib.HTTPException, socket.error), err:
  1194. self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
  1195. return
  1196. mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
  1197. if mobj is None:
  1198. self._downloader.trouble(u'ERROR: Unable to extract id field')
  1199. return
  1200. yahoo_id = mobj.group(1)
  1201. mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
  1202. if mobj is None:
  1203. self._downloader.trouble(u'ERROR: Unable to extract vid field')
  1204. return
  1205. yahoo_vid = mobj.group(1)
  1206. url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
  1207. return self._real_extract(url, new_video=False)
  1208. # Retrieve video webpage to extract further information
  1209. request = urllib2.Request(url)
  1210. try:
  1211. self.report_download_webpage(video_id)
  1212. webpage = urllib2.urlopen(request).read()
  1213. except (urllib2.URLError, httplib.HTTPException, socket.error), err:
  1214. self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
  1215. return
  1216. # Extract uploader and title from webpage
  1217. self.report_extraction(video_id)
  1218. mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
  1219. if mobj is None:
  1220. self._downloader.trouble(u'ERROR: unable to extract video title')
  1221. return
  1222. video_title = mobj.group(1).decode('utf-8')
  1223. simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
  1224. mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
  1225. if mobj is None:
  1226. self._downloader.trouble(u'ERROR: unable to extract video uploader')
  1227. return
  1228. video_uploader = mobj.group(1).decode('utf-8')

		# Extract video thumbnail
		mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
			return
		video_thumbnail = mobj.group(1).decode('utf-8')

		# Extract video description
		mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video description')
			return
		video_description = mobj.group(1).decode('utf-8')
		if not video_description:
			video_description = 'No description available.'

		# Extract video height and width
		mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video height')
			return
		yv_video_height = mobj.group(1)

		mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video width')
			return
		yv_video_width = mobj.group(1)

		# Retrieve video playlist to extract media URL
		# I'm not completely sure what all these options are, but we
		# seem to need most of them, otherwise the server sends a 401.
		yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents
		yv_bitrate = '700' # according to Wikipedia this is hard-coded
		request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
				'&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
				'&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract media URL from playlist XML
		mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Unable to extract media URL')
			return
		video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
		video_url = re.sub(r'(?u)&(.+?);', htmlentity_transform, video_url)

		try:
			# Process video information
			self._downloader.process_info({
				'id': video_id.decode('utf-8'),
				'url': video_url,
				'uploader': video_uploader,
				'title': video_title,
				'stitle': simple_title,
				'ext': video_extension.decode('utf-8'),
				# thumbnail and description were already decoded to
				# unicode above, so each is passed through exactly once
				'thumbnail': video_thumbnail,
				'description': video_description,
				'player_url': None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'ERROR: unable to download video')

class GenericIE(InfoExtractor):
	"""Generic last-resort information extractor."""

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return True
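
	# Note: since suitable() accepts every URL, the main program must
	# register this extractor last (see the main program below); the
	# more specific extractors then get the first chance at each URL.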

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_stdout(u'WARNING: Falling back on generic information extractor.')
		self._downloader.to_stdout(u'[generic] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_stdout(u'[generic] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# At this point we have a new video
		self._downloader.increment_downloads()

		video_id = url.split('/')[-1]
		request = urllib2.Request(url)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return
		except ValueError, err:
			# since this is the last-resort InfoExtractor, if
			# this error is thrown, it'll be thrown here
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# Start with something easy: JW Player in SWFObject
		mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
		if mobj is None:
			# Broaden the search a little bit
			mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
			if mobj is None:
				self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
				return

		# It's possible that one of the regexes
		# matched, but returned an empty group:
		if mobj.group(1) is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		video_url = urllib.unquote(mobj.group(1))
		video_id = os.path.basename(video_url)

		# here's a fun little line of code for you:
		video_extension = os.path.splitext(video_id)[1][1:]
		video_id = os.path.splitext(video_id)[0]
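		# For example (hypothetical URL), a video_url of
		# 'http://example.com/media/clip.flv' yields
		# video_extension 'flv' and video_id 'clip'.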

		# it's tempting to parse this further, but you would
		# have to take into account all the variations like
		#   Video Title - Site Name
		#   Site Name | Video Title
		#   Video Title - Tagline | Site Name
		# and so on and so forth; it's just not practical
		mobj = re.search(r'<title>(.*)</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		# video uploader is domain name
		mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract uploader (domain name)')
			return
		video_uploader = mobj.group(1).decode('utf-8')

		try:
			# Process video information
			self._downloader.process_info({
				'id': video_id.decode('utf-8'),
				'url': video_url.decode('utf-8'),
				'uploader': video_uploader,
				'title': video_title,
				'stitle': simple_title,
				'ext': video_extension.decode('utf-8'),
				'format': u'NA',
				'player_url': None,
			})
		except UnavailableVideoError, err:
			self._downloader.trouble(u'ERROR: unable to download video')

class YoutubeSearchIE(InfoExtractor):
	"""Information Extractor for YouTube search queries."""
	_VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
	_TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
	_VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
	_youtube_ie = None
	_max_youtube_results = 1000
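	# Query syntax accepted by _VALID_QUERY, with example search terms:
	#   ytsearch:cute cats      -> download the first result
	#   ytsearch5:cute cats     -> download the first 5 results
	#   ytsearchall:cute cats   -> download up to _max_youtube_results results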

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)

	def report_download_page(self, query, pagenum):
		"""Report attempt to download playlist page with given number."""
		query = query.decode(preferredencoding())
		self._downloader.to_stdout(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))

	def _real_initialize(self):
		self._youtube_ie.initialize()

	def _real_extract(self, query):
		mobj = re.match(self._VALID_QUERY, query)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
			return
		prefix, query = query.split(':', 1) # split on the first ':' only; queries may contain colons
		prefix = prefix[8:]
		query = query.encode('utf-8')
		if prefix == '':
			self._download_n_results(query, 1)
			return
		elif prefix == 'all':
			self._download_n_results(query, self._max_youtube_results)
			return
		else:
			try:
				n = long(prefix)
				if n <= 0:
					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
					return
				elif n > self._max_youtube_results:
					self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
					n = self._max_youtube_results
				self._download_n_results(query, n)
				return
			except ValueError: # parsing prefix as integer fails
				self._download_n_results(query, 1)
				return

	def _download_n_results(self, query, n):
		"""Downloads a specified number of results for a query"""
		video_ids = []
		already_seen = set()
		pagenum = 1

		while True:
			self.report_download_page(query, pagenum)
			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
			request = urllib2.Request(result_url, None, std_headers)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
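				# the expression above reduces a match like
				# href="/watch?v=abc123" to the bare video id
				# 'abc123' (hypothetical id)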
				if video_id not in already_seen:
					video_ids.append(video_id)
					already_seen.add(video_id)
					if len(video_ids) == n:
						# Specified n videos reached
						for id in video_ids:
							self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
						return

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				for id in video_ids:
					self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
				return

			pagenum = pagenum + 1

class GoogleSearchIE(InfoExtractor):
	"""Information Extractor for Google Video search queries."""
	_VALID_QUERY = r'gvsearch(\d+|all)?:[\s\S]+'
	_TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
	_VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
	_MORE_PAGES_INDICATOR = r'<span>Next</span>'
	_google_ie = None
	_max_google_results = 1000

	def __init__(self, google_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._google_ie = google_ie

	@staticmethod
	def suitable(url):
		return (re.match(GoogleSearchIE._VALID_QUERY, url) is not None)

	def report_download_page(self, query, pagenum):
		"""Report attempt to download playlist page with given number."""
		query = query.decode(preferredencoding())
		self._downloader.to_stdout(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))

	def _real_initialize(self):
		self._google_ie.initialize()

	def _real_extract(self, query):
		mobj = re.match(self._VALID_QUERY, query)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
			return
		prefix, query = query.split(':', 1) # split on the first ':' only; queries may contain colons
		prefix = prefix[8:]
		query = query.encode('utf-8')
		if prefix == '':
			self._download_n_results(query, 1)
			return
		elif prefix == 'all':
			self._download_n_results(query, self._max_google_results)
			return
		else:
			try:
				n = long(prefix)
				if n <= 0:
					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
					return
				elif n > self._max_google_results:
					self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
					n = self._max_google_results
				self._download_n_results(query, n)
				return
			except ValueError: # parsing prefix as integer fails
				self._download_n_results(query, 1)
				return

	def _download_n_results(self, query, n):
		"""Downloads a specified number of results for a query"""
		video_ids = []
		already_seen = set()
		pagenum = 1

		while True:
			self.report_download_page(query, pagenum)
			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
			request = urllib2.Request(result_url, None, std_headers)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				video_id = mobj.group(1)
				if video_id not in already_seen:
					video_ids.append(video_id)
					already_seen.add(video_id)
					if len(video_ids) == n:
						# Specified n videos reached
						for id in video_ids:
							self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
						return

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				for id in video_ids:
					self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
				return

			pagenum = pagenum + 1

class YahooSearchIE(InfoExtractor):
	"""Information Extractor for Yahoo! Video search queries."""
	_VALID_QUERY = r'yvsearch(\d+|all)?:[\s\S]+'
	_TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
	_VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
	_MORE_PAGES_INDICATOR = r'\s*Next'
	_yahoo_ie = None
	_max_yahoo_results = 1000

	def __init__(self, yahoo_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._yahoo_ie = yahoo_ie

	@staticmethod
	def suitable(url):
		return (re.match(YahooSearchIE._VALID_QUERY, url) is not None)

	def report_download_page(self, query, pagenum):
		"""Report attempt to download playlist page with given number."""
		query = query.decode(preferredencoding())
		self._downloader.to_stdout(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))

	def _real_initialize(self):
		self._yahoo_ie.initialize()

	def _real_extract(self, query):
		mobj = re.match(self._VALID_QUERY, query)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
			return
		prefix, query = query.split(':', 1) # split on the first ':' only; queries may contain colons
		prefix = prefix[8:]
		query = query.encode('utf-8')
		if prefix == '':
			self._download_n_results(query, 1)
			return
		elif prefix == 'all':
			self._download_n_results(query, self._max_yahoo_results)
			return
		else:
			try:
				n = long(prefix)
				if n <= 0:
					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
					return
				elif n > self._max_yahoo_results:
					self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
					n = self._max_yahoo_results
				self._download_n_results(query, n)
				return
			except ValueError: # parsing prefix as integer fails
				self._download_n_results(query, 1)
				return

	def _download_n_results(self, query, n):
		"""Downloads a specified number of results for a query"""
		video_ids = []
		already_seen = set()
		pagenum = 1

		while True:
			self.report_download_page(query, pagenum)
			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
			request = urllib2.Request(result_url, None, std_headers)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				video_id = mobj.group(1)
				if video_id not in already_seen:
					video_ids.append(video_id)
					already_seen.add(video_id)
					if len(video_ids) == n:
						# Specified n videos reached
						for id in video_ids:
							self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
						return

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				for id in video_ids:
					self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
				return

			pagenum = pagenum + 1

class YoutubePlaylistIE(InfoExtractor):
	"""Information Extractor for YouTube playlists."""
	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists)\?.*?p=|user/.*?/user/)([^&]+).*'
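	# _VALID_URL matches playlist pages such as (hypothetical id)
	#   http://www.youtube.com/view_play_list?p=ABCDEF0123456789
	# as well as the equivalent my_playlists and user/.../user/ forms.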
	_TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
	_youtube_ie = None

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)

	def report_download_page(self, playlist_id, pagenum):
		"""Report attempt to download playlist page with given number."""
		self._downloader.to_stdout(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))

	def _real_initialize(self):
		self._youtube_ie.initialize()

	def _real_extract(self, url):
		# Extract playlist id
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
			return

		# Download playlist pages
		playlist_id = mobj.group(1)
		video_ids = []
		pagenum = 1

		while True:
			self.report_download_page(playlist_id, pagenum)
			request = urllib2.Request(self._TEMPLATE_URL % (playlist_id, pagenum), None, std_headers)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			ids_in_page = []
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				if mobj.group(1) not in ids_in_page:
					ids_in_page.append(mobj.group(1))
			video_ids.extend(ids_in_page)

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				break
			pagenum = pagenum + 1

		playliststart = self._downloader.params.get('playliststart', 1)
		playliststart -= 1 # our arrays are zero-based but the playlist is 1-based
		if playliststart > 0:
			video_ids = video_ids[playliststart:]
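		# e.g. with --playlist-start 3, playliststart is 2 by now and
		# the slice above drops the first two ids, so downloading
		# begins at the third video of the playlist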
		for id in video_ids:
			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
		return

class YoutubeUserIE(InfoExtractor):
	"""Information Extractor for YouTube users."""
	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/user/(.*)'
	_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
	_VIDEO_INDICATOR = r'http://gdata.youtube.com/feeds/api/videos/(.*)' # XXX Fix this.
	_youtube_ie = None

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(YoutubeUserIE._VALID_URL, url) is not None)

	def report_download_page(self, username):
		"""Report attempt to download user page."""
		self._downloader.to_stdout(u'[youtube] user %s: Downloading page ' % (username))

	def _real_initialize(self):
		self._youtube_ie.initialize()

	def _real_extract(self, url):
		# Extract username
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
			return

		# Download user page
		username = mobj.group(1)
		video_ids = []
		pagenum = 1

		self.report_download_page(username)
		request = urllib2.Request(self._TEMPLATE_URL % (username), None, std_headers)
		try:
			page = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
			return

		# Extract video identifiers
		ids_in_page = []
		for mobj in re.finditer(self._VIDEO_INDICATOR, page):
			if mobj.group(1) not in ids_in_page:
				ids_in_page.append(mobj.group(1))
		video_ids.extend(ids_in_page)

		playliststart = self._downloader.params.get('playliststart', 1)
		playliststart = playliststart - 1 # our arrays are zero-based but the playlist is 1-based
		if playliststart > 0:
			video_ids = video_ids[playliststart:]

		for id in video_ids:
			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
		return

class PostProcessor(object):
	"""Post Processor class.

	PostProcessor objects can be added to downloaders with their
	add_post_processor() method. When the downloader has finished a
	successful download, it will take its internal chain of PostProcessors
	and start calling the run() method on each one of them, first with
	an initial argument and then with the returned value of the previous
	PostProcessor.

	The chain will be stopped if one of them ever returns None or the end
	of the chain is reached.

	PostProcessor objects follow a "mutual registration" process similar
	to InfoExtractor objects.
	"""

	_downloader = None

	def __init__(self, downloader=None):
		self._downloader = downloader

	def set_downloader(self, downloader):
		"""Sets the downloader for this PP."""
		self._downloader = downloader

	def run(self, information):
		"""Run the PostProcessor.

		The "information" argument is a dictionary like the ones
		composed by InfoExtractors. The only difference is that this
		one has an extra field called "filepath" that points to the
		downloaded file.

		When this method returns None, the postprocessing chain is
		stopped. However, this method may return an information
		dictionary that will be passed to the next postprocessing
		object in the chain. It can be the one it received after
		changing some fields.

		In addition, this method may raise a PostProcessingError
		exception that will be taken into account by the downloader
		it was called from.
		"""
		return information # by default, do nothing
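
# A minimal sketch of the protocol described above: a hypothetical post
# processor (not used by this program) that reports the final file path
# and returns the information dictionary unchanged so the chain keeps
# going. It would be registered with fd.add_post_processor(ReportPathPP()).
class ReportPathPP(PostProcessor):
	def run(self, information):
		# "filepath" is the extra field the downloader adds before
		# running the postprocessing chain
		self._downloader.to_stdout(u'[postprocess] saved to %s' % information['filepath'])
		return information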

### MAIN PROGRAM ###
if __name__ == '__main__':
	try:
		# Modules needed only when running the main program
		import getpass
		import optparse

		# Function to update the program file with the latest version from bitbucket.org
		def update_self(downloader, filename):
			# Note: downloader only used for options
			if not os.access(filename, os.W_OK):
				sys.exit('ERROR: no write permissions on %s' % filename)

			downloader.to_stdout('Updating to latest stable version...')
			latest_url = 'http://bitbucket.org/rg3/youtube-dl/raw/tip/LATEST_VERSION'
			latest_version = urllib.urlopen(latest_url).read().strip()
			prog_url = 'http://bitbucket.org/rg3/youtube-dl/raw/%s/youtube-dl' % latest_version
			newcontent = urllib.urlopen(prog_url).read()
			stream = open(filename, 'wb') # binary mode, so the downloaded bytes are written verbatim
			stream.write(newcontent)
			stream.close()
			downloader.to_stdout('Updated to version %s' % latest_version)

		# General configuration
		# Install a single opener so that the proxy handler and the
		# cookie processor are both active; a second install_opener()
		# call would replace the first opener rather than extend it.
		urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler(), urllib2.HTTPCookieProcessor()))
		socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)

		# Parse command line
		parser = optparse.OptionParser(
			usage='Usage: %prog [options] url...',
			version='2010.08.04',
			conflict_handler='resolve',
		)

		parser.add_option('-h', '--help',
				action='help', help='print this help text and exit')
		parser.add_option('-v', '--version',
				action='version', help='print program version and exit')
		parser.add_option('-U', '--update',
				action='store_true', dest='update_self', help='update this program to latest stable version')
		parser.add_option('-i', '--ignore-errors',
				action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
		parser.add_option('-r', '--rate-limit',
				dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
		parser.add_option('-R', '--retries',
				dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
		parser.add_option('--playlist-start',
				dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)

		authentication = optparse.OptionGroup(parser, 'Authentication Options')
		authentication.add_option('-u', '--username',
				dest='username', metavar='USERNAME', help='account username')
		authentication.add_option('-p', '--password',
				dest='password', metavar='PASSWORD', help='account password')
		authentication.add_option('-n', '--netrc',
				action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
		parser.add_option_group(authentication)

		video_format = optparse.OptionGroup(parser, 'Video Format Options')
		video_format.add_option('-f', '--format',
				action='store', dest='format', metavar='FORMAT', help='video format code')
		video_format.add_option('-m', '--mobile-version',
				action='store_const', dest='format', help='alias for -f 17', const='17')
		video_format.add_option('--all-formats',
				action='store_const', dest='format', help='download all available video formats', const='-1')
		video_format.add_option('--max-quality',
				action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
		video_format.add_option('-b', '--best-quality',
				action='store_true', dest='bestquality', help='download the best video quality (DEPRECATED)')
		parser.add_option_group(video_format)

		verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
		verbosity.add_option('-q', '--quiet',
				action='store_true', dest='quiet', help='activates quiet mode', default=False)
		verbosity.add_option('-s', '--simulate',
				action='store_true', dest='simulate', help='do not download video', default=False)
		verbosity.add_option('-g', '--get-url',
				action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
		verbosity.add_option('-e', '--get-title',
				action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
		verbosity.add_option('--get-thumbnail',
				action='store_true', dest='getthumbnail', help='simulate, quiet but print thumbnail URL', default=False)
		verbosity.add_option('--get-description',
				action='store_true', dest='getdescription', help='simulate, quiet but print video description', default=False)
		verbosity.add_option('--no-progress',
				action='store_true', dest='noprogress', help='do not print progress bar', default=False)
		parser.add_option_group(verbosity)

		filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
		filesystem.add_option('-t', '--title',
				action='store_true', dest='usetitle', help='use title in file name', default=False)
		filesystem.add_option('-l', '--literal',
				action='store_true', dest='useliteral', help='use literal title in file name', default=False)
		filesystem.add_option('-o', '--output',
				dest='outtmpl', metavar='TEMPLATE', help='output filename template')
		filesystem.add_option('-a', '--batch-file',
				dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
		filesystem.add_option('-w', '--no-overwrites',
				action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
		filesystem.add_option('-c', '--continue',
				action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
		parser.add_option_group(filesystem)

		(opts, args) = parser.parse_args()

		# Batch file verification
		batchurls = []
		if opts.batchfile is not None:
			try:
				if opts.batchfile == '-':
					batchfd = sys.stdin
				else:
					batchfd = open(opts.batchfile, 'r')
				batchurls = batchfd.readlines()
				batchurls = [x.strip() for x in batchurls]
				batchurls = [x for x in batchurls if len(x) > 0]
			except IOError:
				sys.exit(u'ERROR: batch file could not be read')
		all_urls = batchurls + args

		# Conflicting, missing and erroneous options
		if opts.bestquality:
			print >>sys.stderr, u'\nWARNING: -b/--best-quality IS DEPRECATED AS IT IS THE DEFAULT BEHAVIOR NOW\n'
		if opts.usenetrc and (opts.username is not None or opts.password is not None):
			parser.error(u'using .netrc conflicts with giving username/password')
		if opts.password is not None and opts.username is None:
			parser.error(u'account username missing')
		if opts.outtmpl is not None and (opts.useliteral or opts.usetitle):
			parser.error(u'using output template conflicts with using title or literal title')
		if opts.usetitle and opts.useliteral:
			parser.error(u'using title conflicts with using literal title')
		if opts.username is not None and opts.password is None:
			opts.password = getpass.getpass(u'Type account password and press return:')
		if opts.ratelimit is not None:
			numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
			if numeric_limit is None:
				parser.error(u'invalid rate limit specified')
			opts.ratelimit = numeric_limit
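		# parse_bytes understands the suffixes from the --rate-limit help
		# text (e.g. '50k' or '44.6m'); assuming 1024-based multipliers,
		# '50k' amounts to 51200 bytes per second.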
		if opts.retries is not None:
			try:
				opts.retries = long(opts.retries)
			except (TypeError, ValueError), err:
				parser.error(u'invalid retry count specified')
		if opts.playliststart is not None:
			try:
				opts.playliststart = long(opts.playliststart)
			except (TypeError, ValueError), err:
				parser.error(u'invalid playlist start number specified')

		# Information extractors
		youtube_ie = YoutubeIE()
		metacafe_ie = MetacafeIE(youtube_ie)
		dailymotion_ie = DailymotionIE()
		youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
		youtube_user_ie = YoutubeUserIE(youtube_ie)
		youtube_search_ie = YoutubeSearchIE(youtube_ie)
		google_ie = GoogleIE()
		google_search_ie = GoogleSearchIE(google_ie)
		photobucket_ie = PhotobucketIE()
		yahoo_ie = YahooIE()
		yahoo_search_ie = YahooSearchIE(yahoo_ie)
		generic_ie = GenericIE()

		# File downloader
		fd = FileDownloader({
			'usenetrc': opts.usenetrc,
			'username': opts.username,
			'password': opts.password,
			'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription),
			'forceurl': opts.geturl,
			'forcetitle': opts.gettitle,
			'forcethumbnail': opts.getthumbnail,
			'forcedescription': opts.getdescription,
			'simulate': (opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription),
			'format': opts.format,
			'format_limit': opts.format_limit,
			'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
				or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
				or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
				or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
				or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
				or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
				or u'%(id)s.%(ext)s'),
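			# e.g. with -t this selects u'%(stitle)s-%(id)s.%(ext)s',
			# giving names like Some_Video_Title-abc123.flv
			# (hypothetical id); the bare default is u'%(id)s.%(ext)s'.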
			'ignoreerrors': opts.ignoreerrors,
			'ratelimit': opts.ratelimit,
			'nooverwrites': opts.nooverwrites,
			'retries': opts.retries,
			'continuedl': opts.continue_dl,
			'noprogress': opts.noprogress,
			'playliststart': opts.playliststart,
			})
		fd.add_info_extractor(youtube_search_ie)
		fd.add_info_extractor(youtube_pl_ie)
		fd.add_info_extractor(youtube_user_ie)
		fd.add_info_extractor(metacafe_ie)
		fd.add_info_extractor(dailymotion_ie)
		fd.add_info_extractor(youtube_ie)
		fd.add_info_extractor(google_ie)
		fd.add_info_extractor(google_search_ie)
		fd.add_info_extractor(photobucket_ie)
		fd.add_info_extractor(yahoo_ie)
		fd.add_info_extractor(yahoo_search_ie)

		# This must come last since it's the
		# fallback if none of the others work
		fd.add_info_extractor(generic_ie)

		# Update version
		if opts.update_self:
			update_self(fd, sys.argv[0])

		# Maybe do nothing
		if len(all_urls) < 1:
			if not opts.update_self:
				parser.error(u'you must provide at least one URL')
			else:
				sys.exit()

		retcode = fd.download(all_urls)
		sys.exit(retcode)

	except DownloadError:
		sys.exit(1)
	except SameFileError:
		sys.exit(u'ERROR: fixed output name but more than one file to download')
	except KeyboardInterrupt:
		sys.exit(u'\nERROR: Interrupted by user')