@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import itertools
 import re
 
@@ -8,32 +10,42 @@ from ..utils import (
 
 
 class GoogleSearchIE(SearchInfoExtractor):
-    IE_DESC = u'Google Video search'
-    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
+    IE_DESC = 'Google Video search'
     _MAX_RESULTS = 1000
-    IE_NAME = u'video.google:search'
+    IE_NAME = 'video.google:search'
     _SEARCH_KEY = 'gvsearch'
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
 
+        entries = []
         res = {
             '_type': 'playlist',
             'id': query,
-            'entries': []
+            'title': query,
         }
 
-        for pagenum in itertools.count(1):
-            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
-                                             note='Downloading result page ' + str(pagenum))
+        for pagenum in itertools.count():
+            result_url = (
+                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
+                % (compat_urllib_parse.quote_plus(query), pagenum * 10))
+
+            webpage = self._download_webpage(
+                result_url, 'gvsearch:' + query,
+                note='Downloading result page ' + str(pagenum + 1))
 
-            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
-                e = {
+            for hit_idx, mobj in enumerate(re.finditer(
+                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
+                # Skip playlists
+                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
+                    continue
+
+                entries.append({
                     '_type': 'url',
                     'url': mobj.group(1)
-                }
-                res['entries'].append(e)
+                })
 
-            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+            if (len(entries) >= n) or not re.search(
+                    r'class="pn" id="pnnext"', webpage):
+                res['entries'] = entries[:n]
                 return res
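
A minimal usage sketch of the changed code path, assuming youtube-dl is installed and importable (the query string below is arbitrary; SearchInfoExtractor derives the gvsearch<N>:<query> scheme from _SEARCH_KEY, so this exercises the new pagination and playlist skipping through the public YoutubeDL API):

    from __future__ import unicode_literals

    import youtube_dl

    # 'extract_flat' keeps the '_type': 'url' entries unresolved, so only
    # the Google result pages themselves are downloaded.
    ydl = youtube_dl.YoutubeDL({'quiet': True, 'extract_flat': True})

    # 'gvsearch5:...' asks _get_n_results for the first five hits.
    playlist = ydl.extract_info('gvsearch5:public domain films',
                                download=False)
    print(playlist['id'], playlist['title'])  # both set to the query string
    for entry in playlist['entries']:
        print(entry['url'])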