[googlesearch] Fix start, and skip playlists (Fixes #2329)
commit ccf9114e84
parent 211e17dd43
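
For context (not part of the commit): the "fix start" half of the change concerns the page counter in the extractor hunk below. The old code paged with itertools.count(1) while requesting start=pagenum*10, so the very first request already asked Google for results 10 and up; counting from 0 fetches the first page first, and the progress note compensates with str(pagenum + 1). A minimal runnable sketch of the two start sequences:

import itertools

# Sketch only: compare the 'start' values the old and new counters generate.
old_starts = [pagenum * 10 for pagenum in itertools.islice(itertools.count(1), 3)]
new_starts = [pagenum * 10 for pagenum in itertools.islice(itertools.count(), 3)]

print(old_starts)  # [10, 20, 30] -- the first ten results were never requested
print(new_starts)  # [0, 10, 20]  -- paging now begins at the first result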
@@ -34,6 +34,7 @@
     KhanAcademyIE,
     EveryonesMixtapeIE,
     RutubeChannelIE,
+    GoogleSearchIE,
     GenericIE,
 )
 
@@ -240,6 +241,14 @@ def test_multiple_brightcove_videos(self):
         self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker')
         self.assertEqual(len(result['entries']), 3)
 
+    def test_GoogleSearch(self):
+        dl = FakeYDL()
+        ie = GoogleSearchIE(dl)
+        result = ie.extract('gvsearch15:python language')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'python language')
+        self.assertEqual(result['title'], 'python language')
+        self.assertTrue(len(result['entries']) == 15)
 
 if __name__ == '__main__':
     unittest.main()
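
A note on the new test (the file is not named in this view; the imports and the assertIsPlaylist helper suggest it is test/test_playlists.py): the 'gvsearch15:' prefix comes from the extractor's _SEARCH_KEY, and the search-extractor base class parses the numeric suffix and requests that many results, which is why exactly 15 entries are asserted. A rough sketch of that key parsing, assuming the base-class behaviour rather than quoting it:

import re

def parse_search_key(url, search_key='gvsearch'):
    # Assumed behaviour: split "gvsearch15:python language" into the query
    # and the number of results to fetch.
    mobj = re.match(r'%s(?P<prefix>\d+|all)?:(?P<query>.+)' % search_key, url)
    if mobj is None:
        raise ValueError('invalid search query: ' + url)
    prefix = mobj.group('prefix')
    n = 1 if not prefix else ('all' if prefix == 'all' else int(prefix))
    return mobj.group('query'), n

print(parse_search_key('gvsearch15:python language'))  # ('python language', 15)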
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import itertools
 import re
 
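
The new __future__ import explains the u'' prefixes being dropped in the next hunk: with unicode_literals, bare string literals are already unicode on Python 2. A one-line illustration (not from the commit):

from __future__ import unicode_literals

# On Python 2 this prints <type 'unicode'>; on Python 3 it is simply str.
print(type('Google Video search'))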
@@ -8,32 +10,42 @@
 
 
 class GoogleSearchIE(SearchInfoExtractor):
-    IE_DESC = u'Google Video search'
-    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
+    IE_DESC = 'Google Video search'
     _MAX_RESULTS = 1000
-    IE_NAME = u'video.google:search'
+    IE_NAME = 'video.google:search'
     _SEARCH_KEY = 'gvsearch'
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
 
+        entries = []
         res = {
             '_type': 'playlist',
             'id': query,
-            'entries': []
+            'title': query,
         }
 
-        for pagenum in itertools.count(1):
-            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
-                                             note='Downloading result page ' + str(pagenum))
+        for pagenum in itertools.count():
+            result_url = (
+                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
+                % (compat_urllib_parse.quote_plus(query), pagenum * 10))
 
-            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
-                e = {
+            webpage = self._download_webpage(
+                result_url, 'gvsearch:' + query,
+                note='Downloading result page ' + str(pagenum + 1))
+
+            for hit_idx, mobj in enumerate(re.finditer(
+                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
+
+                # Skip playlists
+                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
+                    continue
+
+                entries.append({
                     '_type': 'url',
                     'url': mobj.group(1)
-                }
-                res['entries'].append(e)
+                })
 
-            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+            if (len(entries) >= n) or not re.search(r'class="pn" id="pnnext"', webpage):
+                res['entries'] = entries[:n]
                 return res
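
For context (not part of the commit): the "skip playlists" half of the change relies on the search page markup, where, as the new check implies, a plain video hit has a matching id="vidthumbN" anchor while a playlist hit does not. A self-contained sketch of that filtering against a toy page:

import re

# Toy page (made up for illustration): hits 1 and 3 have vidthumb anchors,
# hit 2 stands in for a playlist result and has none.
SAMPLE_PAGE = '''
<h3 class="r"><a href="http://example.com/video1">Video 1</a></h3>
<span id="vidthumb1"></span>
<h3 class="r"><a href="http://example.com/playlist">Playlist hit</a></h3>
<h3 class="r"><a href="http://example.com/video3">Video 3</a></h3>
<span id="vidthumb3"></span>
'''

entries = []
for hit_idx, mobj in enumerate(re.finditer(
        r'<h3 class="r"><a href="([^"]+)"', SAMPLE_PAGE)):
    # Same test as the commit: no matching vidthumb id means skip the hit.
    if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), SAMPLE_PAGE):
        continue
    entries.append({'_type': 'url', 'url': mobj.group(1)})

print([e['url'] for e in entries])
# ['http://example.com/video1', 'http://example.com/video3']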