Merge branch 'master' of github.com:rg3/youtube-dl
commit 4c21c56bfe
test/test_youtube_lists.py  (new file, 70 lines added)
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+import sys
+import unittest
+import socket
+
+# Allow direct execution
+import os
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl.InfoExtractors import YoutubePlaylistIE
+from youtube_dl.utils import *
+
+# General configuration (from __init__, not very elegant...)
+jar = compat_cookiejar.CookieJar()
+cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+proxy_handler = compat_urllib_request.ProxyHandler()
+opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+compat_urllib_request.install_opener(opener)
+socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
+
+class FakeDownloader(object):
+    def __init__(self):
+        self.result = []
+        self.params = {}
+    def to_screen(self, s):
+        print(s)
+    def trouble(self, s):
+        raise Exception(s)
+    def download(self, x):
+        self.result.append(x)
+
+class TestYoutubeLists(unittest.TestCase):
+    def test_youtube_playlist(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
+        self.assertEqual(DL.result, [
+            ['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
+            ['http://www.youtube.com/watch?v=FXxLjLQi3Fg'],
+            ['http://www.youtube.com/watch?v=tU3Bgo5qJZE']
+        ])
+
+    def test_youtube_playlist_long(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
+        self.assertTrue(len(DL.result) >= 799)
+
+    def test_youtube_course(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        # TODO find a > 100 (paginating?) videos course
+        IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
+        self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs'])
+        self.assertEqual(len(DL.result), 25)
+        self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0'])
+
+    def test_youtube_channel(self):
+        """I give up, please find a channel that does paginate and test this like test_youtube_playlist_long"""
+        pass # TODO
+
+    def test_youtube_user(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/user/TheLinuxFoundation')
+        self.assertTrue(len(DL.result) >= 320)
+
+if __name__ == '__main__':
+    unittest.main()
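Note: the FakeDownloader above stands in for youtube-dl's real FileDownloader. The info extractors only need its to_screen, trouble and download entry points, and the stub records every download request instead of fetching anything. A self-contained sketch of that interaction, using a hypothetical FakePlaylistIE in place of the real extractor:

# Self-contained sketch of the stub pattern used in the new test file.
# FakePlaylistIE is hypothetical and stands in for YoutubePlaylistIE.
class FakeDownloader(object):
    def __init__(self):
        self.result = []
        self.params = {}
    def to_screen(self, s):
        print(s)
    def trouble(self, s):
        raise Exception(s)
    def download(self, x):
        self.result.append(x)

class FakePlaylistIE(object):
    def __init__(self, downloader):
        self._downloader = downloader
    def extract(self, url):
        # A real extractor would fetch and parse playlist pages here;
        # this one just hands two fixed video URLs to the downloader.
        self._downloader.to_screen(u'[fake] extracting %s' % url)
        for vid in ('aaa', 'bbb'):
            self._downloader.download(['http://www.youtube.com/watch?v=%s' % vid])

dl = FakeDownloader()
FakePlaylistIE(dl).extract('http://example.com/playlist')
assert dl.result == [['http://www.youtube.com/watch?v=aaa'],
                     ['http://www.youtube.com/watch?v=bbb']]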
@@ -11,12 +11,12 @@
 
 class TestYoutubePlaylistMatching(unittest.TestCase):
     def test_playlist_matching(self):
-        assert YoutubePlaylistIE().suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assert YoutubePlaylistIE().suitable(u'PL63F0C78739B09958')
-        assert not YoutubePlaylistIE().suitable(u'PLtS2H6bU1M')
+        self.assertTrue(YoutubePlaylistIE().suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
+        self.assertTrue(YoutubePlaylistIE().suitable(u'PL63F0C78739B09958'))
+        self.assertFalse(YoutubePlaylistIE().suitable(u'PLtS2H6bU1M'))
 
     def test_youtube_matching(self):
-        assert YoutubeIE().suitable(u'PLtS2H6bU1M')
+        self.assertTrue(YoutubeIE().suitable(u'PLtS2H6bU1M'))
 
 if __name__ == '__main__':
     unittest.main()
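Note on the hunk above: bare assert statements are compiled away when Python runs with the -O flag, so those checks could silently pass, and unittest's assertTrue/assertFalse also report the evaluated value on failure. A tiny illustration with a made-up value:

import unittest

class AssertStyleDemo(unittest.TestCase):
    def test_assert_style(self):
        looks_like_playlist = True  # made-up stand-in for YoutubePlaylistIE().suitable(...)
        # Compiled away under `python -O`; a failure only says "AssertionError":
        assert looks_like_playlist
        # Always executed; failures report what was evaluated:
        self.assertTrue(looks_like_playlist)

if __name__ == '__main__':
    unittest.main()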
@@ -1674,7 +1674,7 @@ class YoutubePlaylistIE(InfoExtractor):
     _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
     _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
     _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&([^&"]+&)*list=.*?%s'
-    _MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
     IE_NAME = u'youtube:playlist'
 
     def __init__(self, downloader=None):
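Note: this and the following hunks appear to belong to youtube_dl/InfoExtractors.py, the module the new test imports from. The pagination indicator changes from a CSS-class regex to the literal "Next »" text; the \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK} escape is simply the » character (U+00BB). A quick check:

# -*- coding: utf-8 -*-
# Quick check that the named escape used above is the "»" arrow (U+00BB).
indicator = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
assert indicator == u"Next \u00bb"
assert len(indicator) == 6  # "Next ", then a single » code point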
@@ -1713,7 +1713,7 @@ def _real_extract(self, url):
             url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
             request = compat_urllib_request.Request(url)
             try:
-                page = compat_urllib_request.urlopen(request).read()
+                page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
@@ -1725,10 +1725,12 @@ def _real_extract(self, url):
                     ids_in_page.append(mobj.group(1))
             video_ids.extend(ids_in_page)
 
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+            if self._MORE_PAGES_INDICATOR not in page:
                 break
             pagenum = pagenum + 1
 
+        total = len(video_ids)
+
         playliststart = self._downloader.params.get('playliststart', 1) - 1
         playlistend = self._downloader.params.get('playlistend', -1)
         if playlistend == -1:
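Note: the .decode('utf8') added two hunks up and the substring test here go together; the unicode "Next »" indicator has to be checked against a decoded unicode page rather than raw bytes. A minimal sketch with a hypothetical response body:

# -*- coding: utf-8 -*-
# Minimal sketch of the new pagination check; the page content is hypothetical.
MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"

page_bytes = b'<button class="yt-uix-button-content">Next \xc2\xbb</button>'
page = page_bytes.decode('utf8')  # as now done right after urlopen(...).read()

# Testing the unicode needle against the raw bytes would raise
# UnicodeDecodeError on Python 2 (TypeError on Python 3);
# against the decoded page the membership test works.
if MORE_PAGES_INDICATOR not in page:
    print(u'last page reached')
else:
    print(u'more pages to fetch')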
@@ -1736,6 +1738,11 @@ def _real_extract(self, url):
         else:
             video_ids = video_ids[playliststart:playlistend]
 
+        if len(video_ids) == total:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
+        else:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+
         for id in video_ids:
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
         return
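Note: the new block above reports how many videos were found versus how many will actually be downloaded once the playliststart/playlistend slice is applied. A small worked example with made-up numbers:

# Worked example with made-up numbers (not actual playlist data).
video_ids = ['id%02d' % i for i in range(30)]
total = len(video_ids)                 # 30

playliststart = 5 - 1                  # params.get('playliststart', 1) - 1
playlistend = 20                       # params.get('playlistend', -1)
video_ids = video_ids[playliststart:playlistend]

if len(video_ids) == total:
    print(u'[youtube] PL %s: Found %i videos' % ('PLxxxxxxxx', total))
else:
    print(u'[youtube] PL %s: Found %i videos, downloading %i' % ('PLxxxxxxxx', total, len(video_ids)))
# -> [youtube] PL PLxxxxxxxx: Found 30 videos, downloading 16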
@@ -1746,7 +1753,7 @@ class YoutubeChannelIE(InfoExtractor):
 
     _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
     _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
-    _MORE_PAGES_INDICATOR = r'yt-uix-button-content">Next' # TODO
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
     IE_NAME = u'youtube:channel'
 
     def report_download_page(self, channel_id, pagenum):
@@ -1770,7 +1777,7 @@ def _real_extract(self, url):
             url = self._TEMPLATE_URL % (channel_id, pagenum)
             request = compat_urllib_request.Request(url)
             try:
-                page = compat_urllib_request.urlopen(request).read()
+                page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
@@ -1782,10 +1789,12 @@ def _real_extract(self, url):
                     ids_in_page.append(mobj.group(1))
             video_ids.extend(ids_in_page)
 
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+            if self._MORE_PAGES_INDICATOR not in page:
                 break
             pagenum = pagenum + 1
 
+        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
+
         for id in video_ids:
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
         return
@@ -1,2 +1,2 @@
 
-__version__ = '2012.11.29'
+__version__ = '2012.12.11'