From 876f1c17fff194cbed3595bb2a8497ea9e479bf7 Mon Sep 17 00:00:00 2001
From: Ali Sherief
Date: Mon, 9 Nov 2020 16:06:48 +0000
Subject: [PATCH 1/4] Fix #93 YoutubePlaylistsIE

---
 youtube_dlc/extractor/youtube.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/youtube_dlc/extractor/youtube.py b/youtube_dlc/extractor/youtube.py
index 3ec2581dc..35ac67b49 100644
--- a/youtube_dlc/extractor/youtube.py
+++ b/youtube_dlc/extractor/youtube.py
@@ -300,11 +300,12 @@ class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
     # Extract entries from page with "Load more" button
     def _entries(self, page, playlist_id):
         more_widget_html = content_html = page
+        mobj_reg = r'(?:(?:data-uix-load-more-href="[^"]+?;continuation=)|(?:"continuation":"))(?P<more>[^"]+)"'
         for page_num in itertools.count(1):
             for entry in self._process_page(content_html):
                 yield entry

-            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
+            mobj = re.search(mobj_reg, more_widget_html)
             if not mobj:
                 break

@@ -315,7 +316,7 @@ def _entries(self, page, playlist_id):
                     # Downloading page may result in intermittent 5xx HTTP error
                     # that is usually worked around with a retry
                     more = self._download_json(
-                        'https://www.youtube.com/%s' % mobj.group('more'), playlist_id,
+                        'https://www.youtube.com/browse_ajax?ctoken=%s' % mobj.group('more'), playlist_id,
                         'Downloading page #%s%s'
                         % (page_num, ' (retry #%d)' % count if count else ''),
                         transform_source=uppercase_escape,
@@ -372,7 +373,7 @@ def extract_videos_from_page(self, page):
 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
     def _process_page(self, content):
         for playlist_id in orderedSet(re.findall(
-                r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
+                r'"/?playlist\?list=([0-9A-Za-z-_]{10,})"',
                 content)):
             yield self.url_result(
                 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')

From da8fb75df5aa3a6bdda2afbe7bec7da905f0618a Mon Sep 17 00:00:00 2001
From: Tom-Oliver Heidel
Date: Tue, 10 Nov 2020 01:19:33 +0100
Subject: [PATCH 2/4] [skip travis] adjust python versions

---
 .github/workflows/build.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 74b50ecca..4920a30b8 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -82,7 +82,7 @@ jobs:
     - name: Set up Python
       uses: actions/setup-python@v2
       with:
-        python-version: '3.x'
+        python-version: '3.8'
    - name: Install Requirements
      run: pip install pyinstaller
    - name: Bump version
@@ -116,7 +116,7 @@ jobs:
    - name: Set up Python 3.5.4 32-Bit
      uses: actions/setup-python@v2
      with:
-        python-version: '3.5.4'
+        python-version: '3.4.4'
        architecture: 'x86'
    - name: Install Requirements for 32 Bit
      run: pip install pyinstaller==3.5

From 8f109ad4ad6bc734f817ccf3daefb9ed603d7480 Mon Sep 17 00:00:00 2001
From: Roman Karwacik
Date: Tue, 10 Nov 2020 10:39:57 +0100
Subject: [PATCH 3/4] [zoom] Fix url parsing for url's containing /share/ and dots

---
 youtube_dlc/extractor/zoom.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dlc/extractor/zoom.py b/youtube_dlc/extractor/zoom.py
index 003e1f901..038a90297 100644
--- a/youtube_dlc/extractor/zoom.py
+++ b/youtube_dlc/extractor/zoom.py
@@ -13,7 +13,7 @@

 class ZoomIE(InfoExtractor):
     IE_NAME = 'zoom'
-    _VALID_URL = r'https://(?:.*).?zoom.us/rec(?:ording)?/play/(?P<id>[A-Za-z0-9\-_]+)'
+    _VALID_URL = r'https://(?:.*).?zoom.us/rec(?:ording)?/(play|share)/(?P<id>[A-Za-z0-9\-_.]+)'
     _TEST = {
         'url': 'https://zoom.us/recording/play/SILVuCL4bFtRwWTtOCFQQxAsBQsJljFtm9e4Z_bvo-A8B-nzUSYZRNuPl3qW5IGK',
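For context on patch 3, a minimal standalone sketch of what the widened Zoom pattern now accepts, using only Python's re module (the /share/ URL below is a made-up example, not taken from the patch):

    import re

    # Same pattern as the new _VALID_URL: allows /play/ and /share/, and dots in the id
    VALID_URL = r'https://(?:.*).?zoom.us/rec(?:ording)?/(play|share)/(?P<id>[A-Za-z0-9\-_.]+)'

    for url in (
        'https://zoom.us/recording/play/SILVuCL4bFtRwWTtOCFQQxAsBQsJljFtm9e4Z_bvo-A8B-nzUSYZRNuPl3qW5IGK',
        'https://example.zoom.us/rec/share/some-id.with.dots_and-dashes',  # hypothetical /share/ URL
    ):
        m = re.match(VALID_URL, url)
        print(m.group('id') if m else 'no match')
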
From 002ea8fe172c0bf234fd15d3775a527706843fc3 Mon Sep 17 00:00:00 2001
From: pukkandan
Date: Tue, 27 Oct 2020 16:48:23 +0530
Subject: [PATCH 4/4] Fix external downloader when there is no http_header

---
 youtube_dlc/downloader/external.py | 29 ++++++++++++++++++-----------
 1 file changed, 18 insertions(+), 11 deletions(-)

diff --git a/youtube_dlc/downloader/external.py b/youtube_dlc/downloader/external.py
index c31f8910a..d2f8f271d 100644
--- a/youtube_dlc/downloader/external.py
+++ b/youtube_dlc/downloader/external.py
@@ -115,8 +115,10 @@ class CurlFD(ExternalFD):

     def _make_cmd(self, tmpfilename, info_dict):
         cmd = [self.exe, '--location', '-o', tmpfilename]
-        for key, val in info_dict['http_headers'].items():
-            cmd += ['--header', '%s: %s' % (key, val)]
+        if info_dict.get('http_headers') is not None:
+            for key, val in info_dict['http_headers'].items():
+                cmd += ['--header', '%s: %s' % (key, val)]
+
         cmd += self._bool_option('--continue-at', 'continuedl', '-', '0')
         cmd += self._valueless_option('--silent', 'noprogress')
         cmd += self._valueless_option('--verbose', 'verbose')
@@ -150,8 +152,9 @@ class AxelFD(ExternalFD):

     def _make_cmd(self, tmpfilename, info_dict):
         cmd = [self.exe, '-o', tmpfilename]
-        for key, val in info_dict['http_headers'].items():
-            cmd += ['-H', '%s: %s' % (key, val)]
+        if info_dict.get('http_headers') is not None:
+            for key, val in info_dict['http_headers'].items():
+                cmd += ['-H', '%s: %s' % (key, val)]
         cmd += self._configuration_args()
         cmd += ['--', info_dict['url']]
         return cmd
@@ -162,8 +165,9 @@ class WgetFD(ExternalFD):

     def _make_cmd(self, tmpfilename, info_dict):
         cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
-        for key, val in info_dict['http_headers'].items():
-            cmd += ['--header', '%s: %s' % (key, val)]
+        if info_dict.get('http_headers') is not None:
+            for key, val in info_dict['http_headers'].items():
+                cmd += ['--header', '%s: %s' % (key, val)]
         cmd += self._option('--limit-rate', 'ratelimit')
         retry = self._option('--tries', 'retries')
         if len(retry) == 2:
@@ -189,8 +193,9 @@ def _make_cmd(self, tmpfilename, info_dict):
         if dn:
             cmd += ['--dir', dn]
         cmd += ['--out', os.path.basename(tmpfilename)]
-        for key, val in info_dict['http_headers'].items():
-            cmd += ['--header', '%s: %s' % (key, val)]
+        if info_dict.get('http_headers') is not None:
+            for key, val in info_dict['http_headers'].items():
+                cmd += ['--header', '%s: %s' % (key, val)]
         cmd += self._option('--interface', 'source_address')
         cmd += self._option('--all-proxy', 'proxy')
         cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
@@ -206,8 +211,10 @@ def available(cls):

     def _make_cmd(self, tmpfilename, info_dict):
         cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
-        for key, val in info_dict['http_headers'].items():
-            cmd += ['%s:%s' % (key, val)]
+
+        if info_dict.get('http_headers') is not None:
+            for key, val in info_dict['http_headers'].items():
+                cmd += ['%s:%s' % (key, val)]
         return cmd


@@ -253,7 +260,7 @@ def _call_downloader(self, tmpfilename, info_dict):
        #     if end_time:
        #         args += ['-t', compat_str(end_time - start_time)]

-        if info_dict['http_headers'] and re.match(r'^https?://', url):
+        if info_dict.get('http_headers') is not None and re.match(r'^https?://', url):
             # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
             # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
             headers = handle_youtubedl_headers(info_dict['http_headers'])
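
Patch 4 applies the same guard in every _make_cmd: only iterate over http_headers when the key is present and not None. A minimal standalone sketch of that pattern (hypothetical header values, not youtube-dlc code):

    # Guard against info_dict entries that lack 'http_headers' or carry None
    def build_header_args(info_dict):
        cmd = []
        if info_dict.get('http_headers') is not None:
            for key, val in info_dict['http_headers'].items():
                cmd += ['--header', '%s: %s' % (key, val)]
        return cmd

    print(build_header_args({'url': 'https://example.com'}))                   # []
    print(build_header_args({'http_headers': {'User-Agent': 'yt-dlc-test'}}))  # ['--header', 'User-Agent: yt-dlc-test']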