Compare commits

...

8 Commits

Author SHA1 Message Date
McSwindler cfd5f8bc74
Merge 31b11c339b into 96da952504 2024-05-05 10:29:33 +05:30
sepro 96da952504
[core] Warn if lack of ffmpeg alters format selection (#9805)
Authored by: seproDev, pukkandan
2024-05-05 00:44:08 +02:00
bashonly bec9a59e8e
[networking] Add `extensions` attribute to `Response` (#9756)
CurlCFFIRH now provides an `impersonate` field in its responses' extensions

Authored by: bashonly
2024-05-04 22:19:42 +00:00
bashonly 036e0d92c6
[ie/patreon] Extract multiple embeds (#9850)
Closes #9848
Authored by: bashonly
2024-05-04 22:11:11 +00:00
McSwindler 31b11c339b
[watchertv] add comments for required fields
Co-authored-by: pukkandan <pukkandan.ytdlp@gmail.com>
2024-04-24 22:12:55 -05:00
McSwindler 123ac3301c [watchertv] create DropoutBase IEs for Dropout and WatcherTV to extend 2024-04-23 22:48:18 -05:00
McSwindler dd41cc4ade [watchertv] update extractor to extend dropout instead of duplicating 2024-04-21 08:51:10 -05:00
McSwindler 1b71001149 [watchertv] Add extractor 2024-04-20 12:07:02 -05:00
8 changed files with 366 additions and 167 deletions

View File

@@ -785,6 +785,25 @@ class TestHTTPImpersonateRequestHandler(TestRequestHandlerBase):
             assert res.status == 200
             assert std_headers['user-agent'].lower() not in res.read().decode().lower()
+
+    def test_response_extensions(self, handler):
+        with handler() as rh:
+            for target in rh.supported_targets:
+                request = Request(
+                    f'http://127.0.0.1:{self.http_port}/gen_200', extensions={'impersonate': target})
+                res = validate_and_send(rh, request)
+                assert res.extensions['impersonate'] == rh._get_request_target(request)
+
+    def test_http_error_response_extensions(self, handler):
+        with handler() as rh:
+            for target in rh.supported_targets:
+                request = Request(
+                    f'http://127.0.0.1:{self.http_port}/gen_404', extensions={'impersonate': target})
+                try:
+                    validate_and_send(rh, request)
+                except HTTPError as e:
+                    res = e.response
+                assert res.extensions['impersonate'] == rh._get_request_target(request)


 class TestRequestHandlerMisc:
     """Misc generic tests for request handlers, not related to request or validation testing"""

View File

@@ -2136,6 +2136,11 @@ class YoutubeDL:
     def _check_formats(self, formats):
         for f in formats:
+            working = f.get('__working')
+            if working is not None:
+                if working:
+                    yield f
+                continue
             self.to_screen('[info] Testing format %s' % f['format_id'])
             path = self.get_output_path('temp')
             if not self._ensure_dir_exists(f'{path}/'):
@@ -2152,33 +2157,44 @@ class YoutubeDL:
                         os.remove(temp_file.name)
                     except OSError:
                         self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
+            f['__working'] = success
             if success:
                 yield f
             else:
                 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])

+    def _select_formats(self, formats, selector):
+        return list(selector({
+            'formats': formats,
+            'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
+            'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats)  # No formats with video
+                                   or all(f.get('acodec') == 'none' for f in formats)),  # OR, No formats with audio
+        }))
+
     def _default_format_spec(self, info_dict, download=True):
+        download = download and not self.params.get('simulate')
+        prefer_best = download and (
+            self.params['outtmpl']['default'] == '-'
+            or info_dict.get('is_live') and not self.params.get('live_from_start'))

         def can_merge():
             merger = FFmpegMergerPP(self)
             return merger.available and merger.can_merge()

-        prefer_best = (
-            not self.params.get('simulate')
-            and download
-            and (
-                not can_merge()
-                or info_dict.get('is_live') and not self.params.get('live_from_start')
-                or self.params['outtmpl']['default'] == '-'))
-        compat = (
-            prefer_best
-            or self.params.get('allow_multiple_audio_streams', False)
-            or 'format-spec' in self.params['compat_opts'])
+        if not prefer_best and download and not can_merge():
+            prefer_best = True
+            formats = self._get_formats(info_dict)
+            evaluate_formats = lambda spec: self._select_formats(formats, self.build_format_selector(spec))
+            if evaluate_formats('b/bv+ba') != evaluate_formats('bv*+ba/b'):
+                self.report_warning('ffmpeg not found. The downloaded format may not be the best available. '
+                                    'Installing ffmpeg is strongly recommended: https://github.com/yt-dlp/yt-dlp#dependencies')

-        return (
-            'best/bestvideo+bestaudio' if prefer_best
-            else 'bestvideo*+bestaudio/best' if not compat
-            else 'bestvideo+bestaudio/best')
+        compat = (self.params.get('allow_multiple_audio_streams')
+                  or 'format-spec' in self.params['compat_opts'])
+
+        return ('best/bestvideo+bestaudio' if prefer_best
+                else 'bestvideo+bestaudio/best' if compat
+                else 'bestvideo*+bestaudio/best')

     def build_format_selector(self, format_spec):
         def syntax_error(note, start):
@@ -2928,12 +2944,7 @@ class YoutubeDL:
             self.write_debug(f'Default format spec: {req_format}')
             format_selector = self.build_format_selector(req_format)
-            formats_to_download = list(format_selector({
-                'formats': formats,
-                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
-                'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats)  # No formats with video
-                                       or all(f.get('acodec') == 'none' for f in formats)),  # OR, No formats with audio
-            }))
+            formats_to_download = self._select_formats(formats, format_selector)
             if interactive_format_selection and not formats_to_download:
                 self.report_error('Requested format is not available', tb=False, is_error=False)
                 continue
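
For illustration (not part of the patch): a minimal standalone sketch of the decision table that the reworked `_default_format_spec` returns, with the ffmpeg-detection and warning side effects omitted. The helper name is invented for this sketch.

def default_format_spec(prefer_best, compat):
    # prefer_best wins: output to stdout, live-from-edge download, or no usable ffmpeg
    if prefer_best:
        return 'best/bestvideo+bestaudio'
    # 'format-spec' in compat_opts or allow_multiple_audio_streams keep the legacy default
    return 'bestvideo+bestaudio/best' if compat else 'bestvideo*+bestaudio/best'

assert default_format_spec(prefer_best=True, compat=False) == 'best/bestvideo+bestaudio'
assert default_format_spec(prefer_best=False, compat=True) == 'bestvideo+bestaudio/best'
assert default_format_spec(prefer_best=False, compat=False) == 'bestvideo*+bestaudio/best'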

View File

@@ -2306,6 +2306,10 @@ from .washingtonpost import (
     WashingtonPostArticleIE,
 )
 from .wat import WatIE
+from .watchertv import (
+    WatcherTVSeasonIE,
+    WatcherTVIE
+)
 from .wdr import (
     WDRIE,
     WDRPageIE,

View File

@@ -17,83 +17,12 @@ from ..utils import (
 )


-class DropoutIE(InfoExtractor):
-    _LOGIN_URL = 'https://www.dropout.tv/login'
-    _NETRC_MACHINE = 'dropout'
-    _VALID_URL = r'https?://(?:www\.)?dropout\.tv/(?:[^/]+/)*videos/(?P<id>[^/]+)/?$'
-    _TESTS = [
-        {
-            'url': 'https://www.dropout.tv/game-changer/season:2/videos/yes-or-no',
-            'note': 'Episode in a series',
-            'md5': '5e000fdfd8d8fa46ff40456f1c2af04a',
-            'info_dict': {
-                'id': '738153',
-                'display_id': 'yes-or-no',
-                'ext': 'mp4',
-                'title': 'Yes or No',
-                'description': 'Ally, Brennan, and Zac are asked a simple question, but is there a correct answer?',
-                'release_date': '20200508',
-                'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/351e3f24-c4a3-459a-8b79-dc80f1e5b7fd.jpg',
-                'series': 'Game Changer',
-                'season_number': 2,
-                'season': 'Season 2',
-                'episode_number': 6,
-                'episode': 'Yes or No',
-                'duration': 1180,
-                'uploader_id': 'user80538407',
-                'uploader_url': 'https://vimeo.com/user80538407',
-                'uploader': 'OTT Videos'
-            },
-            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
-        },
-        {
-            'url': 'https://www.dropout.tv/dimension-20-fantasy-high/season:1/videos/episode-1',
-            'note': 'Episode in a series (missing release_date)',
-            'md5': '712caf7c191f1c47c8f1879520c2fa5c',
-            'info_dict': {
-                'id': '320562',
-                'display_id': 'episode-1',
-                'ext': 'mp4',
-                'title': 'The Beginning Begins',
-                'description': 'The cast introduces their PCs, including a neurotic elf, a goblin PI, and a corn-worshipping cleric.',
-                'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/4421ed0d-f630-4c88-9004-5251b2b8adfa.jpg',
-                'series': 'Dimension 20: Fantasy High',
-                'season_number': 1,
-                'season': 'Season 1',
-                'episode_number': 1,
-                'episode': 'The Beginning Begins',
-                'duration': 6838,
-                'uploader_id': 'user80538407',
-                'uploader_url': 'https://vimeo.com/user80538407',
-                'uploader': 'OTT Videos'
-            },
-            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
-        },
-        {
-            'url': 'https://www.dropout.tv/videos/misfits-magic-holiday-special',
-            'note': 'Episode not in a series',
-            'md5': 'c30fa18999c5880d156339f13c953a26',
-            'info_dict': {
-                'id': '1915774',
-                'display_id': 'misfits-magic-holiday-special',
-                'ext': 'mp4',
-                'title': 'Misfits & Magic Holiday Special',
-                'description': 'The magical misfits spend Christmas break at Gowpenny, with an unwelcome visitor.',
-                'release_date': '20211215',
-                'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/d91ea8a6-b250-42ed-907e-b30fb1c65176-8e24b8e5.jpg',
-                'duration': 11698,
-                'uploader_id': 'user80538407',
-                'uploader_url': 'https://vimeo.com/user80538407',
-                'uploader': 'OTT Videos'
-            },
-            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
-        }
-    ]
+class DropoutBaseIE(InfoExtractor):
+    """Subclasses must define _HOST"""

     def _get_authenticity_token(self, display_id):
         signin_page = self._download_webpage(
-            self._LOGIN_URL, display_id, note='Getting authenticity token')
+            f'{self._HOST}/login', display_id, note='Getting authenticity token')
         return self._html_search_regex(
             r'name=["\']authenticity_token["\'] value=["\'](.+?)["\']',
             signin_page, 'authenticity_token')
@@ -104,7 +33,7 @@ class DropoutIE(InfoExtractor):
             return True

         response = self._download_webpage(
-            self._LOGIN_URL, display_id, note='Logging in', fatal=False,
+            f'{self._HOST}/login', display_id, note='Logging in', fatal=False,
             data=urlencode_postdata({
                 'email': username,
                 'password': password,
@@ -125,7 +54,7 @@ class DropoutIE(InfoExtractor):
         display_id = self._match_id(url)

         webpage = None
-        if self._get_cookies('https://www.dropout.tv').get('_session'):
+        if self._get_cookies(self._HOST).get('_session'):
             webpage = self._download_webpage(url, display_id)
         if not webpage or '<div id="watch-unauthorized"' in webpage:
             login_err = self._login(display_id)
@@ -148,7 +77,7 @@ class DropoutIE(InfoExtractor):
         return {
             '_type': 'url_transparent',
             'ie_key': VHXEmbedIE.ie_key(),
-            'url': VHXEmbedIE._smuggle_referrer(embed_url, 'https://www.dropout.tv'),
+            'url': VHXEmbedIE._smuggle_referrer(embed_url, self._HOST),
             'id': self._search_regex(r'embed\.vhx\.tv/videos/(.+?)\?', embed_url, 'id'),
             'display_id': display_id,
             'title': title,
@@ -165,9 +94,105 @@ class DropoutIE(InfoExtractor):
         }


-class DropoutSeasonIE(InfoExtractor):
+class DropoutIE(DropoutBaseIE):
+    _HOST = 'https://www.dropout.tv'
+    _NETRC_MACHINE = 'dropout'
+    _VALID_URL = r'https?://(?:www\.)?dropout\.tv/(?:[^/]+/)*videos/(?P<id>[^/]+)/?$'
+    _TESTS = [
+        {
+            'url': 'https://www.dropout.tv/game-changer/season:2/videos/yes-or-no',
+            'note': 'Episode in a series',
+            'md5': 'fc55805bac60b1ce2ffdc35fb9c51195',
+            'info_dict': {
+                'id': '738153',
+                'display_id': 'yes-or-no',
+                'ext': 'mp4',
+                'title': 'Yes or No',
+                'description': 'Ally, Brennan, and Zac are asked a simple question, but is there a correct answer?',
+                'release_date': '20200508',
+                'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/351e3f24-c4a3-459a-8b79-dc80f1e5b7fd.jpg',
+                'series': 'Game Changer',
+                'season_number': 2,
+                'season': 'Season 2',
+                'episode_number': 6,
+                'episode': 'Yes or No',
+                'duration': 1180,
+                'uploader_id': 'user80538407',
+                'uploader_url': 'https://vimeo.com/user80538407',
+                'uploader': 'OTT Videos'
+            },
+            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
+        },
+        {
+            'url': 'https://www.dropout.tv/ch-shorts/season:1/videos/post-apocalyptic-dane-cook',
+            'note': 'Episode in a series (missing release_date)',
+            'md5': 'f260b8d7d0fdbaceae713c9196dac07f',
+            'info_dict': {
+                'id': '449042',
+                'display_id': 'post-apocalyptic-dane-cook',
+                'ext': 'mp4',
+                'title': 'Post-Apocalyptic Dane Cook',
+                'description': 'Dane Cook is back with his all new special. Don\'t worry, it\'s not the end of the world.',
+                'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/5b0678df-d9c3-4864-b811-24db03072f4a.jpg',
+                'series': 'CH Shorts',
+                'season_number': 1,
+                'season': 'Season 1',
+                'episode_number': 1,
+                'episode': 'Post-Apocalyptic Dane Cook',
+                'duration': 135,
+                'uploader_id': 'user80538407',
+                'uploader_url': 'https://vimeo.com/user80538407',
+                'uploader': 'OTT Videos'
+            },
+            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
+        },
+        {
+            'url': 'https://www.dropout.tv/videos/misfits-magic-holiday-special',
+            'note': 'Episode not in a series',
+            'md5': '147e0607bd877a791665c0b7219b512c',
+            'info_dict': {
+                'id': '1915774',
+                'display_id': 'misfits-magic-holiday-special',
+                'ext': 'mp4',
+                'title': 'Misfits & Magic Holiday Special',
+                'description': 'The magical misfits spend Christmas break at Gowpenny, with an unwelcome visitor.',
+                'release_date': '20211215',
+                'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/d91ea8a6-b250-42ed-907e-b30fb1c65176-8e24b8e5.jpg',
+                'duration': 11698,
+                'uploader_id': 'user80538407',
+                'uploader_url': 'https://vimeo.com/user80538407',
+                'uploader': 'OTT Videos'
+            },
+            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
+        }
+    ]
+
+
+class DropoutSeasonBaseIE(InfoExtractor):
+    """Subclasses must define _VIDEO_IE"""
     _PAGE_SIZE = 24
+
+    def _fetch_page(self, url, season_id, page):
+        page += 1
+        webpage = self._download_webpage(
+            f'{url}?page={page}', season_id, note=f'Downloading page {page}', expected_status={400})
+        yield from [self.url_result(item_url, self._VIDEO_IE) for item_url in traverse_obj(
+            get_elements_html_by_class('browse-item-link', webpage), (..., {extract_attributes}, 'href'))]
+
+    def _real_extract(self, url):
+        season_id = self._match_id(url)
+        season_num = self._match_valid_url(url).group('season') or 1
+        season_title = season_id.replace('-', ' ').title()
+
+        return self.playlist_result(
+            OnDemandPagedList(functools.partial(self._fetch_page, url, season_id), self._PAGE_SIZE),
+            f'{season_id}-season-{season_num}', f'{season_title} - Season {season_num}')
+
+
+class DropoutSeasonIE(DropoutSeasonBaseIE):
     _VALID_URL = r'https?://(?:www\.)?dropout\.tv/(?P<id>[^\/$&?#]+)(?:/?$|/season:(?P<season>[0-9]+)/?$)'
+    _VIDEO_IE = DropoutIE
     _TESTS = [
         {
             'url': 'https://www.dropout.tv/dimension-20-fantasy-high/season:1',
@@ -206,19 +231,3 @@ class DropoutSeasonIE(InfoExtractor):
             }
         }
     ]
-
-    def _fetch_page(self, url, season_id, page):
-        page += 1
-        webpage = self._download_webpage(
-            f'{url}?page={page}', season_id, note=f'Downloading page {page}', expected_status={400})
-        yield from [self.url_result(item_url, DropoutIE) for item_url in traverse_obj(
-            get_elements_html_by_class('browse-item-link', webpage), (..., {extract_attributes}, 'href'))]
-
-    def _real_extract(self, url):
-        season_id = self._match_id(url)
-        season_num = self._match_valid_url(url).group('season') or 1
-        season_title = season_id.replace('-', ' ').title()
-
-        return self.playlist_result(
-            OnDemandPagedList(functools.partial(self._fetch_page, url, season_id), self._PAGE_SIZE),
-            f'{season_id}-season-{season_num}', f'{season_title} - Season {season_num}')
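
For illustration (not part of the patch): a hedged sketch of the fields the two new base classes expect from a subclass. `example.tv` and both class names are hypothetical; the real subclasses are DropoutIE/DropoutSeasonIE above and WatcherTVIE/WatcherTVSeasonIE in the new watchertv.py below. The import path assumes this branch is installed.

from yt_dlp.extractor.dropout import DropoutBaseIE, DropoutSeasonBaseIE  # path assumed from this PR


class ExampleTVIE(DropoutBaseIE):
    _HOST = 'https://www.example.tv'  # required: the login URL, session cookie and VHX referrer all derive from it
    _NETRC_MACHINE = 'exampletv'
    _VALID_URL = r'https?://(?:www\.)?example\.tv/(?:[^/]+/)*videos/(?P<id>[^/]+)/?$'


class ExampleTVSeasonIE(DropoutSeasonBaseIE):
    _VIDEO_IE = ExampleTVIE  # required: the IE each playlist entry is resolved with
    # keep the 'season' group; DropoutSeasonBaseIE._real_extract reads it
    _VALID_URL = r'https?://(?:www\.)?example\.tv/(?P<id>[^\/$&?#]+)(?:/?$|/season:(?P<season>[0-9]+)/?$)'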

View File

@@ -219,7 +219,29 @@ class PatreonIE(PatreonBaseIE):
             'thumbnail': r're:^https?://.+',
         },
         'params': {'skip_download': 'm3u8'},
+    }, {
+        # multiple attachments/embeds
+        'url': 'https://www.patreon.com/posts/holy-wars-solos-100601977',
+        'playlist_count': 3,
+        'info_dict': {
+            'id': '100601977',
+            'title': '"Holy Wars" (Megadeth) Solos Transcription & Lesson/Analysis',
+            'description': 'md5:d099ab976edfce6de2a65c2b169a88d3',
+            'uploader': 'Bradley Hall',
+            'uploader_id': '24401883',
+            'uploader_url': 'https://www.patreon.com/bradleyhallguitar',
+            'channel_id': '3193932',
+            'channel_url': 'https://www.patreon.com/bradleyhallguitar',
+            'channel_follower_count': int,
+            'timestamp': 1710777855,
+            'upload_date': '20240318',
+            'like_count': int,
+            'comment_count': int,
+            'thumbnail': r're:^https?://.+',
+        },
+        'skip': 'Patron-only content',
     }]
+    _RETURN_TYPE = 'video'

     def _real_extract(self, url):
         video_id = self._match_id(url)
@@ -234,58 +256,54 @@ class PatreonIE(PatreonBaseIE):
             'include': 'audio,user,user_defined_tags,campaign,attachments_media',
         })
         attributes = post['data']['attributes']
-        title = attributes['title'].strip()
-        image = attributes.get('image') or {}
-        info = {
-            'id': video_id,
-            'title': title,
-            'description': clean_html(attributes.get('content')),
-            'thumbnail': image.get('large_url') or image.get('url'),
-            'timestamp': parse_iso8601(attributes.get('published_at')),
-            'like_count': int_or_none(attributes.get('like_count')),
-            'comment_count': int_or_none(attributes.get('comment_count')),
-        }
-        can_view_post = traverse_obj(attributes, 'current_user_can_view')
-        if can_view_post and info['comment_count']:
-            info['__post_extractor'] = self.extract_comments(video_id)
+        info = traverse_obj(attributes, {
+            'title': ('title', {str.strip}),
+            'description': ('content', {clean_html}),
+            'thumbnail': ('image', ('large_url', 'url'), {url_or_none}, any),
+            'timestamp': ('published_at', {parse_iso8601}),
+            'like_count': ('like_count', {int_or_none}),
+            'comment_count': ('comment_count', {int_or_none}),
+        })

-        for i in post.get('included', []):
-            i_type = i.get('type')
-            if i_type == 'media':
-                media_attributes = i.get('attributes') or {}
-                download_url = media_attributes.get('download_url')
+        entries = []
+        idx = 0
+        for include in traverse_obj(post, ('included', lambda _, v: v['type'])):
+            include_type = include['type']
+            if include_type == 'media':
+                media_attributes = traverse_obj(include, ('attributes', {dict})) or {}
+                download_url = url_or_none(media_attributes.get('download_url'))
                 ext = mimetype2ext(media_attributes.get('mimetype'))

                 # if size_bytes is None, this media file is likely unavailable
                 # See: https://github.com/yt-dlp/yt-dlp/issues/4608
                 size_bytes = int_or_none(media_attributes.get('size_bytes'))
                 if download_url and ext in KNOWN_EXTENSIONS and size_bytes is not None:
-                    # XXX: what happens if there are multiple attachments?
-                    return {
-                        **info,
+                    idx += 1
+                    entries.append({
+                        'id': f'{video_id}-{idx}',
                         'ext': ext,
                         'filesize': size_bytes,
                         'url': download_url,
-                    }
+                    })

-            elif i_type == 'user':
-                user_attributes = i.get('attributes')
-                if user_attributes:
-                    info.update({
-                        'uploader': user_attributes.get('full_name'),
-                        'uploader_id': str_or_none(i.get('id')),
-                        'uploader_url': user_attributes.get('url'),
-                    })
+            elif include_type == 'user':
+                info.update(traverse_obj(include, {
+                    'uploader': ('attributes', 'full_name', {str}),
+                    'uploader_id': ('id', {str_or_none}),
+                    'uploader_url': ('attributes', 'url', {url_or_none}),
+                }))

-            elif i_type == 'post_tag':
-                info.setdefault('tags', []).append(traverse_obj(i, ('attributes', 'value')))
+            elif include_type == 'post_tag':
+                if post_tag := traverse_obj(include, ('attributes', 'value', {str})):
+                    info.setdefault('tags', []).append(post_tag)

-            elif i_type == 'campaign':
-                info.update({
-                    'channel': traverse_obj(i, ('attributes', 'title')),
-                    'channel_id': str_or_none(i.get('id')),
-                    'channel_url': traverse_obj(i, ('attributes', 'url')),
-                    'channel_follower_count': int_or_none(traverse_obj(i, ('attributes', 'patron_count'))),
-                })
+            elif include_type == 'campaign':
+                info.update(traverse_obj(include, {
+                    'channel': ('attributes', 'title', {str}),
+                    'channel_id': ('id', {str_or_none}),
+                    'channel_url': ('attributes', 'url', {url_or_none}),
+                    'channel_follower_count': ('attributes', 'patron_count', {int_or_none}),
+                }))

         # handle Vimeo embeds
         if traverse_obj(attributes, ('embed', 'provider')) == 'Vimeo':
@@ -296,36 +314,50 @@ class PatreonIE(PatreonBaseIE):
                     v_url, video_id, 'Checking Vimeo embed URL',
                     headers={'Referer': 'https://patreon.com/'},
                     fatal=False, errnote=False):
-                return self.url_result(
+                entries.append(self.url_result(
                     VimeoIE._smuggle_referrer(v_url, 'https://patreon.com/'),
-                    VimeoIE, url_transparent=True, **info)
+                    VimeoIE, url_transparent=True))

         embed_url = traverse_obj(attributes, ('embed', 'url', {url_or_none}))
         if embed_url and self._request_webpage(embed_url, video_id, 'Checking embed URL', fatal=False, errnote=False):
-            return self.url_result(embed_url, **info)
+            entries.append(self.url_result(embed_url))

-        post_file = traverse_obj(attributes, 'post_file')
+        post_file = traverse_obj(attributes, ('post_file', {dict}))
         if post_file:
             name = post_file.get('name')
             ext = determine_ext(name)
             if ext in KNOWN_EXTENSIONS:
-                return {
-                    **info,
+                entries.append({
+                    'id': video_id,
                     'ext': ext,
                     'url': post_file['url'],
-                }
+                })
             elif name == 'video' or determine_ext(post_file.get('url')) == 'm3u8':
                 formats, subtitles = self._extract_m3u8_formats_and_subtitles(post_file['url'], video_id)
-                return {
-                    **info,
+                entries.append({
+                    'id': video_id,
                     'formats': formats,
                     'subtitles': subtitles,
-                }
+                })

-        if can_view_post is False:
+        can_view_post = traverse_obj(attributes, 'current_user_can_view')
+        comments = None
+        if can_view_post and info.get('comment_count'):
+            comments = self.extract_comments(video_id)
+
+        if not entries and can_view_post is False:
             self.raise_no_formats('You do not have access to this post', video_id=video_id, expected=True)
-        else:
+        elif not entries:
             self.raise_no_formats('No supported media found in this post', video_id=video_id, expected=True)
+        elif len(entries) == 1:
+            info.update(entries[0])
+        else:
+            for entry in entries:
+                entry.update(info)
+            return self.playlist_result(entries, video_id, **info, __post_extractor=comments)

+        info['id'] = video_id
+        info['__post_extractor'] = comments
         return info

     def _get_comments(self, post_id):
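
For illustration (not part of the patch): the single-entry versus playlist packaging introduced above, reduced to a standalone sketch over plain dicts. The `package` helper is invented for this sketch; the real code also threads `__post_extractor` and uses `playlist_result`.

def package(info, entries, post_id):
    if len(entries) == 1:  # one playable item: keep the old flat, single-video result
        return {**info, **entries[0], 'id': post_id}
    for entry in entries:  # several items: each entry carries the shared post metadata
        entry.update(info)
    return {'_type': 'playlist', 'id': post_id, 'entries': entries, **info}

shared = {'title': 'Example post', 'uploader': 'Example'}
assert package(dict(shared), [{'url': 'https://a', 'ext': 'mp3'}], '100601977')['ext'] == 'mp3'
assert package(dict(shared), [{'url': 'https://a'}, {'url': 'https://b'}], '100601977')['_type'] == 'playlist'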

View File

@@ -0,0 +1,110 @@
+from .dropout import DropoutBaseIE, DropoutSeasonBaseIE
+
+
+class WatcherTVIE(DropoutBaseIE):
+    _HOST = 'https://www.watchertv.com'
+    _NETRC_MACHINE = 'watchertv'
+    _VALID_URL = r'https?://(?:www\.)?watchertv\.com/(?:[^/]+/)*videos/(?P<id>[^/]+)/?$'
+    _TESTS = [
+        {
+            'url': 'https://www.watchertv.com/ghost-files/season:2/videos/gf-201',
+            'note': 'Episode in a series',
+            'md5': '99c9aab2cb62157467b7ef5e37266e4e',
+            'info_dict': {
+                'id': '3129338',
+                'display_id': 'gf-201',
+                'ext': 'mp4',
+                'title': 'The Death Row Poltergeists of Missouri State Penitentiary',
+                'description': 'Where Curiosity Meets Comedy',
+                'release_date': '20230825',
+                'thumbnail': 'https://vhx.imgix.net/watcherentertainment/assets/92c02f39-2ed6-4b51-9e63-1a907b82e2bc.png',
+                'series': 'Ghost Files',
+                'season_number': 2,
+                'season': 'Season 2',
+                'episode_number': 1,
+                'episode': 'The Death Row Poltergeists of Missouri State Penitentiary',
+                'duration': 3853,
+                'uploader_id': 'user80538407',
+                'uploader_url': 'https://vimeo.com/user80538407',
+                'uploader': 'OTT Videos'
+            },
+            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
+        },
+        {
+            'url': 'https://www.watchertv.com/road-files/season:1/videos/rf101',
+            'note': 'Episode in a series (missing release_date)',
+            'md5': '02f9aaafc8ad9bd1be366cf6a61a68d8',
+            'info_dict': {
+                'id': '3187312',
+                'display_id': 'rf101',
+                'ext': 'mp4',
+                'title': 'Road Files: Haunted Hill House',
+                'description': 'Where Curiosity Meets Comedy',
+                'thumbnail': 'https://vhx.imgix.net/watcherentertainment/assets/7445f23c-a3e7-47fb-835a-d288273e2698.png',
+                'series': 'Road Files',
+                'season_number': 1,
+                'season': 'Season 1',
+                'episode_number': 1,
+                'episode': 'Road Files: Haunted Hill House',
+                'duration': 516,
+                'uploader_id': 'user80538407',
+                'uploader_url': 'https://vimeo.com/user80538407',
+                'uploader': 'OTT Videos'
+            },
+            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
+        },
+        {
+            'url': 'https://www.watchertv.com/videos/welcome-beta-users',
+            'note': 'Episode not in a series',
+            'md5': 'fd1db805f9adc442c38d706bba21ad03',
+            'info_dict': {
+                'id': '3187107',
+                'display_id': 'welcome-beta-users',
+                'ext': 'mp4',
+                'title': 'Welcome to Watcher!',
+                'description': 'Where Curiosity Meets Comedy',
+                'release_date': '20240419',
+                'thumbnail': 'https://vhx.imgix.net/watcherentertainment/assets/fbb90dc8-ebb0-4597-9a83-95729e234030.jpg',
+                'duration': 92,
+                'uploader_id': 'user80538407',
+                'uploader_url': 'https://vimeo.com/user80538407',
+                'uploader': 'OTT Videos'
+            },
+            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
+        }
+    ]
+
+
+class WatcherTVSeasonIE(DropoutSeasonBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?watchertv\.com/(?P<id>[^\/$&?#]+)(?:/?$|/season:(?P<season>[0-9]+)/?$)'
+    _VIDEO_IE = WatcherTVIE
+    _TESTS = [
+        {
+            'url': 'https://www.watchertv.com/ghost-files/season:1',
+            'note': 'Multi-season series with the season in the url',
+            'playlist_count': 8,
+            'info_dict': {
+                'id': 'ghost-files-season-1',
+                'title': 'Ghost Files - Season 1'
+            }
+        },
+        {
+            'url': 'https://www.watchertv.com/are-you-scared',
+            'note': 'Multi-season series with the season not in the url',
+            'playlist_count': 3,
+            'info_dict': {
+                'id': 'are-you-scared-season-1',
+                'title': 'Are You Scared - Season 1'
+            }
+        },
+        {
+            'url': 'https://www.watchertv.com/watcher-one-offs',
+            'note': 'Single-season series',
+            'playlist_count': 16,
+            'info_dict': {
+                'id': 'watcher-one-offs-season-1',
+                'title': 'Watcher One Offs - Season 1'
+            }
+        }
+    ]

View File

@@ -132,6 +132,16 @@ class CurlCFFIRH(ImpersonateRequestHandler, InstanceStoreMixin):
         extensions.pop('cookiejar', None)
         extensions.pop('timeout', None)

+    def send(self, request: Request) -> Response:
+        target = self._get_request_target(request)
+        try:
+            response = super().send(request)
+        except HTTPError as e:
+            e.response.extensions['impersonate'] = target
+            raise
+        response.extensions['impersonate'] = target
+        return response
+
     def _send(self, request: Request):
         max_redirects_exceeded = False
         session: curl_cffi.requests.Session = self._get_instance(

View File

@@ -497,6 +497,7 @@ class Response(io.IOBase):
     @param headers: response headers.
     @param status: Response HTTP status code. Default is 200 OK.
     @param reason: HTTP status reason. Will use built-in reasons based on status code if not provided.
+    @param extensions: Dictionary of handler-specific response extensions.
     """

     def __init__(
@@ -505,7 +506,9 @@ class Response(io.IOBase):
             url: str,
             headers: Mapping[str, str],
             status: int = 200,
-            reason: str = None):
+            reason: str = None,
+            extensions: dict = None
+    ):

         self.fp = fp
         self.headers = Message()
@@ -517,6 +520,7 @@ class Response(io.IOBase):
             self.reason = reason or HTTPStatus(status).phrase
         except ValueError:
             self.reason = None
+        self.extensions = extensions or {}

     def readable(self):
         return self.fp.readable()
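
For illustration (not part of the patch): a small usage sketch of the new attribute, constructing a Response directly. The import path is assumed; in practice the request handler (e.g. CurlCFFIRH above) fills `extensions` in, and the real value of the `impersonate` key is an ImpersonateTarget rather than a plain string.

import io

from yt_dlp.networking.common import Response  # import path assumed

res = Response(
    fp=io.BytesIO(b'ok'),
    url='http://127.0.0.1/gen_200',
    headers={'Content-Type': 'text/plain'},
    status=200,
    extensions={'impersonate': 'chrome'},  # stand-in for what CurlCFFIRH.send() attaches
)
assert res.extensions.get('impersonate') == 'chrome'
assert res.extensions.get('missing') is None  # unset keys simply return None; default is an empty dict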