Compare commits

...

18 Commits

Author SHA1 Message Date
Kieran 28b5297df1
Merge 6c8ede8188 into 96da952504 2024-05-05 10:29:33 +05:30
sepro 96da952504
[core] Warn if lack of ffmpeg alters format selection (#9805)
Authored by: seproDev, pukkandan
2024-05-05 00:44:08 +02:00
bashonly bec9a59e8e
[networking] Add `extensions` attribute to `Response` (#9756)
CurlCFFIRH now provides an `impersonate` field in its responses' extensions

Authored by: bashonly
2024-05-04 22:19:42 +00:00
bashonly 036e0d92c6
[ie/patreon] Extract multiple embeds (#9850)
Closes #9848
Authored by: bashonly
2024-05-04 22:11:11 +00:00
Kieran Eglin 6c8ede8188
Fixed embedding filepath issue for subs and infojson 2024-04-26 16:16:56 -07:00
Kieran Eglin 3046c17822
Fixed filepath bug when embedding thumbnails 2024-04-26 15:51:37 -07:00
Kieran Eglin dd986a4149
Linter 2024-04-26 15:31:22 -07:00
Kieran Eglin c3fccc58cf
Updated logic for determining file extensions 2024-04-26 15:26:26 -07:00
Kieran Eglin 28d5051546
Reverted pre/post_process function signature 2024-04-26 14:17:18 -07:00
Kieran Eglin a1ff1d4272
Reverted unrelated changes 2024-04-26 13:55:07 -07:00
Kieran Eglin 0a3c5aceb5
Removed now-unneeded thumbnail/subtitle return values 2024-04-24 11:08:21 -07:00
Kieran Eglin 9c3b227db8
Removed files_to_move logic 2024-04-24 11:02:26 -07:00
Kieran Eglin ea2a085397
Fixed up tests and linting 2024-04-24 10:57:39 -07:00
Kieran Eglin 44bb6c2056
[WIP] got refactor of file mover basically working 2024-04-24 09:47:58 -07:00
Kieran Eglin fe4a15ff75
First pass at test feedback 2024-04-23 11:54:38 -07:00
Kieran Eglin 5d51ddbbfc
Removed unneeded conditionals + return 2024-04-23 11:04:46 -07:00
Kieran Eglin c9d8184fe6
Ran flake8 2024-04-23 11:00:15 -07:00
Kieran Eglin c574be85f1
Refactored MoveFilesPP to respect non-video files 2024-04-23 10:22:36 -07:00
10 changed files with 281 additions and 155 deletions

View File

@@ -874,32 +874,33 @@ class TestYoutubeDL(unittest.TestCase):
}), r'^30fps$')
def test_postprocessors(self):
filename = 'post-processor-testfile.mp4'
audiofile = filename + '.mp3'
filename = 'post-processor-testfile'
video_file = filename + '.mp4'
audio_file = filename + '.mp3'
class SimplePP(PostProcessor):
def run(self, info):
with open(audiofile, 'w') as f:
with open(audio_file, 'w') as f:
f.write('EXAMPLE')
return [info['filepath']], info
def run_pp(params, PP):
with open(filename, 'w') as f:
with open(video_file, 'w') as f:
f.write('EXAMPLE')
ydl = YoutubeDL(params)
ydl.add_post_processor(PP())
ydl.post_process(filename, {'filepath': filename})
ydl.post_process(video_file, {'filepath': video_file})
run_pp({'keepvideo': True}, SimplePP)
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(filename)
os.unlink(audiofile)
run_pp({'keepvideo': True, 'outtmpl': filename}, SimplePP)
self.assertTrue(os.path.exists(video_file), '%s doesn\'t exist' % video_file)
self.assertTrue(os.path.exists(audio_file), '%s doesn\'t exist' % audio_file)
os.unlink(video_file)
os.unlink(audio_file)
run_pp({'keepvideo': False}, SimplePP)
self.assertFalse(os.path.exists(filename), '%s exists' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(audiofile)
run_pp({'keepvideo': False, 'outtmpl': filename}, SimplePP)
self.assertFalse(os.path.exists(video_file), '%s exists' % video_file)
self.assertTrue(os.path.exists(audio_file), '%s doesn\'t exist' % audio_file)
os.unlink(audio_file)
class ModifierPP(PostProcessor):
def run(self, info):
@@ -907,9 +908,9 @@ class TestYoutubeDL(unittest.TestCase):
f.write('MODIFIED')
return [], info
run_pp({'keepvideo': False}, ModifierPP)
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
os.unlink(filename)
run_pp({'keepvideo': False, 'outtmpl': filename}, ModifierPP)
self.assertTrue(os.path.exists(video_file), '%s doesn\'t exist' % video_file)
os.unlink(video_file)
def test_match_filter(self):
first = {
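
For readers skimming the test change above: the single `filename` is split into `video_file`/`audio_file` and `outtmpl` is passed explicitly, apparently so the refactored MoveFiles logic can resolve the expected output name. As a reminder of the contract the test exercises, a minimal sketch of a post-processor (names are illustrative, not from this diff): `run()` receives the info dict and returns the files to delete plus the possibly modified dict, and `keepvideo` decides whether those files are actually removed.

from yt_dlp.postprocessor.common import PostProcessor

class ExampleAudioPP(PostProcessor):  # hypothetical PP, mirrors SimplePP above
    def run(self, info):
        audio_file = info['filepath'].rsplit('.', 1)[0] + '.mp3'
        with open(audio_file, 'w') as f:
            f.write('EXAMPLE')  # stand-in for a real conversion step
        # Listing the source file marks it for deletion unless keepvideo is set
        return [info['filepath']], info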

View File

@@ -785,6 +785,25 @@ class TestHTTPImpersonateRequestHandler(TestRequestHandlerBase):
assert res.status == 200
assert std_headers['user-agent'].lower() not in res.read().decode().lower()
def test_response_extensions(self, handler):
with handler() as rh:
for target in rh.supported_targets:
request = Request(
f'http://127.0.0.1:{self.http_port}/gen_200', extensions={'impersonate': target})
res = validate_and_send(rh, request)
assert res.extensions['impersonate'] == rh._get_request_target(request)
def test_http_error_response_extensions(self, handler):
with handler() as rh:
for target in rh.supported_targets:
request = Request(
f'http://127.0.0.1:{self.http_port}/gen_404', extensions={'impersonate': target})
try:
validate_and_send(rh, request)
except HTTPError as e:
res = e.response
assert res.extensions['impersonate'] == rh._get_request_target(request)
class TestRequestHandlerMisc:
"""Misc generic tests for request handlers, not related to request or validation testing"""

View File

@@ -2136,6 +2136,11 @@ class YoutubeDL:
def _check_formats(self, formats):
for f in formats:
working = f.get('__working')
if working is not None:
if working:
yield f
continue
self.to_screen('[info] Testing format %s' % f['format_id'])
path = self.get_output_path('temp')
if not self._ensure_dir_exists(f'{path}/'):
@@ -2152,33 +2157,44 @@
os.remove(temp_file.name)
except OSError:
self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
f['__working'] = success
if success:
yield f
else:
self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
def _select_formats(self, formats, selector):
return list(selector({
'formats': formats,
'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats) # No formats with video
or all(f.get('acodec') == 'none' for f in formats)), # OR, No formats with audio
}))
def _default_format_spec(self, info_dict, download=True):
download = download and not self.params.get('simulate')
prefer_best = download and (
self.params['outtmpl']['default'] == '-'
or info_dict.get('is_live') and not self.params.get('live_from_start'))
def can_merge():
merger = FFmpegMergerPP(self)
return merger.available and merger.can_merge()
prefer_best = (
not self.params.get('simulate')
and download
and (
not can_merge()
or info_dict.get('is_live') and not self.params.get('live_from_start')
or self.params['outtmpl']['default'] == '-'))
compat = (
prefer_best
or self.params.get('allow_multiple_audio_streams', False)
or 'format-spec' in self.params['compat_opts'])
if not prefer_best and download and not can_merge():
prefer_best = True
formats = self._get_formats(info_dict)
evaluate_formats = lambda spec: self._select_formats(formats, self.build_format_selector(spec))
if evaluate_formats('b/bv+ba') != evaluate_formats('bv*+ba/b'):
self.report_warning('ffmpeg not found. The downloaded format may not be the best available. '
'Installing ffmpeg is strongly recommended: https://github.com/yt-dlp/yt-dlp#dependencies')
return (
'best/bestvideo+bestaudio' if prefer_best
else 'bestvideo*+bestaudio/best' if not compat
else 'bestvideo+bestaudio/best')
compat = (self.params.get('allow_multiple_audio_streams')
or 'format-spec' in self.params['compat_opts'])
return ('best/bestvideo+bestaudio' if prefer_best
else 'bestvideo+bestaudio/best' if compat
else 'bestvideo*+bestaudio/best')
def build_format_selector(self, format_spec):
def syntax_error(note, start):
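
In rough terms, the reworked `_default_format_spec` falls back to pre-merged formats when ffmpeg cannot merge, and warns only if that fallback would actually change what gets selected. A simplified sketch of that idea (not the exact code, which also handles output to stdout, live streams and compat options):

def choose_default_spec(ydl, formats, can_merge):
    evaluate = lambda spec: ydl._select_formats(formats, ydl.build_format_selector(spec))
    if not can_merge and evaluate('b/bv+ba') != evaluate('bv*+ba/b'):
        ydl.report_warning(
            'ffmpeg not found. The downloaded format may not be the best available. '
            'Installing ffmpeg is strongly recommended: https://github.com/yt-dlp/yt-dlp#dependencies')
    return 'best/bestvideo+bestaudio' if not can_merge else 'bestvideo*+bestaudio/best'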
@@ -2928,12 +2944,7 @@ class YoutubeDL:
self.write_debug(f'Default format spec: {req_format}')
format_selector = self.build_format_selector(req_format)
formats_to_download = list(format_selector({
'formats': formats,
'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats) # No formats with video
or all(f.get('acodec') == 'none' for f in formats)), # OR, No formats with audio
}))
formats_to_download = self._select_formats(formats, format_selector)
if interactive_format_selection and not formats_to_download:
self.report_error('Requested format is not available', tb=False, is_error=False)
continue
@@ -3208,7 +3219,6 @@
# info_dict['_filename'] needs to be set for backward compatibility
info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
temp_filename = self.prepare_filename(info_dict, 'temp')
files_to_move = {}
# Forced printings
self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
@@ -3236,13 +3246,11 @@
sub_files = self._write_subtitles(info_dict, temp_filename)
if sub_files is None:
return
files_to_move.update(dict(sub_files))
thumb_files = self._write_thumbnails(
'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
if thumb_files is None:
return
files_to_move.update(dict(thumb_files))
infofn = self.prepare_filename(info_dict, 'infojson')
_infojson_written = self._write_info_json('video', info_dict, infofn)
@@ -3316,13 +3324,12 @@
for link_type, should_write in write_links.items()):
return
new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
new_info, _ = self.pre_process(info_dict, 'before_dl')
replace_info_dict(new_info)
if self.params.get('skip_download'):
info_dict['filepath'] = temp_filename
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
info_dict['__files_to_move'] = files_to_move
replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
else:
@@ -3436,9 +3443,6 @@
info_dict['__files_to_merge'] = downloaded
# Even if there were no downloads, it is being merged only now
info_dict['__real_download'] = True
else:
for file in downloaded:
files_to_move[file] = None
else:
# Just a single file
dl_filename = existing_video_file(full_filename, temp_filename)
@@ -3452,7 +3456,6 @@
dl_filename = dl_filename or temp_filename
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
except network_exceptions as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
@@ -3523,7 +3526,7 @@
fixup()
try:
replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
replace_info_dict(self.post_process(dl_filename, info_dict))
except PostProcessingError as err:
self.report_error('Postprocessing: %s' % str(err))
return
@@ -3644,8 +3647,6 @@
os.remove(filename)
except OSError:
self.report_warning(f'Unable to delete file {filename}')
if filename in info.get('__files_to_move', []): # NB: Delete even if None
del info['__files_to_move'][filename]
@staticmethod
def post_extract(info_dict):
@@ -3662,8 +3663,7 @@
def run_pp(self, pp, infodict):
files_to_delete = []
if '__files_to_move' not in infodict:
infodict['__files_to_move'] = {}
try:
files_to_delete, infodict = pp.run(infodict)
except PostProcessingError as e:
@@ -3675,10 +3675,7 @@
if not files_to_delete:
return infodict
if self.params.get('keepvideo', False):
for f in files_to_delete:
infodict['__files_to_move'].setdefault(f, '')
else:
if not self.params.get('keepvideo', False):
self._delete_downloaded_files(
*files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
return infodict
@@ -3691,23 +3688,27 @@
return info
def pre_process(self, ie_info, key='pre_process', files_to_move=None):
if files_to_move is not None:
self.report_warning('[pre_process] "files_to_move" is deprecated and may be removed in a future version')
info = dict(ie_info)
info['__files_to_move'] = files_to_move or {}
try:
info = self.run_all_pps(key, info)
except PostProcessingError as err:
msg = f'Preprocessing: {err}'
info.setdefault('__pending_error', msg)
self.report_error(msg, is_error=False)
return info, info.pop('__files_to_move', None)
return info, files_to_move
def post_process(self, filename, info, files_to_move=None):
"""Run all the postprocessors on the given file."""
if files_to_move is not None:
self.report_warning('[post_process] "files_to_move" is deprecated and may be removed in a future version')
info['filepath'] = filename
info['__files_to_move'] = files_to_move or {}
info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
del info['__files_to_move']
info.pop('__multiple_thumbnails', None)
return self.run_all_pps('after_move', info)
def _make_archive_id(self, info_dict):
@@ -4294,10 +4295,11 @@
sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
existing_sub = self.existing_file((sub_filename_final, sub_filename))
if existing_sub:
self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
sub_info['filepath'] = existing_sub
ret.append((existing_sub, sub_filename_final))
ret.append(existing_sub)
continue
self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
@@ -4308,7 +4310,7 @@
with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_info['data'])
sub_info['filepath'] = sub_filename
ret.append((sub_filename, sub_filename_final))
ret.append(sub_filename)
continue
except OSError:
self.report_error(f'Cannot write video subtitles file {sub_filename}')
@@ -4319,7 +4321,7 @@
sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
self.dl(sub_filename, sub_copy, subtitle=True)
sub_info['filepath'] = sub_filename
ret.append((sub_filename, sub_filename_final))
ret.append(sub_filename)
except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
if self.params.get('ignoreerrors') is not True: # False or 'only_download'
@@ -4339,6 +4341,7 @@
self.to_screen(f'[info] There are no {label} thumbnails to download')
return ret
multiple = write_all and len(thumbnails) > 1
info_dict['__multiple_thumbnails'] = multiple
if thumb_filename_base is None:
thumb_filename_base = filename
@@ -4360,7 +4363,7 @@
self.to_screen('[info] %s is already present' % (
thumb_display_id if multiple else f'{label} thumbnail').capitalize())
t['filepath'] = existing_thumb
ret.append((existing_thumb, thumb_filename_final))
ret.append(existing_thumb)
else:
self.to_screen(f'[info] Downloading {thumb_display_id} ...')
try:
@@ -4368,7 +4371,7 @@
self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
ret.append((thumb_filename, thumb_filename_final))
ret.append(thumb_filename)
t['filepath'] = thumb_filename
except network_exceptions as err:
if isinstance(err, HTTPError) and err.status == 404:
@@ -4378,4 +4381,5 @@
thumbnails.pop(idx)
if ret and not write_all:
break
return ret
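
Earlier in the same file, `_check_formats` now caches the outcome of each test download on the format dict, so a format is only tested once. The memoization boils down to roughly this (simplified sketch, not the actual method):

def check_formats(formats, test_download):
    for f in formats:
        if f.get('__working') is None:
            f['__working'] = test_download(f)  # expensive network check, done at most once per format
        if f['__working']:
            yield f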

View File

@@ -219,7 +219,29 @@ class PatreonIE(PatreonBaseIE):
'thumbnail': r're:^https?://.+',
},
'params': {'skip_download': 'm3u8'},
}, {
# multiple attachments/embeds
'url': 'https://www.patreon.com/posts/holy-wars-solos-100601977',
'playlist_count': 3,
'info_dict': {
'id': '100601977',
'title': '"Holy Wars" (Megadeth) Solos Transcription & Lesson/Analysis',
'description': 'md5:d099ab976edfce6de2a65c2b169a88d3',
'uploader': 'Bradley Hall',
'uploader_id': '24401883',
'uploader_url': 'https://www.patreon.com/bradleyhallguitar',
'channel_id': '3193932',
'channel_url': 'https://www.patreon.com/bradleyhallguitar',
'channel_follower_count': int,
'timestamp': 1710777855,
'upload_date': '20240318',
'like_count': int,
'comment_count': int,
'thumbnail': r're:^https?://.+',
},
'skip': 'Patron-only content',
}]
_RETURN_TYPE = 'video'
def _real_extract(self, url):
video_id = self._match_id(url)
@@ -234,58 +256,54 @@
'include': 'audio,user,user_defined_tags,campaign,attachments_media',
})
attributes = post['data']['attributes']
title = attributes['title'].strip()
image = attributes.get('image') or {}
info = {
'id': video_id,
'title': title,
'description': clean_html(attributes.get('content')),
'thumbnail': image.get('large_url') or image.get('url'),
'timestamp': parse_iso8601(attributes.get('published_at')),
'like_count': int_or_none(attributes.get('like_count')),
'comment_count': int_or_none(attributes.get('comment_count')),
}
can_view_post = traverse_obj(attributes, 'current_user_can_view')
if can_view_post and info['comment_count']:
info['__post_extractor'] = self.extract_comments(video_id)
info = traverse_obj(attributes, {
'title': ('title', {str.strip}),
'description': ('content', {clean_html}),
'thumbnail': ('image', ('large_url', 'url'), {url_or_none}, any),
'timestamp': ('published_at', {parse_iso8601}),
'like_count': ('like_count', {int_or_none}),
'comment_count': ('comment_count', {int_or_none}),
})
for i in post.get('included', []):
i_type = i.get('type')
if i_type == 'media':
media_attributes = i.get('attributes') or {}
download_url = media_attributes.get('download_url')
entries = []
idx = 0
for include in traverse_obj(post, ('included', lambda _, v: v['type'])):
include_type = include['type']
if include_type == 'media':
media_attributes = traverse_obj(include, ('attributes', {dict})) or {}
download_url = url_or_none(media_attributes.get('download_url'))
ext = mimetype2ext(media_attributes.get('mimetype'))
# if size_bytes is None, this media file is likely unavailable
# See: https://github.com/yt-dlp/yt-dlp/issues/4608
size_bytes = int_or_none(media_attributes.get('size_bytes'))
if download_url and ext in KNOWN_EXTENSIONS and size_bytes is not None:
# XXX: what happens if there are multiple attachments?
return {
**info,
idx += 1
entries.append({
'id': f'{video_id}-{idx}',
'ext': ext,
'filesize': size_bytes,
'url': download_url,
}
elif i_type == 'user':
user_attributes = i.get('attributes')
if user_attributes:
info.update({
'uploader': user_attributes.get('full_name'),
'uploader_id': str_or_none(i.get('id')),
'uploader_url': user_attributes.get('url'),
})
elif i_type == 'post_tag':
info.setdefault('tags', []).append(traverse_obj(i, ('attributes', 'value')))
elif include_type == 'user':
info.update(traverse_obj(include, {
'uploader': ('attributes', 'full_name', {str}),
'uploader_id': ('id', {str_or_none}),
'uploader_url': ('attributes', 'url', {url_or_none}),
}))
elif i_type == 'campaign':
info.update({
'channel': traverse_obj(i, ('attributes', 'title')),
'channel_id': str_or_none(i.get('id')),
'channel_url': traverse_obj(i, ('attributes', 'url')),
'channel_follower_count': int_or_none(traverse_obj(i, ('attributes', 'patron_count'))),
})
elif include_type == 'post_tag':
if post_tag := traverse_obj(include, ('attributes', 'value', {str})):
info.setdefault('tags', []).append(post_tag)
elif include_type == 'campaign':
info.update(traverse_obj(include, {
'channel': ('attributes', 'title', {str}),
'channel_id': ('id', {str_or_none}),
'channel_url': ('attributes', 'url', {url_or_none}),
'channel_follower_count': ('attributes', 'patron_count', {int_or_none}),
}))
# handle Vimeo embeds
if traverse_obj(attributes, ('embed', 'provider')) == 'Vimeo':
@@ -296,36 +314,50 @@
v_url, video_id, 'Checking Vimeo embed URL',
headers={'Referer': 'https://patreon.com/'},
fatal=False, errnote=False):
return self.url_result(
entries.append(self.url_result(
VimeoIE._smuggle_referrer(v_url, 'https://patreon.com/'),
VimeoIE, url_transparent=True, **info)
VimeoIE, url_transparent=True))
embed_url = traverse_obj(attributes, ('embed', 'url', {url_or_none}))
if embed_url and self._request_webpage(embed_url, video_id, 'Checking embed URL', fatal=False, errnote=False):
return self.url_result(embed_url, **info)
entries.append(self.url_result(embed_url))
post_file = traverse_obj(attributes, 'post_file')
post_file = traverse_obj(attributes, ('post_file', {dict}))
if post_file:
name = post_file.get('name')
ext = determine_ext(name)
if ext in KNOWN_EXTENSIONS:
return {
**info,
entries.append({
'id': video_id,
'ext': ext,
'url': post_file['url'],
}
})
elif name == 'video' or determine_ext(post_file.get('url')) == 'm3u8':
formats, subtitles = self._extract_m3u8_formats_and_subtitles(post_file['url'], video_id)
return {
**info,
entries.append({
'id': video_id,
'formats': formats,
'subtitles': subtitles,
}
})
if can_view_post is False:
can_view_post = traverse_obj(attributes, 'current_user_can_view')
comments = None
if can_view_post and info.get('comment_count'):
comments = self.extract_comments(video_id)
if not entries and can_view_post is False:
self.raise_no_formats('You do not have access to this post', video_id=video_id, expected=True)
else:
elif not entries:
self.raise_no_formats('No supported media found in this post', video_id=video_id, expected=True)
elif len(entries) == 1:
info.update(entries[0])
else:
for entry in entries:
entry.update(info)
return self.playlist_result(entries, video_id, **info, __post_extractor=comments)
info['id'] = video_id
info['__post_extractor'] = comments
return info
def _get_comments(self, post_id):
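
Much of the extractor rewrite swaps manual `.get()` chains for `traverse_obj` templates, as in the metadata block above. A tiny illustration of that pattern with made-up input (not taken from the extractor):

from yt_dlp.utils import int_or_none, traverse_obj, url_or_none

attributes = {'title': ' Demo ', 'image': {'large_url': 'https://example.com/t.jpg'}, 'like_count': '5'}
info = traverse_obj(attributes, {
    'title': ('title', {str.strip}),
    'thumbnail': ('image', ('large_url', 'url'), {url_or_none}, any),
    'like_count': ('like_count', {int_or_none}),
})
# info == {'title': 'Demo', 'thumbnail': 'https://example.com/t.jpg', 'like_count': 5}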

View File

@@ -132,6 +132,16 @@ class CurlCFFIRH(ImpersonateRequestHandler, InstanceStoreMixin):
extensions.pop('cookiejar', None)
extensions.pop('timeout', None)
def send(self, request: Request) -> Response:
target = self._get_request_target(request)
try:
response = super().send(request)
except HTTPError as e:
e.response.extensions['impersonate'] = target
raise
response.extensions['impersonate'] = target
return response
def _send(self, request: Request):
max_redirects_exceeded = False
session: curl_cffi.requests.Session = self._get_instance(
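
The try/except in `send` above tags error responses as well, so callers can still see which target was used after an HTTP error. Roughly, from the caller's side (sketch, with `rh` and `request` as before):

from yt_dlp.networking.exceptions import HTTPError

try:
    response = rh.send(request)
except HTTPError as e:
    response = e.response  # still carries extensions['impersonate']
used_target = response.extensions.get('impersonate')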

View File

@@ -497,6 +497,7 @@ class Response(io.IOBase):
@param headers: response headers.
@param status: Response HTTP status code. Default is 200 OK.
@param reason: HTTP status reason. Will use built-in reasons based on status code if not provided.
@param extensions: Dictionary of handler-specific response extensions.
"""
def __init__(
@@ -505,7 +506,9 @@
url: str,
headers: Mapping[str, str],
status: int = 200,
reason: str = None):
reason: str = None,
extensions: dict = None
):
self.fp = fp
self.headers = Message()
@@ -517,6 +520,7 @@
self.reason = reason or HTTPStatus(status).phrase
except ValueError:
self.reason = None
self.extensions = extensions or {}
def readable(self):
return self.fp.readable()

View File

@@ -224,4 +224,8 @@ class EmbedThumbnailPP(FFmpegPostProcessor):
thumbnail_filename if converted or not self._already_have_thumbnail else None,
original_thumbnail if converted and not self._already_have_thumbnail else None,
info=info)
if not self._already_have_thumbnail:
info['thumbnails'][idx].pop('filepath', None)
return [], info

View File

@@ -662,6 +662,10 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
os.replace(temp_filename, filename)
if not self._already_have_subtitle:
for _, subtitle in subtitles.items():
subtitle.pop('filepath', None)
files_to_delete = [] if self._already_have_subtitle else sub_filenames
return files_to_delete, info
@@ -698,6 +702,7 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
infojson_filename = info.get('infojson_filename')
options.extend(self._get_infojson_opts(info, infojson_filename))
if not infojson_filename:
info.pop('infojson_filename', None)
files_to_delete.append(info.get('infojson_filename'))
elif self._add_infojson is True:
self.to_screen('The info-json can only be attached to mkv/mka files')
@@ -1016,9 +1021,6 @@ class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
'filepath': new_file,
}
info['__files_to_move'][new_file] = replace_extension(
info['__files_to_move'][sub['filepath']], new_ext)
return sub_filenames, info
@@ -1083,16 +1085,15 @@ class FFmpegThumbnailsConvertorPP(FFmpegPostProcessor):
return imghdr.what(path) == 'webp'
def fixup_webp(self, info, idx=-1):
thumbnail_filename = info['thumbnails'][idx]['filepath']
thumbnail = info['thumbnails'][idx]
thumbnail_filename = thumbnail['filepath']
_, thumbnail_ext = os.path.splitext(thumbnail_filename)
if thumbnail_ext:
if thumbnail_ext.lower() != '.webp' and imghdr.what(thumbnail_filename) == 'webp':
self.to_screen('Correcting thumbnail "%s" extension to webp' % thumbnail_filename)
webp_filename = replace_extension(thumbnail_filename, 'webp')
os.replace(thumbnail_filename, webp_filename)
info['thumbnails'][idx]['filepath'] = webp_filename
info['__files_to_move'][webp_filename] = replace_extension(
info['__files_to_move'].pop(thumbnail_filename), 'webp')
thumbnail['filepath'] = webp_filename
@staticmethod
def _options(target_ext):
@@ -1130,8 +1131,6 @@ class FFmpegThumbnailsConvertorPP(FFmpegPostProcessor):
continue
thumbnail_dict['filepath'] = self.convert_thumbnail(original_thumbnail, target_ext)
files_to_delete.append(original_thumbnail)
info['__files_to_move'][thumbnail_dict['filepath']] = replace_extension(
info['__files_to_move'][original_thumbnail], target_ext)
if not has_thumbnail:
self.to_screen('There aren\'t any thumbnails to convert')
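
The common thread in these hunks is that post-processors now only update the `filepath` stored on the owning thumbnail or subtitle dict and no longer mirror changes into `__files_to_move`. The webp fixup, for instance, reduces to roughly (sketch, not the actual method):

import os
from yt_dlp.utils import replace_extension

def rename_to_webp(thumbnail):
    webp_filename = replace_extension(thumbnail['filepath'], 'webp')
    os.replace(thumbnail['filepath'], webp_filename)
    thumbnail['filepath'] = webp_filename  # MoveFiles picks this up later; no extra bookkeeping
    return webp_filename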

View File

@@ -1,16 +1,22 @@
import os
from pathlib import Path
from .common import PostProcessor
from ..compat import shutil
from ..utils import (
PostProcessingError,
decodeFilename,
encodeFilename,
make_dir,
replace_extension
)
class MoveFilesAfterDownloadPP(PostProcessor):
# Map of the keys that contain moveable files and the 'type' of the file
# for generating the output filename
CHILD_KEYS = {
'thumbnails': 'thumbnail',
'requested_subtitles': 'subtitle'
}
def __init__(self, downloader=None, downloaded=True):
PostProcessor.__init__(self, downloader)
@@ -20,34 +26,79 @@ class MoveFilesAfterDownloadPP(PostProcessor):
def pp_key(cls):
return 'MoveFiles'
def move_file_and_write_to_info(self, info_dict, relevant_dict=None, output_file_type=None):
relevant_dict = relevant_dict or info_dict
if 'filepath' not in relevant_dict:
return
output_file_type = output_file_type or ''
current_filepath, final_filepath = self.determine_filepath(info_dict, relevant_dict, output_file_type)
move_result = self.move_file(info_dict, current_filepath, final_filepath)
if move_result:
relevant_dict['filepath'] = move_result
else:
del relevant_dict['filepath']
def determine_filepath(self, info_dict, relevant_dict, output_file_type):
current_filepath = relevant_dict['filepath']
prepared_filepath = self._downloader.prepare_filename(info_dict, output_file_type)
if (output_file_type == 'thumbnail' and info_dict['__multiple_thumbnails']) or output_file_type == 'subtitle':
desired_extension = ''.join(Path(current_filepath).suffixes[-2:])
else:
desired_extension = Path(current_filepath).suffix
return current_filepath, replace_extension(prepared_filepath, desired_extension)
def move_file(self, info_dict, current_filepath, final_filepath):
if not current_filepath or not final_filepath:
return
dl_parent_folder = os.path.split(info_dict['filepath'])[0]
finaldir = info_dict.get('__finaldir', os.path.abspath(dl_parent_folder))
if not os.path.isabs(current_filepath):
current_filepath = os.path.join(finaldir, current_filepath)
if not os.path.isabs(final_filepath):
final_filepath = os.path.join(finaldir, final_filepath)
if current_filepath == final_filepath:
return final_filepath
if not os.path.exists(current_filepath):
self.report_warning('File "%s" cannot be found' % current_filepath)
return
if os.path.exists(final_filepath):
if self.get_param('overwrites', True):
self.report_warning('Replacing existing file "%s"' % final_filepath)
os.remove(final_filepath)
else:
self.report_warning(
'Cannot move file "%s" out of temporary directory since "%s" already exists. '
% (current_filepath, final_filepath))
return
make_dir(final_filepath, PostProcessingError)
self.to_screen(f'Moving file "{current_filepath}" to "{final_filepath}"')
shutil.move(current_filepath, final_filepath) # os.rename cannot move between volumes
return final_filepath
def run(self, info):
dl_path, dl_name = os.path.split(encodeFilename(info['filepath']))
finaldir = info.get('__finaldir', dl_path)
finalpath = os.path.join(finaldir, dl_name)
if self._downloaded:
info['__files_to_move'][info['filepath']] = decodeFilename(finalpath)
# This represents the main media file (using the 'filepath' key)
self.move_file_and_write_to_info(info)
make_newfilename = lambda old: decodeFilename(os.path.join(finaldir, os.path.basename(encodeFilename(old))))
for oldfile, newfile in info['__files_to_move'].items():
if not newfile:
newfile = make_newfilename(oldfile)
if os.path.abspath(encodeFilename(oldfile)) == os.path.abspath(encodeFilename(newfile)):
for key, output_file_type in self.CHILD_KEYS.items():
if key not in info:
continue
if not os.path.exists(encodeFilename(oldfile)):
self.report_warning('File "%s" cannot be found' % oldfile)
continue
if os.path.exists(encodeFilename(newfile)):
if self.get_param('overwrites', True):
self.report_warning('Replacing existing file "%s"' % newfile)
os.remove(encodeFilename(newfile))
else:
self.report_warning(
'Cannot move file "%s" out of temporary directory since "%s" already exists. '
% (oldfile, newfile))
continue
make_dir(newfile, PostProcessingError)
self.to_screen(f'Moving file "{oldfile}" to "{newfile}"')
shutil.move(oldfile, newfile) # os.rename cannot move between volumes
info['filepath'] = finalpath
if isinstance(info[key], list) or isinstance(info[key], dict):
iterable = info[key].values() if isinstance(info[key], dict) else info[key]
for file_dict in iterable:
self.move_file_and_write_to_info(info, file_dict, output_file_type)
return [], info
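
The extension handling in `determine_filepath` keeps the last two suffixes for subtitles and multi-thumbnail downloads so that language and format tags survive the rename. An illustration with made-up paths (the prepared name is only an assumption of what `prepare_filename` might return):

from pathlib import Path
from yt_dlp.utils import replace_extension

current = 'Video Title.en.vtt'                 # where the subtitle currently sits
prepared = 'final/Video Title.vtt'             # assumed prepare_filename(info, 'subtitle') result
suffix = ''.join(Path(current).suffixes[-2:])  # '.en.vtt', keeping language + format
final = replace_extension(prepared, suffix)    # 'final/Video Title.en.vtt'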

View File

@@ -2092,7 +2092,9 @@ def prepend_extension(filename, ext, expected_real_ext=None):
def replace_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return '{}.{}'.format(
ext = ext if ext.startswith('.') else '.' + ext
return '{}{}'.format(
name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
ext)
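
With this change applied, a leading dot in `ext` is no longer doubled, which is what allows MoveFiles to pass multi-part suffixes. For example (behaviour sketch, assuming the diff above):

from yt_dlp.utils import replace_extension

assert replace_extension('clip.mp4', 'png') == 'clip.png'         # unchanged behaviour
assert replace_extension('clip.mp4', '.en.vtt') == 'clip.en.vtt'  # previously 'clip..en.vtt'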