Compare commits

...

5 Commits

Author SHA1 Message Date
SirElderling 115c95b230
Merge ab81421072 into 96da952504 2024-05-05 10:29:33 +05:30
sepro 96da952504
[core] Warn if lack of ffmpeg alters format selection (#9805)
Authored by: seproDev, pukkandan
2024-05-05 00:44:08 +02:00
bashonly bec9a59e8e
[networking] Add `extensions` attribute to `Response` (#9756)
CurlCFFIRH now provides an `impersonate` field in its responses' extensions

Authored by: bashonly
2024-05-04 22:19:42 +00:00
bashonly 036e0d92c6
[ie/patreon] Extract multiple embeds (#9850)
Closes #9848
Authored by: bashonly
2024-05-04 22:11:11 +00:00
SirElderling ab81421072 [ie/NYTimes] - add audio extraction 2024-04-21 13:53:54 +01:00
7 changed files with 371 additions and 73 deletions

View File

@ -785,6 +785,25 @@ class TestHTTPImpersonateRequestHandler(TestRequestHandlerBase):
assert res.status == 200
assert std_headers['user-agent'].lower() not in res.read().decode().lower()
def test_response_extensions(self, handler):
    """Successful responses must expose the resolved impersonate target via extensions."""
    with handler() as rh:
        for target in rh.supported_targets:
            request = Request(
                f'http://127.0.0.1:{self.http_port}/gen_200', extensions={'impersonate': target})
            response = validate_and_send(rh, request)
            # The handler may normalize the requested target; compare against its resolution
            assert response.extensions['impersonate'] == rh._get_request_target(request)
def test_http_error_response_extensions(self, handler):
    """Responses attached to a raised HTTPError must also carry the impersonate extension.

    Bug fix: the original assertion lived only inside the ``except`` block, so a
    handler that failed to raise ``HTTPError`` for the 404 made the test silently
    pass without asserting anything. The ``else`` branch now fails explicitly.
    """
    with handler() as rh:
        for target in rh.supported_targets:
            request = Request(
                f'http://127.0.0.1:{self.http_port}/gen_404', extensions={'impersonate': target})
            try:
                validate_and_send(rh, request)
            except HTTPError as e:
                res = e.response
                assert res.extensions['impersonate'] == rh._get_request_target(request)
            else:
                raise AssertionError('expected HTTPError to be raised for /gen_404')
class TestRequestHandlerMisc:
"""Misc generic tests for request handlers, not related to request or validation testing"""

View File

@ -2136,6 +2136,11 @@ class YoutubeDL:
def _check_formats(self, formats):
for f in formats:
working = f.get('__working')
if working is not None:
if working:
yield f
continue
self.to_screen('[info] Testing format %s' % f['format_id'])
path = self.get_output_path('temp')
if not self._ensure_dir_exists(f'{path}/'):
@ -2152,33 +2157,44 @@ class YoutubeDL:
os.remove(temp_file.name)
except OSError:
self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
f['__working'] = success
if success:
yield f
else:
self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
def _select_formats(self, formats, selector):
return list(selector({
'formats': formats,
'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats) # No formats with video
or all(f.get('acodec') == 'none' for f in formats)), # OR, No formats with audio
}))
def _default_format_spec(self, info_dict, download=True):
download = download and not self.params.get('simulate')
prefer_best = download and (
self.params['outtmpl']['default'] == '-'
or info_dict.get('is_live') and not self.params.get('live_from_start'))
def can_merge():
merger = FFmpegMergerPP(self)
return merger.available and merger.can_merge()
prefer_best = (
not self.params.get('simulate')
and download
and (
not can_merge()
or info_dict.get('is_live') and not self.params.get('live_from_start')
or self.params['outtmpl']['default'] == '-'))
compat = (
prefer_best
or self.params.get('allow_multiple_audio_streams', False)
or 'format-spec' in self.params['compat_opts'])
if not prefer_best and download and not can_merge():
prefer_best = True
formats = self._get_formats(info_dict)
evaluate_formats = lambda spec: self._select_formats(formats, self.build_format_selector(spec))
if evaluate_formats('b/bv+ba') != evaluate_formats('bv*+ba/b'):
self.report_warning('ffmpeg not found. The downloaded format may not be the best available. '
'Installing ffmpeg is strongly recommended: https://github.com/yt-dlp/yt-dlp#dependencies')
return (
'best/bestvideo+bestaudio' if prefer_best
else 'bestvideo*+bestaudio/best' if not compat
else 'bestvideo+bestaudio/best')
compat = (self.params.get('allow_multiple_audio_streams')
or 'format-spec' in self.params['compat_opts'])
return ('best/bestvideo+bestaudio' if prefer_best
else 'bestvideo+bestaudio/best' if compat
else 'bestvideo*+bestaudio/best')
def build_format_selector(self, format_spec):
def syntax_error(note, start):
@ -2928,12 +2944,7 @@ class YoutubeDL:
self.write_debug(f'Default format spec: {req_format}')
format_selector = self.build_format_selector(req_format)
formats_to_download = list(format_selector({
'formats': formats,
'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats) # No formats with video
or all(f.get('acodec') == 'none' for f in formats)), # OR, No formats with audio
}))
formats_to_download = self._select_formats(formats, format_selector)
if interactive_format_selection and not formats_to_download:
self.report_error('Requested format is not available', tb=False, is_error=False)
continue

View File

@ -1341,6 +1341,7 @@ from .nubilesporn import NubilesPornIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
NYTimesAudioIE,
NYTimesCookingIE,
NYTimesCookingRecipeIE,
)

View File

@ -10,6 +10,7 @@ from ..utils import (
float_or_none,
get_elements_html_by_class,
int_or_none,
js_to_json,
merge_dicts,
mimetype2ext,
parse_iso8601,
@ -418,3 +419,223 @@ class NYTimesCookingRecipeIE(InfoExtractor):
'thumbnails': [{'url': thumb_url} for thumb_url in traverse_obj(
recipe_data, ('image', 'crops', 'recipe', ..., {url_or_none}))],
}
class NYTimesAudioIE(NYTimesBaseIE):
    """Extract audio embeds from nytimes.com podcast and book-review article pages.

    Matches /YYYY/MM/DD/podcasts/... and /YYYY/MM/DD/books/... URLs and pulls
    track metadata out of the page's ``window.__preloadedData`` JSON blob.
    """
    _VALID_URL = r"https?://(?:www\.)?nytimes\.com/\d{4}/\d{2}/\d{2}/(?:podcasts|books)/(?:[\w-]+/)?(?P<id>[^./?#]+)(?:\.html)?"
    _TESTS = [
        {
            "url": "http://www.nytimes.com/2016/10/14/podcasts/revelations-from-the-final-weeks.html",
            "md5": "cd402e44a059c8caf3b5f514c9264d0f",
            "info_dict": {
                "id": "100000004709062",
                "title": "Revelations From the Final Weeks",
                "ext": "mp3",
                "description": "md5:fb5c6b93b12efc51649b4847fe066ee4",
                "timestamp": 1476448332,
                "upload_date": "20161014",
                "creators": [''],
                "series": "The Run-Up",
                "episode": "He Was Like an Octopus",
                "episode_number": 20,
                "duration": 2130,
                "thumbnail": r"re:https?://\w+\.nyt.com/images/.*\.jpg",
            },
        },
        {
            "url": "https://www.nytimes.com/2023/11/25/podcasts/poultry-slam.html",
            "info_dict": {
                "id": "100000009191248",
                "title": "Poultry Slam",
                "ext": "mp3",
                "description": "md5:1e6f16b21bb9287b8a1fe563145a72fe",
                "timestamp": 1700911084,
                "upload_date": "20231125",
                "creators": [],
                "series": "This American Life",
                "episode": "Poultry Slam",
                "duration": 3523,
                "thumbnail": r"re:https?://\w+\.nyt.com/images/.*\.png",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "http://www.nytimes.com/2016/10/16/books/review/inside-the-new-york-times-book-review-the-rise-of-hitler.html",
            "info_dict": {
                "id": "100000004709479",
                "title": "Inside The New York Times Book Review: The Rise of Hitler",
                "ext": "mp3",
                "description": "md5:288161c98c098a0c24f07a94af7108c3",
                "timestamp": 1476461513,
                "upload_date": "20161014",
                "creators": ['Pamela Paul'],
                "series": "",
                "episode": "The Rise of Hitler",
                "duration": 3475,
                "thumbnail": r"re:https?://\w+\.nyt.com/images/.*\.jpg",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "https://www.nytimes.com/2023/12/07/podcasts/the-daily/nikki-haley.html",
            "info_dict": {
                "id": "100000009214128",
                "title": "Nikki Haleys Moment",
                "ext": "mp3",
                "description": "md5:bf9f532fe689967ef1c458bcb057f3e5",
                "timestamp": 1701946819,
                "upload_date": "20231207",
                "creators": [],
                "series": "The Daily",
                "episode": "Listen to The Daily: Nikki Haleys Moment",
                "duration": 1908,
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "https://www.nytimes.com/2023/12/18/podcasts/israel-putin.html",
            "md5": "708b4fd393ca103280fe9e56d91b08b5",
            "info_dict": {
                "id": "100000009227362",
                "title": "Pressure Mounts on Israel, and Putin Profits Off Boycott",
                "ext": "mp3",
                "description": "Hear the news in five minutes.",
                "timestamp": 1702897212,
                "upload_date": "20231218",
                "creators": [],
                "series": "The Headlines",
                "episode": "The Headlines",
                "duration": 298,
                "thumbnail": r"re:https?://\w+\.nyt.com/images/.*\.jpg",
            },
        },
    ]

    def _extract_content_from_block(self, block):
        """Map a single media block to info-dict fields.

        Each value is a traverse_obj path; tuple-of-tuples segments are
        alternatives (different page layouts nest the track data differently),
        and get_all=False keeps only the first match per field.
        """
        return traverse_obj(
            block,
            {
                "creators": ("data", "track", "credit", all),
                "duration": (
                    ("data", "media"),
                    ("track", "length"),
                    ("duration", None),
                    {int_or_none},
                ),
                "series": (
                    ("data", "media"),
                    ("podcast", "podcastSeries"),
                    ("title", None),
                    {str_or_none},
                ),
                "episode": (
                    ("data", "media"),
                    ("track", "headline"),
                    ("title", "default"), {str}),
                # Takes the second whitespace token — presumably the block holds
                # a string like "Episode 20"; TODO confirm against live pages
                "episode_number": (
                    "data",
                    "podcast",
                    "episode",
                    {lambda v: v.split()[1]},
                    {int_or_none},
                ),
                "url": (
                    ("data", "media"),
                    ("track", "fileUrl"),
                    ("source", None),
                    {url_or_none},
                ),
                # NOTE(review): traverse_obj treats "none" as a key path, not a
                # literal value — this likely never sets vcodec; confirm intent
                "vcodec": "none",
            },
            get_all=False,
        )

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)
        # Article metadata lives in the page's preloaded-state JS object;
        # js_to_json normalizes it before JSON parsing
        art_json = self._search_json(
            r"window\.__preloadedData\s*=",
            webpage,
            "media details",
            page_id,
            transform_source=js_to_json,
        )["initialData"]["data"]["article"]
        # Keep only body-content entries whose GraphQL __typename marks them as
        # media-bearing blocks
        blocks = traverse_obj(
            art_json,
            (
                "sprinkledBody",
                "content",
                lambda _, v: v["__typename"]
                in ("InteractiveBlock", "HeaderMultimediaBlock"),
                "media",
            ),
        )
        if not blocks:
            raise ExtractorError("Unable to extract any media blocks from webpage")
        common_info = {
            "title": remove_end(
                self._html_extract_title(webpage), " - The New York Times"
            ),
            "description": self._html_search_meta(
                ["og:description", "twitter:description"], webpage
            ),
            # Article-level fallback id: e.g. the "Poultry Slam" page keeps its
            # id under art_json > 'sourceId' only
            "id": traverse_obj(
                art_json, ("sourceId")
            ),
            # The spread below overrides the fallbacks above for any key the
            # traversal actually resolves (missing keys are omitted, not None)
            **traverse_obj(
                art_json,
                {
                    "id": (
                        "sprinkledBody",
                        "content",
                        ...,
                        "media",
                        "sourceId",
                        any,
                        {str},
                    ),
                    "title": ("headline", "default"),
                    "description": ("summary"),
                    "timestamp": ("firstPublished", {parse_iso8601}),
                    "thumbnails": (
                        "promotionalMedia",
                        "assetCrops",
                        ...,
                        "renditions",
                        ...,
                        all,
                        {self._extract_thumbnails},
                    ),
                },
            ),
        }
        entries = []
        for block in blocks:
            # InteractiveBlock media embed their payload as HTML wrapping a
            # getFlexData() JS function; unwrap to the inner JSON first
            if block.get("html"):
                block = self._search_json(
                    r"function\s+getFlexData\(\)\s*\{\s*return",
                    block.get("html"),
                    "Retrieve the inner JSON",
                    page_id,
                )
            entries.append(
                merge_dicts(self._extract_content_from_block(block), common_info)
            )
        if len(entries) > 1:
            return self.playlist_result(entries, page_id, **common_info)
        # Single media block: flat result; the entry's own "id" (if extracted)
        # overrides the page_id default
        return {
            "id": page_id,
            **entries[0],
        }

View File

@ -219,7 +219,29 @@ class PatreonIE(PatreonBaseIE):
'thumbnail': r're:^https?://.+',
},
'params': {'skip_download': 'm3u8'},
}, {
# multiple attachments/embeds
'url': 'https://www.patreon.com/posts/holy-wars-solos-100601977',
'playlist_count': 3,
'info_dict': {
'id': '100601977',
'title': '"Holy Wars" (Megadeth) Solos Transcription & Lesson/Analysis',
'description': 'md5:d099ab976edfce6de2a65c2b169a88d3',
'uploader': 'Bradley Hall',
'uploader_id': '24401883',
'uploader_url': 'https://www.patreon.com/bradleyhallguitar',
'channel_id': '3193932',
'channel_url': 'https://www.patreon.com/bradleyhallguitar',
'channel_follower_count': int,
'timestamp': 1710777855,
'upload_date': '20240318',
'like_count': int,
'comment_count': int,
'thumbnail': r're:^https?://.+',
},
'skip': 'Patron-only content',
}]
_RETURN_TYPE = 'video'
def _real_extract(self, url):
video_id = self._match_id(url)
@ -234,58 +256,54 @@ class PatreonIE(PatreonBaseIE):
'include': 'audio,user,user_defined_tags,campaign,attachments_media',
})
attributes = post['data']['attributes']
title = attributes['title'].strip()
image = attributes.get('image') or {}
info = {
'id': video_id,
'title': title,
'description': clean_html(attributes.get('content')),
'thumbnail': image.get('large_url') or image.get('url'),
'timestamp': parse_iso8601(attributes.get('published_at')),
'like_count': int_or_none(attributes.get('like_count')),
'comment_count': int_or_none(attributes.get('comment_count')),
}
can_view_post = traverse_obj(attributes, 'current_user_can_view')
if can_view_post and info['comment_count']:
info['__post_extractor'] = self.extract_comments(video_id)
info = traverse_obj(attributes, {
'title': ('title', {str.strip}),
'description': ('content', {clean_html}),
'thumbnail': ('image', ('large_url', 'url'), {url_or_none}, any),
'timestamp': ('published_at', {parse_iso8601}),
'like_count': ('like_count', {int_or_none}),
'comment_count': ('comment_count', {int_or_none}),
})
for i in post.get('included', []):
i_type = i.get('type')
if i_type == 'media':
media_attributes = i.get('attributes') or {}
download_url = media_attributes.get('download_url')
entries = []
idx = 0
for include in traverse_obj(post, ('included', lambda _, v: v['type'])):
include_type = include['type']
if include_type == 'media':
media_attributes = traverse_obj(include, ('attributes', {dict})) or {}
download_url = url_or_none(media_attributes.get('download_url'))
ext = mimetype2ext(media_attributes.get('mimetype'))
# if size_bytes is None, this media file is likely unavailable
# See: https://github.com/yt-dlp/yt-dlp/issues/4608
size_bytes = int_or_none(media_attributes.get('size_bytes'))
if download_url and ext in KNOWN_EXTENSIONS and size_bytes is not None:
# XXX: what happens if there are multiple attachments?
return {
**info,
idx += 1
entries.append({
'id': f'{video_id}-{idx}',
'ext': ext,
'filesize': size_bytes,
'url': download_url,
}
elif i_type == 'user':
user_attributes = i.get('attributes')
if user_attributes:
info.update({
'uploader': user_attributes.get('full_name'),
'uploader_id': str_or_none(i.get('id')),
'uploader_url': user_attributes.get('url'),
})
elif i_type == 'post_tag':
info.setdefault('tags', []).append(traverse_obj(i, ('attributes', 'value')))
elif include_type == 'user':
info.update(traverse_obj(include, {
'uploader': ('attributes', 'full_name', {str}),
'uploader_id': ('id', {str_or_none}),
'uploader_url': ('attributes', 'url', {url_or_none}),
}))
elif i_type == 'campaign':
info.update({
'channel': traverse_obj(i, ('attributes', 'title')),
'channel_id': str_or_none(i.get('id')),
'channel_url': traverse_obj(i, ('attributes', 'url')),
'channel_follower_count': int_or_none(traverse_obj(i, ('attributes', 'patron_count'))),
})
elif include_type == 'post_tag':
if post_tag := traverse_obj(include, ('attributes', 'value', {str})):
info.setdefault('tags', []).append(post_tag)
elif include_type == 'campaign':
info.update(traverse_obj(include, {
'channel': ('attributes', 'title', {str}),
'channel_id': ('id', {str_or_none}),
'channel_url': ('attributes', 'url', {url_or_none}),
'channel_follower_count': ('attributes', 'patron_count', {int_or_none}),
}))
# handle Vimeo embeds
if traverse_obj(attributes, ('embed', 'provider')) == 'Vimeo':
@ -296,36 +314,50 @@ class PatreonIE(PatreonBaseIE):
v_url, video_id, 'Checking Vimeo embed URL',
headers={'Referer': 'https://patreon.com/'},
fatal=False, errnote=False):
return self.url_result(
entries.append(self.url_result(
VimeoIE._smuggle_referrer(v_url, 'https://patreon.com/'),
VimeoIE, url_transparent=True, **info)
VimeoIE, url_transparent=True))
embed_url = traverse_obj(attributes, ('embed', 'url', {url_or_none}))
if embed_url and self._request_webpage(embed_url, video_id, 'Checking embed URL', fatal=False, errnote=False):
return self.url_result(embed_url, **info)
entries.append(self.url_result(embed_url))
post_file = traverse_obj(attributes, 'post_file')
post_file = traverse_obj(attributes, ('post_file', {dict}))
if post_file:
name = post_file.get('name')
ext = determine_ext(name)
if ext in KNOWN_EXTENSIONS:
return {
**info,
entries.append({
'id': video_id,
'ext': ext,
'url': post_file['url'],
}
})
elif name == 'video' or determine_ext(post_file.get('url')) == 'm3u8':
formats, subtitles = self._extract_m3u8_formats_and_subtitles(post_file['url'], video_id)
return {
**info,
entries.append({
'id': video_id,
'formats': formats,
'subtitles': subtitles,
}
})
if can_view_post is False:
can_view_post = traverse_obj(attributes, 'current_user_can_view')
comments = None
if can_view_post and info.get('comment_count'):
comments = self.extract_comments(video_id)
if not entries and can_view_post is False:
self.raise_no_formats('You do not have access to this post', video_id=video_id, expected=True)
else:
elif not entries:
self.raise_no_formats('No supported media found in this post', video_id=video_id, expected=True)
elif len(entries) == 1:
info.update(entries[0])
else:
for entry in entries:
entry.update(info)
return self.playlist_result(entries, video_id, **info, __post_extractor=comments)
info['id'] = video_id
info['__post_extractor'] = comments
return info
def _get_comments(self, post_id):

View File

@ -132,6 +132,16 @@ class CurlCFFIRH(ImpersonateRequestHandler, InstanceStoreMixin):
extensions.pop('cookiejar', None)
extensions.pop('timeout', None)
def send(self, request: Request) -> Response:
    """Send *request*, tagging the response's extensions with the impersonate target.

    The resolved target is attached both to successful responses and to the
    response carried by a raised HTTPError, so callers can always inspect
    which browser fingerprint was used.
    """
    impersonate_target = self._get_request_target(request)
    try:
        response = super().send(request)
    except HTTPError as err:
        # Error responses must expose the target too before re-raising
        err.response.extensions['impersonate'] = impersonate_target
        raise
    response.extensions['impersonate'] = impersonate_target
    return response
def _send(self, request: Request):
max_redirects_exceeded = False
session: curl_cffi.requests.Session = self._get_instance(

View File

@ -497,6 +497,7 @@ class Response(io.IOBase):
@param headers: response headers.
@param status: Response HTTP status code. Default is 200 OK.
@param reason: HTTP status reason. Will use built-in reasons based on status code if not provided.
@param extensions: Dictionary of handler-specific response extensions.
"""
def __init__(
@ -505,7 +506,9 @@ class Response(io.IOBase):
url: str,
headers: Mapping[str, str],
status: int = 200,
reason: str = None):
reason: str = None,
extensions: dict = None
):
self.fp = fp
self.headers = Message()
@ -517,6 +520,7 @@ class Response(io.IOBase):
self.reason = reason or HTTPStatus(status).phrase
except ValueError:
self.reason = None
self.extensions = extensions or {}
def readable(self):
    # Delegate readability to the wrapped file-like object (io.IOBase protocol)
    return self.fp.readable()