[extractor/lbry] Extract original quality formats (#7257)

Closes #7251
Authored by: bashonly
This commit is contained in:
bashonly 2023-06-08 13:36:09 -05:00 committed by GitHub
parent 8213ce28a4
commit 44c0d66442

@@ -1,8 +1,8 @@
import functools
import json
import urllib.parse
from .common import InfoExtractor
from ..compat import compat_str, compat_urllib_parse_unquote
from ..utils import (
ExtractorError,
HEADRequest,
@@ -12,7 +12,10 @@
int_or_none,
mimetype2ext,
parse_qs,
traverse_obj,
try_get,
url_or_none,
urlhandle_detect_ext,
urljoin,
)
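The new imports (traverse_obj, url_or_none, urlhandle_detect_ext) replace the try_get/compat_str helpers dropped above. A minimal sketch of the traverse_obj path elements the rewritten code relies on, run against a made-up claim dict (not a real API response):

from yt_dlp.utils import int_or_none, traverse_obj, url_or_none

claim = {'value': {'title': 'Example', 'tags': ['a', 'b'],
                   'thumbnail': {'url': 'https://thumbs.odycdn.com/x.webp'}}}

traverse_obj(claim, ('value', 'title', {str}))                     # 'Example': a {type} set keeps only values of that type
traverse_obj(claim, ('value', 'title', {int_or_none}))             # None: a {callable} set applies the function and drops None results
traverse_obj(claim, ('value', 'thumbnail', 'url', {url_or_none}))  # the thumbnail URL, validated
traverse_obj(claim, ('value', 'tags', ...))                        # ['a', 'b']: `...` branches over every item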
@@ -52,38 +55,25 @@ def _permanent_url(self, url, claim_name, claim_id):
'/%s:%s' % (claim_name, claim_id))
def _parse_stream(self, stream, url):
stream_value = stream.get('value') or {}
stream_type = stream_value.get('stream_type')
source = stream_value.get('source') or {}
media = stream_value.get(stream_type) or {}
signing_channel = stream.get('signing_channel') or {}
channel_name = signing_channel.get('name')
channel_claim_id = signing_channel.get('claim_id')
channel_url = None
if channel_name and channel_claim_id:
channel_url = self._permanent_url(url, channel_name, channel_claim_id)
stream_type = traverse_obj(stream, ('value', 'stream_type', {str}))
info = {
'thumbnail': try_get(stream_value, lambda x: x['thumbnail']['url'], compat_str),
'description': stream_value.get('description'),
'license': stream_value.get('license'),
'timestamp': int_or_none(stream.get('timestamp')),
'release_timestamp': int_or_none(stream_value.get('release_time')),
'tags': stream_value.get('tags'),
'duration': int_or_none(media.get('duration')),
'channel': try_get(signing_channel, lambda x: x['value']['title']),
'channel_id': channel_claim_id,
'channel_url': channel_url,
'ext': determine_ext(source.get('name')) or mimetype2ext(source.get('media_type')),
'filesize': int_or_none(source.get('size')),
}
if stream_type == 'audio':
info['vcodec'] = 'none'
else:
info.update({
'width': int_or_none(media.get('width')),
'height': int_or_none(media.get('height')),
info = traverse_obj(stream, {
'title': ('value', 'title', {str}),
'thumbnail': ('value', 'thumbnail', 'url', {url_or_none}),
'description': ('value', 'description', {str}),
'license': ('value', 'license', {str}),
'timestamp': ('timestamp', {int_or_none}),
'release_timestamp': ('value', 'release_time', {int_or_none}),
'tags': ('value', 'tags', ..., {lambda x: x or None}),
'duration': ('value', stream_type, 'duration', {int_or_none}),
'channel': ('signing_channel', 'value', 'title', {str}),
'channel_id': ('signing_channel', 'claim_id', {str}),
})
channel_name = traverse_obj(stream, ('signing_channel', 'name', {str}))
if channel_name and info.get('channel_id'):
info['channel_url'] = self._permanent_url(url, channel_name, info['channel_id'])
return info
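For reference, a rough sketch of what the rewritten _parse_stream produces for a trimmed-down, hypothetical claim (all values are illustrative; only the field names follow the API shape used above):

stream = {
    'timestamp': 1685790036,
    'signing_channel': {'name': '@wickedtruths', 'claim_id': '23d2bbf856b0ceed5b1d7c5960bcc72da5a20cb0',
                        'value': {'title': 'Wicked Truths'}},
    'value': {'stream_type': 'video', 'title': 'Example upload', 'license': 'None',
              'tags': ['example'], 'video': {'duration': 1063},
              'thumbnail': {'url': 'https://thumbs.odycdn.com/example.webp'}},
}
# _parse_stream(stream, 'https://odysee.com/...') would return roughly:
# {'title': 'Example upload', 'thumbnail': 'https://thumbs.odycdn.com/example.webp', 'license': 'None',
#  'timestamp': 1685790036, 'tags': ['example'], 'duration': 1063, 'channel': 'Wicked Truths',
#  'channel_id': '23d2bbf856b0ceed5b1d7c5960bcc72da5a20cb0',
#  'channel_url': 'https://odysee.com/@wickedtruths:23d2bbf856b0ceed5b1d7c5960bcc72da5a20cb0'}

Keys whose lookup yields nothing (e.g. description or release_timestamp here) are simply omitted from the returned dict.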
@@ -186,6 +176,28 @@ class LBRYIE(LBRYBaseIE):
'license': 'None',
},
'params': {'skip_download': True}
}, {
# original quality format w/higher resolution than HLS formats
'url': 'https://odysee.com/@wickedtruths:2/Biotechnological-Invasion-of-Skin-(April-2023):4',
'md5': '305b0b3b369bde1b984961f005b67193',
'info_dict': {
'id': '41fbfe805eb73c8d3012c0c49faa0f563274f634',
'ext': 'mp4',
'title': 'Biotechnological Invasion of Skin (April 2023)',
'description': 'md5:709a2f4c07bd8891cda3a7cc2d6fcf5c',
'channel': 'Wicked Truths',
'channel_id': '23d2bbf856b0ceed5b1d7c5960bcc72da5a20cb0',
'channel_url': 'https://odysee.com/@wickedtruths:23d2bbf856b0ceed5b1d7c5960bcc72da5a20cb0',
'timestamp': 1685790036,
'upload_date': '20230603',
'release_timestamp': 1685617473,
'release_date': '20230601',
'duration': 1063,
'thumbnail': 'https://thumbs.odycdn.com/4e6d39da4df0cfdad45f64e253a15959.webp',
'tags': ['smart skin surveillance', 'biotechnology invasion of skin', 'morgellons'],
'license': 'None',
'protocol': 'https', # test for direct mp4 download
},
}, {
'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
'only_matching': True,
@@ -221,41 +233,64 @@ def _real_extract(self, url):
display_id = display_id.split('/', 2)[-1].replace('/', ':')
else:
display_id = display_id.replace(':', '#')
display_id = compat_urllib_parse_unquote(display_id)
display_id = urllib.parse.unquote(display_id)
uri = 'lbry://' + display_id
result = self._resolve_url(uri, display_id, 'stream')
headers = {'Referer': 'https://odysee.com/'}
if result['value'].get('stream_type') in self._SUPPORTED_STREAM_TYPES:
formats = []
stream_type = traverse_obj(result, ('value', 'stream_type', {str}))
if stream_type in self._SUPPORTED_STREAM_TYPES:
claim_id, is_live = result['claim_id'], False
streaming_url = self._call_api_proxy(
'get', claim_id, {'uri': uri}, 'streaming url')['streaming_url']
# GET request returns original video/audio file if available
ext = urlhandle_detect_ext(self._request_webpage(
streaming_url, display_id, 'Checking for original quality', headers=headers))
if ext != 'm3u8':
formats.append({
'url': streaming_url,
'format_id': 'original',
'quality': 1,
**traverse_obj(result, ('value', {
'ext': ('source', (('name', {determine_ext}), ('media_type', {mimetype2ext}))),
'filesize': ('source', 'size', {int_or_none}),
'width': ('video', 'width', {int_or_none}),
'height': ('video', 'height', {int_or_none}),
}), get_all=False),
'vcodec': 'none' if stream_type == 'audio' else None,
})
# HEAD request returns redirect response to m3u8 URL if available
final_url = self._request_webpage(
HEADRequest(streaming_url), display_id, headers=headers,
note='Downloading streaming redirect url info').geturl()
elif result.get('value_type') == 'stream':
claim_id, is_live = result['signing_channel']['claim_id'], True
live_data = self._download_json(
'https://api.odysee.live/livestream/is_live', claim_id,
query={'channel_claim_id': claim_id},
note='Downloading livestream JSON metadata')['data']
streaming_url = final_url = live_data.get('VideoURL')
final_url = live_data.get('VideoURL')
# Upcoming videos may still give VideoURL
if not live_data.get('Live'):
streaming_url = final_url = None
final_url = None
self.raise_no_formats('This stream is not live', True, claim_id)
else:
raise UnsupportedError(url)
info = self._parse_stream(result, url)
if determine_ext(final_url) == 'm3u8':
info['formats'] = self._extract_m3u8_formats(
final_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', live=is_live, headers=headers)
else:
info['url'] = streaming_url
formats.extend(self._extract_m3u8_formats(
final_url, display_id, 'mp4', m3u8_id='hls', live=is_live, headers=headers))
return {
**info,
**self._parse_stream(result, url),
'id': claim_id,
'title': result['value']['title'],
'formats': formats,
'is_live': is_live,
'http_headers': headers,
}
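The heart of the patch is the double probe of the streaming URL spelled out in the comments above: a plain GET is answered with the original upload when the CDN still has it, while a HEAD gets redirected to the HLS master playlist when one exists. A standalone sketch of that idea using the requests library, with a hypothetical streaming_url (the extractor itself does this through _request_webpage, HEADRequest and urlhandle_detect_ext so everything stays on yt-dlp's networking stack):

import requests

HEADERS = {'Referer': 'https://odysee.com/'}
streaming_url = 'https://player.odycdn.com/...'  # hypothetical; the extractor takes it from the 'get' API response

# GET: served the original file (e.g. video/mp4) if available, otherwise an HLS playlist
with requests.get(streaming_url, headers=HEADERS, stream=True) as resp:
    has_original = 'mpegurl' not in resp.headers.get('Content-Type', '').lower()

# HEAD: redirected to the m3u8 master playlist if HLS renditions exist
final_url = requests.head(streaming_url, headers=HEADERS, allow_redirects=True).url
has_hls = '.m3u8' in final_url

The original file is added as a separate 'original' format with quality=1 so that the default format sorting prefers it over the transcoded HLS renditions, which is what the new test's 'protocol': 'https' check asserts.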
@@ -299,14 +334,12 @@ def _fetch_page(self, claim_id, url, params, page):
if not (stream_claim_name and stream_claim_id):
continue
info = self._parse_stream(item, url)
info.update({
yield {
**self._parse_stream(item, url),
'_type': 'url',
'id': stream_claim_id,
'title': try_get(item, lambda x: x['value']['title']),
'url': self._permanent_url(url, stream_claim_name, stream_claim_id),
})
yield info
}
def _real_extract(self, url):
display_id = self._match_id(url).replace(':', '#')
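With this change _fetch_page yields url-type results directly, merging in the metadata from _parse_stream instead of setting the title separately with try_get. Roughly, each yielded entry has this shape (values are made up for illustration):

entry = {
    # plus the fields produced by _parse_stream(item, url): title, thumbnail, channel, duration, ...
    '_type': 'url',  # yt-dlp later re-resolves this entry through LBRYIE via the permanent URL
    'id': '0000000000000000000000000000000000000000',  # the 40-hex-digit stream claim id
    'url': 'https://odysee.com/@somechannel:2/some-upload:4',
}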