Merge branch 'yt-dlp:master' into master

This commit is contained in:
Eric Lam 2024-04-14 04:24:44 +08:00 committed by GitHub
commit 634a0ac756
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 120 additions and 55 deletions

View file

@ -1837,6 +1837,9 @@ #### nflplusreplay
#### jiosaavn
* `bitrate`: Audio bitrates to request. One or more of `16`, `32`, `64`, `128`, `320`. Default is `128,320`
#### afreecatvlive
* `cdn`: One or more CDN IDs to use with the API call for stream URLs, e.g. `gcp_cdn`, `gs_cdn_pc_app`, `gs_cdn_mobile_web`, `gs_cdn_pc_web`
**Note**: These options may be changed/removed in the future without concern for backward compatibility
<!-- MANPAGE: MOVE "INSTALLATION" SECTION HERE -->

View file

@ -8,9 +8,11 @@
determine_ext,
filter_dict,
int_or_none,
orderedSet,
unified_timestamp,
url_or_none,
urlencode_postdata,
urljoin,
)
from ..utils.traversal import traverse_obj
@ -276,6 +278,47 @@ class AfreecaTVLiveIE(AfreecaTVBaseIE):
}]
_LIVE_API_URL = 'https://live.afreecatv.com/afreeca/player_live_api.php'
_WORKING_CDNS = [
'gcp_cdn', # live-global-cdn-v02.afreecatv.com
'gs_cdn_pc_app', # pc-app.stream.afreecatv.com
'gs_cdn_mobile_web', # mobile-web.stream.afreecatv.com
'gs_cdn_pc_web', # pc-web.stream.afreecatv.com
]
_BAD_CDNS = [
'gs_cdn', # chromecast.afreeca.gscdn.com (cannot resolve)
'gs_cdn_chromecast', # chromecast.stream.afreecatv.com (HTTP Error 400)
'azure_cdn', # live-global-cdn-v01.afreecatv.com (cannot resolve)
'aws_cf', # live-global-cdn-v03.afreecatv.com (cannot resolve)
'kt_cdn', # kt.stream.afreecatv.com (HTTP Error 400)
]
def _extract_formats(self, channel_info, broadcast_no, aid):
    """Try each candidate CDN in order and return HLS formats from the first one that works.

    Raises the last ExtractorError if every CDN attempt fails.
    """
    stream_base_url = channel_info.get('RMD') or 'https://livestream-manager.afreecatv.com'

    # If user has not passed CDN IDs, try API-provided CDN ID followed by other working CDN IDs
    default_cdn_ids = orderedSet([
        *traverse_obj(channel_info, ('CDN', {str}, all, lambda _, v: v not in self._BAD_CDNS)),
        *self._WORKING_CDNS,
    ])
    cdn_ids = self._configuration_arg('cdn', default_cdn_ids)

    total_attempts = len(cdn_ids)
    for attempt, cdn in enumerate(cdn_ids, start=1):
        # Ask the stream-assignment endpoint for this CDN's playlist URL (best-effort).
        stream_info = self._download_json(
            urljoin(stream_base_url, 'broad_stream_assign.html'), broadcast_no,
            f'Downloading {cdn} stream info', f'Unable to download {cdn} stream info',
            fatal=False, query={
                'return_type': cdn,
                'broad_key': f'{broadcast_no}-common-master-hls',
            })
        m3u8_url = traverse_obj(stream_info, ('view_url', {url_or_none}))
        try:
            return self._extract_m3u8_formats(
                m3u8_url, broadcast_no, 'mp4', m3u8_id='hls', query={'aid': aid},
                headers={'Referer': 'https://play.afreecatv.com/'})
        except ExtractorError as e:
            # Re-raise only once every CDN has been tried; otherwise warn and fall through.
            if attempt == total_attempts:
                raise
            self.report_warning(
                f'{e.cause or e.msg}. Retrying... (attempt {attempt} of {total_attempts})')
def _real_extract(self, url):
broadcaster_id, broadcast_no = self._match_valid_url(url).group('id', 'bno')
@ -294,7 +337,7 @@ def _real_extract(self, url):
'This livestream is protected by a password, use the --video-password option',
expected=True)
aid = self._download_json(
token_info = traverse_obj(self._download_json(
self._LIVE_API_URL, broadcast_no, 'Downloading access token for stream',
'Unable to download access token for stream', data=urlencode_postdata(filter_dict({
'bno': broadcast_no,
@ -302,18 +345,17 @@ def _real_extract(self, url):
'type': 'aid',
'quality': 'master',
'pwd': password,
})))['CHANNEL']['AID']
}))), ('CHANNEL', {dict})) or {}
aid = token_info.get('AID')
if not aid:
result = token_info.get('RESULT')
if result == 0:
raise ExtractorError('This livestream has ended', expected=True)
elif result == -6:
self.raise_login_required('This livestream is for subscribers only', method='password')
raise ExtractorError('Unable to extract access token')
stream_base_url = channel_info.get('RMD') or 'https://livestream-manager.afreecatv.com'
stream_info = self._download_json(f'{stream_base_url}/broad_stream_assign.html', broadcast_no, query={
# works: gs_cdn_pc_app, gs_cdn_mobile_web, gs_cdn_pc_web
'return_type': 'gs_cdn_pc_app',
'broad_key': f'{broadcast_no}-common-master-hls',
}, note='Downloading metadata for stream', errnote='Unable to download metadata for stream')
formats = self._extract_m3u8_formats(
stream_info['view_url'], broadcast_no, 'mp4', m3u8_id='hls',
query={'aid': aid}, headers={'Referer': url})
formats = self._extract_formats(channel_info, broadcast_no, aid)
station_info = traverse_obj(self._download_json(
'https://st.afreecatv.com/api/get_station_status.php', broadcast_no,

View file

@ -1,10 +1,12 @@
import functools
import math
import re
from .common import InfoExtractor
from ..utils import (
format_field,
InAdvancePagedList,
clean_html,
int_or_none,
js_to_json,
make_archive_id,
smuggle_url,
unsmuggle_url,
@ -16,6 +18,7 @@
class JioSaavnBaseIE(InfoExtractor):
_API_URL = 'https://www.jiosaavn.com/api.php'
_VALID_BITRATES = {'16', '32', '64', '128', '320'}
@functools.cached_property
@ -30,7 +33,7 @@ def requested_bitrates(self):
def _extract_formats(self, song_data):
for bitrate in self.requested_bitrates:
media_data = self._download_json(
'https://www.jiosaavn.com/api.php', song_data['id'],
self._API_URL, song_data['id'],
f'Downloading format info for {bitrate}',
fatal=False, data=urlencode_postdata({
'__call': 'song.generateAuthToken',
@ -50,31 +53,45 @@ def _extract_formats(self, song_data):
'vcodec': 'none',
}
def _extract_song(self, song_data):
def _extract_song(self, song_data, url=None):
info = traverse_obj(song_data, {
'id': ('id', {str}),
'title': ('title', 'text', {str}),
'album': ('album', 'text', {str}),
'thumbnail': ('image', 0, {url_or_none}),
'title': ('song', {clean_html}),
'album': ('album', {clean_html}),
'thumbnail': ('image', {url_or_none}, {lambda x: re.sub(r'-\d+x\d+\.', '-500x500.', x)}),
'duration': ('duration', {int_or_none}),
'view_count': ('play_count', {int_or_none}),
'release_year': ('year', {int_or_none}),
'artists': ('artists', lambda _, v: v['role'] == 'singer', 'name', {str}),
'webpage_url': ('perma_url', {url_or_none}), # for song, playlist extraction
'artists': ('primary_artists', {lambda x: x.split(', ') if x else None}),
'webpage_url': ('perma_url', {url_or_none}),
})
if not info.get('webpage_url'): # for album extraction / fallback
info['webpage_url'] = format_field(
song_data, [('title', 'action')], 'https://www.jiosaavn.com%s') or None
if webpage_url := info['webpage_url']:
info['_old_archive_ids'] = [make_archive_id(JioSaavnSongIE, url_basename(webpage_url))]
if webpage_url := info.get('webpage_url') or url:
info['display_id'] = url_basename(webpage_url)
info['_old_archive_ids'] = [make_archive_id(JioSaavnSongIE, info['display_id'])]
return info
def _extract_initial_data(self, url, display_id):
    # Fetch the page and parse the JS-embedded window.__INITIAL_DATA__ object as JSON.
    page = self._download_webpage(url, display_id)
    initial_data = self._search_json(
        r'window\.__INITIAL_DATA__\s*=', page,
        'initial data', display_id, transform_source=js_to_json)
    return initial_data
def _call_api(self, type_, token, note='API', params=None):
    """Query the JioSaavn web API and return the decoded JSON response.

    @param type_   API entity type (e.g. 'song', 'album', 'playlist')
    @param token   entity token; also used as the video id for logging
    @param note    human-readable label used in download notes/errors
    @param params  optional extra query parameters merged into the request
    """
    # NOTE: default was the mutable literal `{}`; use None sentinel instead
    # (shared-instance anti-pattern), behavior is unchanged for all callers.
    return self._download_json(
        self._API_URL, token, f'Downloading {note} JSON', f'Unable to download {note} JSON',
        query={
            '__call': 'webapi.get',
            '_format': 'json',
            '_marker': '0',
            'ctx': 'web6dot0',
            'token': token,
            'type': type_,
            **(params or {}),
        })
def _yield_songs(self, playlist_data):
    # Yield url_transparent results for every song entry carrying both an id and a permalink.
    entries = traverse_obj(playlist_data, ('songs', lambda _, v: v['id'] and v['perma_url']))
    for entry in entries:
        info = self._extract_song(entry)
        # Smuggle the id and media URL so JioSaavnSongIE can skip re-fetching metadata.
        smuggled = smuggle_url(info['webpage_url'], {
            'id': entry['id'],
            'encrypted_media_url': entry['encrypted_media_url'],
        })
        yield self.url_result(smuggled, JioSaavnSongIE, url_transparent=True, **info)
class JioSaavnSongIE(JioSaavnBaseIE):
@ -85,10 +102,11 @@ class JioSaavnSongIE(JioSaavnBaseIE):
'md5': '3b84396d15ed9e083c3106f1fa589c04',
'info_dict': {
'id': 'IcoLuefJ',
'display_id': 'OQsEfQFVUXk',
'ext': 'm4a',
'title': 'Leja Re',
'album': 'Leja Re',
'thumbnail': 'https://c.saavncdn.com/258/Leja-Re-Hindi-2018-20181124024539-500x500.jpg',
'thumbnail': r're:https?://c.saavncdn.com/258/Leja-Re-Hindi-2018-20181124024539-500x500.jpg',
'duration': 205,
'view_count': int,
'release_year': 2018,
@ -111,8 +129,8 @@ def _real_extract(self, url):
result = {'id': song_data['id']}
else:
# only extract metadata if this is not a url_transparent result
song_data = self._extract_initial_data(url, self._match_id(url))['song']['song']
result = self._extract_song(song_data)
song_data = self._call_api('song', self._match_id(url))['songs'][0]
result = self._extract_song(song_data, url)
result['formats'] = list(self._extract_formats(song_data))
return result
@ -130,19 +148,12 @@ class JioSaavnAlbumIE(JioSaavnBaseIE):
'playlist_count': 10,
}]
def _entries(self, playlist_data):
for song_data in traverse_obj(playlist_data, (
'modules', lambda _, x: x['key'] == 'list', 'data', lambda _, v: v['title']['action'])):
song_info = self._extract_song(song_data)
# album song data is missing artists and release_year, need to re-extract metadata
yield self.url_result(song_info['webpage_url'], JioSaavnSongIE, **song_info)
def _real_extract(self, url):
display_id = self._match_id(url)
album_data = self._extract_initial_data(url, display_id)['albumView']
album_data = self._call_api('album', display_id)
return self.playlist_result(
self._entries(album_data), display_id, traverse_obj(album_data, ('album', 'title', 'text', {str})))
self._yield_songs(album_data), display_id, traverse_obj(album_data, ('title', {str})))
class JioSaavnPlaylistIE(JioSaavnBaseIE):
@ -154,21 +165,30 @@ class JioSaavnPlaylistIE(JioSaavnBaseIE):
'id': 'LlJ8ZWT1ibN5084vKHRj2Q__',
'title': 'Mood English',
},
'playlist_mincount': 50,
'playlist_mincount': 301,
}, {
'url': 'https://www.jiosaavn.com/s/playlist/2279fbe391defa793ad7076929a2f5c9/mood-hindi/DVR,pFUOwyXqIp77B1JF,A__',
'info_dict': {
'id': 'DVR,pFUOwyXqIp77B1JF,A__',
'title': 'Mood Hindi',
},
'playlist_mincount': 801,
}]
_PAGE_SIZE = 50
def _entries(self, playlist_data):
for song_data in traverse_obj(playlist_data, ('list', lambda _, v: v['perma_url'])):
song_info = self._extract_song(song_data)
url = smuggle_url(song_info['webpage_url'], {
'id': song_data['id'],
'encrypted_media_url': song_data['encrypted_media_url'],
})
yield self.url_result(url, JioSaavnSongIE, url_transparent=True, **song_info)
def _fetch_page(self, token, page):
    # Download one page of playlist data; the API's `p` parameter is 1-indexed.
    note = f'playlist page {page}'
    params = {'p': page, 'n': self._PAGE_SIZE}
    return self._call_api('playlist', token, note, params)
def _entries(self, token, first_page_data, page):
    # Page index 0 reuses the data already fetched in _real_extract;
    # any later page is fetched lazily (API pages are 1-indexed, hence page + 1).
    if page:
        page_data = self._fetch_page(token, page + 1)
    else:
        page_data = first_page_data
    yield from self._yield_songs(page_data)
def _real_extract(self, url):
display_id = self._match_id(url)
playlist_data = self._extract_initial_data(url, display_id)['playlist']['playlist']
playlist_data = self._fetch_page(display_id, 1)
total_pages = math.ceil(int(playlist_data['list_count']) / self._PAGE_SIZE)
return self.playlist_result(
self._entries(playlist_data), display_id, traverse_obj(playlist_data, ('title', 'text', {str})))
return self.playlist_result(InAdvancePagedList(
functools.partial(self._entries, display_id, playlist_data),
total_pages, self._PAGE_SIZE), display_id, traverse_obj(playlist_data, ('listname', {str})))