Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-15 21:03:18 +00:00)
[RaiplayRadio] Add extractors (#780)
Original PR: https://github.com/ytdl-org/youtube-dl/pull/21837 Authored by: frafra
This commit is contained in: parent aeb2a9ad27, commit 73673ccff3
@@ -1156,6 +1156,8 @@
     RaiPlayLiveIE,
     RaiPlayPlaylistIE,
     RaiIE,
+    RaiPlayRadioIE,
+    RaiPlayRadioPlaylistIE,
 )
 from .raywenderlich import (
     RayWenderlichIE,
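Not part of the diff: once the two classes are registered above, they resolve through yt-dlp's normal extractor lookup. A minimal sketch of exercising them via the Python API, assuming a build that contains this commit and using the commit's own test URL:

import yt_dlp

# Metadata-only extraction; the RaiPlayRadioIE added in this commit handles the URL.
with yt_dlp.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info(
        'https://www.raiplayradio.it/audio/2019/07/RADIO3---LEZIONI-DI-MUSICA-'
        '36b099ff-4123-4443-9bf9-38e43ef5e025.html',
        download=False)
    print(info['id'], info['ext'], info['title'])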

@@ -14,12 +14,15 @@
     find_xpath_attr,
     fix_xml_ampersands,
     GeoRestrictedError,
+    get_element_by_class,
     HEADRequest,
     int_or_none,
     parse_duration,
+    parse_list,
     remove_start,
     strip_or_none,
     try_get,
+    unescapeHTML,
     unified_strdate,
     unified_timestamp,
     update_url_query,
@@ -585,3 +588,84 @@ def _real_extract(self, url):
         info.update(relinker_info)
 
         return info
+
+
+class RaiPlayRadioBaseIE(InfoExtractor):
+    _BASE = 'https://www.raiplayradio.it'
+
+    def get_playlist_iter(self, url, uid):
+        webpage = self._download_webpage(url, uid)
+        for attrs in parse_list(webpage):
+            title = attrs['data-title'].strip()
+            audio_url = urljoin(url, attrs['data-mediapolis'])
+            entry = {
+                'url': audio_url,
+                'id': attrs['data-uniquename'].lstrip('ContentItem-'),
+                'title': title,
+                'ext': 'mp3',
+                'language': 'it',
+            }
+            if 'data-image' in attrs:
+                entry['thumbnail'] = urljoin(url, attrs['data-image'])
+            yield entry
+
+
+class RaiPlayRadioIE(RaiPlayRadioBaseIE):
+    _VALID_URL = r'%s/audio/.+?-(?P<id>%s)\.html' % (
+        RaiPlayRadioBaseIE._BASE, RaiBaseIE._UUID_RE)
+    _TEST = {
+        'url': 'https://www.raiplayradio.it/audio/2019/07/RADIO3---LEZIONI-DI-MUSICA-36b099ff-4123-4443-9bf9-38e43ef5e025.html',
+        'info_dict': {
+            'id': '36b099ff-4123-4443-9bf9-38e43ef5e025',
+            'ext': 'mp3',
+            'title': 'Dal "Chiaro di luna" al "Clair de lune", prima parte con Giovanni Bietti',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'language': 'it',
+        }
+    }
+
+    def _real_extract(self, url):
+        audio_id = self._match_id(url)
+        list_url = url.replace('.html', '-list.html')
+        return next(entry for entry in self.get_playlist_iter(list_url, audio_id) if entry['id'] == audio_id)
+
+
+class RaiPlayRadioPlaylistIE(RaiPlayRadioBaseIE):
+    _VALID_URL = r'%s/playlist/.+?-(?P<id>%s)\.html' % (
+        RaiPlayRadioBaseIE._BASE, RaiBaseIE._UUID_RE)
+    _TEST = {
+        'url': 'https://www.raiplayradio.it/playlist/2017/12/Alice-nel-paese-delle-meraviglie-72371d3c-d998-49f3-8860-d168cfdf4966.html',
+        'info_dict': {
+            'id': '72371d3c-d998-49f3-8860-d168cfdf4966',
+            'title': "Alice nel paese delle meraviglie",
+            'description': "di Lewis Carrol letto da Aldo Busi",
+        },
+        'playlist_count': 11,
+    }
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+        playlist_webpage = self._download_webpage(url, playlist_id)
+        playlist_title = unescapeHTML(self._html_search_regex(
+            r'data-playlist-title="(.+?)"', playlist_webpage, 'title'))
+        playlist_creator = self._html_search_meta(
+            'nomeProgramma', playlist_webpage)
+        playlist_description = get_element_by_class(
+            'textDescriptionProgramma', playlist_webpage)
+
+        player_href = self._html_search_regex(
+            r'data-player-href="(.+?)"', playlist_webpage, 'href')
+        list_url = urljoin(url, player_href)
+
+        entries = list(self.get_playlist_iter(list_url, playlist_id))
+        for index, entry in enumerate(entries, start=1):
+            entry.update({
+                'track': entry['title'],
+                'track_number': index,
+                'artist': playlist_creator,
+                'album': playlist_title
+            })
+
+        return self.playlist_result(
+            entries, playlist_id, playlist_title, playlist_description,
+            creator=playlist_creator)
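Not part of the diff: a minimal sketch of the kind of markup get_playlist_iter expects on the companion -list.html pages. The data-* attribute names come from the extractor code above; the bare-<li> layout and the attribute values are assumptions for illustration only.

# A sketch only: feed a hypothetical -list.html fragment through the
# parse_list helper that this diff adds to yt_dlp.utils further down.
from yt_dlp.utils import parse_list, urljoin

# Attribute names match the extractor code; the values are invented.
sample_list_page = '''
<li data-uniquename="ContentItem-36b099ff-4123-4443-9bf9-38e43ef5e025"
    data-title="Lezioni di musica"
    data-mediapolis="/relinker/relinkerServlet.htm?cont=12345"
    data-image="/dl/img/2019/07/cover.jpg">
</li>
'''

for attrs in parse_list(sample_list_page):
    print(attrs['data-title'].strip())
    print(urljoin('https://www.raiplayradio.it/', attrs['data-mediapolis']))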
@@ -2006,6 +2006,23 @@ def handle_starttag(self, tag, attrs):
         self.attrs = dict(attrs)
 
 
+class HTMLListAttrsParser(compat_HTMLParser):
+    """HTML parser to gather the attributes for the elements of a list"""
+
+    def __init__(self):
+        compat_HTMLParser.__init__(self)
+        self.items = []
+        self._level = 0
+
+    def handle_starttag(self, tag, attrs):
+        if tag == 'li' and self._level == 0:
+            self.items.append(dict(attrs))
+        self._level += 1
+
+    def handle_endtag(self, tag):
+        self._level -= 1
+
+
 def extract_attributes(html_element):
     """Given a string for an HTML element such as
     <el
@@ -2032,6 +2049,15 @@ def extract_attributes(html_element):
     return parser.attrs
 
 
+def parse_list(webpage):
+    """Given a string for an series of HTML <li> elements,
+    return a dictionary of their attributes"""
+    parser = HTMLListAttrsParser()
+    parser.feed(webpage)
+    parser.close()
+    return parser.items
+
+
 def clean_html(html):
     """Clean an HTML snippet into a readable string"""
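A quick illustration (not in the commit) of how the _level counter in HTMLListAttrsParser behaves: only <li> elements at depth 0 of the fed fragment are collected, so nested lists are skipped.

from yt_dlp.utils import parse_list

# A bare run of <li> elements; the inner <ul>/<li> sits at a deeper level
# and is therefore ignored by the parser.
fragment = (
    '<li data-id="1">first'
    '<ul><li data-id="1a">nested, ignored</li></ul>'
    '</li>'
    '<li data-id="2">second</li>'
)
print(parse_list(fragment))
# Expected: [{'data-id': '1'}, {'data-id': '2'}]

Because the counter is bumped for every start tag, wrapping the fragment in a <ul> would push the items to depth 1 and nothing would be collected; presumably the RaiPlayRadio -list pages serve exactly such a bare <li> run, which is what the docstring's "series of HTML <li> elements" refers to.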