Mirror of https://github.com/yt-dlp/yt-dlp.git
fix: do not use classmethod; fix title in the base extractor
commit bd857a06a0 (parent c58ee488a9)
@@ -69,32 +69,30 @@ def _get_element_by_class_and_tag(cls, class_, tag, html):
     def _urljoin(cls, path):
         return url_or_none(urljoin(base=cls._BASE, path=path))

-    @classmethod
-    def _get_playurl(cls, item_id, item_type):
-        resp = cls._download_json(
+    def _get_playurl(self, item_id, item_type):
+        resp = self._download_json(
             'https://www.boomplay.com/getResourceAddr', item_id,
             note='Downloading play URL', errnote='Failed to download play URL',
             data=urlencode_postdata({
                 'param': base64.b64encode(aes_cbc_encrypt_bytes(json.dumps({
                     'itemID': item_id,
                     'itemType': item_type,
-                }).encode(), cls._KEY, cls._IV)).decode(),
+                }).encode(), self._KEY, self._IV)).decode(),
             }), headers={
                 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
             })
         if not (source := resp.get('source')) and (code := resp.get('code')):
             if 'unavailable in your country' in (desc := resp.get('desc')) or '':
                 # since NG must have failed ...
-                cls.raise_geo_restricted(countries=['GH', 'KE', 'TZ', 'CM', 'CI'])
+                self.raise_geo_restricted(countries=['GH', 'KE', 'TZ', 'CM', 'CI'])
             else:
                 raise ExtractorError(desc or f'Failed to get play url, code: {code}')
         return unpad_pkcs7(aes_cbc_decrypt_bytes(
             base64.b64decode(source),
-            cls._KEY, cls._IV)).decode()
+            self._KEY, self._IV)).decode()

-    @classmethod
-    def _extract_formats(cls, item_id, item_type='MUSIC', **kwargs):
-        if url := url_or_none(cls._get_playurl(item_id, item_type)):
+    def _extract_formats(self, item_id, item_type='MUSIC', **kwargs):
+        if url := url_or_none(self._get_playurl(item_id, item_type)):
             return [{
                 'format_id': '0',
                 'url': url,
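For reference, the `param` payload that `_get_playurl()` posts is an AES-CBC-encrypted, base64-encoded JSON blob, and the response's `source` field is decoded with the inverse transformation. Below is a minimal round-trip sketch using the same yt_dlp.aes helpers; the 16-byte key/IV and the item ID are placeholders, not the extractor's real `_KEY`/`_IV` values.

import base64
import json

from yt_dlp.aes import aes_cbc_decrypt_bytes, aes_cbc_encrypt_bytes, unpad_pkcs7

KEY = b'0123456789abcdef'  # placeholder, not the extractor's real _KEY
IV = b'fedcba9876543210'   # placeholder, not the extractor's real _IV

payload = json.dumps({'itemID': '123456', 'itemType': 'MUSIC'}).encode()

# what gets sent as the `param` form field
param = base64.b64encode(aes_cbc_encrypt_bytes(payload, KEY, IV)).decode()

# the inverse of the transformation applied to the response's `source` field
roundtrip = unpad_pkcs7(aes_cbc_decrypt_bytes(base64.b64decode(param), KEY, IV)).decode()
assert json.loads(roundtrip) == {'itemID': '123456', 'itemType': 'MUSIC'}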
@@ -106,30 +104,29 @@ def _extract_formats(cls, item_id, item_type='MUSIC', **kwargs):
                 **kwargs,
             }]
         else:
-            cls.raise_no_formats('No formats found')
+            self.raise_no_formats('No formats found')

-    @classmethod
-    def _extract_page_metadata(cls, webpage, item_id):
-        metadata_div = cls._get_element_by_class_and_tag('summary', 'div', webpage) or ''
+    def _extract_page_metadata(self, webpage, item_id):
+        metadata_div = self._get_element_by_class_and_tag('summary', 'div', webpage) or ''
         metadata_entries = re.findall(r'(?si)<strong>(?P<entry>.*?)</strong>', metadata_div) or []
         description = re.sub(
             r'(?i)Listen and download music for free on Boomplay!', '',
-            clean_html(cls._get_element_by_class_and_tag(
+            clean_html(self._get_element_by_class_and_tag(
                 'description_content', 'span', webpage)) or '') or None

-        details_section = cls._get_element_by_class_and_tag('songDetailInfo', 'section', webpage) or ''
+        details_section = self._get_element_by_class_and_tag('songDetailInfo', 'section', webpage) or ''
         metadata_entries.extend(re.findall(r'(?si)<li>(?P<entry>.*?)</li>', details_section) or [])
         page_metadata = {
             'id': item_id,
-            'title': cls._html_search_regex(r'(?i)<h1[^>]*>([^<]+)</h1>', webpage, 'title', default=None),
-            'thumbnail': cls._html_search_meta(['og:image', 'twitter:image'],
-                                               webpage, 'thumbnail', default=''),
-            'like_count': parse_count(cls._get_element_by_class_and_tag('btn_favorite', 'button', metadata_div)),
-            'repost_count': parse_count(cls._get_element_by_class_and_tag('btn_share', 'button', metadata_div)),
-            'comment_count': parse_count(cls._get_element_by_class_and_tag('btn_comment', 'button', metadata_div)),
-            'duration': parse_duration(cls._get_element_by_class_and_tag('btn_duration', 'button', metadata_div)),
+            **self._extract_title_from_webpage(webpage),
+            'thumbnail': self._html_search_meta(['og:image', 'twitter:image'],
+                                                webpage, 'thumbnail', default=None),
+            'like_count': parse_count(self._get_element_by_class_and_tag('btn_favorite', 'button', metadata_div)),
+            'repost_count': parse_count(self._get_element_by_class_and_tag('btn_share', 'button', metadata_div)),
+            'comment_count': parse_count(self._get_element_by_class_and_tag('btn_comment', 'button', metadata_div)),
+            'duration': parse_duration(self._get_element_by_class_and_tag('btn_duration', 'button', metadata_div)),
             'upload_date': unified_strdate(strip_or_none(
-                cls._get_element_by_class_and_tag('btn_pubDate', 'button', metadata_div))),
+                self._get_element_by_class_and_tag('btn_pubDate', 'button', metadata_div))),
             'description': description,
         }
         for metadata_entry in metadata_entries:
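The single 'title' key is replaced with a `**` dict unpacking so that the new `_extract_title_from_webpage()` helper can contribute both 'title' and, when the matched pattern captures one, 'artists'. A tiny illustration of that merge (all values invented):

# hypothetical return value of the helper
title_fields = {'title': 'Some Song', 'artists': ['Some Artist']}

page_metadata = {
    'id': '123456',
    **title_fields,  # spreads both keys into the info dict
    'thumbnail': None,
}
assert page_metadata['artists'] == ['Some Artist']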
@@ -147,6 +144,40 @@ def _extract_page_metadata(cls, webpage, item_id):
                 page_metadata['release_year'] = int_or_none(v)
         return page_metadata

+    def _extract_title_from_webpage(self, webpage):
+        if h1_title := self._html_search_regex(r'(?i)<h1[^>]*>([^<]+)</h1>', webpage, 'title', default=None):
+            return {'title': h1_title}
+        else:
+            return self._fix_title(
+                self._html_search_meta(['og:title', 'twitter:title'], webpage, 'title', default=None)
+                or self._html_search_regex(r'(?i)<title[^>]*>([^<]+)</title>', webpage, 'title', default=None))
+
+    @staticmethod
+    def _fix_title(title):
+        """
+        Fix the various title formats (og:title, twitter:title, the <title> tag in the HTML head)
+        """
+        if not title:
+            return {}
+
+        title_patterns = (
+            r'^(?P<title>(?P<artist>.+)) Songs MP3 Download, New Songs \& Albums \| Boomplay$',  # artists
+            r'^(?P<artist>.+?) - (?P<title>.+) MP3\ Download \& Lyrics \| Boomplay$',  # music
+            r'^Download (?P<artist>.+) album songs: (?P<title>.+?) \| Boomplay Music$',  # album
+            r'^Search:(?P<title>.+) \| Boomplay Music$',  # search URL
+            r'^(?P<title>.+) \| Podcast \| Boomplay$',  # podcast, episode
+            r'^(?P<title>.+) \| Boomplay(?: Music)?$',  # video, playlist, generic playlists
+        )
+
+        for pattern in title_patterns:
+            if match := re.search(pattern, title):
+                return {
+                    'title': match.group('title'),
+                    'artists': [match.group('artist')] if 'artist' in match.groupdict() else None,
+                }
+
+        return {'title': title}
+
     @classmethod
     def _extract_from_webpage(cls, url, webpage, **kwargs):
         if kwargs:
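The new `_fix_title()` helper strips Boomplay's title boilerplate and recovers the artist where the matched pattern captures one. Below is a standalone sketch of that matching loop, reproducing two of the six patterns from the hunk above; the sample page titles are invented for illustration, not taken from the site.

import re

# two of the title patterns from the diff above (music and podcast/episode pages)
TITLE_PATTERNS = (
    r'^(?P<artist>.+?) - (?P<title>.+) MP3\ Download \& Lyrics \| Boomplay$',  # music
    r'^(?P<title>.+) \| Podcast \| Boomplay$',  # podcast, episode
)

def fix_title(title):
    # mirror of the diff's matching loop: the first pattern that matches wins
    for pattern in TITLE_PATTERNS:
        if match := re.search(pattern, title):
            return {
                'title': match.group('title'),
                'artists': [match.group('artist')] if 'artist' in match.groupdict() else None,
            }
    return {'title': title}

print(fix_title('Some Artist - Some Song MP3 Download & Lyrics | Boomplay'))
# -> {'title': 'Some Song', 'artists': ['Some Artist']}
print(fix_title('Some Show Episode 1 | Podcast | Boomplay'))
# -> {'title': 'Some Show Episode 1', 'artists': None}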
@@ -166,7 +197,7 @@ def _extract_embed_urls(cls, url, webpage):
                 <a
                 (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
                 (?<=\s)href\s*=\s*(?P<_q>['"])
-                (?!javascript:)(?P<href>/(?:{media_types})/\d+/?[\-\w=?&#:;@]*)
+                (?P<href>/(?:{media_types})/\d+/?[\-\w=?&#:;@]*)
                 (?P=_q)
                 (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
                 >''', webpage):
@@ -290,7 +321,6 @@ def _real_extract(self, url):
         webpage = self._download_webpage(url, ep_id)
         return merge_dicts(
             self._extract_page_metadata(webpage, ep_id), {
-                'title': self._og_search_title(webpage, default='').rsplit('|', 2)[0].strip() or None,
                 'description': self._html_search_meta(
                     ['description', 'og:description', 'twitter:description'], webpage),
                 'formats': self._extract_formats(ep_id, 'EPISODE', vcodec='none'),