Mirror of https://github.com/yt-dlp/yt-dlp.git

[ie] additional partial_application-related cleanup
Authored by: bashonly
Commit: 091805bb11 (parent: 61dcaf74ba)
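The hunks below inline partial applications such as float_or_none(scale=1000) and int_or_none(scale=1000) in place of small single-purpose converters (a local seconds_or_none variable and a _kilo_or_none static method). The partial_application machinery itself is not part of this diff; what follows is only a minimal standalone sketch of the assumed behaviour, namely that calling a converter without its value argument returns a partially applied callable. The decorator, the simplified signatures and the sample values are illustrative, not yt-dlp's actual implementation.

    import functools
    import inspect

    def partial_application(func):
        # Hypothetical sketch: if the first (value) parameter is not supplied,
        # return a functools.partial that waits for it instead of running func.
        first_param = next(iter(inspect.signature(func).parameters))

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            if not args and first_param not in kwargs:
                return functools.partial(func, **kwargs)
            return func(*args, **kwargs)

        return wrapped

    @partial_application
    def float_or_none(v, scale=1, invscale=1, default=None):
        # simplified stand-in for yt-dlp's float_or_none
        try:
            return float(v) * invscale / scale
        except (ValueError, TypeError):
            return default

    @partial_application
    def int_or_none(v, scale=1, default=None, invscale=1):
        # simplified stand-in for yt-dlp's int_or_none
        try:
            return int(v) * invscale // scale
        except (ValueError, TypeError):
            return default

    # Calling with a value behaves as before:
    assert float_or_none('90000', scale=1000) == 90.0
    # Calling without a value yields a reusable converter:
    seconds_or_none = float_or_none(scale=1000)
    assert seconds_or_none(1500) == 1.5
    assert int_or_none(scale=1000)(320000) == 320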
@@ -145,10 +145,9 @@ def _real_extract(self, url):
         tp_metadata = self._download_json(
             update_url_query(tp_url, {'format': 'preview'}), video_id, fatal=False)
 
-        seconds_or_none = float_or_none(scale=1000)
         chapters = traverse_obj(tp_metadata, ('chapters', ..., {
-            'start_time': ('startTime', {seconds_or_none}),
-            'end_time': ('endTime', {seconds_or_none}),
+            'start_time': ('startTime', {float_or_none(scale=1000)}),
+            'end_time': ('endTime', {float_or_none(scale=1000)}),
         }))
         # prune pointless single chapters that span the entire duration from short videos
         if len(chapters) == 1 and not traverse_obj(chapters, (0, 'end_time')):

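In traverse_obj paths, a callable wrapped in a one-element set, such as {float_or_none(scale=1000)} above, is applied as a transformation to each traversed value, so this hunk converts millisecond startTime/endTime fields into seconds. A rough plain-Python equivalent, using a hypothetical chapters payload and a local stand-in converter (neither is taken from the real extractor):

    # Hypothetical ThePlatform-style metadata shaped like what the hunk traverses
    tp_metadata = {
        'chapters': [
            {'startTime': 0, 'endTime': 90000},
            {'startTime': 90000, 'endTime': 184500},
        ],
    }

    def ms_to_s(value):
        # local stand-in for float_or_none(scale=1000): milliseconds -> seconds
        return None if value is None else float(value) / 1000

    # Rough equivalent of:
    #   traverse_obj(tp_metadata, ('chapters', ..., {
    #       'start_time': ('startTime', {float_or_none(scale=1000)}),
    #       'end_time': ('endTime', {float_or_none(scale=1000)}),
    #   }))
    chapters = [
        {'start_time': ms_to_s(chapter.get('startTime')),
         'end_time': ms_to_s(chapter.get('endTime'))}
        for chapter in tp_metadata.get('chapters') or []
    ]
    assert chapters == [
        {'start_time': 0.0, 'end_time': 90.0},
        {'start_time': 90.0, 'end_time': 184.5},
    ]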
@@ -168,8 +167,8 @@ def _real_extract(self, url):
             **merge_dicts(traverse_obj(tp_metadata, {
                 'title': 'title',
                 'description': 'description',
-                'duration': ('duration', {seconds_or_none}),
-                'timestamp': ('pubDate', {seconds_or_none}),
+                'duration': ('duration', {float_or_none(scale=1000)}),
+                'timestamp': ('pubDate', {float_or_none(scale=1000)}),
                 'season_number': (('pl1$seasonNumber', 'nbcu$seasonNumber'), {int_or_none}),
                 'episode_number': (('pl1$episodeNumber', 'nbcu$episodeNumber'), {int_or_none}),
                 'series': (('pl1$show', 'nbcu$show'), (None, ...), {str}),

@@ -1,5 +1,3 @@
-import functools
-
 from .common import InfoExtractor
 from ..utils import (
     UserNotLive,

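This hunk, like the two similar ones at the end of the diff, only drops an import functools line that has become unused. The earlier uses are not shown here; presumably converters that used to be built with functools.partial are now written through the utils' own partial application, which is what makes the import redundant. A minimal sketch of that equivalence, with a simplified local float_or_none:

    import functools

    def float_or_none(v, scale=1, invscale=1, default=None):
        # simplified local stand-in for the yt-dlp helper
        try:
            return float(v) * invscale / scale
        except (ValueError, TypeError):
            return default

    # Old spelling: build a milliseconds-to-seconds converter with functools.partial
    ms_to_s = functools.partial(float_or_none, scale=1000)
    assert ms_to_s(2500) == 2.5

    # New spelling (needs the partial_application-aware helpers sketched above):
    #   ms_to_s = float_or_none(scale=1000)
    # which leaves functools unused and lets the import go.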
@@ -1,4 +1,3 @@
-import functools
 import json
 import re
 

@@ -36,10 +36,6 @@ class NetEaseMusicBaseIE(InfoExtractor):
     _API_BASE = 'http://music.163.com/api/'
     _GEO_BYPASS = False
 
-    @staticmethod
-    def _kilo_or_none(value):
-        return int_or_none(value, scale=1000)
-
     def _create_eapi_cipher(self, api_path, query_body, cookies):
         request_text = json.dumps({**query_body, 'header': cookies}, separators=(',', ':'))
 

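The hunk above deletes the _kilo_or_none static method from NetEaseMusicBaseIE; the call sites in the following hunks switch to an inline int_or_none(scale=1000), which is intended to be the same converter (it appears to be used for bitrates in bps and for timestamps and durations in milliseconds). A small self-contained check of that equivalence with a simplified local int_or_none:

    def int_or_none(v, scale=1, default=None, invscale=1):
        # simplified local stand-in for the yt-dlp helper
        try:
            return int(v) * invscale // scale
        except (ValueError, TypeError):
            return default

    # The removed helper, as it appeared in the hunk above:
    def _kilo_or_none(value):
        return int_or_none(value, scale=1000)

    # _kilo_or_none is just int_or_none with scale=1000, which is what the
    # inline partial int_or_none(scale=1000) expresses at the call sites:
    for raw in (320000, '185000', 'n/a', None):
        assert _kilo_or_none(raw) == int_or_none(raw, scale=1000)

    assert _kilo_or_none(320000) == 320   # e.g. 320000 bps -> 320 kbps
    assert _kilo_or_none(185000) == 185   # e.g. 185000 ms -> 185 s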
@@ -101,7 +97,7 @@ def _extract_formats(self, info):
                 'vcodec': 'none',
                 **traverse_obj(song, {
                     'ext': ('type', {str}),
-                    'abr': ('br', {self._kilo_or_none}),
+                    'abr': ('br', {int_or_none(scale=1000)}),
                     'filesize': ('size', {int_or_none}),
                 }),
             })

@@ -282,9 +278,9 @@ def _real_extract(self, url):
             **lyric_data,
             **traverse_obj(info, {
                 'title': ('name', {str}),
-                'timestamp': ('album', 'publishTime', {self._kilo_or_none}),
+                'timestamp': ('album', 'publishTime', {int_or_none(scale=1000)}),
                 'thumbnail': ('album', 'picUrl', {url_or_none}),
-                'duration': ('duration', {self._kilo_or_none}),
+                'duration': ('duration', {int_or_none(scale=1000)}),
                 'album': ('album', 'name', {str}),
                 'average_rating': ('score', {int_or_none}),
             }),

@@ -440,7 +436,7 @@ def _real_extract(self, url):
             'tags': ('tags', ..., {str}),
             'uploader': ('creator', 'nickname', {str}),
             'uploader_id': ('creator', 'userId', {str_or_none}),
-            'timestamp': ('updateTime', {self._kilo_or_none}),
+            'timestamp': ('updateTime', {int_or_none(scale=1000)}),
         }))
         if traverse_obj(info, ('playlist', 'specialType')) == 10:
             metainfo['title'] = f'{metainfo.get("title")} {strftime_or_none(metainfo.get("timestamp"), "%Y-%m-%d")}'

@@ -520,7 +516,7 @@ def _real_extract(self, url):
             'description': (('desc', 'briefDesc'), {str}, filter),
             'upload_date': ('publishTime', {unified_strdate}),
             'thumbnail': ('cover', {url_or_none}),
-            'duration': ('duration', {self._kilo_or_none}),
+            'duration': ('duration', {int_or_none(scale=1000)}),
             'view_count': ('playCount', {int_or_none}),
             'like_count': ('likeCount', {int_or_none}),
             'comment_count': ('commentCount', {int_or_none}),

@@ -588,7 +584,7 @@ def _real_extract(self, url):
             'description': ('description', {str}),
             'creator': ('dj', 'brand', {str}),
             'thumbnail': ('coverUrl', {url_or_none}),
-            'timestamp': ('createTime', {self._kilo_or_none}),
+            'timestamp': ('createTime', {int_or_none(scale=1000)}),
         })
 
         if not self._yes_playlist(

@@ -598,7 +594,7 @@ def _real_extract(self, url):
             return {
                 'id': str(info['mainSong']['id']),
                 'formats': formats,
-                'duration': traverse_obj(info, ('mainSong', 'duration', {self._kilo_or_none})),
+                'duration': traverse_obj(info, ('mainSong', 'duration', {int_or_none(scale=1000)})),
                 **metainfo,
             }
 

@@ -1,4 +1,3 @@
-import functools
 import random
 import re
 import string

@@ -1,4 +1,3 @@
-import functools
 import itertools
 
 from .common import InfoExtractor