commit 8409501206
parent be95cac157

use search_regex in new IEs
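
The hunks below replace hand-rolled re.search calls and their "if mobj is None" error handling with the _search_regex helper from the extractor base class. As a rough, self-contained sketch of the contract the new call sites rely on (the standalone search_regex function, the ExtractorError stand-in and the sample page string are illustrative, not the real InfoExtractor implementation): a hit returns the capture group, while a miss either returns the caller-supplied default, warns and returns None when fatal=False, or raises.

import re

class ExtractorError(Exception):
    """Stand-in for youtube_dl.utils.ExtractorError, used only in this sketch."""
    pass

_NO_DEFAULT = object()   # sentinel so an explicit default=None can still be honoured

def search_regex(pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
    # Return the first populated capture group on a match; on a miss either
    # return the supplied default, warn and return None (fatal=False), or raise.
    mobj = re.search(pattern, string, flags)
    if mobj:
        groups = [g for g in mobj.groups() if g is not None]
        return groups[0] if groups else mobj.group(0)
    if default is not _NO_DEFAULT:
        return default
    if not fatal:
        print(u'WARNING: unable to extract %s' % name)
        return None
    raise ExtractorError(u'Unable to extract %s' % name)

# Mirrors the three call shapes used in this commit:
page = "<title>Some clip - xHamster.com</title>"
title = search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>', page, u'title')
description = search_regex(r'<span>Description: </span>(?P<description>[^<]+)',
                           page, u'description', fatal=False)          # -> warning, None
uploader_id = search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^>]+)',
                           page, u'uploader id', default=u'anonymous') # -> 'anonymous'

The real helper lives on InfoExtractor and routes the non-fatal case through the downloader's warning reporting rather than print(), but the plain, fatal=False and default=... call shapes used in this commit follow the control flow above.
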
@@ -3347,7 +3347,7 @@ def _real_extract(self, url):
         title = clean_html(title)

         video_description = self._search_regex(r'<meta property="og:description" content="(?P<desc>.*?)"',
-            webpage, u'description', flags=re.DOTALL)
+            webpage, u'description', fatal=False, flags=re.DOTALL)
         if video_description: video_description = unescapeHTML(video_description)

         info = {
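
The only functional change in the hunk above is fatal=False on the og:description lookup, so a page without that meta tag no longer aborts extraction. The flags=re.DOTALL it keeps is needed because the content attribute can span several lines. A stand-alone approximation, with an invented page head and html.unescape standing in for unescapeHTML:

import re
from html import unescape   # plays the role of youtube_dl.utils.unescapeHTML here

# Invented page head; og:description content may contain newlines,
# which is why the pattern is applied with re.DOTALL.
webpage = '''<meta property="og:description" content="First line
second line &amp; a bit more">'''

mobj = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"',
                 webpage, flags=re.DOTALL)
video_description = unescape(mobj.group('desc')) if mobj else None
print(video_description)   # prints two lines: 'First line' / 'second line & a bit more'
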
@@ -4310,8 +4310,9 @@ def _real_extract(self,url):
         mobj = re.match(self._VALID_URL, url)

         video_id = mobj.group('id')
-        mrss_url='http://xhamster.com/movies/%s/.html' % video_id
+        mrss_url = 'http://xhamster.com/movies/%s/.html' % video_id
         webpage = self._download_webpage(mrss_url, video_id)
+
         mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)
         if mobj is None:
             raise ExtractorError(u'Unable to extract media URL')
@@ -4321,32 +4322,26 @@ def _real_extract(self,url):
         video_url = mobj.group('server')+'/key='+mobj.group('file')
         video_extension = video_url.split('.')[-1]

-        mobj = re.search(r'<title>(?P<title>.+?) - xHamster\.com</title>', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract title')
-        video_title = unescapeHTML(mobj.group('title'))
+        video_title = self._search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>',
+            webpage, u'title')
+        video_title = unescapeHTML(video_title)

-        mobj = re.search(r'<span>Description: </span>(?P<description>[^<]+)', webpage)
-        if mobj is None:
-            video_description = u''
-        else:
-            video_description = unescapeHTML(mobj.group('description'))
+        video_description = self._search_regex(r'<span>Description: </span>(?P<description>[^<]+)',
+            webpage, u'description', fatal=False)
+        if video_description: video_description = unescapeHTML(video_description)

         mobj = re.search(r'hint=\'(?P<upload_date_Y>[0-9]{4})-(?P<upload_date_m>[0-9]{2})-(?P<upload_date_d>[0-9]{2}) [0-9]{2}:[0-9]{2}:[0-9]{2} [A-Z]{3,4}\'', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract upload date')
-        video_upload_date = mobj.group('upload_date_Y')+mobj.group('upload_date_m')+mobj.group('upload_date_d')
+        if mobj:
+            video_upload_date = mobj.group('upload_date_Y')+mobj.group('upload_date_m')+mobj.group('upload_date_d')
+        else:
+            video_upload_date = None
+            self._downloader.report_warning(u'Unable to extract upload date')

-        mobj = re.search(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^>]+)', webpage)
-        if mobj is None:
-            video_uploader_id = u'anonymous'
-        else:
-            video_uploader_id = mobj.group('uploader_id')
+        video_uploader_id = self._search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^>]+)',
+            webpage, u'uploader id', default=u'anonymous')

-        mobj = re.search(r'\'image\':\'(?P<thumbnail>[^\']+)\'', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract thumbnail URL')
-        video_thumbnail = mobj.group('thumbnail')
+        video_thumbnail = self._search_regex(r'\'image\':\'(?P<thumbnail>[^\']+)\'',
+            webpage, u'thumbnail', fatal=False)

         return [{
             'id': video_id,
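
One lookup above stays on plain re.search: the upload date needs three named groups joined into the YYYYMMDD string youtube-dl uses for upload dates, which a single-group helper call does not express, so the commit only downgrades a miss from an ExtractorError to a report_warning. With an invented hint attribute in place of the real xHamster markup, the joining step works like this:

import re

# Invented player markup; the real value comes from the xHamster page.
webpage = "hint='2013-04-18 12:34:56 UTC'"

mobj = re.search(
    r'hint=\'(?P<upload_date_Y>[0-9]{4})-(?P<upload_date_m>[0-9]{2})-(?P<upload_date_d>[0-9]{2}) '
    r'[0-9]{2}:[0-9]{2}:[0-9]{2} [A-Z]{3,4}\'',
    webpage)
if mobj:
    # youtube-dl represents upload dates as YYYYMMDD strings
    video_upload_date = (mobj.group('upload_date_Y')
                         + mobj.group('upload_date_m')
                         + mobj.group('upload_date_d'))
else:
    video_upload_date = None
print(video_upload_date)   # -> 20130418
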
@@ -4377,10 +4372,9 @@ def _real_extract(self, url):
         cookie = urlh.headers.get('Set-Cookie', '')

         self.report_extraction(track_id)
-        mobj = re.search(r'<script type="application/json" id="displayList-data">(.*?)</script>', response, flags=re.MULTILINE|re.DOTALL)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extrack tracks')
-        html_tracks = mobj.group(1).strip()
+
+        html_tracks = self._search_regex(r'<script type="application/json" id="displayList-data">(.*?)</script>',
+            response, u'tracks', flags=re.MULTILINE|re.DOTALL).strip()
         try:
             track_list = json.loads(html_tracks)
             track = track_list[u'tracks'][0]
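
The tracks lookup in the last hunk now goes through _search_regex as well, but what it captures is still the raw JSON inside the displayList-data script tag, which json.loads then parses. A self-contained version of that capture, using an invented response body in place of the real Hype Machine page and plain re.search standing in for the helper:

import json
import re

# Invented response body; the real one is the HTML served by hypem.com.
response = '''
<html><body>
<script type="application/json" id="displayList-data">
{"tracks": [{"id": "2abc1", "song": "Some Song", "artist": "Some Artist"}]}
</script>
</body></html>
'''

html_tracks = re.search(
    r'<script type="application/json" id="displayList-data">(.*?)</script>',
    response, flags=re.MULTILINE | re.DOTALL).group(1).strip()

track_list = json.loads(html_tracks)
track = track_list[u'tracks'][0]
print(track['id'], track['song'])   # -> 2abc1 Some Song
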