[vevo] extract metadata and formats from api if videoinfo is empty

this was fixed by @yan12125 in ff51983e15; I only added some code to extract video metadata and more formats from the api.
commit 9165d6bab9
parent 2975fe1a7b
1 changed file with 140 additions and 49 deletions
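At a high level, the fallback added here authenticates against the Vevo API and pulls both metadata and stream versions from it whenever the legacy AuthenticateVideo response carries no video info. A rough standalone sketch of that flow, using plain urllib instead of the extractor's sanitized_Request / _download_json helpers (the endpoints are the ones used in the diff below and may no longer behave the same today):

    # Rough sketch only, not the extractor code itself.
    import json
    import urllib.request

    def fetch_vevo_api_info(video_id):
        # POST with an empty body; the response JSON carries an oauth access_token
        req = urllib.request.Request('http://www.vevo.com/auth', data=b'')
        auth_info = json.loads(urllib.request.urlopen(req).read())
        api_url_template = 'http://apiv2.vevo.com/%s?token=' + auth_info['access_token']

        # video metadata: title, artists, releaseDate, views, isExplicit, ...
        video_info = json.loads(urllib.request.urlopen(
            api_url_template % ('video/%s' % video_id)).read())
        # per-version stream URLs that the extractor turns into formats
        video_versions = json.loads(urllib.request.urlopen(
            api_url_template % ('video/%s/streams' % video_id)).read())
        return video_info, video_versions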
@@ -7,6 +7,8 @@
from ..utils import (
ExtractorError,
int_or_none,
sanitized_Request,
parse_iso8601,
)
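parse_iso8601 is imported because the API branch further down parses video_info.get('releaseDate') with it. A minimal stand-in for what that helper does with such a value (the sample date is made up, and the real utility accepts more variants):

    from datetime import datetime

    def parse_iso8601_sketch(value):
        # crude substitute for youtube_dl.utils.parse_iso8601: ISO 8601 -> Unix timestamp
        if not value:
            return None
        return int(datetime.strptime(value, '%Y-%m-%dT%H:%M:%S%z').timestamp())

    print(parse_iso8601_sketch('2015-12-07T00:00:00+00:00'))  # 1449446400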
@@ -64,6 +66,7 @@ class VevoIE(InfoExtractor):
'uploader_id': 'justintimberlakeVEVO',
'uploader': 'justintimberlakeVEVO',
'upload_date': '20130703',
'duration': 419,
},
'params': {
'skip_download': 'true',
@@ -72,13 +75,18 @@ class VevoIE(InfoExtractor):
}, {
'note': 'No video_info',
'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000',
'md5': '8b83cc492d72fc9cf74a02acee7dc1b0',
'md5': 'a8b84d1d1957cd01046441b701b270fb',
'info_dict': {
'id': 'USUV71503000',
'id': 'Lad2jHtJCqY',
'ext': 'mp4',
'title': 'Till I Die - K Camp ft. T.I.',
'title': 'K Camp - Till I Die ft. T.I.',
'description': 'md5:0694920ededdee4a14cfc39695cc8ec3',
'uploader_id': 'KCampVEVO',
'uploader': 'KCampVEVO',
'upload_date': '20151207',
'duration': 193,
},
'add_ie': ['Youtube'],
}]
_SMIL_BASE_URL = 'http://smil.lvl3.vevo.com'
_SOURCE_TYPES = {
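The updated test now expects the YouTube id 'Lad2jHtJCqY' and declares 'add_ie': ['Youtube'] because, for YouTube-hosted versions, the extractor hands off via self.url_result() instead of building formats itself. A small sketch of what that delegation amounts to (key names follow youtube-dl's url-result convention, hedged as a simplification):

    def url_result_sketch(url, ie=None, video_id=None):
        # a "url" entry tells the core to re-run extraction with the named extractor
        result = {'_type': 'url', 'url': url, 'ie_key': ie}
        if video_id is not None:
            result['id'] = video_id
        return result

    # roughly what self.url_result(ytid, 'Youtube', ytid) in the diff returns
    print(url_result_sketch('Lad2jHtJCqY', 'Youtube', 'Lad2jHtJCqY'))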
@@ -96,7 +104,7 @@ class VevoIE(InfoExtractor):
18: 'dash',
}
_VERSIONS = {
0: 'youtube',
0: 'youtube',  # only in AuthenticateVideo videoVersions
1: 'level3',
2: 'akamai',
3: 'level3',
@@ -138,14 +146,34 @@ def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_para
})
return formats

def _initialize_api(self, video_url, video_id):
req = sanitized_Request(
'http://www.vevo.com/auth', data=b'')
webpage = self._download_webpage(
req, None,
note='Retrieving oauth token',
errnote='Unable to retrieve oauth token')

if 'THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION' in webpage:
raise ExtractorError('%s said: This page is currently unavailable in your region.' % self.IE_NAME, expected=True)

auth_info = self._parse_json(webpage, video_id)
self._api_url_template = self.http_scheme() + '//apiv2.vevo.com/%s?token=' + auth_info['access_token']

def _call_api(self, path, video_id, note, errnote, fatal=True):
return self._download_json(self._api_url_template % path, video_id, note, errnote)

def _real_extract(self, url):
video_id = self._match_id(url)

webpage = None

json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
response = self._download_json(json_url, video_id)
video_info = response['video'] or {}
response = self._download_json(json_url, video_id, 'Downloading video info', 'Unable to download info')
video_info = response.get('video') or {}
video_versions = video_info.get('videoVersions')
uploader = None
timestamp = None
view_count = None
formats = []

if not video_info:
ytid = response.get('errorInfo', {}).get('ytid')
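One detail in _initialize_api worth calling out: passing data=b'' turns the auth request into a POST with an empty body (a bare Request without data would be a GET), which appears to be what the token endpoint expects. Illustrated with plain urllib, which sanitized_Request wraps:

    import urllib.request

    # data=None -> GET; any bytes body, even an empty one, -> POST
    print(urllib.request.Request('http://www.vevo.com/auth').get_method())            # GET
    print(urllib.request.Request('http://www.vevo.com/auth', data=b'').get_method())  # POST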
@@ -161,44 +189,112 @@ def _real_extract(self, url):
if url.startswith('vevo:'):
raise ExtractorError(
'Please specify full Vevo URL for downloading', expected=True)
webpage = self._download_webpage(url, video_id)

title = video_info.get('title') or self._og_search_title(webpage)
self._initialize_api(url, video_id)
video_info = self._call_api(
'video/%s' % video_id, video_id, 'Downloading api video info',
'Failed to download video info')

smil_parsed = False
formats = []
for video_version in video_info['videoVersions']:
version = self._VERSIONS.get(video_version['version'])
if version == 'youtube':
ytid = video_info.get('youTubeId')
if ytid:
return self.url_result(
video_version['id'], 'Youtube', video_version['id'])
else:
source_type = self._SOURCE_TYPES.get(video_version['sourceType'])
renditions = compat_etree_fromstring(video_version['data'])
if source_type == 'http':
for rend in renditions.findall('rendition'):
attr = rend.attrib
formats.append({
'url': attr['url'],
'format_id': '%s-%s' % (version, attr['name']),
'height': int_or_none(attr.get('frameheight')),
'width': int_or_none(attr.get('frameWidth')),
'tbr': int_or_none(attr.get('totalBitrate')),
'vbr': int_or_none(attr.get('videoBitrate')),
'abr': int_or_none(attr.get('audioBitrate')),
'vcodec': attr.get('videoCodec'),
'acodec': attr.get('audioCodec'),
})
elif source_type == 'hls':
ytid, 'Youtube', ytid)

video_versions = self._call_api(
'video/%s/streams' % video_id, video_id,
'Downloading video versions info',
'Failed to download video versions info')

timestamp = parse_iso8601(video_info.get('releaseDate'))
artists = video_info.get('artists')
if artists:
uploader = artists[0]['name']
view_count = int_or_none(video_info.get('views', {}).get('total'))

for video_version in video_versions:
version = self._VERSIONS.get(video_version['version'])
version_url = video_version.get('url')
if not version_url:
continue

if '.mpd' in version_url or '.ism' in version_url:
continue
elif '.m3u8' in version_url:
formats.extend(self._extract_m3u8_formats(
renditions.find('rendition').attrib['url'], video_id,
'mp4', 'm3u8_native', m3u8_id='hls-%s' % version, fatal=False))
elif source_type == 'smil' and not smil_parsed:
formats.extend(self._extract_smil_formats(
renditions.find('rendition').attrib['url'], video_id, False))
smil_parsed = True
version_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls-%s' % version,
note='Downloading %s m3u8 information' % version,
errnote='Failed to download %s m3u8 information' % version,
fatal=False))
else:
m = re.search(r'''(?xi)
_(?P<width>[0-9]+)x(?P<height>[0-9]+)
_(?P<vcodec>[a-z0-9]+)
_(?P<vbr>[0-9]+)
_(?P<acodec>[a-z0-9]+)
_(?P<abr>[0-9]+)
\.(?P<ext>[a-z0-9]+)''', version_url)
if not m:
continue

formats.append({
'url': version_url,
'format_id': 'http-%s-%s' % (version, video_version['quality']),
'vcodec': m.group('vcodec'),
'acodec': m.group('acodec'),
'vbr': int(m.group('vbr')),
'abr': int(m.group('abr')),
'ext': m.group('ext'),
'width': int(m.group('width')),
'height': int(m.group('height')),
})
else:
timestamp = int_or_none(self._search_regex(
r'/Date\((\d+)\)/',
video_info['releaseDate'], 'release date', fatal=False),
scale=1000)
artists = video_info.get('mainArtists')
if artists:
uploader = artists[0]['artistName']

smil_parsed = False
for video_version in video_info['videoVersions']:
version = self._VERSIONS.get(video_version['version'])
if version == 'youtube':
return self.url_result(
video_version['id'], 'Youtube', video_version['id'])
else:
source_type = self._SOURCE_TYPES.get(video_version['sourceType'])
renditions = compat_etree_fromstring(video_version['data'])
if source_type == 'http':
for rend in renditions.findall('rendition'):
attr = rend.attrib
formats.append({
'url': attr['url'],
'format_id': 'http-%s-%s' % (version, attr['name']),
'height': int_or_none(attr.get('frameheight')),
'width': int_or_none(attr.get('frameWidth')),
'tbr': int_or_none(attr.get('totalBitrate')),
'vbr': int_or_none(attr.get('videoBitrate')),
'abr': int_or_none(attr.get('audioBitrate')),
'vcodec': attr.get('videoCodec'),
'acodec': attr.get('audioCodec'),
})
elif source_type == 'hls':
formats.extend(self._extract_m3u8_formats(
renditions.find('rendition').attrib['url'], video_id,
'mp4', 'm3u8_native', m3u8_id='hls-%s' % version,
note='Downloading %s m3u8 information' % version,
errnote='Failed to download %s m3u8 information' % version,
fatal=False))
elif source_type == 'smil' and not smil_parsed:
formats.extend(self._extract_smil_formats(
renditions.find('rendition').attrib['url'], video_id, False))
smil_parsed = True
self._sort_formats(formats)

title = video_info['title']

is_explicit = video_info.get('isExplicit')
if is_explicit is True:
age_limit = 18
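The http branch of the new per-version loop derives all of the format properties from the file name alone, using the verbose regex above. A self-contained check of what the named groups capture, run against a made-up rendition URL in the expected _<width>x<height>_<vcodec>_<vbr>_<acodec>_<abr>.<ext> shape:

    import re

    # same pattern as in the diff
    FORMAT_RE = re.compile(r'''(?xi)
        _(?P<width>[0-9]+)x(?P<height>[0-9]+)
        _(?P<vcodec>[a-z0-9]+)
        _(?P<vbr>[0-9]+)
        _(?P<acodec>[a-z0-9]+)
        _(?P<abr>[0-9]+)
        \.(?P<ext>[a-z0-9]+)''')

    # hypothetical URL, only meant to match the pattern
    url = 'http://example.invalid/USUV71503000_640x360_h264_2000_aac_128.mp4'
    m = FORMAT_RE.search(url)
    print({k: (int(v) if v.isdigit() else v) for k, v in m.groupdict().items()})
    # {'width': 640, 'height': 360, 'vcodec': 'h264', 'vbr': 2000,
    #  'acodec': 'aac', 'abr': 128, 'ext': 'mp4'}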
@@ -207,21 +303,16 @@ def _real_extract(self, url):
else:
age_limit = None

timestamp = int_or_none(self._search_regex(
r'/Date\((\d+)\)/',
video_info['launchDate'], 'launch date', fatal=False),
scale=1000) if video_info else None

duration = video_info.get('duration') or int_or_none(
self._html_search_meta('video:duration', webpage))
duration = video_info.get('duration')

return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': video_info.get('imageUrl'),
'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'),
'timestamp': timestamp,
'uploader': video_info['mainArtists'][0]['artistName'] if video_info else None,
'uploader': uploader,
'duration': duration,
'view_count': view_count,
'age_limit': age_limit,
}
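For the legacy (non-API) path, launchDate/releaseDate come back in the .NET JSON date form /Date(<milliseconds>)/, which is why the regex match is passed through int_or_none with scale=1000 (milliseconds to seconds). A quick sketch of that conversion with a made-up value:

    import re
    from datetime import datetime, timezone

    def parse_dotnet_date(value):
        # e.g. '/Date(1436918400000)/' -> seconds since the Unix epoch
        m = re.search(r'/Date\((\d+)\)/', value)
        return int(m.group(1)) / 1000 if m else None

    ts = parse_dotnet_date('/Date(1436918400000)/')
    print(datetime.fromtimestamp(ts, tz=timezone.utc).isoformat())  # 2015-07-15T00:00:00+00:00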