mirror of https://github.com/yt-dlp/yt-dlp.git

commit 6a39ee13f7 (parent 33245766ab)
Fix inconsistent use of report_warning

14 changed files with 31 additions and 31 deletions
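
All 31 changed lines follow the same pattern: warnings that extractors used to emit by reaching directly into the downloader (self._downloader.report_warning(...)) now go through the extractor's own report_warning helper, and two report_error calls in the YouTube tab extractor are turned into raised ExtractorErrors. The sketch below illustrates why the two warning spellings are interchangeable, assuming the extractor-level helper simply tags the message with the extractor name before delegating to the downloader; FakeDownloader, SketchInfoExtractor and DemoIE are invented for illustration and are not yt-dlp code.

    class FakeDownloader:
        """Stand-in for the downloader (YoutubeDL), which owns the real report_warning."""
        def report_warning(self, message):
            print('WARNING: %s' % message)


    class SketchInfoExtractor:
        """Sketch of the extractor base class, not the real InfoExtractor."""
        IE_NAME = 'generic'

        def __init__(self, downloader):
            self._downloader = downloader

        def report_warning(self, message, video_id=None):
            # Assumed behaviour: prefix the extractor name (and optional video id)
            # and hand the message to the downloader-level reporter.
            idstr = '' if video_id is None else '%s: ' % video_id
            self._downloader.report_warning('[%s] %s%s' % (self.IE_NAME, idstr, message))


    class DemoIE(SketchInfoExtractor):
        IE_NAME = 'demo'


    ie = DemoIE(FakeDownloader())
    ie._downloader.report_warning('login failed')  # old style: WARNING: login failed
    ie.report_warning('login failed')              # new style: WARNING: [demo] login failed

Routing every extractor warning through the wrapper gives each message a consistent extractor prefix and a single override point, which is presumably the inconsistency the commit title refers to.
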
@@ -257,7 +257,7 @@ def _real_extract(self, url):
             if flag and flag == 'SUCCEED':
                 break
             if flag == 'PARTIAL_ADULT':
-                self._downloader.report_warning(
+                self.report_warning(
                     'In accordance with local laws and regulations, underage users are restricted from watching adult content. '
                     'Only content suitable for all ages will be downloaded. '
                     'Provide account credentials if you wish to download restricted content.')

@@ -114,7 +114,7 @@ def _real_extract(self, url):
             medias.append(media)

         if len(medias) > 1:
-            self._downloader.report_warning(
+            self.report_warning(
                 'found multiple medias; please '
                 'report this with the video URL to http://yt-dl.org/bug')
         if not medias:

@@ -683,7 +683,7 @@ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fa
             if fatal:
                 raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
             else:
-                self._downloader.report_warning(errmsg)
+                self.report_warning(errmsg)
                 return False

     def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):

@@ -1044,7 +1044,7 @@ def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, f
         elif fatal:
             raise RegexNotFoundError('Unable to extract %s' % _name)
         else:
-            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
+            self.report_warning('unable to extract %s' % _name + bug_reports_message())
             return None

     def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):

@@ -1072,7 +1072,7 @@ def _get_netrc_login_info(self, netrc_machine=None):
                     raise netrc.NetrcParseError(
                         'No authenticators for %s' % netrc_machine)
             except (IOError, netrc.NetrcParseError) as err:
-                self._downloader.report_warning(
+                self.report_warning(
                     'parsing .netrc: %s' % error_to_compat_str(err))

         return username, password

@@ -1247,7 +1247,7 @@ def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
         elif fatal:
             raise RegexNotFoundError('Unable to extract JSON-LD')
         else:
-            self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
+            self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
             return {}

     def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):

@@ -3203,7 +3203,7 @@ def _int(self, v, name, fatal=False, **kwargs):
             if fatal:
                 raise ExtractorError(msg)
             else:
-                self._downloader.report_warning(msg)
+                self.report_warning(msg)
         return res

     def _float(self, v, name, fatal=False, **kwargs):

@@ -3213,7 +3213,7 @@ def _float(self, v, name, fatal=False, **kwargs):
             if fatal:
                 raise ExtractorError(msg)
             else:
-                self._downloader.report_warning(msg)
+                self.report_warning(msg)
         return res

     def _set_cookie(self, domain, name, value, expire_time=None, port=None,

@@ -3389,7 +3389,7 @@ def _real_extract(self, query):
             if n <= 0:
                 raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
             elif n > self._MAX_RESULTS:
-                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
+                self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                 n = self._MAX_RESULTS
             return self._get_n_results(query, n)

@@ -14,7 +14,7 @@
 class DeezerBaseInfoExtractor(InfoExtractor):
     def get_data(self, url):
         if not self._downloader.params.get('test'):
-            self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
+            self.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')

         mobj = re.match(self._VALID_URL, url)
         data_id = mobj.group('id')

@@ -79,7 +79,7 @@ def _get_subtitles(self, fmt, doc, url):
         elif fmt == 'mpd':
             subs = self._parse_mpd_subtitles(doc)
         else:
-            self._downloader.report_warning(
+            self.report_warning(
                 "Cannot download subtitles from '%s' streams." % (fmt))
             subs = {}
         return subs

@@ -348,7 +348,7 @@ def _login(self):
                     login_results, 'login error', default=None, group='error')
                 if error:
                     raise ExtractorError('Unable to login: %s' % error, expected=True)
-                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
+                self.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                 return

             fb_dtsg = self._search_regex(

@@ -369,9 +369,9 @@ def _login(self):
                 check_response = self._download_webpage(check_req, None,
                                                         note='Confirming login')
                 if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
-                    self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
+                    self.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
+            self.report_warning('unable to log in: %s' % error_to_compat_str(err))
             return

     def _real_initialize(self):

@@ -2376,7 +2376,7 @@ def _real_extract(self, url):

         if default_search in ('auto', 'auto_warning', 'fixup_error'):
             if re.match(r'^[^\s/]+\.[^\s/]+/', url):
-                self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
+                self.report_warning('The url doesn\'t specify the protocol, trying with http')
                 return self.url_result('http://' + url)
             elif default_search != 'fixup_error':
                 if default_search == 'auto_warning':

@@ -2385,7 +2385,7 @@ def _real_extract(self, url):
                             'Invalid URL: %r . Call yt-dlp like this: yt-dlp -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
                             expected=True)
                     else:
-                        self._downloader.report_warning(
+                        self.report_warning(
                             'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
                     return self.url_result('ytsearch:' + url)

@@ -2461,7 +2461,7 @@ def _real_extract(self, url):

         if not self._downloader.params.get('test', False) and not is_intentional:
             force = self._downloader.params.get('force_generic_extractor', False)
-            self._downloader.report_warning(
+            self.report_warning(
                 '%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))

         if not full_response:

@@ -2488,7 +2488,7 @@ def _real_extract(self, url):
         # Maybe it's a direct link to a video?
         # Be careful not to download the whole thing!
         if not is_html(first_bytes):
-            self._downloader.report_warning(
+            self.report_warning(
                 'URL could be a direct video link, returning it as such.')
             info_dict.update({
                 'direct': True,

@@ -280,7 +280,7 @@ def _login(self):
             msg = 'error %s' % code
             if validation_result.get('msg'):
                 msg += ': ' + validation_result['msg']
-            self._downloader.report_warning('unable to log in: ' + msg)
+            self.report_warning('unable to log in: ' + msg)
             return False

         return True

@@ -331,7 +331,7 @@ def _real_extract(self, url):
             })

         if unaccessible_videos > 0:
-            self._downloader.report_warning(
+            self.report_warning(
                 '%s videos are only available for members (or paid members) and will not be downloaded. '
                 % unaccessible_videos + self._ACCOUNT_CREDENTIALS_HINT)

@@ -262,7 +262,7 @@ class MildomUserVodIE(MildomBaseIE):
     def _real_extract(self, url):
         user_id = self._match_id(url)

-        self._downloader.report_warning('To download ongoing live, please use "https://www.mildom.com/%s" instead. This will list up VODs belonging to user.' % user_id)
+        self.report_warning('To download ongoing live, please use "https://www.mildom.com/%s" instead. This will list up VODs belonging to user.' % user_id)

         profile = self._call_api(
             'https://cloudac.mildom.com/nonolive/gappserv/user/profileV2', user_id,

@@ -190,7 +190,7 @@ def _login(self):
             if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
                 login_ok = False
         if not login_ok:
-            self._downloader.report_warning('unable to log in: bad username or password')
+            self.report_warning('unable to log in: bad username or password')
         return login_ok

     def _get_heartbeat_info(self, info_dict):

@@ -312,7 +312,7 @@ def _download_json(self, *args, **kwargs):
                     self._update_client_id()
                     continue
                 elif non_fatal:
-                    self._downloader.report_warning(error_to_compat_str(e))
+                    self.report_warning(error_to_compat_str(e))
                     return False
                 raise

@@ -804,7 +804,7 @@ def is_rented():
                 video_description = self._html_search_meta(
                     'description', orig_webpage, default=None)
         if not video_description and not is_player:
-            self._downloader.report_warning('Cannot find video description')
+            self.report_warning('Cannot find video description')

         # Extract upload date
         if not timestamp:

@@ -128,7 +128,7 @@ def req(url, f_req, note, errnote):
             })

         def warn(message):
-            self._downloader.report_warning(message)
+            self.report_warning(message)

         lookup_req = [
             username,

@@ -1739,7 +1739,7 @@ def extract_thread(parent_renderer):
                     # See: https://github.com/ytdl-org/youtube-dl/issues/28194
                     last_error = 'Incomplete data received'
                     if count >= retries:
-                        self._downloader.report_error(last_error)
+                        raise ExtractorError(last_error)

             if not response:
                 break

@@ -3303,7 +3303,7 @@ def _real_extract_alerts():
                     warnings.append([alert_type, alert_message])

             for alert_type, alert_message in (warnings + errors[:-1]):
-                self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+                self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
             if errors:
                 raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)

@@ -3414,7 +3414,7 @@ def _extract_webpage(self, url, item_id):
             if data.get('contents') or data.get('currentVideoEndpoint'):
                 break
             if count >= retries:
-                self._downloader.report_error(last_error)
+                raise ExtractorError(last_error)
         return webpage, data

     def _real_extract(self, url):

@@ -3426,7 +3426,7 @@ def _real_extract(self, url):
         mobj = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
         mobj = mobj.groupdict() if mobj else {}
         if mobj and not mobj.get('not_channel'):
-            self._downloader.report_warning(
+            self.report_warning(
                 'A channel/user page was given. All the channel\'s videos will be downloaded. '
                 'To download only the videos in the home page, add a "/featured" to the URL')
             url = '%s/videos%s' % (mobj.get('pre'), mobj.get('post') or '')

@@ -3441,7 +3441,7 @@ def _real_extract(self, url):
                 # If there is neither video or playlist ids,
                 # youtube redirects to home page, which is undesirable
                 raise ExtractorError('Unable to recognize tab page')
-            self._downloader.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
+            self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
             url = 'https://www.youtube.com/playlist?list=%s' % playlist_id

         if video_id and playlist_id:

@@ -3469,7 +3469,7 @@ def _real_extract(self, url):
             data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
             compat_str) or video_id
         if video_id:
-            self._downloader.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
+            self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
             return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)

         raise ExtractorError('Unable to recognize tab page')