Fix inconsistent use of report_warning

This commit is contained in:
pukkandan 2021-04-16 15:31:10 +05:30
parent 33245766ab
commit 6a39ee13f7
No known key found for this signature in database
GPG key ID: 0F00D95A001F4698
14 changed files with 31 additions and 31 deletions

View file

@@ -257,7 +257,7 @@ def _real_extract(self, url):
if flag and flag == 'SUCCEED':
break
if flag == 'PARTIAL_ADULT':
self._downloader.report_warning(
self.report_warning(
'In accordance with local laws and regulations, underage users are restricted from watching adult content. '
'Only content suitable for all ages will be downloaded. '
'Provide account credentials if you wish to download restricted content.')

View file

@@ -114,7 +114,7 @@ def _real_extract(self, url):
medias.append(media)
if len(medias) > 1:
self._downloader.report_warning(
self.report_warning(
'found multiple medias; please '
'report this with the video URL to http://yt-dl.org/bug')
if not medias:

View file

@@ -683,7 +683,7 @@ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fa
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
self.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
@@ -1044,7 +1044,7 @@ def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, f
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
self.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
@@ -1072,7 +1072,7 @@ def _get_netrc_login_info(self, netrc_machine=None):
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
self.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
@@ -1247,7 +1247,7 @@ def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
elif fatal:
raise RegexNotFoundError('Unable to extract JSON-LD')
else:
self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
return {}
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
@@ -3203,7 +3203,7 @@ def _int(self, v, name, fatal=False, **kwargs):
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
self.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
@@ -3213,7 +3213,7 @@ def _float(self, v, name, fatal=False, **kwargs):
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
self.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
@@ -3389,7 +3389,7 @@ def _real_extract(self, query):
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)

View file

@@ -14,7 +14,7 @@
class DeezerBaseInfoExtractor(InfoExtractor):
def get_data(self, url):
if not self._downloader.params.get('test'):
self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
self.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
mobj = re.match(self._VALID_URL, url)
data_id = mobj.group('id')

View file

@@ -79,7 +79,7 @@ def _get_subtitles(self, fmt, doc, url):
elif fmt == 'mpd':
subs = self._parse_mpd_subtitles(doc)
else:
self._downloader.report_warning(
self.report_warning(
"Cannot download subtitles from '%s' streams." % (fmt))
subs = {}
return subs

View file

@@ -348,7 +348,7 @@ def _login(self):
login_results, 'login error', default=None, group='error')
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
self.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
fb_dtsg = self._search_regex(
@@ -369,9 +369,9 @@ def _login(self):
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
self.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
self.report_warning('unable to log in: %s' % error_to_compat_str(err))
return
def _real_initialize(self):

View file

@@ -2376,7 +2376,7 @@ def _real_extract(self, url):
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if re.match(r'^[^\s/]+\.[^\s/]+/', url):
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
self.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
@@ -2385,7 +2385,7 @@ def _real_extract(self, url):
'Invalid URL: %r . Call yt-dlp like this: yt-dlp -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self._downloader.report_warning(
self.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
@@ -2461,7 +2461,7 @@ def _real_extract(self, url):
if not self._downloader.params.get('test', False) and not is_intentional:
force = self._downloader.params.get('force_generic_extractor', False)
self._downloader.report_warning(
self.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
@@ -2488,7 +2488,7 @@ def _real_extract(self, url):
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self._downloader.report_warning(
self.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,

View file

@@ -280,7 +280,7 @@ def _login(self):
msg = 'error %s' % code
if validation_result.get('msg'):
msg += ': ' + validation_result['msg']
self._downloader.report_warning('unable to log in: ' + msg)
self.report_warning('unable to log in: ' + msg)
return False
return True

View file

@@ -331,7 +331,7 @@ def _real_extract(self, url):
})
if unaccessible_videos > 0:
self._downloader.report_warning(
self.report_warning(
'%s videos are only available for members (or paid members) and will not be downloaded. '
% unaccessible_videos + self._ACCOUNT_CREDENTIALS_HINT)

View file

@@ -262,7 +262,7 @@ class MildomUserVodIE(MildomBaseIE):
def _real_extract(self, url):
user_id = self._match_id(url)
self._downloader.report_warning('To download ongoing live, please use "https://www.mildom.com/%s" instead. This will list up VODs belonging to user.' % user_id)
self.report_warning('To download ongoing live, please use "https://www.mildom.com/%s" instead. This will list up VODs belonging to user.' % user_id)
profile = self._call_api(
'https://cloudac.mildom.com/nonolive/gappserv/user/profileV2', user_id,

View file

@@ -190,7 +190,7 @@ def _login(self):
if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
login_ok = False
if not login_ok:
self._downloader.report_warning('unable to log in: bad username or password')
self.report_warning('unable to log in: bad username or password')
return login_ok
def _get_heartbeat_info(self, info_dict):

View file

@@ -312,7 +312,7 @@ def _download_json(self, *args, **kwargs):
self._update_client_id()
continue
elif non_fatal:
self._downloader.report_warning(error_to_compat_str(e))
self.report_warning(error_to_compat_str(e))
return False
raise

View file

@@ -804,7 +804,7 @@ def is_rented():
video_description = self._html_search_meta(
'description', orig_webpage, default=None)
if not video_description and not is_player:
self._downloader.report_warning('Cannot find video description')
self.report_warning('Cannot find video description')
# Extract upload date
if not timestamp:

View file

@@ -128,7 +128,7 @@ def req(url, f_req, note, errnote):
})
def warn(message):
self._downloader.report_warning(message)
self.report_warning(message)
lookup_req = [
username,
@@ -1739,7 +1739,7 @@ def extract_thread(parent_renderer):
# See: https://github.com/ytdl-org/youtube-dl/issues/28194
last_error = 'Incomplete data received'
if count >= retries:
self._downloader.report_error(last_error)
raise ExtractorError(last_error)
if not response:
break
@@ -3303,7 +3303,7 @@ def _real_extract_alerts():
warnings.append([alert_type, alert_message])
for alert_type, alert_message in (warnings + errors[:-1]):
self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
if errors:
raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
@@ -3414,7 +3414,7 @@ def _extract_webpage(self, url, item_id):
if data.get('contents') or data.get('currentVideoEndpoint'):
break
if count >= retries:
self._downloader.report_error(last_error)
raise ExtractorError(last_error)
return webpage, data
def _real_extract(self, url):
@@ -3426,7 +3426,7 @@ def _real_extract(self, url):
mobj = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
mobj = mobj.groupdict() if mobj else {}
if mobj and not mobj.get('not_channel'):
self._downloader.report_warning(
self.report_warning(
'A channel/user page was given. All the channel\'s videos will be downloaded. '
'To download only the videos in the home page, add a "/featured" to the URL')
url = '%s/videos%s' % (mobj.get('pre'), mobj.get('post') or '')
@@ -3441,7 +3441,7 @@ def _real_extract(self, url):
# If there is neither video or playlist ids,
# youtube redirects to home page, which is undesirable
raise ExtractorError('Unable to recognize tab page')
self._downloader.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
if video_id and playlist_id:
@@ -3469,7 +3469,7 @@ def _real_extract(self, url):
data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
compat_str) or video_id
if video_id:
self._downloader.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
raise ExtractorError('Unable to recognize tab page')