Mirror of https://github.com/ansible/awx.git (synced 2026-01-10 15:32:07 -03:30)
Fix up logger .warn() calls to use .warning() instead
This is a usage that was deprecated in Python 3.0.
This commit is contained in:
parent a3a216f91f
commit b852baaa39
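For context: Logger.warn is a long-standing alias for Logger.warning in the standard library's logging module, and calling it on Python 3 emits a DeprecationWarning. A minimal sketch of the rename this commit applies throughout the codebase (the logger name below is hypothetical, not taken from the diff):

import logging

logger = logging.getLogger('awx.example')  # hypothetical name for illustration
logging.basicConfig(level=logging.WARNING)

# Before: the deprecated alias; it still logs, but triggers a DeprecationWarning
logger.warn('disk usage at %s%%', 91)

# After: the supported spelling, with an identical signature and behavior
logger.warning('disk usage at %s%%', 91)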
@@ -104,7 +104,7 @@ class LoggedLoginView(auth_views.LoginView):
  return ret
  else:
  if 'username' in self.request.POST:
- logger.warn(smart_str(u"Login failed for user {} from {}".format(self.request.POST.get('username'), request.META.get('REMOTE_ADDR', None))))
+ logger.warning(smart_str(u"Login failed for user {} from {}".format(self.request.POST.get('username'), request.META.get('REMOTE_ADDR', None))))
  ret.status_code = 401
  return ret
@@ -5078,7 +5078,7 @@ class ActivityStreamSerializer(BaseSerializer):
  try:
  return json.loads(obj.changes)
  except Exception:
- logger.warn("Error deserializing activity stream json changes")
+ logger.warning("Error deserializing activity stream json changes")
  return {}

  def get_object_association(self, obj):
@@ -89,7 +89,7 @@ class BroadcastWebsocketStatsManager:

  await asyncio.sleep(settings.BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS)
  except Exception as e:
- logger.warn(e)
+ logger.warning(e)
  await asyncio.sleep(settings.BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS)
  self.start()
@@ -65,7 +65,7 @@ class WebsocketSecretAuthHelper:
  nonce_parsed = int(nonce_parsed)
  nonce_diff = now - nonce_parsed
  if abs(nonce_diff) > nonce_tolerance:
- logger.warn(f"Potential replay attack or machine(s) time out of sync by {nonce_diff} seconds.")
+ logger.warning(f"Potential replay attack or machine(s) time out of sync by {nonce_diff} seconds.")
  raise ValueError(f"Potential replay attack or machine(s) time out of sync by {nonce_diff} seconds.")

  return True
@@ -85,7 +85,7 @@ class BroadcastConsumer(AsyncJsonWebsocketConsumer):
  try:
  WebsocketSecretAuthHelper.is_authorized(self.scope)
  except Exception:
- logger.warn(f"client '{self.channel_name}' failed to authorize against the broadcast endpoint.")
+ logger.warning(f"client '{self.channel_name}' failed to authorize against the broadcast endpoint.")
  await self.close()
  return
@@ -42,7 +42,7 @@ class Control(object):
  return f"reply_to_{str(uuid.uuid4()).replace('-','_')}"

  def control_with_reply(self, command, timeout=5):
- logger.warn('checking {} {} for {}'.format(self.service, command, self.queuename))
+ logger.warning('checking {} {} for {}'.format(self.service, command, self.queuename))
  reply_queue = Control.generate_reply_queue_name()
  self.result = None
@@ -19,13 +19,13 @@ class Scheduler(Scheduler):

  def run():
  ppid = os.getppid()
- logger.warn('periodic beat started')
+ logger.warning('periodic beat started')
  while True:
  if os.getppid() != ppid:
  # if the parent PID changes, this process has been orphaned
  # via e.g., segfault or sigkill, we should exit too
  pid = os.getpid()
- logger.warn(f'periodic beat exiting gracefully pid:{pid}')
+ logger.warning(f'periodic beat exiting gracefully pid:{pid}')
  raise SystemExit()
  try:
  for conn in connections.all():
@@ -142,7 +142,7 @@ class PoolWorker(object):
  # when this occurs, it's _fine_ to ignore this KeyError because
  # the purpose of self.managed_tasks is to just track internal
  # state of which events are *currently* being processed.
- logger.warn('Event UUID {} appears to be have been duplicated.'.format(uuid))
+ logger.warning('Event UUID {} appears to be have been duplicated.'.format(uuid))

  @property
  def current_task(self):
@@ -291,8 +291,8 @@ class WorkerPool(object):
  pass
  except Exception:
  tb = traceback.format_exc()
- logger.warn("could not write to queue %s" % preferred_queue)
- logger.warn("detail: {}".format(tb))
+ logger.warning("could not write to queue %s" % preferred_queue)
+ logger.warning("detail: {}".format(tb))
  write_attempt_order.append(preferred_queue)
  logger.error("could not write payload to any queue, attempted order: {}".format(write_attempt_order))
  return None
@@ -60,7 +60,7 @@ class AWXConsumerBase(object):
  return f'listening on {self.queues}'

  def control(self, body):
- logger.warn(f'Received control signal:\n{body}')
+ logger.warning(f'Received control signal:\n{body}')
  control = body.get('control')
  if control in ('status', 'running'):
  reply_queue = body['reply_to']
@@ -118,7 +118,7 @@ class AWXConsumerBase(object):

  def stop(self, signum, frame):
  self.should_stop = True
- logger.warn('received {}, stopping'.format(signame(signum)))
+ logger.warning('received {}, stopping'.format(signame(signum)))
  self.worker.on_stop()
  raise SystemExit()
@@ -153,7 +153,7 @@ class AWXConsumerPG(AWXConsumerBase):
  if self.should_stop:
  return
  except psycopg2.InterfaceError:
- logger.warn("Stale Postgres message bus connection, reconnecting")
+ logger.warning("Stale Postgres message bus connection, reconnecting")
  continue
@@ -79,13 +79,13 @@ class AnsibleInventoryLoader(object):
  ee = get_default_execution_environment()

  if settings.IS_K8S:
- logger.warn('This command is not able to run on kubernetes-based deployment. This action should be done using the API.')
+ logger.warning('This command is not able to run on kubernetes-based deployment. This action should be done using the API.')
  sys.exit(1)

  if ee.credential:
  process = subprocess.run(['podman', 'image', 'exists', ee.image], capture_output=True)
  if process.returncode != 0:
- logger.warn(
+ logger.warning(
  f'The default execution environment (id={ee.id}, name={ee.name}, image={ee.image}) is not available on this node. '
  'The image needs to be available locally before using this command, due to registry authentication. '
  'To pull this image, either run a job on this node or manually pull the image.'
@@ -247,7 +247,7 @@ class InstanceGroupManager(models.Manager):
  if t.controller_node:
  control_groups = instance_ig_mapping.get(t.controller_node, [])
  if not control_groups:
- logger.warn(f"No instance group found for {t.controller_node}, capacity consumed may be innaccurate.")
+ logger.warning(f"No instance group found for {t.controller_node}, capacity consumed may be innaccurate.")

  if t.status == 'waiting' or (not t.execution_node and not t.is_container_group_task):
  # Subtract capacity from any peer groups that share instances
@@ -15,10 +15,10 @@ def forwards(apps, schema_editor):

  r = InventoryUpdate.objects.filter(source='tower').update(source='controller')
  if r:
- logger.warn(f'Renamed {r} tower inventory updates to controller')
+ logger.warning(f'Renamed {r} tower inventory updates to controller')
  InventorySource.objects.filter(source='tower').update(source='controller')
  if r:
- logger.warn(f'Renamed {r} tower inventory sources to controller')
+ logger.warning(f'Renamed {r} tower inventory sources to controller')

  CredentialType = apps.get_model('main', 'CredentialType')
@@ -32,7 +32,7 @@ def forwards(apps, schema_editor):
  registry_type = ManagedCredentialType.registry.get('controller')
  if not registry_type:
  raise RuntimeError('Excpected to find controller credential, this may need to be edited in the future!')
- logger.warn('Renaming the Ansible Tower credential type for existing install')
+ logger.warning('Renaming the Ansible Tower credential type for existing install')
  tower_type.name = registry_type.name # sensitive to translations
  tower_type.namespace = 'controller' # if not done, will error setup_tower_managed_defaults
  tower_type.save(update_fields=['name', 'namespace'])
@@ -46,10 +46,10 @@ def backwards(apps, schema_editor):

  r = InventoryUpdate.objects.filter(source='controller').update(source='tower')
  if r:
- logger.warn(f'Renamed {r} controller inventory updates to tower')
+ logger.warning(f'Renamed {r} controller inventory updates to tower')
  r = InventorySource.objects.filter(source='controller').update(source='tower')
  if r:
- logger.warn(f'Renamed {r} controller inventory sources to tower')
+ logger.warning(f'Renamed {r} controller inventory sources to tower')

  CredentialType = apps.get_model('main', 'CredentialType')
@@ -14,4 +14,4 @@ def delete_hg_scm(apps, schema_editor):
  update_ct = Project.objects.filter(scm_type='hg').update(scm_type='')

  if update_ct:
- logger.warn('Changed {} mercurial projects to manual, deprecation period ended'.format(update_ct))
+ logger.warning('Changed {} mercurial projects to manual, deprecation period ended'.format(update_ct))
@@ -35,7 +35,7 @@ def _get_instance_id_for_upgrade(host, new_id):
  return None
  if len(new_id) > 255:
  # this should never happen
- logger.warn('Computed instance id "{}"" for host {}-{} is too long'.format(new_id_value, host.name, host.pk))
+ logger.warning('Computed instance id "{}"" for host {}-{} is too long'.format(new_id_value, host.name, host.pk))
  return None
  return new_id_value
@@ -47,7 +47,7 @@ def set_new_instance_id(apps, source, new_id):
  id_from_settings = getattr(settings, '{}_INSTANCE_ID_VAR'.format(source.upper()))
  if id_from_settings != new_id:
  # User applied an instance ID themselves, so nope on out of there
- logger.warn('You have an instance ID set for {}, not migrating'.format(source))
+ logger.warning('You have an instance ID set for {}, not migrating'.format(source))
  return
  logger.debug('Migrating inventory instance_id for {} to {}'.format(source, new_id))
  Host = apps.get_model('main', 'Host')
@@ -247,7 +247,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):

  if uuid is not None and self.uuid != uuid:
  if self.uuid is not None:
- logger.warn(f'Self-reported uuid of {self.hostname} changed from {self.uuid} to {uuid}')
+ logger.warning(f'Self-reported uuid of {self.hostname} changed from {self.uuid} to {uuid}')
  self.uuid = uuid
  update_fields.append('uuid')
@@ -515,7 +515,7 @@ class JobNotificationMixin(object):
  try:
  notification_templates = self.get_notification_templates()
  except Exception:
- logger.warn("No notification template defined for emitting notification")
+ logger.warning("No notification template defined for emitting notification")
  return

  if not notification_templates:
@@ -103,7 +103,7 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
  for zone in all_zones:
  if fname.endswith(zone):
  return zone
- logger.warn('Could not detect valid zoneinfo for {}'.format(self.rrule))
+ logger.warning('Could not detect valid zoneinfo for {}'.format(self.rrule))
  return ''

  @property
@@ -357,7 +357,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
  validated_kwargs = kwargs.copy()
  if unallowed_fields:
  if parent_field_name is None:
- logger.warn('Fields {} are not allowed as overrides to spawn from {}.'.format(', '.join(unallowed_fields), self))
+ logger.warning('Fields {} are not allowed as overrides to spawn from {}.'.format(', '.join(unallowed_fields), self))
  for f in unallowed_fields:
  validated_kwargs.pop(f)
@@ -1205,7 +1205,7 @@ class UnifiedJob(
  try:
  extra_data_dict = parse_yaml_or_json(extra_data, silent_failure=False)
  except Exception as e:
- logger.warn("Exception deserializing extra vars: " + str(e))
+ logger.warning("Exception deserializing extra vars: " + str(e))
  evars = self.extra_vars_dict
  evars.update(extra_data_dict)
  self.update_fields(extra_vars=json.dumps(evars))
@@ -21,7 +21,7 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
  logger.debug(f"cleaning up Redis key {k}")
  r.delete(k)
  except redis.exceptions.RedisError as e:
- logger.warn("encountered an error communicating with redis.")
+ logger.warning("encountered an error communicating with redis.")
  raise e
  super().__init__(*args, **kwargs)
@@ -574,7 +574,7 @@ class TaskManager:
  timeout_message = _("The approval node {name} ({pk}) has expired after {timeout} seconds.").format(
  name=task.name, pk=task.pk, timeout=task.timeout
  )
- logger.warn(timeout_message)
+ logger.warning(timeout_message)
  task.timed_out = True
  task.status = 'failed'
  task.send_approval_notification('timed_out')
@@ -154,7 +154,7 @@ class RunnerCallback:
  if self.instance.cancel_flag or self.instance.status == 'canceled':
  cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
  if cancel_wait > 5:
- logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
+ logger.warning('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
  return True
  return False
@@ -169,7 +169,7 @@ class BaseTask(object):
  # mount_option validation via performed via API, but since this can be overriden via settings.py
  if mount_option not in CONTAINER_VOLUMES_MOUNT_TYPES:
  mount_option = 'z'
- logger.warn(f'The path {this_path} has volume mount type {mount_option} which is not supported. Using "z" instead.')
+ logger.warning(f'The path {this_path} has volume mount type {mount_option} which is not supported. Using "z" instead.')

  params['container_volume_mounts'].append(f'{src}:{dest}:{mount_option}')
  elif this_path.count(':') == MAX_ISOLATED_PATH_COLON_DELIMITER - 1:
@@ -164,7 +164,7 @@ def run_until_complete(node, timing_data=None, **kwargs):
  if settings.RECEPTOR_RELEASE_WORK:
  res = receptor_ctl.simple_command(f"work release {unit_id}")
  if res != {'released': unit_id}:
- logger.warn(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}')
+ logger.warning(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}')

  receptor_ctl.close()
@@ -358,9 +358,9 @@ class AWXReceptorJob:
  logger.exception(f'An error was encountered while getting status for work unit {self.unit_id}')

  if 'exceeded quota' in detail:
- logger.warn(detail)
+ logger.warning(detail)
  log_name = self.task.instance.log_format
- logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
+ logger.warning(f"Could not launch pod for {log_name}. Exceeded quota.")
  self.task.update_model(self.task.instance.pk, status='pending')
  return
  # If ansible-runner ran, but an error occured at runtime, the traceback information
@@ -380,7 +380,7 @@ class AWXReceptorJob:
  self.task.instance.result_traceback = detail
  self.task.instance.save(update_fields=['result_traceback'])
  else:
- logger.warn(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
+ logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
  except Exception:
  raise RuntimeError(detail)
@@ -374,15 +374,15 @@ def cluster_node_health_check(node):
  Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
  """
  if node == '':
- logger.warn('Local health check incorrectly called with blank string')
+ logger.warning('Local health check incorrectly called with blank string')
  return
  elif node != settings.CLUSTER_HOST_ID:
- logger.warn(f'Local health check for {node} incorrectly sent to {settings.CLUSTER_HOST_ID}')
+ logger.warning(f'Local health check for {node} incorrectly sent to {settings.CLUSTER_HOST_ID}')
  return
  try:
  this_inst = Instance.objects.me()
  except Instance.DoesNotExist:
- logger.warn(f'Instance record for {node} missing, could not check capacity.')
+ logger.warning(f'Instance record for {node} missing, could not check capacity.')
  return
  this_inst.local_health_check()
@@ -390,12 +390,12 @@ def cluster_node_health_check(node):
  @task(queue=get_local_queuename)
  def execution_node_health_check(node):
  if node == '':
- logger.warn('Remote health check incorrectly called with blank string')
+ logger.warning('Remote health check incorrectly called with blank string')
  return
  try:
  instance = Instance.objects.get(hostname=node)
  except Instance.DoesNotExist:
- logger.warn(f'Instance record for {node} missing, could not check capacity.')
+ logger.warning(f'Instance record for {node} missing, could not check capacity.')
  return

  if instance.node_type != 'execution':
@@ -416,7 +416,7 @@ def execution_node_health_check(node):
  if data['errors']:
  formatted_error = "\n".join(data["errors"])
  if prior_capacity:
- logger.warn(f'Health check marking execution node {node} as lost, errors:\n{formatted_error}')
+ logger.warning(f'Health check marking execution node {node} as lost, errors:\n{formatted_error}')
  else:
  logger.info(f'Failed to find capacity of new or lost execution node {node}, errors:\n{formatted_error}')
  else:
@@ -441,7 +441,7 @@ def inspect_execution_nodes(instance_list):
  if hostname in node_lookup:
  instance = node_lookup[hostname]
  else:
- logger.warn(f"Unrecognized node advertising on mesh: {hostname}")
+ logger.warning(f"Unrecognized node advertising on mesh: {hostname}")
  continue

  # Control-plane nodes are dealt with via local_health_check instead.
@@ -466,7 +466,7 @@ def inspect_execution_nodes(instance_list):
  # if the instance *was* lost, but has appeared again,
  # attempt to re-establish the initial capacity and version
  # check
- logger.warn(f'Execution node attempting to rejoin as instance {hostname}.')
+ logger.warning(f'Execution node attempting to rejoin as instance {hostname}.')
  execution_node_health_check.apply_async([hostname])
  elif instance.capacity == 0 and instance.enabled:
  # nodes with proven connection but need remediation run health checks are reduced frequency
@@ -640,7 +640,7 @@ def awx_periodic_scheduler():
  template = schedule.unified_job_template
  schedule.update_computed_fields() # To update next_run timestamp.
  if template.cache_timeout_blocked:
- logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
+ logger.warning("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
  continue
  try:
  job_kwargs = schedule.get_job_kwargs()
@@ -694,7 +694,7 @@ def handle_work_error(task_id, *args, **kwargs):
  instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
  if not instance:
  # Unknown task type
- logger.warn("Unknown task type: {}".format(each_task['type']))
+ logger.warning("Unknown task type: {}".format(each_task['type']))
  continue
  except ObjectDoesNotExist:
  logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
@@ -741,7 +741,7 @@ def handle_success_and_failure_notifications(job_id):
  time.sleep(1)
  uj = UnifiedJob.objects.get(pk=job_id)

- logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
+ logger.warning(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")


  @task(queue=get_local_queuename)
@@ -1947,7 +1947,7 @@ def test_notification_job_not_finished(logging_getLogger, mocker):

  with mocker.patch('awx.main.models.UnifiedJob.objects.get', uj):
  system.handle_success_and_failure_notifications(1)
- assert logger.warn.called_with(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
+ assert logger.warning.called_with(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")


  def test_notification_job_finished(mocker):
@@ -40,5 +40,5 @@ def supervisor_service_command(command, service='*', communicate=True):


  def stop_local_services(communicate=True):
- logger.warn('Stopping services on this node in response to user action')
+ logger.warning('Stopping services on this node in response to user action')
  supervisor_service_command(command='stop', communicate=communicate)
@@ -92,7 +92,7 @@ class WebsocketTask:
  if attempt > 0:
  await asyncio.sleep(settings.BROADCAST_WEBSOCKET_RECONNECT_RETRY_RATE_SECONDS)
  except asyncio.CancelledError:
- logger.warn(f"Connection from {self.name} to {self.remote_host} cancelled")
+ logger.warning(f"Connection from {self.name} to {self.remote_host} cancelled")
  raise

  uri = f"{self.protocol}://{self.remote_host}:{self.remote_port}/websocket/{self.endpoint}/"
@@ -109,18 +109,18 @@ class WebsocketTask:
  except asyncio.CancelledError:
  # TODO: Check if connected and disconnect
  # Possibly use run_until_complete() if disconnect is async
- logger.warn(f"Connection from {self.name} to {self.remote_host} cancelled.")
+ logger.warning(f"Connection from {self.name} to {self.remote_host} cancelled.")
  self.stats.record_connection_lost()
  raise
  except client_exceptions.ClientConnectorError as e:
- logger.warn(f"Connection from {self.name} to {self.remote_host} failed: '{e}'.")
+ logger.warning(f"Connection from {self.name} to {self.remote_host} failed: '{e}'.")
  except asyncio.TimeoutError:
- logger.warn(f"Connection from {self.name} to {self.remote_host} timed out.")
+ logger.warning(f"Connection from {self.name} to {self.remote_host} timed out.")
  except Exception as e:
  # Early on, this is our canary. I'm not sure what exceptions we can really encounter.
- logger.warn(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
+ logger.warning(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
  else:
- logger.warn(f"Connection from {self.name} to {self.remote_host} list.")
+ logger.warning(f"Connection from {self.name} to {self.remote_host} list.")

  self.stats.record_connection_lost()
  self.start(attempt=attempt + 1)
@@ -146,7 +146,7 @@ class BroadcastWebsocketTask(WebsocketTask):
  logmsg = "Failed to decode broadcast message"
  if logger.isEnabledFor(logging.DEBUG):
  logmsg = "{} {}".format(logmsg, payload)
- logger.warn(logmsg)
+ logger.warning(logmsg)
  continue
  (group, message) = unwrap_broadcast_msg(payload)
  if group == "metrics":
@@ -185,9 +185,9 @@ class BroadcastWebsocketManager(object):
  new_remote_hosts.add(hostname)

  if deleted_remote_hosts:
- logger.warn(f"Removing {deleted_remote_hosts} from websocket broadcast list")
+ logger.warning(f"Removing {deleted_remote_hosts} from websocket broadcast list")
  if new_remote_hosts:
- logger.warn(f"Adding {new_remote_hosts} to websocket broadcast list")
+ logger.warning(f"Adding {new_remote_hosts} to websocket broadcast list")

  for h in deleted_remote_hosts:
  self.broadcast_tasks[h].cancel()
@@ -179,7 +179,7 @@ def _get_or_set_enterprise_user(username, password, provider):
  created = True
  if created or user.is_in_enterprise_category(provider):
  return user
- logger.warn("Enterprise user %s already defined in Tower." % username)
+ logger.warning("Enterprise user %s already defined in Tower." % username)


  class RADIUSBackend(BaseRADIUSBackend):
@@ -257,7 +257,7 @@ class TowerSAMLIdentityProvider(BaseSAMLIdentityProvider):
  if isinstance(value, (list, tuple)):
  value = value[0]
  if conf_key in ('attr_first_name', 'attr_last_name', 'attr_username', 'attr_email') and value is None:
- logger.warn(
+ logger.warning(
  "Could not map user detail '%s' from SAML attribute '%s'; " "update SOCIAL_AUTH_SAML_ENABLED_IDPS['%s']['%s'] with the correct SAML attribute.",
  conf_key[5:],
  key,
@@ -370,7 +370,7 @@ def on_populate_user(sender, **kwargs):
  if field_len > max_len:
  setattr(user, field, getattr(user, field)[:max_len])
  force_user_update = True
- logger.warn('LDAP user {} has {} > max {} characters'.format(user.username, field, max_len))
+ logger.warning('LDAP user {} has {} > max {} characters'.format(user.username, field, max_len))

  # Update organization membership based on group memberships.
  org_map = getattr(backend.settings, 'ORGANIZATION_MAP', {})
@@ -11,7 +11,7 @@ def test_fetch_user_if_exist(existing_tacacsplus_user):
  with mock.patch('awx.sso.backends.logger') as mocked_logger:
  new_user = _get_or_set_enterprise_user("foo", "password", "tacacs+")
  mocked_logger.debug.assert_not_called()
- mocked_logger.warn.assert_not_called()
+ mocked_logger.warning.assert_not_called()
  assert new_user == existing_tacacsplus_user
@@ -33,5 +33,5 @@ def test_created_user_has_no_usable_password():
  def test_non_enterprise_user_does_not_get_pass(existing_normal_user):
  with mock.patch('awx.sso.backends.logger') as mocked_logger:
  new_user = _get_or_set_enterprise_user("alice", "password", "tacacs+")
- mocked_logger.warn.assert_called_once_with(u'Enterprise user alice already defined in Tower.')
+ mocked_logger.warning.assert_called_once_with(u'Enterprise user alice already defined in Tower.')
  assert new_user is None
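As a follow-up sanity check (a sketch under assumptions, not something this commit adds), any .warn() call that slipped through can be surfaced by promoting DeprecationWarning to an error, since Logger.warn emits one before logging:

import logging
import warnings

logger = logging.getLogger('awx.example')  # hypothetical name for illustration

with warnings.catch_warnings():
    warnings.simplefilter('error', DeprecationWarning)
    logger.warning('this call is fine')  # supported method, nothing raised
    try:
        logger.warn('this call is not')  # deprecated alias raises under the filter
    except DeprecationWarning as exc:
        print(f'leftover .warn() call detected: {exc}')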