Mirror of https://github.com/ansible/awx.git (synced 2026-01-11 18:09:57 -03:30)

Merge pull request #11327 from shanemcd/downstream-changes

Pull in downstream changes

This commit is contained in: commit 481f6435ee

.github/workflows/ci.yml (vendored): 12 changes
@@ -19,7 +19,7 @@ jobs:
       - name: Pre-pull image to warm build cache
         run: |
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}
+          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} || :

       - name: Build image
         run: |
@@ -43,7 +43,7 @@ jobs:
       - name: Pre-pull image to warm build cache
         run: |
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}
+          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} || :

       - name: Build image
         run: |
@@ -91,7 +91,7 @@ jobs:
       - name: Pre-pull image to warm build cache
         run: |
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}
+          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} || :

       - name: Build image
         run: |
@@ -115,7 +115,7 @@ jobs:
       - name: Pre-pull image to warm build cache
         run: |
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}
+          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} || :

       - name: Build image
         run: |
@@ -139,7 +139,7 @@ jobs:
       - name: Pre-pull image to warm build cache
         run: |
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}
+          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} || :

       - name: Build image
         run: |
@@ -163,7 +163,7 @@ jobs:
       - name: Pre-pull image to warm build cache
         run: |
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}
+          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} || :

       - name: Build image
         run: |
.github/workflows/upload_schema.yml (vendored): 2 changes

@@ -19,7 +19,7 @@ jobs:
       - name: Pre-pull image to warm build cache
         run: |
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/}
+          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :

       - name: Build image
         run: |
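Note: in every workflow above the pre-pull step becomes best-effort. `:` is the shell no-op builtin, so appending `|| :` forces a zero exit status when the cache image does not exist yet (for example, a new branch with no published awx_devel tag). A minimal Python sketch of the same tolerate-failure pattern; the image tag here is a hypothetical example, not taken from the commit:

    import subprocess

    image = "ghcr.io/ansible/awx_devel:devel"  # hypothetical tag for illustration
    # check=False mirrors `|| :`: a failed pull warms no cache but must not fail the job
    result = subprocess.run(["docker", "pull", image], check=False)
    if result.returncode != 0:
        print(f"cache warm skipped, {image} not available yet")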
@@ -5003,6 +5003,7 @@ class ActivityStreamSerializer(BaseSerializer):
             ('credential_type', ('id', 'name', 'description', 'kind', 'managed')),
             ('ad_hoc_command', ('id', 'name', 'status', 'limit')),
             ('workflow_approval', ('id', 'name', 'unified_job_id')),
+            ('instance', ('id', 'hostname')),
         ]
         return field_list
@@ -36,3 +36,7 @@ class PostRunError(Exception):
         self.status = status
         self.tb = tb
         super(PostRunError, self).__init__(msg)
+
+
+class ReceptorNodeNotFound(RuntimeError):
+    pass
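Note: ReceptorNodeNotFound gives callers a way to distinguish "instance missing from the receptor mesh" from generic runtime failures; later hunks raise it from get_conn_type() and catch it in tasks.py to set job_explanation. A minimal sketch of the pattern, with lookup_conn_type() standing in as a hypothetical caller:

    def lookup_conn_type(node_name, nodes):
        for node in nodes:
            if node.get('NodeID') == node_name:
                return node.get('ConnType')
        # a named subclass keeps mesh-membership errors separately catchable
        raise ReceptorNodeNotFound(f'Instance {node_name} is not in the receptor mesh')

    try:
        conn = lookup_conn_type('worker-1', [])
    except ReceptorNodeNotFound as exc:
        job_explanation = str(exc)  # as BaseTask does in a later hunk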
@@ -76,7 +76,10 @@ class AnsibleInventoryLoader(object):
         bargs.extend(['-v', '{0}:{0}:Z'.format(self.source)])
         for key, value in STANDARD_INVENTORY_UPDATE_ENV.items():
             bargs.extend(['-e', '{0}={1}'.format(key, value)])
-        bargs.extend([get_default_execution_environment().image])
+        ee = get_default_execution_environment()
+
+        bargs.extend([ee.image])
+
         bargs.extend(['ansible-inventory', '-i', self.source])
         bargs.extend(['--playbook-dir', functioning_dir(self.source)])
         if self.verbosity:
@@ -111,9 +114,7 @@ class AnsibleInventoryLoader(object):

     def load(self):
         base_args = self.get_base_args()

         logger.info('Reading Ansible inventory source: %s', self.source)

         return self.command_to_json(base_args)
@@ -138,7 +139,7 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='v',
-            help='host variable used to ' 'set/clear enabled flag when host is online/offline, may ' 'be specified as "foo.bar" to traverse nested dicts.',
+            help='host variable used to set/clear enabled flag when host is online/offline, may be specified as "foo.bar" to traverse nested dicts.',
        )
        parser.add_argument(
            '--enabled-value',
@@ -146,7 +147,7 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='v',
-            help='value of host variable ' 'specified by --enabled-var that indicates host is ' 'enabled/online.',
+            help='value of host variable specified by --enabled-var that indicates host is enabled/online.',
        )
        parser.add_argument(
            '--group-filter',
@@ -154,7 +155,7 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='regex',
-            help='regular expression ' 'to filter group name(s); only matches are imported.',
+            help='regular expression to filter group name(s); only matches are imported.',
        )
        parser.add_argument(
            '--host-filter',
@@ -162,14 +163,14 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='regex',
-            help='regular expression ' 'to filter host name(s); only matches are imported.',
+            help='regular expression to filter host name(s); only matches are imported.',
        )
        parser.add_argument(
            '--exclude-empty-groups',
            dest='exclude_empty_groups',
            action='store_true',
            default=False,
-            help='when set, ' 'exclude all groups that have no child groups, hosts, or ' 'variables.',
+            help='when set, exclude all groups that have no child groups, hosts, or variables.',
        )
        parser.add_argument(
            '--instance-id-var',
@@ -177,7 +178,7 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='v',
-            help='host variable that ' 'specifies the unique, immutable instance ID, may be ' 'specified as "foo.bar" to traverse nested dicts.',
+            help='host variable that specifies the unique, immutable instance ID, may be specified as "foo.bar" to traverse nested dicts.',
        )

    def set_logging_level(self, verbosity):
@@ -1017,4 +1018,4 @@ class Command(BaseCommand):
         if settings.SQL_DEBUG:
             queries_this_import = connection.queries[queries_before:]
             sqltime = sum(float(x['time']) for x in queries_this_import)
-            logger.warning('Inventory import required %d queries ' 'taking %0.3fs', len(queries_this_import), sqltime)
+            logger.warning('Inventory import required %d queries taking %0.3fs', len(queries_this_import), sqltime)
@@ -47,7 +47,7 @@ class Command(BaseCommand):
                 color = '\033[90m[DISABLED] '
             if no_color:
                 color = ''
-            fmt = '\t' + color + '{0.hostname} capacity={0.capacity} version={1}'
+            fmt = '\t' + color + '{0.hostname} capacity={0.capacity} node_type={0.node_type} version={1}'
             if x.capacity:
                 fmt += ' heartbeat="{0.modified:%Y-%m-%d %H:%M:%S}"'
             print((fmt + '\033[0m').format(x, x.version or '?'))
@@ -36,7 +36,7 @@ class RegisterQueue:
             ig.policy_instance_minimum = self.instance_min
             changed = True

-        if self.is_container_group:
+        if self.is_container_group and (ig.is_container_group != self.is_container_group):
             ig.is_container_group = self.is_container_group
             changed = True

@@ -201,6 +201,8 @@ activity_stream_registrar.connect(Organization)
 activity_stream_registrar.connect(Inventory)
 activity_stream_registrar.connect(Host)
 activity_stream_registrar.connect(Group)
+activity_stream_registrar.connect(Instance)
+activity_stream_registrar.connect(InstanceGroup)
 activity_stream_registrar.connect(InventorySource)
 # activity_stream_registrar.connect(InventoryUpdate)
 activity_stream_registrar.connect(Credential)
@@ -162,7 +162,9 @@ class Instance(HasPolicyEditsMixin, BaseModel):
         returns a dict that is passed to the python interface for the runner method corresponding to that command
         any kwargs will override that key=value combination in the returned dict
         """
-        vargs = dict(file_pattern='/tmp/{}*'.format(JOB_FOLDER_PREFIX % '*'))
+        vargs = dict()
+        if settings.AWX_CLEANUP_PATHS:
+            vargs['file_pattern'] = '/tmp/{}*'.format(JOB_FOLDER_PREFIX % '*')
         vargs.update(kwargs)
         if 'exclude_strings' not in vargs and vargs.get('file_pattern'):
             active_pks = list(UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')).values_list('pk', flat=True))
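Note: with AWX_CLEANUP_PATHS disabled, get_cleanup_task_kwargs() now returns an empty dict, and a later hunk in tasks.py skips cleanup entirely when the kwargs are falsy. A minimal sketch of that guard pattern; build_cleanup_kwargs() and run_cleanup() are hypothetical stand-ins for illustration:

    def build_cleanup_kwargs(cleanup_enabled, **overrides):
        kwargs = {}
        if cleanup_enabled:
            kwargs['file_pattern'] = '/tmp/awx_*'  # illustrative pattern only
        kwargs.update(overrides)
        return kwargs

    kwargs = build_cleanup_kwargs(cleanup_enabled=False)
    if kwargs:                # empty dict means "nothing to clean up"
        run_cleanup(kwargs)   # hypothetical consumer, mirrors _cleanup_images_and_files below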
@@ -1497,7 +1497,12 @@ class UnifiedJob(
         return False

     def log_lifecycle(self, state, blocked_by=None):
-        extra = {'type': self._meta.model_name, 'task_id': self.id, 'state': state}
+        extra = {
+            'type': self._meta.model_name,
+            'task_id': self.id,
+            'state': state,
+            'work_unit_id': self.work_unit_id,
+        }
         if self.unified_job_template:
             extra["template_name"] = self.unified_job_template.name
         if state == "blocked" and blocked_by:
@@ -1506,6 +1511,11 @@ class UnifiedJob(
             extra["blocked_by"] = blocked_by_msg
         else:
             msg = f"{self._meta.model_name}-{self.id} {state.replace('_', ' ')}"
+
+        if state == "controller_node_chosen":
+            extra["controller_node"] = self.controller_node or "NOT_SET"
+        elif state == "execution_node_chosen":
+            extra["execution_node"] = self.execution_node or "NOT_SET"
         logger_job_lifecycle.debug(msg, extra=extra)

     @property
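Note: the lifecycle logger carries its context in the logging `extra` dict rather than in the message string, so downstream handlers (a JSON formatter, for example) can index on fields like state and work_unit_id. A minimal self-contained sketch of that pattern; the logger name is chosen for illustration:

    import logging

    logger = logging.getLogger('job_lifecycle')
    logging.basicConfig(level=logging.DEBUG)

    extra = {
        'type': 'job',
        'task_id': 42,
        'state': 'execution_node_chosen',
        'work_unit_id': None,  # may not be assigned yet; logged as early as possible
    }
    # keys from extra become attributes on the LogRecord, not part of the message
    logger.debug('job-42 execution node chosen', extra=extra)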
@@ -291,6 +291,7 @@ class TaskManager:
             # act as the controller for k8s API interaction
             try:
                 task.controller_node = Instance.choose_online_control_plane_node()
+                task.log_lifecycle("controller_node_chosen")
             except IndexError:
                 logger.warning("No control plane nodes available to run containerized job {}".format(task.log_format))
                 return
@@ -298,19 +299,23 @@ class TaskManager:
             # project updates and system jobs don't *actually* run in pods, so
             # just pick *any* non-containerized host and use it as the execution node
             task.execution_node = Instance.choose_online_control_plane_node()
+            task.log_lifecycle("execution_node_chosen")
             logger.debug('Submitting containerized {} to queue {}.'.format(task.log_format, task.execution_node))
         else:
             task.instance_group = rampart_group
             task.execution_node = instance.hostname
+            task.log_lifecycle("execution_node_chosen")
             if instance.node_type == 'execution':
                 try:
                     task.controller_node = Instance.choose_online_control_plane_node()
+                    task.log_lifecycle("controller_node_chosen")
                 except IndexError:
                     logger.warning("No control plane nodes available to manage {}".format(task.log_format))
                     return
             else:
                 # control plane nodes will manage jobs locally for performance and resilience
                 task.controller_node = task.execution_node
+                task.log_lifecycle("controller_node_chosen")
             logger.debug('Submitting job {} to queue {} controlled by {}.'.format(task.log_format, task.execution_node, task.controller_node))
         with disable_activity_stream():
             task.celery_task_id = str(uuid.uuid4())
@@ -34,7 +34,6 @@ from awx.main.models import (
     ExecutionEnvironment,
     Group,
     Host,
     InstanceGroup,
     Inventory,
     InventorySource,
     Job,
@@ -377,6 +376,7 @@ def model_serializer_mapping():
         models.Inventory: serializers.InventorySerializer,
         models.Host: serializers.HostSerializer,
         models.Group: serializers.GroupSerializer,
+        models.Instance: serializers.InstanceSerializer,
         models.InstanceGroup: serializers.InstanceGroupSerializer,
         models.InventorySource: serializers.InventorySourceSerializer,
         models.Credential: serializers.CredentialSerializer,
@@ -675,9 +675,3 @@ def create_access_token_user_if_missing(sender, **kwargs):
         post_save.disconnect(create_access_token_user_if_missing, sender=OAuth2AccessToken)
         obj.save()
         post_save.connect(create_access_token_user_if_missing, sender=OAuth2AccessToken)
-
-
-# Connect the Instance Group to Activity Stream receivers.
-post_save.connect(activity_stream_create, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_create")
-pre_save.connect(activity_stream_update, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_update")
-pre_delete.connect(activity_stream_delete, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_delete")
@@ -85,7 +85,7 @@ from awx.main.models import (
     build_safe_env,
 )
 from awx.main.constants import ACTIVE_STATES
-from awx.main.exceptions import AwxTaskError, PostRunError
+from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
 from awx.main.queue import CallbackQueueDispatcher
 from awx.main.dispatch.publish import task
 from awx.main.dispatch import get_local_queuename, reaper
@@ -108,7 +108,7 @@ from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
 from awx.main.utils.reload import stop_local_services
 from awx.main.utils.pglock import advisory_lock
 from awx.main.utils.handlers import SpecialInventoryHandler
-from awx.main.utils.receptor import get_receptor_ctl, worker_info, get_conn_type, get_tls_client, worker_cleanup
+from awx.main.utils.receptor import get_receptor_ctl, worker_info, get_conn_type, get_tls_client, worker_cleanup, administrative_workunit_reaper
 from awx.main.consumers import emit_channel_notification
 from awx.main import analytics
 from awx.conf import settings_registry
@@ -191,6 +191,8 @@ def inform_cluster_of_shutdown():

 @task(queue=get_local_queuename)
 def apply_cluster_membership_policies():
+    from awx.main.signals import disable_activity_stream
+
     started_waiting = time.time()
     with advisory_lock('cluster_policy_lock', wait=True):
         lock_time = time.time() - started_waiting
@@ -282,18 +284,19 @@ def apply_cluster_membership_policies():

         # On a differential basis, apply instances to groups
         with transaction.atomic():
-            for g in actual_groups:
-                if g.obj.is_container_group:
-                    logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
-                    continue
-                instances_to_add = set(g.instances) - set(g.prior_instances)
-                instances_to_remove = set(g.prior_instances) - set(g.instances)
-                if instances_to_add:
-                    logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
-                    g.obj.instances.add(*instances_to_add)
-                if instances_to_remove:
-                    logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
-                    g.obj.instances.remove(*instances_to_remove)
+            with disable_activity_stream():
+                for g in actual_groups:
+                    if g.obj.is_container_group:
+                        logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
+                        continue
+                    instances_to_add = set(g.instances) - set(g.prior_instances)
+                    instances_to_remove = set(g.prior_instances) - set(g.instances)
+                    if instances_to_add:
+                        logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
+                        g.obj.instances.add(*instances_to_add)
+                    if instances_to_remove:
+                        logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
+                        g.obj.instances.remove(*instances_to_remove)
         logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
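Note: wrapping the loop in disable_activity_stream() keeps routine policy recalculation from flooding the activity stream with associate/disassociate entries; only deliberate user actions should appear there. A minimal sketch of the suppression pattern using contextlib, independent of the AWX implementation:

    from contextlib import contextmanager

    AUDIT_ENABLED = True  # module-level flag standing in for signal receivers

    @contextmanager
    def disable_audit():
        global AUDIT_ENABLED
        AUDIT_ENABLED, previous = False, AUDIT_ENABLED
        try:
            yield
        finally:
            AUDIT_ENABLED = previous  # always restore, even on error

    with disable_audit():
        pass  # bulk membership updates here would produce no audit entries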
@@ -397,20 +400,22 @@ def _cleanup_images_and_files(**kwargs):
         return
     this_inst = Instance.objects.me()
     runner_cleanup_kwargs = this_inst.get_cleanup_task_kwargs(**kwargs)
-    stdout = ''
-    with StringIO() as buffer:
-        with redirect_stdout(buffer):
-            ansible_runner.cleanup.run_cleanup(runner_cleanup_kwargs)
-            stdout = buffer.getvalue()
-    if '(changed: True)' in stdout:
-        logger.info(f'Performed local cleanup with kwargs {kwargs}, output:\n{stdout}')
+    if runner_cleanup_kwargs:
+        stdout = ''
+        with StringIO() as buffer:
+            with redirect_stdout(buffer):
+                ansible_runner.cleanup.run_cleanup(runner_cleanup_kwargs)
+                stdout = buffer.getvalue()
+        if '(changed: True)' in stdout:
+            logger.info(f'Performed local cleanup with kwargs {kwargs}, output:\n{stdout}')

     # if we are the first instance alphabetically, then run cleanup on execution nodes
     checker_instance = Instance.objects.filter(node_type__in=['hybrid', 'control'], enabled=True, capacity__gt=0).order_by('-hostname').first()
     if checker_instance and this_inst.hostname == checker_instance.hostname:
         logger.info(f'Running execution node cleanup with kwargs {kwargs}')
         for inst in Instance.objects.filter(node_type='execution', enabled=True, capacity__gt=0):
             runner_cleanup_kwargs = inst.get_cleanup_task_kwargs(**kwargs)
             if not runner_cleanup_kwargs:
                 continue
             try:
                 stdout = worker_cleanup(inst.hostname, runner_cleanup_kwargs)
                 if '(changed: True)' in stdout:
@@ -532,7 +537,7 @@ def inspect_execution_nodes(instance_list):
             # check
             logger.warn(f'Execution node attempting to rejoin as instance {hostname}.')
             execution_node_health_check.apply_async([hostname])
-        elif instance.capacity == 0:
+        elif instance.capacity == 0 and instance.enabled:
             # nodes with proven connection but need remediation run health checks are reduced frequency
             if not instance.last_health_check or (nowtime - instance.last_health_check).total_seconds() >= settings.EXECUTION_NODE_REMEDIATION_CHECKS:
                 # Periodically re-run the health check of errored nodes, in case someone fixed it
@@ -649,6 +654,8 @@ def awx_receptor_workunit_reaper():
             receptor_ctl.simple_command(f"work cancel {job.work_unit_id}")
             receptor_ctl.simple_command(f"work release {job.work_unit_id}")

+    administrative_workunit_reaper(receptor_work_list)
+

 @task(queue=get_local_queuename)
 def awx_k8s_reaper():
@@ -1542,6 +1549,8 @@ class BaseTask(object):
                     # ensure failure notification sends even if playbook_on_stats event is not triggered
                     handle_success_and_failure_notifications.apply_async([self.instance.job.id])

+        except ReceptorNodeNotFound as exc:
+            extra_update_fields['job_explanation'] = str(exc)
         except Exception:
             # this could catch programming or file system errors
             extra_update_fields['result_traceback'] = traceback.format_exc()
@@ -1905,6 +1914,7 @@ class RunJob(BaseTask):
                 status='running',
                 instance_group=pu_ig,
                 execution_node=pu_en,
+                controller_node=pu_en,
                 celery_task_id=job.celery_task_id,
             )
             if branch_override:
@@ -1913,6 +1923,8 @@ class RunJob(BaseTask):
             if 'update_' not in sync_metafields['job_tags']:
                 sync_metafields['scm_revision'] = job_revision
             local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
+            local_project_sync.log_lifecycle("controller_node_chosen")
+            local_project_sync.log_lifecycle("execution_node_chosen")
             create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
             # save the associated job before calling run() so that a
             # cancel() call on the job can cancel the project update
@@ -2205,10 +2217,13 @@ class RunProjectUpdate(BaseTask):
                     status='running',
                     instance_group=instance_group,
                     execution_node=project_update.execution_node,
+                    controller_node=project_update.execution_node,
                     source_project_update=project_update,
                     celery_task_id=project_update.celery_task_id,
                 )
             )
+            local_inv_update.log_lifecycle("controller_node_chosen")
+            local_inv_update.log_lifecycle("execution_node_chosen")
             try:
                 create_partition(local_inv_update.event_class._meta.db_table, start=local_inv_update.created)
                 inv_update_class().run(local_inv_update.id)
@@ -2656,10 +2671,13 @@ class RunInventoryUpdate(BaseTask):
                     job_tags=','.join(sync_needs),
                     status='running',
                     execution_node=Instance.objects.me().hostname,
+                    controller_node=Instance.objects.me().hostname,
                     instance_group=inventory_update.instance_group,
                     celery_task_id=inventory_update.celery_task_id,
                 )
             )
+            local_project_sync.log_lifecycle("controller_node_chosen")
+            local_project_sync.log_lifecycle("execution_node_chosen")
             create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
             # associate the inventory update before calling run() so that a
             # cancel() call on the inventory update can cancel the project update
@@ -3062,10 +3080,10 @@ class AWXReceptorJob:
         finally:
             # Make sure to always release the work unit if we established it
             if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
-                receptor_ctl.simple_command(f"work release {self.unit_id}")
-            # If an error occured without the job itself failing, it could be a broken instance
-            if self.work_type == 'ansible-runner' and ((res is None) or (getattr(res, 'rc', None) is None)):
-                execution_node_health_check(self.task.instance.execution_node)
+                try:
+                    receptor_ctl.simple_command(f"work release {self.unit_id}")
+                except Exception:
+                    logger.exception(f"Error releasing work unit {self.unit_id}.")

     @property
     def sign_work(self):
@@ -3089,7 +3107,21 @@ class AWXReceptorJob:
             _kw['tlsclient'] = get_tls_client(use_stream_tls)
         result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params, signwork=self.sign_work, **_kw)
         self.unit_id = result['unitid']
+        # Update the job with the work unit in-memory so that the log_lifecycle
+        # will print out the work unit that is to be associated with the job in the database
+        # via the update_model() call.
+        # We want to log the work_unit_id as early as possible. A failure can happen in between
+        # when we start the job in receptor and when we associate the job <-> work_unit_id.
+        # In that case, there will be work running in receptor and Controller will not know
+        # which Job it is associated with.
+        # We do not programatically handle this case. Ideally, we would handle this with a reaper case.
+        # The two distinct job lifecycle log events below allow for us to at least detect when this
+        # edge case occurs. If the lifecycle event work_unit_id_received occurs without the
+        # work_unit_id_assigned event then this case may have occured.
+        self.task.instance.work_unit_id = result['unitid']  # Set work_unit_id in-memory only
+        self.task.instance.log_lifecycle("work_unit_id_received")
+        self.task.update_model(self.task.instance.pk, work_unit_id=result['unitid'])
+        self.task.instance.log_lifecycle("work_unit_id_assigned")

         sockin.close()
         sockout.close()
@@ -3118,9 +3150,14 @@ class AWXReceptorJob:
             resultsock.shutdown(socket.SHUT_RDWR)
             resultfile.close()
         elif res.status == 'error':
-            unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
-            detail = unit_status['Detail']
-            state_name = unit_status['StateName']
+            try:
+                unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
+                detail = unit_status.get('Detail', None)
+                state_name = unit_status.get('StateName', None)
+            except Exception:
+                detail = ''
+                state_name = ''
+                logger.exception(f'An error was encountered while getting status for work unit {self.unit_id}')

             if 'exceeded quota' in detail:
                 logger.warn(detail)
@@ -3137,11 +3174,19 @@ class AWXReceptorJob:
                 try:
                     resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
                     lines = resultsock.readlines()
-                    self.task.instance.result_traceback = b"".join(lines).decode()
-                    self.task.instance.save(update_fields=['result_traceback'])
+                    receptor_output = b"".join(lines).decode()
+                    if receptor_output:
+                        self.task.instance.result_traceback = receptor_output
+                        self.task.instance.save(update_fields=['result_traceback'])
+                    elif detail:
+                        self.task.instance.result_traceback = detail
+                        self.task.instance.save(update_fields=['result_traceback'])
+                    else:
+                        logger.warn(f'No result details or output from {self.task.instance.log_format}, status:\n{unit_status}')
                 except Exception:
                     raise RuntimeError(detail)

         time.sleep(3)
         return res
     # Spawned in a thread so Receptor can start reading before we finish writing, we

@@ -3184,7 +3229,7 @@ class AWXReceptorJob:
             receptor_params["secret_kube_config"] = kubeconfig_yaml
         else:
             private_data_dir = self.runner_params['private_data_dir']
-            if self.work_type == 'ansible-runner':
+            if self.work_type == 'ansible-runner' and settings.AWX_CLEANUP_PATHS:
                 # on execution nodes, we rely on the private data dir being deleted
                 cli_params = f"--private-data-dir={private_data_dir} --delete"
             else:
@@ -3,6 +3,7 @@ import pytest
 from unittest import mock

 from awx.api.versioning import reverse
+from awx.main.models.activity_stream import ActivityStream
 from awx.main.models.ha import Instance

 import redis
@@ -17,6 +18,7 @@ INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, memory=36000000000, cpu_c
 @pytest.mark.django_db
 def test_disabled_zeros_capacity(patch, admin_user):
     instance = Instance.objects.create(**INSTANCE_KWARGS)
+    assert ActivityStream.objects.filter(instance=instance).count() == 1

     url = reverse('api:instance_detail', kwargs={'pk': instance.pk})

@@ -25,12 +27,14 @@ def test_disabled_zeros_capacity(patch, admin_user):

     instance.refresh_from_db()
     assert instance.capacity == 0
+    assert ActivityStream.objects.filter(instance=instance).count() == 2


 @pytest.mark.django_db
 def test_enabled_sets_capacity(patch, admin_user):
     instance = Instance.objects.create(enabled=False, capacity=0, **INSTANCE_KWARGS)
     assert instance.capacity == 0
+    assert ActivityStream.objects.filter(instance=instance).count() == 1

     url = reverse('api:instance_detail', kwargs={'pk': instance.pk})

@@ -39,6 +43,7 @@ def test_enabled_sets_capacity(patch, admin_user):

     instance.refresh_from_db()
     assert instance.capacity > 0
+    assert ActivityStream.objects.filter(instance=instance).count() == 2


 @pytest.mark.django_db
@@ -50,6 +55,20 @@ def test_auditor_user_health_check(get, post, system_auditor):
     post(url=url, user=system_auditor, expect=403)


+@pytest.mark.django_db
+def test_health_check_throws_error(post, admin_user):
+    instance = Instance.objects.create(node_type='execution', **INSTANCE_KWARGS)
+    url = reverse('api:instance_health_check', kwargs={'pk': instance.pk})
+    # we will simulate a receptor error, similar to this one
+    # https://github.com/ansible/receptor/blob/156e6e24a49fbf868734507f9943ac96208ed8f5/receptorctl/receptorctl/socket_interface.py#L204
+    # related to issue https://github.com/ansible/tower/issues/5315
+    with mock.patch('awx.main.utils.receptor.run_until_complete', side_effect=RuntimeError('Remote error: foobar')):
+        post(url=url, user=admin_user, expect=200)
+    instance.refresh_from_db()
+    assert 'Remote error: foobar' in instance.errors
+    assert instance.capacity == 0
+
+
 @pytest.mark.django_db
 @mock.patch.object(redis.client.Redis, 'ping', lambda self: True)
 def test_health_check_usage(get, post, admin_user):
@@ -4,6 +4,7 @@ import pytest

 from awx.api.versioning import reverse
 from awx.main.models import (
+    ActivityStream,
     Instance,
     InstanceGroup,
     ProjectUpdate,
@@ -213,9 +214,23 @@ def test_containerized_group_default_fields(instance_group, kube_credential):
 def test_instance_attach_to_instance_group(post, instance_group, node_type_instance, admin, node_type):
     instance = node_type_instance(hostname=node_type, node_type=node_type)

+    count = ActivityStream.objects.count()
+
     url = reverse(f'api:instance_group_instance_list', kwargs={'pk': instance_group.pk})
     post(url, {'associate': True, 'id': instance.id}, admin, expect=204 if node_type != 'control' else 400)

+    new_activity = ActivityStream.objects.all()[count:]
+    if node_type != 'control':
+        assert len(new_activity) == 2  # the second is an update of the instance group policy
+        new_activity = new_activity[0]
+        assert new_activity.operation == 'associate'
+        assert new_activity.object1 == 'instance_group'
+        assert new_activity.object2 == 'instance'
+        assert new_activity.instance.first() == instance
+        assert new_activity.instance_group.first() == instance_group
+    else:
+        assert not new_activity


 @pytest.mark.django_db
 @pytest.mark.parametrize('node_type', ['control', 'hybrid', 'execution'])
@@ -223,18 +238,46 @@ def test_instance_unattach_from_instance_i
     instance = node_type_instance(hostname=node_type, node_type=node_type)
     instance_group.instances.add(instance)

+    count = ActivityStream.objects.count()
+
     url = reverse(f'api:instance_group_instance_list', kwargs={'pk': instance_group.pk})
     post(url, {'disassociate': True, 'id': instance.id}, admin, expect=204 if node_type != 'control' else 400)

+    new_activity = ActivityStream.objects.all()[count:]
+    if node_type != 'control':
+        assert len(new_activity) == 1
+        new_activity = new_activity[0]
+        assert new_activity.operation == 'disassociate'
+        assert new_activity.object1 == 'instance_group'
+        assert new_activity.object2 == 'instance'
+        assert new_activity.instance.first() == instance
+        assert new_activity.instance_group.first() == instance_group
+    else:
+        assert not new_activity
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize('node_type', ['control', 'hybrid', 'execution'])
+def test_instance_group_attach_to_instance(post, instance_group, node_type_instance, admin, node_type):
+    instance = node_type_instance(hostname=node_type, node_type=node_type)
+
+    count = ActivityStream.objects.count()
+
+    url = reverse(f'api:instance_instance_groups_list', kwargs={'pk': instance.pk})
+    post(url, {'associate': True, 'id': instance_group.id}, admin, expect=204 if node_type != 'control' else 400)
+
+    new_activity = ActivityStream.objects.all()[count:]
+    if node_type != 'control':
+        assert len(new_activity) == 2  # the second is an update of the instance group policy
+        new_activity = new_activity[0]
+        assert new_activity.operation == 'associate'
+        assert new_activity.object1 == 'instance'
+        assert new_activity.object2 == 'instance_group'
+        assert new_activity.instance.first() == instance
+        assert new_activity.instance_group.first() == instance_group
+    else:
+        assert not new_activity


 @pytest.mark.django_db
 @pytest.mark.parametrize('node_type', ['control', 'hybrid', 'execution'])
@@ -242,5 +285,19 @@ def test_instance_group_unattach_from_instance_i
     instance = node_type_instance(hostname=node_type, node_type=node_type)
     instance_group.instances.add(instance)

+    count = ActivityStream.objects.count()
+
     url = reverse(f'api:instance_instance_groups_list', kwargs={'pk': instance.pk})
     post(url, {'disassociate': True, 'id': instance_group.id}, admin, expect=204 if node_type != 'control' else 400)

+    new_activity = ActivityStream.objects.all()[count:]
+    if node_type != 'control':
+        assert len(new_activity) == 1
+        new_activity = new_activity[0]
+        assert new_activity.operation == 'disassociate'
+        assert new_activity.object1 == 'instance'
+        assert new_activity.object2 == 'instance_group'
+        assert new_activity.instance.first() == instance
+        assert new_activity.instance_group.first() == instance_group
+    else:
+        assert not new_activity
awx/main/tests/functional/commands/test_register_queue.py (new file): 26 lines

@@ -0,0 +1,26 @@
+from io import StringIO
+from contextlib import redirect_stdout
+
+import pytest
+
+from awx.main.management.commands.register_queue import RegisterQueue
+from awx.main.models.ha import InstanceGroup
+
+
+@pytest.mark.django_db
+def test_openshift_idempotence():
+    def perform_register():
+        with StringIO() as buffer:
+            with redirect_stdout(buffer):
+                RegisterQueue('default', 100, 0, [], is_container_group=True).register()
+            return buffer.getvalue()
+
+    assert '(changed: True)' in perform_register()
+    assert '(changed: True)' not in perform_register()
+    assert '(changed: True)' not in perform_register()
+
+    ig = InstanceGroup.objects.get(name='default')
+    assert ig.policy_instance_percentage == 100
+    assert ig.policy_instance_minimum == 0
+    assert ig.policy_instance_list == []
+    assert ig.is_container_group is True
@@ -170,7 +170,7 @@ def test_activity_stream_actor(admin_user):


 @pytest.mark.django_db
-def test_annon_user_action():
+def test_anon_user_action():
     with mock.patch('awx.main.signals.get_current_user') as u_mock:
         u_mock.return_value = AnonymousUser()
         inv = Inventory.objects.create(name='ainventory')
@@ -2,6 +2,7 @@ import pytest
 from unittest import mock

 from awx.main.models import AdHocCommand, InventoryUpdate, JobTemplate, ProjectUpdate
+from awx.main.models.activity_stream import ActivityStream
 from awx.main.models.ha import Instance, InstanceGroup
 from awx.main.tasks import apply_cluster_membership_policies
 from awx.api.versioning import reverse
@@ -72,6 +73,7 @@ def test_instance_dup(org_admin, organization, project, instance_factory, instan
     i1 = instance_factory("i1")
     i2 = instance_factory("i2")
     i3 = instance_factory("i3")

     ig_all = instance_group_factory("all", instances=[i1, i2, i3])
     ig_dup = instance_group_factory("duplicates", instances=[i1])
     project.organization.instance_groups.add(ig_all, ig_dup)
@@ -83,7 +85,7 @@ def test_instance_dup(org_admin, organization, project, instance_factory, instan
     api_num_instances_oa = list(list_response2.data.items())[0][1]

     assert actual_num_instances == api_num_instances_auditor
-    # Note: The org_admin will not see the default 'tower' node (instance fixture) because it is not in it's group, as expected
+    # Note: The org_admin will not see the default 'tower' node (instance fixture) because it is not in its group, as expected
     assert api_num_instances_oa == (actual_num_instances - 1)
@@ -94,7 +96,13 @@ def test_policy_instance_few_instances(instance_factory, instance_group_factory)
     ig_2 = instance_group_factory("ig2", percentage=25)
     ig_3 = instance_group_factory("ig3", percentage=25)
     ig_4 = instance_group_factory("ig4", percentage=25)

+    count = ActivityStream.objects.count()
+
     apply_cluster_membership_policies()
+    # running apply_cluster_membership_policies shouldn't spam the activity stream
+    assert ActivityStream.objects.count() == count

     assert len(ig_1.instances.all()) == 1
     assert i1 in ig_1.instances.all()
     assert len(ig_2.instances.all()) == 1
@@ -103,8 +111,12 @@ def test_policy_instance_few_instances(instance_factory, instance_group_factory)
     assert i1 in ig_3.instances.all()
     assert len(ig_4.instances.all()) == 1
     assert i1 in ig_4.instances.all()

     i2 = instance_factory("i2")
+    count += 1
     apply_cluster_membership_policies()
+    assert ActivityStream.objects.count() == count

     assert len(ig_1.instances.all()) == 1
     assert i1 in ig_1.instances.all()
     assert len(ig_2.instances.all()) == 1
@@ -1,16 +1,21 @@
 import logging
 import yaml
 import time
+from enum import Enum, unique

 from receptorctl.socket_interface import ReceptorControl

+from awx.main.exceptions import ReceptorNodeNotFound
+
 from django.conf import settings

-from enum import Enum, unique
-
 logger = logging.getLogger('awx.main.utils.receptor')

 __RECEPTOR_CONF = '/etc/receptor/receptor.conf'

+RECEPTOR_ACTIVE_STATES = ('Pending', 'Running')
+

 @unique
 class ReceptorConnectionType(Enum):
@@ -60,6 +65,35 @@ def get_conn_type(node_name, receptor_ctl):
     for node in all_nodes:
         if node.get('NodeID') == node_name:
             return ReceptorConnectionType(node.get('ConnType'))
+    raise ReceptorNodeNotFound(f'Instance {node_name} is not in the receptor mesh')
+
+
+def administrative_workunit_reaper(work_list=None):
+    """
+    This releases completed work units that were spawned by actions inside of this module
+    specifically, this should catch any completed work unit left by
+    - worker_info
+    - worker_cleanup
+    These should ordinarily be released when the method finishes, but this is a
+    cleanup of last-resort, in case something went awry
+    """
+    receptor_ctl = get_receptor_ctl()
+    if work_list is None:
+        work_list = receptor_ctl.simple_command("work list")
+
+    for unit_id, work_data in work_list.items():
+        extra_data = work_data.get('ExtraData')
+        if (extra_data is None) or (extra_data.get('RemoteWorkType') != 'ansible-runner'):
+            continue  # if this is not ansible-runner work, we do not want to touch it
+        params = extra_data.get('RemoteParams', {}).get('params')
+        if not params:
+            continue
+        if not (params == '--worker-info' or params.startswith('cleanup')):
+            continue  # if this is not a cleanup or health check, we do not want to touch it
+        if work_data.get('StateName') in RECEPTOR_ACTIVE_STATES:
+            continue  # do not want to touch active work units
+        logger.info(f'Reaping orphaned work unit {unit_id} with params {params}')
+        receptor_ctl.simple_command(f"work release {unit_id}")
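Note: the reaper deliberately whitelists only the work units this module itself creates (params of '--worker-info' or 'cleanup ...') and skips anything in RECEPTOR_ACTIVE_STATES, so user jobs and in-flight work are never touched. A hedged usage sketch, reusing a work list already fetched by the caller, as the periodic task in tasks.py does with receptor_work_list:

    # inside a periodic task that already holds the receptor work list
    receptor_work_list = receptor_ctl.simple_command("work list")
    # ... cancel/release work units that belong to dead jobs first ...
    administrative_workunit_reaper(receptor_work_list)  # passing None would re-fetch the list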
 class RemoteJobError(RuntimeError):

@@ -95,7 +129,7 @@ def run_until_complete(node, timing_data=None, **kwargs):
         while run_timing < 20.0:
             status = receptor_ctl.simple_command(f'work status {unit_id}')
             state_name = status.get('StateName')
-            if state_name not in ('Pending', 'Running'):
+            if state_name not in RECEPTOR_ACTIVE_STATES:
                 break
             run_timing = time.time() - run_start
             time.sleep(0.5)
@@ -110,9 +144,10 @@

     finally:

-        res = receptor_ctl.simple_command(f"work release {unit_id}")
-        if res != {'released': unit_id}:
-            logger.warn(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}')
+        if settings.RECEPTOR_RELEASE_WORK:
+            res = receptor_ctl.simple_command(f"work release {unit_id}")
+            if res != {'released': unit_id}:
+                logger.warn(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}')

         receptor_ctl.close()
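Note: gating every "work release" behind settings.RECEPTOR_RELEASE_WORK means an operator can flip one setting to keep finished work units (and their result streams) around for debugging; the default of True, re-added with a comment in the settings hunk below, preserves the old delete-on-completion behavior. A minimal sketch of the pattern under that assumption:

    # defaults.py: documented setting, default keeps prior behavior
    RECEPTOR_RELEASE_WORK = True

    # call site: skipping the release keeps the unit visible to `receptorctl work list`
    if settings.RECEPTOR_RELEASE_WORK:
        receptor_ctl.simple_command(f"work release {unit_id}")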
@@ -153,6 +188,9 @@ def worker_info(node_name, work_type='ansible-runner'):
             else:
                 error_list.append(details)

+    except (ReceptorNodeNotFound, RuntimeError) as exc:
+        error_list.append(str(exc))
+
     # If we have a connection error, missing keys would be trivial consequence of that
     if not data['errors']:
         # see tasks.py usage of keys
@@ -68,7 +68,6 @@ DATABASES = {
 # the K8S cluster where awx itself is running)
 IS_K8S = False

-RECEPTOR_RELEASE_WORK = True
 AWX_CONTAINER_GROUP_K8S_API_TIMEOUT = 10
 AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE = os.getenv('MY_POD_NAMESPACE', 'default')
 # Timeout when waiting for pod to enter running state. If the pod is still in pending state , it will be terminated. Valid time units are "s", "m", "h". Example : "5m" , "10s".
@@ -426,7 +425,7 @@ os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199')
 # heartbeat period can factor into some forms of logic, so it is maintained as a setting here
 CLUSTER_NODE_HEARTBEAT_PERIOD = 60
 RECEPTOR_SERVICE_ADVERTISEMENT_PERIOD = 60  # https://github.com/ansible/receptor/blob/aa1d589e154d8a0cb99a220aff8f98faf2273be6/pkg/netceptor/netceptor.go#L34
-EXECUTION_NODE_REMEDIATION_CHECKS = 60 * 10  # once every 10 minutes check if an execution node errors have been resolved
+EXECUTION_NODE_REMEDIATION_CHECKS = 60 * 30  # once every 30 minutes check if an execution node errors have been resolved

 BROKER_URL = 'unix:///var/run/redis/redis.sock'
 CELERYBEAT_SCHEDULE = {
@@ -931,6 +930,9 @@ AWX_CALLBACK_PROFILE = False
 # Delete temporary directories created to store playbook run-time
 AWX_CLEANUP_PATHS = True

+# Delete completed work units in receptor
+RECEPTOR_RELEASE_WORK = True
+
 MIDDLEWARE = [
     'django_guid.middleware.GuidMiddleware',
     'awx.main.middleware.TimingMiddleware',
@@ -98,8 +98,13 @@ const AuthorizedRoutes = ({ routeConfig }) => {
   );
 };

-const ProtectedRoute = ({ children, ...rest }) => {
-  const { authRedirectTo, setAuthRedirectTo } = useSession();
+export function ProtectedRoute({ children, ...rest }) {
+  const {
+    authRedirectTo,
+    setAuthRedirectTo,
+    loginRedirectOverride,
+    isUserBeingLoggedOut,
+  } = useSession();
   const location = useLocation();

   useEffect(() => {
@@ -120,8 +125,16 @@ const ProtectedRoute = ({ children, ...rest }) => {
     );
   }

+  if (
+    loginRedirectOverride &&
+    !window.location.href.includes('/login') &&
+    !isUserBeingLoggedOut
+  ) {
+    window.location.replace(loginRedirectOverride);
+    return null;
+  }
   return <Redirect to="/login" />;
-};
+}

 function App() {
   const history = useHistory();
@@ -3,7 +3,7 @@ import { act } from 'react-dom/test-utils';
 import { RootAPI } from 'api';
 import * as SessionContext from 'contexts/Session';
 import { mountWithContexts } from '../testUtils/enzymeHelpers';
-import App from './App';
+import App, { ProtectedRoute } from './App';

 jest.mock('./api');
@@ -20,6 +20,8 @@ describe('<App />', () => {
     const contextValues = {
       setAuthRedirectTo: jest.fn(),
       isSessionExpired: false,
+      isUserBeingLoggedOut: false,
+      loginRedirectOverride: null,
     };
     jest
       .spyOn(SessionContext, 'useSession')
@@ -32,4 +34,36 @@ describe('<App />', () => {
     expect(wrapper.length).toBe(1);
     jest.clearAllMocks();
   });

+  test('redirect to login override', async () => {
+    const { location } = window;
+    delete window.location;
+    window.location = {
+      replace: jest.fn(),
+      href: '/',
+    };
+
+    expect(window.location.replace).not.toHaveBeenCalled();
+
+    const contextValues = {
+      setAuthRedirectTo: jest.fn(),
+      isSessionExpired: false,
+      isUserBeingLoggedOut: false,
+      loginRedirectOverride: '/sso/test',
+    };
+    jest
+      .spyOn(SessionContext, 'useSession')
+      .mockImplementation(() => contextValues);
+
+    await act(async () => {
+      mountWithContexts(
+        <ProtectedRoute>
+          <div>foo</div>
+        </ProtectedRoute>
+      );
+    });
+
+    expect(window.location.replace).toHaveBeenCalled();
+    window.location = location;
+  });
 });
@@ -18,10 +18,7 @@ const QS_CONFIG = (order_by = 'name') =>

 function AssociateModal({
   header = t`Items`,
-  columns = [
-    { key: 'hostname', name: t`Name` },
-    { key: 'node_type', name: t`Node Type` },
-  ],
+  columns = [],
   title = t`Select Items`,
   onClose,
   onAssociate,
@@ -128,9 +128,9 @@ function checkForError(launchConfig, surveyConfig, values) {
         hasError = true;
       }
     }
-    if (isNumeric && (value || value === 0)) {
+    if (isNumeric) {
       if (
-        (value < question.min || value > question.max) &&
+        (value < question.min || value > question.max || value === '') &&
         question.required
       ) {
         hasError = true;
@@ -5,10 +5,11 @@ import React, {
   useRef,
   useCallback,
 } from 'react';
-import { useHistory } from 'react-router-dom';
+import { useHistory, Redirect } from 'react-router-dom';
 import { DateTime } from 'luxon';
 import { RootAPI, MeAPI } from 'api';
 import { isAuthenticated } from 'util/auth';
+import useRequest from 'hooks/useRequest';
 import { SESSION_TIMEOUT_KEY } from '../constants';

 // The maximum supported timeout for setTimeout(), in milliseconds,
@@ -72,8 +73,31 @@ function SessionProvider({ children }) {
   const [sessionTimeout, setSessionTimeout] = useStorage(SESSION_TIMEOUT_KEY);
   const [sessionCountdown, setSessionCountdown] = useState(0);
   const [authRedirectTo, setAuthRedirectTo] = useState('/');
+  const [isUserBeingLoggedOut, setIsUserBeingLoggedOut] = useState(false);
+
+  const {
+    request: fetchLoginRedirectOverride,
+    result: { loginRedirectOverride },
+    isLoading,
+  } = useRequest(
+    useCallback(async () => {
+      const { data } = await RootAPI.read();
+      return {
+        loginRedirectOverride: data?.login_redirect_override,
+      };
+    }, []),
+    {
+      loginRedirectOverride: null,
+      isLoading: true,
+    }
+  );
+
+  useEffect(() => {
+    fetchLoginRedirectOverride();
+  }, [fetchLoginRedirectOverride]);

   const logout = useCallback(async () => {
+    setIsUserBeingLoggedOut(true);
     if (!isSessionExpired.current) {
       setAuthRedirectTo('/logout');
     }
@@ -82,14 +106,13 @@ function SessionProvider({ children }) {
     setSessionCountdown(0);
     clearTimeout(sessionTimeoutId.current);
     clearInterval(sessionIntervalId.current);
+    return <Redirect to="/login" />;
   }, [setSessionTimeout, setSessionCountdown]);

   useEffect(() => {
-    if (!isAuthenticated(document.cookie)) {
-      history.replace('/login');
-      return () => {};
-    }
-
     const calcRemaining = () => {
       if (sessionTimeout) {
         return Math.max(
@@ -140,9 +163,15 @@ function SessionProvider({ children }) {
     clearInterval(sessionIntervalId.current);
   }, []);

+  if (isLoading) {
+    return null;
+  }
+
   return (
     <SessionContext.Provider
       value={{
+        isUserBeingLoggedOut,
+        loginRedirectOverride,
         authRedirectTo,
         handleSessionContinue,
         isSessionExpired,
@@ -188,6 +188,10 @@ function ActivityStream() {
             >
               {t`Notification Templates`}
             </SelectOption>
+            <SelectOption
+              key="instance"
+              value="instance"
+            >{t`Instances`}</SelectOption>
             <SelectOption key="instance_groups" value="instance_group">
               {t`Instance Groups`}
             </SelectOption>
@@ -1,7 +1,7 @@
 import React, { useCallback, useEffect, useState } from 'react';

 import { t } from '@lingui/macro';
-import { Route, Switch } from 'react-router-dom';
+import { Route, Switch, useLocation } from 'react-router-dom';

 import useRequest from 'hooks/useRequest';
 import { SettingsAPI } from 'api';
@@ -14,6 +14,7 @@ import ContainerGroupAdd from './ContainerGroupAdd';
 import ContainerGroup from './ContainerGroup';

 function InstanceGroups() {
+  const { pathname } = useLocation();
   const {
     request: settingsRequest,
     isLoading: isSettingsRequestLoading,
@@ -62,10 +63,14 @@ function InstanceGroups() {
     });
   }, []);

+  const streamType = pathname.includes('instances')
+    ? 'instance'
+    : 'instance_group';
+
   return (
     <>
       <ScreenHeader
-        streamType="instance_group"
+        streamType={streamType}
         breadcrumbConfig={breadcrumbConfig}
       />
       <Switch>
@@ -1,10 +1,20 @@
 import React from 'react';
 import { shallow } from 'enzyme';

+import { InstanceGroupsAPI } from 'api';
 import InstanceGroups from './InstanceGroups';

+const mockUseLocationValue = {
+  pathname: '',
+};
 jest.mock('api');
+jest.mock('react-router-dom', () => ({
+  ...jest.requireActual('react-router-dom'),
+  useLocation: () => mockUseLocationValue,
+}));
 describe('<InstanceGroups/>', () => {
   test('should set breadcrumbs', () => {
+    mockUseLocationValue.pathname = '/instance_groups';

     const wrapper = shallow(<InstanceGroups />);

     const header = wrapper.find('ScreenHeader');
@@ -15,4 +25,17 @@ describe('<InstanceGroups/>', () => {
       '/instance_groups/container_group/add': 'Create new container group',
     });
   });
+  test('should set breadcrumbs', async () => {
+    mockUseLocationValue.pathname = '/instance_groups/1/instances';
+    InstanceGroupsAPI.readInstances.mockResolvedValue({
+      data: { results: [{ hostname: 'EC2', id: 1 }] },
+    });
+    InstanceGroupsAPI.readInstanceOptions.mockResolvedValue({
+      data: { actions: {} },
+    });
+
+    const wrapper = shallow(<InstanceGroups />);
+
+    expect(wrapper.find('ScreenHeader').prop('streamType')).toEqual('instance');
+  });
 });
@@ -147,7 +147,10 @@ function InstanceList() {
   const fetchInstancesToAssociate = useCallback(
     (params) =>
       InstancesAPI.read(
-        mergeParams(params, { not__rampart_groups__id: instanceGroupId })
+        mergeParams(params, {
+          ...{ not__rampart_groups__id: instanceGroupId },
+          ...{ not__node_type: 'control' },
+        })
       ),
     [instanceGroupId]
   );
@@ -280,6 +283,10 @@ function InstanceList() {
           title={t`Select Instances`}
           optionsRequest={readInstancesOptions}
           displayKey="hostname"
+          columns={[
+            { key: 'hostname', name: t`Name` },
+            { key: 'node_type', name: t`Node Type` },
+          ]}
         />
       )}
       {error && (
@@ -47,18 +47,12 @@ function AWXLogin({ alt, isAuthenticated }) {
     isLoading: isCustomLoginInfoLoading,
     error: customLoginInfoError,
     request: fetchCustomLoginInfo,
-    result: {
-      brandName,
-      logo,
-      loginInfo,
-      socialAuthOptions,
-      loginRedirectOverride,
-    },
+    result: { brandName, logo, loginInfo, socialAuthOptions },
   } = useRequest(
     useCallback(async () => {
       const [
         {
-          data: { custom_logo, custom_login_info, login_redirect_override },
+          data: { custom_logo, custom_login_info },
         },
         {
           data: { BRAND_NAME },
@@ -78,7 +72,6 @@ function AWXLogin({ alt, isAuthenticated }) {
         logo: logoSrc,
         loginInfo: custom_login_info,
         socialAuthOptions: authData,
-        loginRedirectOverride: login_redirect_override,
       };
     }, []),
     {
@@ -118,10 +111,6 @@ function AWXLogin({ alt, isAuthenticated }) {
   if (isCustomLoginInfoLoading) {
     return null;
   }
-  if (!isAuthenticated(document.cookie) && loginRedirectOverride) {
-    window.location.replace(loginRedirectOverride);
-    return null;
-  }
   if (isAuthenticated(document.cookie)) {
     return <Redirect to={authRedirectTo || '/'} />;
   }
@@ -94,10 +94,9 @@ function AzureADEdit() {
   const initialValues = (fields) =>
     Object.keys(fields).reduce((acc, key) => {
       if (fields[key].type === 'list' || fields[key].type === 'nested object') {
-        const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
         acc[key] = fields[key].value
           ? JSON.stringify(fields[key].value, null, 2)
-          : emptyDefault;
+          : null;
       } else {
         acc[key] = fields[key].value ?? '';
       }
@@ -147,8 +147,8 @@ describe('<GitHubDetail />', () => {
     );
     assertDetail(wrapper, 'GitHub OAuth2 Key', 'mock github key');
     assertDetail(wrapper, 'GitHub OAuth2 Secret', 'Encrypted');
-    assertVariableDetail(wrapper, 'GitHub OAuth2 Organization Map', '{}');
-    assertVariableDetail(wrapper, 'GitHub OAuth2 Team Map', '{}');
+    assertVariableDetail(wrapper, 'GitHub OAuth2 Organization Map', 'null');
+    assertVariableDetail(wrapper, 'GitHub OAuth2 Team Map', 'null');
   });

   test('should hide edit button from non-superusers', async () => {
@@ -226,12 +226,12 @@ describe('<GitHubDetail />', () => {
     assertVariableDetail(
       wrapper,
       'GitHub Organization OAuth2 Organization Map',
-      '{}'
+      'null'
     );
     assertVariableDetail(
       wrapper,
       'GitHub Organization OAuth2 Team Map',
-      '{}'
+      'null'
     );
   });
 });
@@ -333,9 +333,13 @@ describe('<GitHubDetail />', () => {
     assertVariableDetail(
       wrapper,
       'GitHub Enterprise OAuth2 Organization Map',
-      '{}'
+      'null'
     );
-    assertVariableDetail(wrapper, 'GitHub Enterprise OAuth2 Team Map', '{}');
+    assertVariableDetail(
+      wrapper,
+      'GitHub Enterprise OAuth2 Team Map',
+      'null'
+    );
   });
 });

@@ -398,12 +402,12 @@ describe('<GitHubDetail />', () => {
     assertVariableDetail(
       wrapper,
       'GitHub Enterprise Organization OAuth2 Organization Map',
-      '{}'
+      'null'
    );
    assertVariableDetail(
      wrapper,
      'GitHub Enterprise Organization OAuth2 Team Map',
-      '{}'
+      'null'
    );
  });
 });

@@ -463,12 +467,12 @@ describe('<GitHubDetail />', () => {
    assertVariableDetail(
      wrapper,
      'GitHub Enterprise Team OAuth2 Organization Map',
-      '{}'
+      'null'
    );
    assertVariableDetail(
      wrapper,
      'GitHub Enterprise Team OAuth2 Team Map',
-      '{}'
+      'null'
    );
  });
 });
@@ -92,10 +92,9 @@ function GitHubEdit() {
   const initialValues = (fields) =>
     Object.keys(fields).reduce((acc, key) => {
       if (fields[key].type === 'list' || fields[key].type === 'nested object') {
-        const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
         acc[key] = fields[key].value
           ? JSON.stringify(fields[key].value, null, 2)
-          : emptyDefault;
+          : null;
       } else {
         acc[key] = fields[key].value ?? '';
       }
@ -94,10 +94,9 @@ function GitHubEnterpriseEdit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
||||
@ -133,7 +133,7 @@ describe('<GitHubEnterpriseEdit />', () => {
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL: '',
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY: '',
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET: '',
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP: {},
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP: null,
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP: {
|
||||
Default: {
|
||||
users: false,
|
||||
|
||||
@ -94,10 +94,9 @@ function GitHubEnterpriseOrgEdit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
||||
@ -146,7 +146,7 @@ describe('<GitHubEnterpriseOrgEdit />', () => {
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY: '',
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET: '',
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME: '',
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP: {},
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP: null,
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP: {
|
||||
Default: {
|
||||
users: false,
|
||||
|
||||
@ -94,10 +94,9 @@ function GitHubEnterpriseTeamEdit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
||||
@ -140,7 +140,7 @@ describe('<GitHubEnterpriseTeamEdit />', () => {
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY: '',
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET: '',
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID: '',
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP: {},
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP: null,
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP: {
|
||||
Default: {
|
||||
users: false,
|
||||
|
||||
@ -94,10 +94,9 @@ function GitHubOrgEdit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
||||
@ -122,7 +122,7 @@ describe('<GitHubOrgEdit />', () => {
|
||||
SOCIAL_AUTH_GITHUB_ORG_KEY: '',
|
||||
SOCIAL_AUTH_GITHUB_ORG_SECRET: '',
|
||||
SOCIAL_AUTH_GITHUB_ORG_NAME: 'new org',
|
||||
SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP: {},
|
||||
SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP: null,
|
||||
SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP: {
|
||||
Default: {
|
||||
users: false,
|
||||
|
||||
@ -94,10 +94,9 @@ function GitHubTeamEdit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
||||
@ -100,10 +100,9 @@ function GoogleOAuth2Edit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
||||
@ -103,10 +103,9 @@ function JobsEdit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
||||
@ -146,10 +146,9 @@ function LDAPEdit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
||||
@ -164,10 +164,9 @@ function MiscAuthenticationEdit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
||||
@ -29,9 +29,9 @@ const authenticationData = {
|
||||
'awx.sso.backends.TACACSPlusBackend',
|
||||
'awx.main.backends.AWXModelBackend',
|
||||
],
|
||||
SOCIAL_AUTH_ORGANIZATION_MAP: {},
|
||||
SOCIAL_AUTH_TEAM_MAP: {},
|
||||
SOCIAL_AUTH_USER_FIELDS: [],
|
||||
SOCIAL_AUTH_ORGANIZATION_MAP: null,
|
||||
SOCIAL_AUTH_TEAM_MAP: null,
|
||||
SOCIAL_AUTH_USER_FIELDS: null,
|
||||
};
|
||||
|
||||
describe('<MiscAuthenticationEdit />', () => {
|
||||
|
||||
@ -112,10 +112,9 @@ function SAMLEdit() {
|
||||
const initialValues = (fields) =>
|
||||
Object.keys(fields).reduce((acc, key) => {
|
||||
if (fields[key].type === 'list' || fields[key].type === 'nested object') {
|
||||
const emptyDefault = fields[key].type === 'list' ? '[]' : '{}';
|
||||
acc[key] = fields[key].value
|
||||
? JSON.stringify(fields[key].value, null, 2)
|
||||
: emptyDefault;
|
||||
: null;
|
||||
} else {
|
||||
acc[key] = fields[key].value ?? '';
|
||||
}
|
||||
|
@ -5,7 +5,7 @@ import { Detail } from 'components/DetailList';
import CodeDetail from 'components/DetailList/CodeDetail';

function sortObj(obj) {
  if (typeof obj !== 'object' || Array.isArray(obj)) {
  if (typeof obj !== 'object' || Array.isArray(obj) || obj === null) {
    return obj;
  }
  const sorted = {};

@ -30,7 +30,7 @@ export default ({ helpText, id, label, type, unit = '', value }) => {
        label={label}
        mode="javascript"
        rows={4}
        value={JSON.stringify(sortObj(value || {}), undefined, 2)}
        value={JSON.stringify(sortObj(value), undefined, 2)}
      />
    );
    break;

@ -42,7 +42,7 @@ export default ({ helpText, id, label, type, unit = '', value }) => {
        label={label}
        mode="javascript"
        rows={4}
        value={JSON.stringify(value || [], undefined, 2)}
        value={JSON.stringify(value, undefined, 2)}
      />
    );
    break;

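The null guard moves into sortObj itself now that the call sites pass value through untouched: typeof null === 'object', so without the extra check a null setting would fall into the key-sorting branch and Object.keys(null) throws a TypeError. The body of that branch is elided by the hunk; the sketch below fills it in on the assumption that it copies keys alphabetically and recurses on values:

    // Sketch of sortObj after the fix; the elided body is assumed to copy
    // keys in sorted order (the recursive call on values is an assumption).
    function sortObj(obj) {
      if (typeof obj !== 'object' || Array.isArray(obj) || obj === null) {
        return obj;
      }
      const sorted = {};
      Object.keys(obj)
        .sort()
        .forEach((key) => {
          sorted[key] = sortObj(obj[key]);
        });
      return sorted;
    }

    sortObj(null); // => null, which JSON.stringify then renders as "null"
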
@ -22,7 +22,7 @@ import CodeeEditor from 'components/CodeEditor';
import { PasswordInput } from 'components/FormField';
import { FormFullWidthLayout } from 'components/FormLayout';
import Popover from 'components/Popover';
import { combine, integer, minMaxValue, required, url } from 'util/validators';
import { combine, minMaxValue, required, url, number } from 'util/validators';
import AlertModal from 'components/AlertModal';
import RevertButton from './RevertButton';

@ -365,12 +365,11 @@ const InputField = ({ name, config, type = 'text', isRequired = false }) => {
  const validators = [
    ...(isRequired ? [required(null)] : []),
    ...(type === 'url' ? [url()] : []),
    ...(type === 'number'
      ? [integer(), minMaxValue(min_value, max_value)]
      : []),
    ...(type === 'number' ? [number(), minMaxValue(min_value, max_value)] : []),
  ];
  const [field, meta] = useField({ name, validate: combine(validators) });
  const isValid = !(meta.touched && meta.error);

  return config ? (
    <SettingGroup
      defaultValue={config.default ?? ''}

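Swapping integer() for number() loosens numeric settings to accept non-integer input while minMaxValue still bounds the range. The hypothetical implementations below stand in for util/validators purely to illustrate the behavioral difference, assuming Formik-style validators that return a message on failure and undefined on success:

    // Hypothetical validator factories, for illustration only — the real
    // ones live in util/validators.
    const integer = () => (v) =>
      Number.isInteger(Number(v)) ? undefined : 'Must be an integer';
    const number = () => (v) =>
      Number.isNaN(Number(v)) ? 'Must be a number' : undefined;

    integer()('2.5'); // => 'Must be an integer'  (old behavior: rejected)
    number()('2.5');  // => undefined             (new behavior: accepted)
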
@ -382,6 +381,7 @@ const InputField = ({ name, config, type = 'text', isRequired = false }) => {
      validated={isValid ? 'default' : 'error'}
    >
      <TextInput
        type={type}
        id={name}
        isRequired={isRequired}
        placeholder={config.placeholder}

@ -440,10 +440,8 @@ const ObjectField = ({ name, config, isRequired = false }) => {
  const [field, meta, helpers] = useField({ name, validate });
  const isValid = !(meta.touched && meta.error);

  const emptyDefault = config?.type === 'list' ? '[]' : '{}';
  const defaultRevertValue = config?.default
    ? JSON.stringify(config.default, null, 2)
    : emptyDefault;
  const defaultRevertValue =
    config?.default !== null ? JSON.stringify(config.default, null, 2) : null;

  return config ? (
    <FormFullWidthLayout>

@ -458,7 +456,12 @@ const ObjectField = ({ name, config, isRequired = false }) => {
    >
      <CodeEditor
        {...field}
        rows="auto"
        value={
          field.value === null
            ? JSON.stringify(field.value, null, 2)
            : field.value
        }
        rows={field.value !== null ? 'auto' : 1}
        id={name}
        mode="javascript"
        onChange={(value) => {

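Two details here keep a null setting honest in the editor: the revert default is only stringified when config.default is non-null, and a null field value is shown as the literal text "null" in a single-row editor instead of being coerced to '{}'. The value ternary leans on the same JSON.stringify(null) === 'null' fact noted earlier; a condensed sketch of the new wiring, with the other CodeEditor props omitted:

    // Condensed sketch of the value/rows logic this hunk introduces.
    const editorValue =
      field.value === null
        ? JSON.stringify(field.value, null, 2) // the string "null"
        : field.value;                         // already a JSON string
    const editorRows = field.value !== null ? 'auto' : 1;
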
@ -113,4 +113,5 @@
    src: "receptor-worker.conf.j2"
    dest: "{{ sources_dest }}/receptor/receptor-worker-{{ item }}.conf"
    mode: '0600'
  with_sequence: start=1 end={{ execution_node_count }}
  with_sequence: start=1 end={{ execution_node_count if execution_node_count | int > 0 else 1}}
  when: execution_node_count | int > 0

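Both halves of this edit are needed: Ansible evaluates the with_sequence range before the when: condition can skip anything, and a sequence running from 1 down to 0 is itself an error, so the template clamps end to 1 and relies on the when: guard to skip the task entirely when no execution nodes are requested.
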
@ -111,7 +111,7 @@ services:
      - "../../docker-compose/_sources/receptor/receptor-hop.conf:/etc/receptor/receptor.conf"
{% for i in range(execution_node_count|int) -%}
  receptor-{{ loop.index }}:
    image: quay.io/awx/awx_devel:devel
    image: "{{ awx_image }}:{{ awx_image_tag }}"
    user: "{{ ansible_user_uid }}"
    container_name: tools_receptor_{{ loop.index }}
    hostname: receptor-{{ loop.index }}