Mirror of https://github.com/ansible/awx.git (synced 2026-02-08 04:54:45 -03:30)

Compare commits: 21.5.0...thenets/ma (39 commits)

7f25309078, 1afa49f3ff, 6f88ea1dc7, f9428c10b9, bb7509498e, 8a06ffbe15,
8ad948f268, 73f808dee7, fecab52f86, 609c67d85e, 8828ea706e, 4070ef3f33,
39f6e2fa32, 1dfdff4a9e, 310e354164, 6d207d2490, 01037fa561, 61f3e5cbed,
44995e944a, d3f15f5784, 2437a84b48, 696f099940, 3f0f538c40, 66529d0f70,
974f845059, b4ef687b60, 2ef531b2dc, 125801ec5b, 691d9d7dc4, 5ca898541f,
24821ff030, 99815f8962, d752e6ce6d, 457dd890cb, 4fbf5e9e2f, 687b4ac71d,
a1b364f80c, 88bf03c6bf, 13fc845bcc
@@ -1,3 +1,2 @@
awx/ui/node_modules
Dockerfile
.git

.gitignore (vendored), 3 changes
@@ -153,9 +153,6 @@ use_dev_supervisor.txt
 /sanity/
 /awx_collection_build/

-# Setup for metrics gathering
-tools/prometheus/prometheus.yml
-
 .idea/*
 *.unison.tmp
 *.#

Makefile, 126 changes
@@ -54,45 +54,6 @@ I18N_FLAG_FILE = .i18n_built
 	VERSION PYTHON_VERSION docker-compose-sources \
 	.git/hooks/pre-commit

-clean-tmp:
-	rm -rf tmp/
-
-clean-venv:
-	rm -rf venv/
-
-clean-dist:
-	rm -rf dist
-
-clean-schema:
-	rm -rf swagger.json
-	rm -rf schema.json
-	rm -rf reference-schema.json
-
-clean-languages:
-	rm -f $(I18N_FLAG_FILE)
-	find ./awx/locale/ -type f -regex ".*\.mo$" -delete
-
-## Remove temporary build files, compiled Python files.
-clean: clean-ui clean-api clean-awxkit clean-dist
-	rm -rf awx/public
-	rm -rf awx/lib/site-packages
-	rm -rf awx/job_status
-	rm -rf awx/job_output
-	rm -rf reports
-	rm -rf tmp
-	rm -rf $(I18N_FLAG_FILE)
-	mkdir tmp
-
-clean-api:
-	rm -rf build $(NAME)-$(VERSION) *.egg-info
-	find . -type f -regex ".*\.py[co]$$" -delete
-	find . -type d -name "__pycache__" -delete
-	rm -f awx/awx_test.sqlite3*
-	rm -rf requirements/vendor
-	rm -rf awx/projects
-
-clean-awxkit:
-	rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*
-
 ## convenience target to assert environment variables are defined
 guard-%:
@@ -117,7 +78,7 @@ virtualenv_awx:
 	    fi; \
 	fi

 ## Install third-party requirements needed for AWX's environment.
+# this does not use system site packages intentionally
 requirements_awx: virtualenv_awx
 	if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
@@ -365,13 +326,75 @@ bulk_data:
 	fi; \
 	$(PYTHON) tools/data_generators/rbac_dummy_data_generator.py --preset=$(DATA_GEN_PRESET)

+# CLEANUP COMMANDS
+# --------------------------------------
+
+## Clean everything. Including temporary build files, compiled Python files.
+clean: clean-tmp clean-ui clean-api clean-awxkit clean-dist
+	rm -rf awx/public
+	rm -rf awx/lib/site-packages
+	rm -rf awx/job_status
+	rm -rf awx/job_output
+	rm -rf reports
+	rm -rf $(I18N_FLAG_FILE)
+
+clean-tmp:
+	rm -rf tmp/
+	mkdir tmp
+
+clean-venv:
+	rm -rf venv/
+
+clean-dist:
+	rm -rf dist
+
+clean-schema:
+	rm -rf swagger.json
+	rm -rf schema.json
+	rm -rf reference-schema.json
+
+clean-languages:
+	rm -f $(I18N_FLAG_FILE)
+	find ./awx/locale/ -type f -regex ".*\.mo$" -delete
+
+clean-api:
+	rm -rf build $(NAME)-$(VERSION) *.egg-info
+	find . -type f -regex ".*\.py[co]$$" -delete
+	find . -type d -name "__pycache__" -delete
+	rm -f awx/awx_test.sqlite3*
+	rm -rf requirements/vendor
+	rm -rf awx/projects
+
+## Clean UI built static files (alias for ui-clean)
+clean-ui: ui-clean
+
+## Clean temp build files from the awxkit
+clean-awxkit:
+	rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*
+
+clean-docker-images:
+	IMAGES_TO_BE_DELETE=' \
+	    quay.io/ansible/receptor \
+	    quay.io/awx/awx_devel \
+	    ansible/receptor \
+	    postgres \
+	    redis \
+	' && \
+	for IMAGE in $$IMAGES_TO_BE_DELETE; do \
+	    echo "Removing image '$$IMAGE'" && \
+	    IMAGE_IDS=$$(docker image ls -a | grep $$IMAGE | awk '{print $$3}') echo "oi" \
+	done
+
+clean-docker-containers:
+clean-docker-volumes:
+
+
 # UI TASKS
 # --------------------------------------

 UI_BUILD_FLAG_FILE = awx/ui/.ui-built

-clean-ui:
+ui-clean:
+	rm -rf node_modules
 	rm -rf awx/ui/node_modules
 	rm -rf awx/ui/build
@@ -381,7 +404,8 @@ clean-ui:
 awx/ui/node_modules:
 	NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn ci

-$(UI_BUILD_FLAG_FILE): awx/ui/node_modules
+$(UI_BUILD_FLAG_FILE):
+	$(MAKE) awx/ui/node_modules
 	$(PYTHON) tools/scripts/compilemessages.py
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run compile-strings
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run build
@@ -452,6 +476,11 @@ COMPOSE_OPTS ?=
 CONTROL_PLANE_NODE_COUNT ?= 1
 EXECUTION_NODE_COUNT ?= 2
 MINIKUBE_CONTAINER_GROUP ?= false
+EXTRA_SOURCES_ANSIBLE_OPTS ?=
+
+ifneq ($(ADMIN_PASSWORD),)
+EXTRA_SOURCES_ANSIBLE_OPTS := -e admin_password=$(ADMIN_PASSWORD) $(EXTRA_SOURCES_ANSIBLE_OPTS)
+endif

 docker-compose-sources: .git/hooks/pre-commit
 	@if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\
@@ -469,7 +498,8 @@ docker-compose-sources: .git/hooks/pre-commit
 	    -e enable_ldap=$(LDAP) \
 	    -e enable_splunk=$(SPLUNK) \
 	    -e enable_prometheus=$(PROMETHEUS) \
-	    -e enable_grafana=$(GRAFANA)
+	    -e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)

 docker-compose: awx/projects docker-compose-sources
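With these two Makefile hunks in place, setting `ADMIN_PASSWORD` when invoking `make docker-compose-sources` (or any target that depends on it, such as `make docker-compose`) forwards the password to the sources playbook as `-e admin_password=...` via `EXTRA_SOURCES_ANSIBLE_OPTS`.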
@@ -558,12 +588,20 @@ Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
 	    -e template_dest=_build_kube_dev \
 	    -e receptor_image=$(RECEPTOR_IMAGE)

 ## Build awx_kube_devel image for development on local Kubernetes environment.
 awx-kube-dev-build: Dockerfile.kube-dev
 	DOCKER_BUILDKIT=1 docker build -f Dockerfile.kube-dev \
 	    --build-arg BUILDKIT_INLINE_CACHE=1 \
 	    --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
 	    -t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .

+## Build awx image for deployment on Kubernetes environment.
+awx-kube-build: Dockerfile
+	DOCKER_BUILDKIT=1 docker build -f Dockerfile \
+	    --build-arg VERSION=$(VERSION) \
+	    --build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
+	    --build-arg HEADLESS=$(HEADLESS) \
+	    -t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .

 # Translation TASKS
 # --------------------------------------
@@ -576,7 +614,7 @@ pot: $(UI_BUILD_FLAG_FILE)
 po: $(UI_BUILD_FLAG_FILE)
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean

-LANG = "en-us"
+LANG = "en_us"
 ## generate API django .pot .po
 messages:
 	@if [ "$(VENV_BASE)" ]; then \
@@ -16,6 +16,7 @@ from awx.conf.license import get_license
 from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
 from awx.main import models
 from awx.main.analytics import register
+from awx.main.scheduler.task_manager_models import TaskManagerInstances

 """
 This module is used to define metrics collected by awx.main.analytics.gather()
@@ -235,25 +236,25 @@ def projects_by_scm_type(since, **kwargs):
 @register('instance_info', '1.2', description=_('Cluster topology and capacity'))
 def instance_info(since, include_hostnames=False, **kwargs):
     info = {}
-    instances = models.Instance.objects.values_list('hostname').values(
-        'uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'hostname', 'enabled'
-    )
-    for instance in instances:
-        consumed_capacity = sum(x.task_impact for x in models.UnifiedJob.objects.filter(execution_node=instance['hostname'], status__in=('running', 'waiting')))
+    # Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
+    active_tasks = models.UnifiedJob.objects.filter(status__in=['running', 'waiting']).only('task_impact', 'controller_node', 'execution_node')
+    tm_instances = TaskManagerInstances(active_tasks, instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
+    for tm_instance in tm_instances.instances_by_hostname.values():
+        instance = tm_instance.obj
         instance_info = {
-            'uuid': instance['uuid'],
-            'version': instance['version'],
-            'capacity': instance['capacity'],
-            'cpu': instance['cpu'],
-            'memory': instance['memory'],
-            'managed_by_policy': instance['managed_by_policy'],
-            'enabled': instance['enabled'],
-            'consumed_capacity': consumed_capacity,
-            'remaining_capacity': instance['capacity'] - consumed_capacity,
+            'uuid': instance.uuid,
+            'version': instance.version,
+            'capacity': instance.capacity,
+            'cpu': instance.cpu,
+            'memory': instance.memory,
+            'managed_by_policy': instance.managed_by_policy,
+            'enabled': instance.enabled,
+            'consumed_capacity': tm_instance.consumed_capacity,
+            'remaining_capacity': instance.capacity - tm_instance.consumed_capacity,
         }
         if include_hostnames is True:
-            instance_info['hostname'] = instance['hostname']
-        info[instance['uuid']] = instance_info
+            instance_info['hostname'] = instance.hostname
+        info[instance.uuid] = instance_info
     return info
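The new code path leans on TaskManagerInstances to tally consumed capacity in one pass over the active tasks instead of issuing one UnifiedJob query per instance. A minimal sketch of that bucketing idea (illustrative names, not AWX's actual implementation):

```python
from collections import defaultdict

def consumed_capacity_by_node(active_tasks):
    """active_tasks: iterable of objects with .task_impact, .controller_node, .execution_node."""
    consumed = defaultdict(int)
    for task in active_tasks:
        # charge the task's impact to each node involved in running it
        for node in {task.controller_node, task.execution_node} - {None, ''}:
            consumed[node] += task.task_impact
    return consumed
```

Walking the active-task queryset once turns an N+1 query pattern into two queries regardless of cluster size.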
@@ -31,7 +31,7 @@ class PubSub(object):
         cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))

     def events(self, select_timeout=5, yield_timeouts=False):
-        if not pg_connection.get_autocommit():
+        if not self.conn.autocommit:
             raise RuntimeError('Listening for events can only be done in autocommit mode')

         while True:
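For context on the autocommit check above: Postgres only delivers NOTIFY payloads at transaction boundaries, so a listener sitting inside an open transaction never sees them. A standalone sketch with psycopg2 (the DSN is hypothetical):

```python
import select
import psycopg2

conn = psycopg2.connect("dbname=awx user=awx")  # hypothetical DSN
conn.autocommit = True  # the connection-level property the hunk checks, instead of Django's global setting

with conn.cursor() as cur:
    cur.execute('LISTEN my_channel;')

while True:
    # wait up to 5 seconds for the socket to become readable, like select_timeout above
    if select.select([conn], [], [], 5) == ([], [], []):
        continue  # timeout, loop again
    conn.poll()
    while conn.notifies:
        notify = conn.notifies.pop(0)
        print(notify.channel, notify.payload)
```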
@@ -46,7 +46,7 @@ class Control(object):
         reply_queue = Control.generate_reply_queue_name()
         self.result = None

-        with pg_bus_conn() as conn:
+        with pg_bus_conn(new_connection=True) as conn:
             conn.listen(reply_queue)
             conn.notify(self.queuename, json.dumps({'control': command, 'reply_to': reply_queue}))
@@ -72,11 +72,9 @@ class PoolWorker(object):
         self.messages_finished = 0
         self.managed_tasks = collections.OrderedDict()
         self.finished = MPQueue(queue_size) if self.track_managed_tasks else NoOpResultQueue()
-        self.last_finished = None
         self.queue = MPQueue(queue_size)
         self.process = Process(target=target, args=(self.queue, self.finished) + args)
         self.process.daemon = True
-        self.scale_down_in = settings.DISPATCHER_SCALE_DOWN_WAIT_TIME

     def start(self):
         self.process.start()

@@ -147,9 +145,6 @@ class PoolWorker(object):
             # state of which events are *currently* being processed.
             logger.warning('Event UUID {} appears to be have been duplicated.'.format(uuid))

-        if finished:
-            self.last_finished = time.time()
-
     @property
     def current_task(self):
         if not self.track_managed_tasks:

@@ -195,14 +190,6 @@ class PoolWorker(object):
     def idle(self):
         return not self.busy

-    @property
-    def ready_to_scale_down(self):
-        if self.busy:
-            return False
-        if self.last_finished is None:
-            return True
-        return time.time() - self.last_finished > self.scale_down_in
-

 class StatefulPoolWorker(PoolWorker):

@@ -263,7 +250,7 @@ class WorkerPool(object):
         except Exception:
             logger.exception('could not fork')
         else:
-            logger.info(f'scaling up worker pid:{worker.pid} total:{len(self.workers)}')
+            logger.debug('scaling up worker pid:{}'.format(worker.pid))
             return idx, worker

     def debug(self, *args, **kwargs):

@@ -402,12 +389,12 @@ class AutoscalePool(WorkerPool):
                     logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
                 orphaned.extend(w.orphaned_tasks)
                 self.workers.remove(w)
-            elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down:
+            elif w.idle and len(self.workers) > self.min_workers:
                 # the process has an empty queue (it's idle) and we have
                 # more processes in the pool than we need (> min)
                 # send this process a message so it will exit gracefully
                 # at the next opportunity
-                logger.info(f'scaling down worker pid:{w.pid} prior total:{len(self.workers)}')
+                logger.debug('scaling down worker pid:{}'.format(w.pid))
                 w.quit()
                 self.workers.remove(w)
             if w.alive:
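These hunks drop the time-debounced scale-down policy and return to reaping any idle worker beyond `min_workers`. A toy contrast of the two predicates (a stand-in class, not the real PoolWorker):

```python
import time

class Worker:
    """Toy stand-in for PoolWorker; only the fields the two policies need."""
    def __init__(self):
        self.busy = False
        self.last_finished = None  # set when a task completes
        self.scale_down_in = 60    # seconds, mirroring the removed DISPATCHER_SCALE_DOWN_WAIT_TIME

    @property
    def idle(self):
        return not self.busy

    @property
    def ready_to_scale_down(self):
        # the debounced policy removed by this diff: keep an idle worker alive
        # for a grace period after its last task, to avoid re-forking under bursty load
        if self.busy:
            return False
        if self.last_finished is None:
            return True
        return time.time() - self.last_finished > self.scale_down_in

w = Worker()
w.last_finished = time.time()
print(w.idle)                 # True: the simpler policy would reap this worker now
print(w.ready_to_scale_down)  # False: the debounced policy keeps it for 60s
```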
@@ -36,7 +36,7 @@ def create_clearsessions_jt(apps, schema_editor):
     if created:
         sched = Schedule(
             name='Cleanup Expired Sessions',
-            rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1' % schedule_time,
+            rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1' % schedule_time,
             description='Cleans out expired browser sessions',
             enabled=True,
             created=now_dt,

@@ -69,7 +69,7 @@ def create_cleartokens_jt(apps, schema_editor):
     if created:
         sched = Schedule(
             name='Cleanup Expired OAuth 2 Tokens',
-            rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1' % schedule_time,
+            rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1' % schedule_time,
             description='Removes expired OAuth 2 access and refresh tokens',
             enabled=True,
             created=now_dt,
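The practical effect of dropping `COUNT=1`: with it, the RRULE expands to a single occurrence, so these cleanup schedules would fire once and never again; without it, the weekly recurrence is unbounded. A quick check, assuming python-dateutil:

```python
from itertools import islice
from dateutil import rrule

once = rrule.rrulestr('DTSTART:20220101T000000Z\nRRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1')
weekly = rrule.rrulestr('DTSTART:20220101T000000Z\nRRULE:FREQ=WEEKLY;INTERVAL=1')

print(list(once))               # exactly one datetime
print(list(islice(weekly, 3)))  # first three of an unbounded weekly series
```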
@@ -236,6 +236,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
             raise ParseError(_('Slice number must be 1 or higher.'))
         return (number, step)

+    def get_sliced_hosts(self, host_queryset, slice_number, slice_count):
+        if slice_count > 1 and slice_number > 0:
+            offset = slice_number - 1
+            host_queryset = host_queryset[offset::slice_count]
+        return host_queryset
+
     def get_script_data(self, hostvars=False, towervars=False, show_all=False, slice_number=1, slice_count=1):
         hosts_kw = dict()
         if not show_all:

@@ -243,10 +249,8 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         fetch_fields = ['name', 'id', 'variables', 'inventory_id']
         if towervars:
             fetch_fields.append('enabled')
-        hosts = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields)
-        if slice_count > 1 and slice_number > 0:
-            offset = slice_number - 1
-            hosts = hosts[offset::slice_count]
+        host_queryset = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields)
+        hosts = self.get_sliced_hosts(host_queryset, slice_number, slice_count)

         data = dict()
         all_group = data.setdefault('all', dict())
@@ -814,7 +814,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
     def _get_inventory_hosts(self, only=['name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id']):
         if not self.inventory:
             return []
-        return self.inventory.hosts.only(*only)
+        host_queryset = self.inventory.hosts.only(*only)
+        return self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)

     def start_job_fact_cache(self, destination, modification_times, timeout=None):
         self.log_lifecycle("start_job_fact_cache")
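The slice arithmetic that `get_sliced_hosts` centralizes is an extended slice over the name-ordered host list: slice N of K starts at offset N-1 and takes every K-th host, so the K slices partition the inventory without overlap. A small illustration:

```python
hosts = [f'host{i}' for i in range(10)]

def sliced(seq, slice_number, slice_count):
    # same rule as get_sliced_hosts, applied to a plain list
    if slice_count > 1 and slice_number > 0:
        return seq[slice_number - 1::slice_count]
    return seq

print(sliced(hosts, 1, 3))  # ['host0', 'host3', 'host6', 'host9']
print(sliced(hosts, 2, 3))  # ['host1', 'host4', 'host7']
print(sliced(hosts, 3, 3))  # ['host2', 'host5', 'host8']
```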
@@ -1406,9 +1406,10 @@ class UnifiedJob(
         timeout = 5
         try:
             running = self.celery_task_id in ControlDispatcher('dispatcher', self.controller_node or self.execution_node).running(timeout=timeout)
-        except (socket.timeout, RuntimeError):
+        except socket.timeout:
             logger.error('could not reach dispatcher on {} within {}s'.format(self.execution_node, timeout))
             running = False
+        except Exception:
+            logger.exception("error encountered when checking task status")
         return running

     @property
@@ -34,12 +34,10 @@ class TaskManagerInstance:


 class TaskManagerInstances:
-    def __init__(self, active_tasks, instances=None):
+    def __init__(self, active_tasks, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled')):
         self.instances_by_hostname = dict()
         if instances is None:
-            instances = (
-                Instance.objects.filter(hostname__isnull=False, enabled=True).exclude(node_type='hop').only('node_type', 'capacity', 'hostname', 'enabled')
-            )
+            instances = Instance.objects.filter(hostname__isnull=False, enabled=True).exclude(node_type='hop').only(*instance_fields)
         for instance in instances:
             self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance)
@@ -402,6 +402,10 @@ class BaseTask(object):
                 raise
             else:
                 time.sleep(1.0)
+                self.instance.refresh_from_db(fields=['cancel_flag'])
+                if self.instance.cancel_flag or signal_callback():
+                    logger.debug(f"Unified job {self.instance.id} was canceled while waiting for project file lock")
+                    return
         waiting_time = time.time() - start_time

         if waiting_time > 1.0:
@@ -1288,10 +1292,6 @@ class RunProjectUpdate(BaseTask):
         # re-create root project folder if a natural disaster has destroyed it
         project_path = instance.project.get_project_path(check_if_exists=False)

-        instance.refresh_from_db(fields=['cancel_flag'])
-        if instance.cancel_flag:
-            logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
-            return
         if instance.launch_type != 'sync':
             self.acquire_lock(instance.project, instance.id)
@@ -444,10 +444,6 @@ EXECUTION_NODE_REMEDIATION_CHECKS = 60 * 30  # once every 30 minutes check if an
 # Amount of time dispatcher will try to reconnect to database for jobs and consuming new work
 DISPATCHER_DB_DOWNTOWN_TOLLERANCE = 40

-# Minimum time to wait after last job finished before scaling down a worker
-# A higher value will free up memory more agressively, but a lower value will require less forking
-DISPATCHER_SCALE_DOWN_WAIT_TIME = 60
-
 BROKER_URL = 'unix:///var/run/redis/redis.sock'
 CELERYBEAT_SCHEDULE = {
     'tower_scheduler': {'task': 'awx.main.tasks.system.awx_periodic_scheduler', 'schedule': timedelta(seconds=30), 'options': {'expires': 20}},
@@ -108,6 +108,9 @@ AWX_DISABLE_TASK_MANAGERS = False
 if 'sqlite3' not in DATABASES['default']['ENGINE']:  # noqa
     DATABASES['default'].setdefault('OPTIONS', dict()).setdefault('application_name', f'{CLUSTER_HOST_ID}-{os.getpid()}-{" ".join(sys.argv)}'[:63])  # noqa

+# Everywhere else we use /var/lib/awx/public/static/ - but this requires running collectstatic.
+# This makes the browsable API work in the dev env without any additional steps.
+STATIC_ROOT = os.path.join(BASE_DIR, 'public', 'static')
+
 # If any local_*.py files are present in awx/settings/, use them to override
 # default settings for development. If not present, we can still run using
@@ -10,7 +10,7 @@ import {
   InfiniteLoader,
   List,
 } from 'react-virtualized';
-import { Button } from '@patternfly/react-core';
+import { Button, Alert } from '@patternfly/react-core';

 import AlertModal from 'components/AlertModal';
 import { CardBody as _CardBody } from 'components/Card';

@@ -99,6 +99,7 @@ function JobOutput({ job, eventRelatedSearchableKeys, eventSearchableKeys }) {
   const scrollHeight = useRef(0);
   const history = useHistory();
   const eventByUuidRequests = useRef([]);
+  const eventsProcessedDelay = useRef(250);

   const fetchEventByUuid = async (uuid) => {
     let promise = eventByUuidRequests.current[uuid];

@@ -156,6 +157,7 @@ function JobOutput({ job, eventRelatedSearchableKeys, eventSearchableKeys }) {
   );
   const [isMonitoringWebsocket, setIsMonitoringWebsocket] = useState(false);
   const [lastScrollPosition, setLastScrollPosition] = useState(0);
+  const [showEventsRefresh, setShowEventsRefresh] = useState(false);

   useEffect(() => {
     if (!isTreeReady || !onReadyEvents.length) {
@@ -196,14 +198,28 @@ function JobOutput({ job, eventRelatedSearchableKeys, eventSearchableKeys }) {
     rebuildEventsTree();
   }, [isFlatMode]); // eslint-disable-line react-hooks/exhaustive-deps

+  const pollForEventsProcessed = useCallback(async () => {
+    const {
+      data: { event_processing_finished },
+    } = await getJobModel(job.type).readDetail(job.id);
+    if (event_processing_finished) {
+      setShowEventsRefresh(true);
+      return;
+    }
+    const fiveMinutes = 1000 * 60 * 5;
+    if (eventsProcessedDelay.current >= fiveMinutes) {
+      return;
+    }
+    setTimeout(pollForEventsProcessed, eventsProcessedDelay.current);
+    eventsProcessedDelay.current *= 2;
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [job.id, job.type, lastScrollPosition]);
+
   useEffect(() => {
     if (!isJobRunning(jobStatus)) {
       setTimeout(() => {
         loadJobEvents().then(() => {
           setWsEvents([]);
           scrollToRow(lastScrollPosition);
         });
       }, 500);
+      if (wsEvents.length) {
+        pollForEventsProcessed();
+      }
       return;
     }
     let batchTimeout;
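The poll loop above starts at 250 ms and doubles the delay until it would reach five minutes. A quick Python sketch of the resulting schedule, mirroring the JS logic:

```python
delay_ms = 250
FIVE_MINUTES_MS = 1000 * 60 * 5

schedule = []
while delay_ms < FIVE_MINUTES_MS:
    schedule.append(delay_ms)  # a poll is scheduled at this delay
    delay_ms *= 2

print(len(schedule), schedule[-1], sum(schedule))
# 11 polls, final delay 256000 ms, ~511.75 s total before giving up
```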
@@ -268,7 +284,8 @@ function JobOutput({ job, eventRelatedSearchableKeys, eventSearchableKeys }) {
       setIsMonitoringWebsocket(false);
       isMounted.current = false;
     };
-  }, [isJobRunning(jobStatus)]); // eslint-disable-line react-hooks/exhaustive-deps
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [isJobRunning(jobStatus), pollForEventsProcessed]);

   useEffect(() => {
     if (isFollowModeEnabled) {
@@ -681,6 +698,26 @@ function JobOutput({ job, eventRelatedSearchableKeys, eventSearchableKeys }) {
         isFollowModeEnabled={isFollowModeEnabled}
         setIsFollowModeEnabled={setIsFollowModeEnabled}
       />
+      {showEventsRefresh ? (
+        <Alert
+          variant="default"
+          title={
+            <>
+              {t`Events processing complete.`}{' '}
+              <Button
+                variant="link"
+                isInline
+                onClick={() => {
+                  loadJobEvents().then(() => {
+                    setWsEvents([]);
+                  });
+                  setShowEventsRefresh(false);
+                }}
+              >{t`Reload output`}</Button>
+            </>
+          }
+        />
+      ) : null}
       <PageControls
         onScrollFirst={handleScrollFirst}
         onScrollLast={handleScrollLast}
@@ -440,8 +440,9 @@ const ObjectField = ({ name, config, revertValue, isRequired = false }) => {
   const [field, meta, helpers] = useField({ name, validate });
   const isValid = !(meta.touched && meta.error);

-  const defaultRevertValue =
-    config?.default !== null ? JSON.stringify(config.default, null, 2) : null;
+  const defaultRevertValue = config?.default
+    ? JSON.stringify(config.default, null, 2)
+    : null;

   return config ? (
     <FormFullWidthLayout>
docs/development/kind.md (new file, 152 lines)

@@ -0,0 +1,152 @@
# Running Development Environment in Kubernetes using Kind Cluster

## Start Kind Cluster

Note: This environment has only been tested on macOS with Docker.

If you do not already have Kind, install it from:
https://kind.sigs.k8s.io/docs/user/quick-start/

Create a Kind cluster config file:

```yml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraMounts:
    - hostPath: /path/to/awx
      containerPath: /awx_devel
  extraPortMappings:
    - containerPort: 30080
      hostPort: 30080
```

Start the Kind cluster:
```
$ kind create cluster --config kind-cluster.yaml
```

Verify the AWX source tree is mounted in the kind-control-plane container:
```
$ docker exec -it kind-control-plane ls /awx_devel
```

## Deploy the AWX Operator

Clone the [awx-operator](https://github.com/ansible/awx-operator).

For the following playbooks to work, you will need to:

```
$ pip install openshift
```

If you are not changing any code in the operator itself, git checkout the latest version from https://github.com/ansible/awx-operator/releases, and then follow the instructions in the awx-operator [README](https://github.com/ansible/awx-operator#basic-install).

If making changes to the operator itself, run the following command in the root of the awx-operator repo. If not, continue to the next section.

### Building and Deploying a Custom AWX Operator Image

```
# in awx-operator repo on the branch you want to use
$ export IMAGE_TAG_BASE=quay.io/<username>/awx-operator
$ make docker-build docker-push deploy
```

Check the operator deployment:
```
$ kubectl get deployments
NAME                              READY   UP-TO-DATE   AVAILABLE   AGE
awx-operator-controller-manager   1/1     1            1           16h
```

## Deploy AWX into Kind Cluster using the AWX Operator

If you have not made any changes to the AWX Dockerfile, run the following command. If you need to test out changes to the Dockerfile, see the "Custom AWX Development Image for Kubernetes" section below.

In the root of awx-operator:

```
$ ansible-playbook ansible/instantiate-awx-deployment.yml \
    -e development_mode=yes \
    -e image=ghcr.io/ansible/awx_kube_devel \
    -e image_version=devel \
    -e image_pull_policy=Always \
    -e service_type=nodeport \
    -e namespace=awx
```

Check the operator with the following commands:

```
# Check the operator deployment
$ kubectl get deployments
NAME                              READY   UP-TO-DATE   AVAILABLE   AGE
awx                               1/1     1            1           16h
awx-operator-controller-manager   1/1     1            1           16h

$ kubectl get pods
NAME                                              READY   STATUS    RESTARTS   AGE
awx-operator-controller-manager-b775bfc7c-fn995   2/2     Running   0          16h
```

If there are errors in the image pull, check that it is using the right tag. You can update the tag that it will pull by editing the deployment.

### Custom AWX Development Image for Kubernetes

In the root of the AWX repo:

```
$ make awx-kube-dev-build
$ docker push ghcr.io/ansible/awx_kube_devel:${COMPOSE_TAG}
```

In the root of awx-operator:

```
$ ansible-playbook ansible/instantiate-awx-deployment.yml \
    -e development_mode=yes \
    -e image=ghcr.io/ansible/awx_kube_devel \
    -e image_version=${COMPOSE_TAG} \
    -e image_pull_policy=Always \
    -e service_type=nodeport \
    -e namespace=$NAMESPACE
```

To iterate on changes to the Dockerfile, rebuild and push the image, then delete the AWX Pod. A new Pod will respawn with the latest revision.

## Accessing AWX

To access via the web browser, use the following URL:
```
http://localhost:30080
```

To retrieve your admin password:
```
$ kubectl get secrets awx-admin-password -o json | jq '.data.password' | xargs | base64 -d
```

To tail logs from the task containers:
```
$ kubectl logs -f deployment/awx -n awx -c awx-task
```

To tail logs from the web containers:
```
$ kubectl logs -f deployment/awx -n awx -c awx-web
```

NOTE: If there are multiple replicas of the awx deployment, you can use `stern` to tail logs from all replicas. For more information about `stern`, check out https://github.com/wercker/stern.

To exec into an instance of the awx-task container:
```
$ kubectl exec -it deployment/awx -c awx-task bash
```

The application will live reload when files are edited, just like in the development environment. Just like in the development environment, if the application totally crashes because files have invalid syntax or another fatal problem, you will get an error like "no python application" in the web container. Delete the whole control plane pod and wait until a new one spins up automatically.
```
oc delete pod -l app.kubernetes.io/component=awx
```
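Equivalent to the `kubectl ... | base64 -d` pipeline above, for anyone scripting against the cluster (a sketch assuming the official `kubernetes` Python client and the `awx` namespace):

```python
import base64
from kubernetes import client, config

config.load_kube_config()  # uses your current kubeconfig context
secret = client.CoreV1Api().read_namespaced_secret('awx-admin-password', 'awx')
print(base64.b64decode(secret.data['password']).decode())  # secret data is base64-encoded
```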
(deleted file)

@@ -1,18 +0,0 @@
# Prometheus Container

## Development
AWX comes with an example Prometheus container and `make` target. To use it:

1. Edit `tools/prometheus/prometheus.yml` and update the `basic_auth` section
   to specify a valid user/password for an AWX user you've created.
   Alternatively, you can provide an OAuth2 token (which can be generated at
   `/api/v2/users/N/personal_tokens/`).

> Note: By default, the config assumes a user with username=admin and password=password.

2. Start the Prometheus container:
   `make prometheus`
3. The Prometheus UI will now be accessible at `http://localhost:9090/graph`.

There should be no extra setup needed. You can try executing this query in the
UI to get back the number of active sessions: `awx_sessions_total`
@@ -101,6 +101,7 @@ RUN dnf -y update && dnf install -y 'dnf-command(config-manager)' && \
     glibc-langpack-en \
     krb5-workstation \
     nginx \
+    "openldap >= 2.6.2-3" \
     postgresql \
     python3-devel \
     python3-libselinux \

@@ -117,10 +118,6 @@ RUN dnf -y update && dnf install -y 'dnf-command(config-manager)' && \
     xmlsec1-openssl && \
     dnf -y clean all

-RUN curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && \
-    chmod 700 get_helm.sh && \
-    ./get_helm.sh
-
 RUN pip3 install virtualenv supervisor dumb-init

 RUN rm -rf /root/.cache && rm -rf /tmp/*
@@ -243,7 +243,7 @@ $ make docker-compose
 - [Using Logstash](./docs/logstash.md)
 - [Start a Cluster](#start-a-cluster)
 - [Start with Minikube](#start-with-minikube)
-- [Keycloak Integration](#keycloak-integration)
+- [SAML and OIDC Integration](#saml-and-oidc-integration)
 - [OpenLDAP Integration](#openldap-integration)
 - [Splunk Integration](#splunk-integration)
@@ -317,8 +317,8 @@ If you want to clean all things once you are done, you can do:
 (host)$ make docker-compose-container-group-clean
 ```

-### Keycloak Integration
-Keycloak is a SAML provider and can be used to test AWX social auth. This section describes how to build a reference Keycloak instance and plumb it with AWX for testing purposes.
+### SAML and OIDC Integration
+Keycloak can be used as both a SAML and OIDC provider and can be used to test AWX social auth. This section describes how to build a reference Keycloak instance and plumb it with AWX for testing purposes.

 First, be sure that you have the awx.awx collection installed by running `make install_collection`.
 Next, make sure you have your containers running by running `make docker-compose`.
@@ -357,14 +357,16 @@ Go ahead and stop your existing docker-compose run and restart with Keycloak bef
 Once the containers come up, a new port (8443) should be exposed and the Keycloak interface should be running on that port. Connect to this through a URL like `https://localhost:8443` to confirm that Keycloak has started. If you want to log in and look at Keycloak itself, select the "Administration console" link and log into the UI with the username/password set in the previous `docker run` command. For more information about Keycloak and links to their documentation, see their project at https://github.com/keycloak/keycloak.

 Now we are ready to configure and plumb Keycloak with AWX. To do this we have provided a playbook which will:
-* Create a certificate for data exchange between Keycloak and AWX.
-* Create a realm in Keycloak with a client for AWX and 3 users.
-* Backup and configure the SAML adapter in AWX. NOTE: the private key of any existing SAML adapters can not be backed up through the API; you need a DB backup to recover this.
+* Create a certificate for SAML data exchange between Keycloak and AWX.
+* Create a realm in Keycloak with a client for AWX via SAML and OIDC and 3 users.
+* Backup and configure the SAML and OIDC adapters in AWX. NOTE: the private key of any existing SAML or OIDC adapters can not be backed up through the API; you need a DB backup to recover this.

 Before we can run the playbook we need to understand that SAML works by sending redirects between AWX and Keycloak through the browser. Because of this we have to tell both AWX and Keycloak how they will construct the redirect URLs. On the Keycloak side, this is done within the realm configuration; on the AWX side it's done through the SAML settings. The playbook requires a variable called `container_reference` to be set. The container_reference variable needs to be how your browser will be able to talk to the running containers. Here are some examples of how to choose a proper container_reference.
 * If you develop on a Mac which runs a Fedora VM, with AWX running within that VM and the browser you use to access AWX running on the Mac: the VM with the container has its own IP that is mapped to a name like `tower.home.net`. In this scenario your container_reference could be either the IP of the VM or the tower.home.net friendly name.
 * If you are on a Fedora workstation running AWX and also using a browser on your workstation, you could use localhost, your workstation's IP, or its hostname as the container_reference.

+In addition, OIDC works similarly but slightly differently. OIDC has browser redirection, but OIDC will also communicate from the AWX docker instance to the Keycloak docker instance directly. Any hostnames you might have are likely not propagated down into the AWX container, so we need a method for both the browser and the AWX container to talk to Keycloak. For this we will likely use your machine's IP address. This can be passed in as a variable called `oidc_reference`. If unset, this will default to container_reference, which may be viable for some configurations.
+
 In addition to container_reference, there are some additional variables which you can override if you need/choose to do so. Here are their names and default values:
```yaml
keycloak_user: admin
```
@@ -375,23 +377,27 @@ In addition to container_reference, there are some additional variables which yo
 * keycloak_(user|pass) need to change if you modified the user when starting the initial container above.
 * cert_subject will be the subject line of the certificate shared between AWX and Keycloak; you can change this if you like or just use the defaults.

-To override any of the variables above you can add more `-e` arguments to the playbook run below. For example, if you simply need to change the `keycloak_pass`, add the argument `-r keycloak_pass=my_secret_pass` to the next command.
+To override any of the variables above you can add more `-e` arguments to the playbook run below. For example, if you simply need to change the `keycloak_pass`, add the argument `-e keycloak_pass=my_secret_pass` to the following ansible-playbook command.

-In addition, you may need to override the username or password to get into your AWX instance. We log into AWX in order to read and write the SAML settings. This can be done in several ways because we are using the awx.awx collection. The easiest way is to set environment variables such as `CONTROLLER_USERNAME`. See the awx.awx documentation for more information on setting environment variables. In the example provided below we are showing an example of specifying a username/password for authentication.
+In addition, you may need to override the username or password to get into your AWX instance. We log into AWX in order to read and write the SAML and OIDC settings. This can be done in several ways because we are using the awx.awx collection. The easiest way is to set environment variables such as `CONTROLLER_USERNAME`. See the awx.awx documentation for more information on setting environment variables. In the example provided below we are showing an example of specifying a username/password for authentication.

 Now that we have all of our variables covered we can run the playbook like:
 ```bash
 export CONTROLLER_USERNAME=<your username>
 export CONTROLLER_PASSWORD=<your password>
-ansible-playbook tools/docker-compose/ansible/plumb_keycloak.yml -e container_reference=<your container_reference here>
+ansible-playbook tools/docker-compose/ansible/plumb_keycloak.yml -e container_reference=<your container_reference here> -e oidc_reference=<your oidc reference>
 ```

-Once the playbook is done running, SAML should now be set up in your development environment. This realm has three users with the following username/passwords:
+Once the playbook is done running, both SAML and OIDC should now be set up in your development environment. This realm has three users with the following username/passwords:
 1. awx_unpriv:unpriv123
 2. awx_admin:admin123
 3. awx_auditor:audit123

-The first account is a normal user. The second account has the attribute is_superuser set in Keycloak so will be a super user in AWX. The third account has the is_system_auditor attribute in Keycloak so it will be a system auditor in AWX. To log in with one of these Keycloak users go to the AWX login screen and click the small "Sign In With SAML Keycloak" button at the bottom of the login box.
+The first account is a normal user. The second account has the SAML attribute is_superuser set in Keycloak, so it will be a super user in AWX if logged in through SAML. The third account has the SAML is_system_auditor attribute in Keycloak, so it will be a system auditor in AWX if logged in through SAML. To log in with one of these Keycloak users go to the AWX login screen and click the small "Sign In With SAML Keycloak" button at the bottom of the login box.

+Note: The OIDC adapter performs authentication only, not authorization. So any user created in AWX will not have any permissions on it at all.
+
+If your Keycloak configuration is not working and you need to rerun the playbook to try a different `container_reference` or `oidc_reference`, you can log into the Keycloak admin console on port 8443 and select the AWX realm in the upper left drop down. Then make sure you are on "Realm Settings" in the Configure menu option and click the trash can next to AWX in the main page window pane. This will completely remove the AWX realm (which has both SAML and OIDC settings), enabling you to re-run the plumb playbook.

 ### OpenLDAP Integration
@@ -463,28 +469,14 @@ Once the playbook is done running Splunk should now be setup in your development

 ### Prometheus and Grafana integration

-Prometheus is a metrics collecting tool, and we support prometheus formatted data at the `api/v2/metrics` endpoint.
-
-Before you run anything, you should perform this basic setup:
-
-1. Copy the prometheus configuration:
-
-```
-cp tools/prometheus/prometheus.yml.example tools/prometheus/prometheus.yml
-```
-
-Set the `username` and `password` in that file to your AWX user. You can also change the scrape interval.
-
-2. (optional) if you are in a clustered environment, you can change the target to `haproxy:8043` so that the incoming prometheus requests go through the load balancer. Leaving it set to `awx1` also works.
-
-You can use this as part of the docker-compose target:
-
-```
-PROMETHEUS=true GRAFANA=true make docker-compose
-```
+Prometheus is a metrics collecting tool, and we support prometheus formatted data at the `api/v2/metrics` endpoint. To run the development environment (see [docs](https://github.com/ansible/awx/blob/devel/tools/docker-compose/README.md)) with Prometheus and Grafana enabled, set the following variables:

+```
+$ PROMETHEUS=yes GRAFANA=yes make docker-compose
+```

-3. navigate to `http://localhost:9090/targets` and check that the metrics endpoint State is Up.
-4. Click the Graph tab, start typing a metric name, or use the Open metrics explorer button to find a metric to display (next to `Execute` button)
-5. Navigate to `http://localhost:3001`. Sign in, using `admin` for both username and password.
-6. In the left navigation menu go to Dashboards->Browse, find the "awx-demo" and click. These should have graphs.
-6. Now you can modify these and add panels for whichever metrics you like.
+1. Navigate to `http://localhost:9090/targets` and check that the metrics endpoint State is Up.
+2. Click the Graph tab, start typing a metric name, or use the Open metrics explorer button to find a metric to display (next to the `Execute` button).
+3. Navigate to `http://localhost:3001`. Sign in, using `admin` for both username and password.
+4. In the left navigation menu go to Dashboards->Browse, find the "awx-demo" dashboard and click it. These should have graphs.
+5. Now you can modify these and add panels for whichever metrics you like.
@@ -24,6 +24,8 @@
     public_key_trimmed: "{{ public_key_content | regex_replace('-----BEGIN CERTIFICATE-----\\\\n', '') | regex_replace('\\\\n-----END CERTIFICATE-----', '') }}"
     existing_saml: "{{ lookup('awx.awx.controller_api', 'settings/saml', host=awx_host, verify_ssl=false) }}"
     new_saml: "{{ lookup('template', 'saml_settings.json.j2') }}"
+    existing_oidc: "{{ lookup('awx.awx.controller_api', 'settings/oidc', host=awx_host, verify_ssl=false) }}"
+    new_oidc: "{{ lookup('template', 'oidc_settings.json.j2') }}"
   vars:
     # We add the extra \\ in here so that when jinja is templating out the files we end up with \n in the strings.
     public_key_content: "{{ lookup('file', public_key_file) | regex_replace('\n', '\\\\n') }}"
@@ -34,14 +36,21 @@
       msg:
         - "Here is your existing SAML configuration for reference:"
         - "{{ existing_saml }}"
+        - "Here is your existing OIDC configuration for reference:"
+        - "{{ existing_oidc }}"

   - pause:
-      prompt: "Continuing to run this will replace your existing saml settings (displayed above). They will all be captured except for your private key. Be sure that is backed up before continuing"
+      prompt: "Continuing to run this will replace your existing saml and OIDC settings (displayed above). They will all be captured except for your private key. Be sure that is backed up before continuing"

   - name: Write out the existing content
     copy:
-      dest: "../_sources/existing_saml_adapter_settings.json"
-      content: "{{ existing_saml }}"
+      dest: "../_sources/{{ item.filename }}"
+      content: "{{ item.content }}"
+    loop:
+      - filename: "existing_saml_adapter_settings.json"
+        content: "{{ existing_saml }}"
+      - filename: "existing_oidc_adapter_settings.json"
+        content: "{{ existing_oidc }}"

   - name: Configure AWX SAML adapter
     awx.awx.settings:
@@ -49,6 +58,12 @@
       controller_host: "{{ awx_host }}"
       validate_certs: False

+  - name: Configure AWX OIDC adapter
+    awx.awx.settings:
+      settings: "{{ new_oidc }}"
+      controller_host: "{{ awx_host }}"
+      validate_certs: False
+
   - name: Get a keycloak token
     uri:
       url: "https://localhost:8443/auth/realms/master/protocol/openid-connect/token"
@@ -18,6 +18,7 @@
       - pg_password
       - secret_key
      - broadcast_websocket_secret
+      - admin_password

 - name: Generate secrets if needed
   template:

@@ -121,3 +122,9 @@
     mode: '0600'
   with_sequence: start=1 end={{ execution_node_count if execution_node_count | int > 0 else 1}}
   when: execution_node_count | int > 0
+
+- name: Render prometheus config
+  template:
+    src: "prometheus.yml.j2"
+    dest: "{{ sources_dest }}/prometheus.yml"
+  when: enable_prometheus|bool
@@ -1,3 +1,4 @@
+#jinja2: lstrip_blocks: True
 ---
 version: '2.1'
 services:

@@ -22,6 +23,7 @@ services:
       CONTROL_PLANE_NODE_COUNT: {{ control_plane_node_count|int }}
       EXECUTION_NODE_COUNT: {{ execution_node_count|int }}
       AWX_LOGGING_MODE: stdout
+      DJANGO_SUPERUSER_PASSWORD: {{ admin_password }}
 {% if loop.index == 1 %}
       RUN_MIGRATIONS: 1
 {% endif %}

@@ -141,28 +143,27 @@ services:
   prometheus:
     image: prom/prometheus:latest
     container_name: tools_prometheus_1
-    hostname: splunk
+    hostname: prometheus
     ports:
       - "9090:9090"
     volumes:
-      - "../../prometheus:/etc/prometheus"
+      - "../../docker-compose/_sources/prometheus.yml:/etc/prometheus/prometheus.yml"
       - "prometheus_storage:/prometheus:rw"
     links:
-      - awx_1:awx1
+{% for i in range(control_plane_node_count|int) %}
+      - awx_{{ loop.index }}:awx{{ loop.index }} # because underscores are not valid in hostnames
+{% endfor %}
 {% endif %}
 {% if enable_grafana|bool %}
   grafana:
     image: grafana/grafana-enterprise:latest
     container_name: tools_grafana_1
-    hostname: splunk
+    hostname: grafana
     ports:
       - "3001:3000"
     volumes:
       - "../../grafana:/etc/grafana/provisioning"
       - "grafana_storage:/var/lib/grafana:rw"
-    environment:
-      SPLUNK_START_ARGS: --accept-license
-      SPLUNK_PASSWORD: splunk_admin
     links:
       - prometheus
     depends_on:

@@ -199,7 +200,7 @@ services:
       - "5555:5555"
     volumes:
       - "../../docker-compose/_sources/receptor/receptor-hop.conf:/etc/receptor/receptor.conf"
-{% for i in range(execution_node_count|int) -%}
+{% for i in range(execution_node_count|int) %}
   receptor-{{ loop.index }}:
     image: "{{ awx_image }}:{{ awx_image_tag }}"
     user: "{{ ansible_user_uid }}"
@@ -1,3 +1,4 @@
+#jinja2: lstrip_blocks: True
 ---
 global:
   scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.

@@ -5,14 +6,15 @@ global:
 scrape_configs:
   - job_name: 'awx'
     static_configs:
-      - targets: ['awx1:8043'] # or haproxy:8043 in cluster env
-    tls_config:
-      insecure_skip_verify: true
+      - targets:
+          # metrics are broadcast to all nodes in the cluster,
+          # so no need to track nodes individually.
+          - awx1:8013
     metrics_path: /api/v2/metrics
     scrape_interval: 5s
-    scheme: https
+    scheme: http
     params:
       format: ['txt']
     basic_auth:
-      username: admin # change this
-      password: password # change this
+      username: admin
+      password: {{ admin_password }}
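To sanity-check what Prometheus will scrape with the rendered config above, you can hit the metrics endpoint yourself (a sketch assuming the `requests` package and the dev environment's exposed HTTP port):

```python
import requests

resp = requests.get(
    'http://localhost:8013/api/v2/metrics/',  # same scheme and port the template now targets
    params={'format': 'txt'},
    auth=('admin', 'your-admin-password'),    # matches the basic_auth block rendered above
)
resp.raise_for_status()
print(resp.text[:500])  # Prometheus text exposition format
```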
@@ -2,7 +2,7 @@
 This template is an export from Keycloak.
 See https://github.com/keycloak/keycloak-documentation/blob/main/server_admin/topics/export-import.adoc for instructions on how to run the export.
 Once you have the export you want to variablize the public cert, private cert, and the endpoints.
-The endpoints should be replaced with the variable {{ container_reference }}
+The endpoints should be replaced with either the variable {{ container_reference }} or {{ oidc_reference }}
 Some of the keys have \n's in there and some references do not.
 The ones with the \n can be variablized by {{ private_key }} and {{ public_key }}.
 The public key in the setting `saml.signing.certificate` should be replaced with {{ public_key_trimmed }}
@@ -65,7 +65,8 @@
       "composite": true,
       "composites": {
         "realm": [
-          "offline_access"
+          "offline_access",
+          "uma_authorization"
         ],
         "client": {
           "account": [

@@ -75,12 +76,31 @@
         }
       },
       "clientRole": false,
-      "containerId": "Tower Realm",
+      "containerId": "AWX Realm",
       "attributes": {}
     },
+    {
+      "id": "ea2c2864-93b0-4022-9ef1-202bc2f9c87a",
+      "name": "uma_authorization",
+      "description": "${role_uma_authorization}",
+      "composite": false,
+      "clientRole": false,
+      "containerId": "AWX Realm",
+      "attributes": {}
+    },
+    {
+      "id": "3764c3ca-d706-424e-8802-65be0d2e060d",
+      "name": "offline_access",
+      "description": "${role_offline-access}",
+      "composite": false,
+      "clientRole": false,
+      "containerId": "AWX Realm",
+      "attributes": {}
+    }
   ],
   "client": {
-    "{{ container_reference }}:8043": []
+    "{{ container_reference }}:8043": [],
+    "awx_oidc_client": []
   }
 },
 "groups": [],
@@ -90,7 +110,7 @@
       "description": "${role_default-roles}",
       "composite": true,
       "clientRole": false,
-      "containerId": "Tower Realm"
+      "containerId": "AWX Realm"
     },
     "requiredCredentials": [
       "password"
@@ -290,6 +310,88 @@
       "role_list"
     ],
     "optionalClientScopes": []
   },
+  {
+    "id": "525e0eeb-56ee-429f-a040-c6fc18072dc4",
+    "clientId": "awx_oidc_client",
+    "baseUrl": "",
+    "surrogateAuthRequired": false,
+    "enabled": true,
+    "alwaysDisplayInConsole": false,
+    "clientAuthenticatorType": "client-secret",
+    "secret": "7b1c3527-8702-4742-af69-2b74ee5742e8",
+    "redirectUris": [
+{% if oidc_reference is defined %}
+      "https://{{ oidc_reference }}:8043/sso/complete/oidc/",
+{% endif %}
+      "https://{{ container_reference }}:8043/sso/complete/oidc/"
+    ],
+    "webOrigins": [],
+    "notBefore": 0,
+    "bearerOnly": false,
+    "consentRequired": false,
+    "standardFlowEnabled": true,
+    "implicitFlowEnabled": false,
+    "directAccessGrantsEnabled": true,
+    "serviceAccountsEnabled": false,
+    "publicClient": false,
+    "frontchannelLogout": false,
+    "protocol": "openid-connect",
+    "attributes": {
+      "id.token.as.detached.signature": "false",
+      "saml.assertion.signature": "false",
+      "saml.force.post.binding": "false",
+      "saml.multivalued.roles": "false",
+      "saml.encrypt": "false",
+      "oauth2.device.authorization.grant.enabled": "false",
+      "backchannel.logout.revoke.offline.tokens": "false",
+      "saml.server.signature": "false",
+      "saml.server.signature.keyinfo.ext": "false",
+      "use.refresh.tokens": "true",
+      "exclude.session.state.from.auth.response": "false",
+      "oidc.ciba.grant.enabled": "false",
+      "saml.artifact.binding": "false",
+      "backchannel.logout.session.required": "true",
+      "client_credentials.use_refresh_token": "false",
+      "saml_force_name_id_format": "false",
+      "require.pushed.authorization.requests": "false",
+      "saml.client.signature": "false",
+      "tls.client.certificate.bound.access.tokens": "false",
+      "saml.authnstatement": "false",
+      "display.on.consent.screen": "false",
+      "saml.onetimeuse.condition": "false"
+    },
+    "authenticationFlowBindingOverrides": {},
+    "fullScopeAllowed": true,
+    "nodeReRegistrationTimeout": -1,
+    "protocolMappers": [
+      {
+        "id": "a8f4a0a8-ece4-4a9d-9e7b-830f23ba0067",
+        "name": "AWX OIDC Group Membership",
+        "protocol": "openid-connect",
+        "protocolMapper": "oidc-group-membership-mapper",
+        "consentRequired": false,
+        "config": {
+          "full.path": "false",
+          "id.token.claim": "true",
+          "access.token.claim": "true",
+          "claim.name": "Group",
+          "userinfo.token.claim": "true"
+        }
+      }
+    ],
+    "defaultClientScopes": [
+      "web-origins",
+      "profile",
+      "roles",
+      "email"
+    ],
+    "optionalClientScopes": [
+      "address",
+      "phone",
+      "offline_access",
+      "microprofile-jwt"
+    ]
+  }
 ],
 "clientScopes": [
@@ -626,6 +728,7 @@
       "consentRequired": false,
       "config": {
+        "multivalued": "true",
         "userinfo.token.claim": "true",
         "user.attribute": "foo",
         "id.token.claim": "true",
         "access.token.claim": "true",
@@ -1686,7 +1789,7 @@
       "clientOfflineSessionIdleTimeout": "0",
       "cibaInterval": "5"
     },
-    "keycloakVersion": "15.0.2.redhat-00001",
+    "keycloakVersion": "15.0.2",
     "userManagedAccessAllowed": false,
     "clientProfiles": {
       "profiles": []
(new file)

@@ -0,0 +1,6 @@
{
  "SOCIAL_AUTH_OIDC_KEY": "awx_oidc_client",
  "SOCIAL_AUTH_OIDC_SECRET": "7b1c3527-8702-4742-af69-2b74ee5742e8",
  "SOCIAL_AUTH_OIDC_OIDC_ENDPOINT": "https://{{ oidc_reference | default(container_reference) }}:8443/auth/realms/awx",
  "SOCIAL_AUTH_OIDC_VERIFY_SSL": "False"
}
@@ -21,10 +21,9 @@ fi

 if output=$(awx-manage createsuperuser --noinput --username=admin --email=admin@localhost 2> /dev/null); then
     echo $output
-    admin_password=$(openssl rand -base64 12)
-    echo "Admin password: ${admin_password}"
-    awx-manage update_password --username=admin --password=${admin_password}
 fi
+echo "Admin password: ${DJANGO_SUPERUSER_PASSWORD}"

 awx-manage create_preload_data
 awx-manage register_default_execution_environments
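Taken together, the admin password now flows end to end: `ADMIN_PASSWORD` on the make command line becomes `-e admin_password=...` for the sources playbook, which renders it into the compose file as `DJANGO_SUPERUSER_PASSWORD`; Django's `createsuperuser --noinput` reads that environment variable natively, so the launch script only needs to echo the password instead of generating a random one.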
@@ -85,10 +85,96 @@
       },
       "gridPos": {
         "h": 8,
-        "w": 12,
+        "w": 24,
         "x": 0,
         "y": 0
       },
       "id": 8,
       "options": {
         "legend": {
           "calcs": [],
           "displayMode": "list",
           "placement": "bottom"
         },
         "tooltip": {
           "mode": "single",
           "sort": "none"
         }
       },
       "targets": [
         {
           "datasource": {
             "type": "prometheus",
             "uid": "PBFA97CFB590B2093"
           },
           "expr": "awx_status_total",
           "refId": "A"
         }
       ],
       "title": "job status",
       "type": "timeseries"
     },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "PBFA97CFB590B2093"
+      },
+      "fieldConfig": {
+        "defaults": {
+          "color": {
+            "mode": "palette-classic"
+          },
+          "custom": {
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "auto",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
+          "mappings": [],
+          "thresholds": {
+            "mode": "absolute",
+            "steps": [
+              {
+                "color": "green",
+                "value": null
+              },
+              {
+                "color": "red",
+                "value": 80
+              }
+            ]
+          }
+        },
+        "overrides": []
+      },
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 0,
+        "y": 8
+      },
+      "id": 12,
+      "options": {
+        "legend": {
@@ -199,7 +285,7 @@
       "gridPos": {
         "h": 8,
         "w": 12,
-        "x": 0,
+        "x": 12,
         "y": 8
       },
       "id": 10,
@@ -458,8 +544,8 @@
       "gridPos": {
         "h": 8,
         "w": 12,
-        "x": 0,
-        "y": 24
+        "x": 12,
+        "y": 16
       },
       "id": 18,
       "options": {
@@ -556,9 +642,9 @@
         "h": 8,
         "w": 12,
         "x": 0,
-        "y": 32
+        "y": 24
       },
-      "id": 8,
+      "id": 14,
       "options": {
         "legend": {
           "calcs": [],
@@ -576,11 +662,14 @@
           "type": "prometheus",
           "uid": "PBFA97CFB590B2093"
         },
-          "expr": "awx_status_total",
+          "editorMode": "builder",
+          "expr": "awx_database_connections_total",
+          "legendFormat": "__auto",
+          "range": true,
           "refId": "A"
         }
       ],
-      "title": "job status",
+      "title": "Database",
       "type": "timeseries"
     },
     {
@@ -641,10 +730,10 @@
       "gridPos": {
         "h": 8,
         "w": 12,
-        "x": 0,
-        "y": 40
+        "x": 12,
+        "y": 24
       },
-      "id": 14,
+      "id": 20,
       "options": {
         "legend": {
           "calcs": [],
@@ -663,13 +752,13 @@
           "uid": "PBFA97CFB590B2093"
         },
           "editorMode": "builder",
-          "expr": "awx_database_connections_total",
+          "expr": "awx_instance_consumed_capacity",
           "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
       ],
-      "title": "Database",
+      "title": "Consumed Instance Capacity",
       "type": "timeseries"
     }
   ],
@@ -688,6 +777,6 @@
   "timezone": "",
   "title": "awx-demo",
   "uid": "GISWZOXnk",
-  "version": 2,
+  "version": 4,
   "weekStart": ""
 }