Compare commits

..

13 Commits

Author SHA1 Message Date
Cesar Francisco San Nicolas Martinez
c437a37be7 Merge pull request #13171 from infamousjoeg/cyberark-ccp-branding-webserviceid
Cyberark ccp branding webserviceid
2022-11-08 18:42:27 +01:00
Cesar Francisco San Nicolas Martinez
b59cee97d8 Merge branch 'test_cyberark' into cyberark-ccp-branding-webserviceid 2022-11-08 18:42:10 +01:00
Cesar Francisco San Nicolas Martinez
9ca554ce75 Merge pull request #13172 from infamousjoeg/fix-12846-conjur-versioning
Add proper declaration of secret version if present
2022-11-08 18:41:20 +01:00
Cesar Francisco San Nicolas Martinez
81e20c727d Merge pull request #13170 from infamousjoeg/conjur-cloud-and-branding
Conjur cloud and branding
2022-11-08 18:40:46 +01:00
Joe Garcia
f3482f4038 Removed final reference to CyberArk Conjur Secret Lookup 2022-10-28 12:42:02 -04:00
Joe Garcia
878035c13b Fixed webservice_id check to string 2022-10-26 12:45:59 -04:00
Joe Garcia
2cc971a43f default to AIMWebService if no val provided 2022-10-26 12:41:15 -04:00
Joe Garcia
9d77c54612 Remove references to AIM everywhere 2022-10-26 12:32:12 -04:00
Joe Garcia
546fabbb97 Update references across the board 2022-10-26 12:28:50 -04:00
Joe Garcia
ef651a3a21 Add Web Service ID & update branding 2022-10-26 11:54:09 -04:00
Joe Garcia
68862d5085 rm base64 import to pass lint 2022-10-26 11:14:14 -04:00
Joe Garcia
66c7d5e9be Fixes #13119 #13120 Cloud support & update brand 2022-10-26 10:25:19 -04:00
Joe Garcia
4a7335676d Add proper declaration of secret version if present 2022-10-17 14:43:16 -05:00
38 changed files with 294 additions and 526 deletions

158
Makefile
View File

@@ -54,6 +54,47 @@ I18N_FLAG_FILE = .i18n_built
VERSION PYTHON_VERSION docker-compose-sources \ VERSION PYTHON_VERSION docker-compose-sources \
.git/hooks/pre-commit .git/hooks/pre-commit
clean-tmp:
rm -rf tmp/
clean-venv:
rm -rf venv/
clean-dist:
rm -rf dist
clean-schema:
rm -rf swagger.json
rm -rf schema.json
rm -rf reference-schema.json
clean-languages:
rm -f $(I18N_FLAG_FILE)
find ./awx/locale/ -type f -regex ".*\.mo$" -delete
## Remove temporary build files, compiled Python files.
clean: clean-ui clean-api clean-awxkit clean-dist
rm -rf awx/public
rm -rf awx/lib/site-packages
rm -rf awx/job_status
rm -rf awx/job_output
rm -rf reports
rm -rf tmp
rm -rf $(I18N_FLAG_FILE)
mkdir tmp
clean-api:
rm -rf build $(NAME)-$(VERSION) *.egg-info
rm -rf .tox
find . -type f -regex ".*\.py[co]$$" -delete
find . -type d -name "__pycache__" -delete
rm -f awx/awx_test.sqlite3*
rm -rf requirements/vendor
rm -rf awx/projects
clean-awxkit:
rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*
## convenience target to assert environment variables are defined ## convenience target to assert environment variables are defined
guard-%: guard-%:
@if [ "$${$*}" = "" ]; then \ @if [ "$${$*}" = "" ]; then \
@@ -77,7 +118,7 @@ virtualenv_awx:
fi; \ fi; \
fi fi
## Install third-party requirements needed for AWX's environment. ## Install third-party requirements needed for AWX's environment.
# this does not use system site packages intentionally # this does not use system site packages intentionally
requirements_awx: virtualenv_awx requirements_awx: virtualenv_awx
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \ if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
@@ -331,6 +372,15 @@ bulk_data:
UI_BUILD_FLAG_FILE = awx/ui/.ui-built UI_BUILD_FLAG_FILE = awx/ui/.ui-built
clean-ui:
rm -rf node_modules
rm -rf awx/ui/node_modules
rm -rf awx/ui/build
rm -rf awx/ui/src/locales/_build
rm -rf $(UI_BUILD_FLAG_FILE)
# the collectstatic command doesn't like it if this dir doesn't exist.
mkdir -p awx/ui/build/static
awx/ui/node_modules: awx/ui/node_modules:
NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn --force ci NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn --force ci
@@ -402,7 +452,7 @@ awx/projects:
COMPOSE_UP_OPTS ?= COMPOSE_UP_OPTS ?=
COMPOSE_OPTS ?= COMPOSE_OPTS ?=
CONTROL_PLANE_NODE_COUNT ?= 1 CONTROL_PLANE_NODE_COUNT ?= 1
EXECUTION_NODE_COUNT ?= 0 EXECUTION_NODE_COUNT ?= 2
MINIKUBE_CONTAINER_GROUP ?= false MINIKUBE_CONTAINER_GROUP ?= false
MINIKUBE_SETUP ?= false # if false, run minikube separately MINIKUBE_SETUP ?= false # if false, run minikube separately
EXTRA_SOURCES_ANSIBLE_OPTS ?= EXTRA_SOURCES_ANSIBLE_OPTS ?=
@@ -453,6 +503,15 @@ detect-schema-change: genschema
# Ignore differences in whitespace with -b # Ignore differences in whitespace with -b
diff -u -b reference-schema.json schema.json diff -u -b reference-schema.json schema.json
docker-compose-clean: awx/projects
docker-compose -f tools/docker-compose/_sources/docker-compose.yml rm -sf
docker-compose-container-group-clean:
@if [ -f "tools/docker-compose-minikube/_sources/minikube" ]; then \
tools/docker-compose-minikube/_sources/minikube delete; \
fi
rm -rf tools/docker-compose-minikube/_sources/
## Base development image build ## Base development image build
docker-compose-build: docker-compose-build:
ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE) ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
@@ -460,6 +519,15 @@ docker-compose-build:
--build-arg BUILDKIT_INLINE_CACHE=1 \ --build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) . --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
docker-clean:
$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
if [ "$(shell docker images | grep awx_devel)" ]; then \
docker images | grep awx_devel | awk '{print $$3}' | xargs docker rmi --force; \
fi
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker-refresh: docker-clean docker-compose docker-refresh: docker-clean docker-compose
## Docker Development Environment with Elastic Stack Connected ## Docker Development Environment with Elastic Stack Connected
@@ -472,6 +540,14 @@ docker-compose-cluster-elk: awx/projects docker-compose-sources
docker-compose-container-group: docker-compose-container-group:
MINIKUBE_CONTAINER_GROUP=true make docker-compose MINIKUBE_CONTAINER_GROUP=true make docker-compose
clean-elk:
docker stop tools_kibana_1
docker stop tools_logstash_1
docker stop tools_elasticsearch_1
docker rm tools_logstash_1
docker rm tools_elasticsearch_1
docker rm tools_kibana_1
psql-container: psql-container:
docker run -it --net tools_default --rm postgres:12 sh -c 'exec psql -h "postgres" -p "5432" -U postgres' docker run -it --net tools_default --rm postgres:12 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
@@ -528,84 +604,6 @@ messages:
print-%: print-%:
@echo $($*) @echo $($*)
# Cleaning
# --------------------------------------
## Remove temporary build files, compiled Python files.
clean: clean-ui clean-api clean-awxkit clean-dist
rm -rf awx/public
rm -rf awx/lib/site-packages
rm -rf awx/job_status
rm -rf awx/job_output
rm -rf reports
rm -rf tmp
rm -rf $(I18N_FLAG_FILE)
mkdir tmp
clean-elk:
docker stop tools_kibana_1
docker stop tools_logstash_1
docker stop tools_elasticsearch_1
docker rm tools_logstash_1
docker rm tools_elasticsearch_1
docker rm tools_kibana_1
clean-ui:
rm -rf node_modules
rm -rf awx/ui/node_modules
rm -rf awx/ui/build
rm -rf awx/ui/src/locales/_build
rm -rf $(UI_BUILD_FLAG_FILE)
# the collectstatic command doesn't like it if this dir doesn't exist.
mkdir -p awx/ui/build/static
clean-tmp:
rm -rf tmp/
clean-venv:
rm -rf venv/
clean-dist:
rm -rf dist
clean-schema:
rm -rf swagger.json
rm -rf schema.json
rm -rf reference-schema.json
clean-languages:
rm -f $(I18N_FLAG_FILE)
find ./awx/locale/ -type f -regex ".*\.mo$" -delete
clean-api:
rm -rf build $(NAME)-$(VERSION) *.egg-info
rm -rf .tox
find . -type f -regex ".*\.py[co]$$" -delete
find . -type d -name "__pycache__" -delete
rm -f awx/awx_test.sqlite3*
rm -rf requirements/vendor
rm -rf awx/projects
clean-awxkit:
rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*
docker-compose-clean: awx/projects
docker-compose -f tools/docker-compose/_sources/docker-compose.yml rm -sf
docker-compose-container-group-clean:
@if [ -f "tools/docker-compose-minikube/_sources/minikube" ]; then \
tools/docker-compose-minikube/_sources/minikube delete; \
fi
rm -rf tools/docker-compose-minikube/_sources/
docker-clean:
$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
if [ "$(shell docker images | grep awx_devel)" ]; then \
docker images | grep awx_devel | awk '{print $$3}' | xargs docker rmi --force; \
fi
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
# HELP related targets # HELP related targets
# -------------------------------------- # --------------------------------------

View File

@@ -7,7 +7,7 @@ receptor_work_commands:
command: ansible-runner command: ansible-runner
params: worker params: worker
allowruntimeparams: true allowruntimeparams: true
verifysignature: {{ sign_work }} verifysignature: true
custom_worksign_public_keyfile: receptor/work-public-key.pem custom_worksign_public_keyfile: receptor/work-public-key.pem
custom_tls_certfile: receptor/tls/receptor.crt custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key custom_tls_keyfile: receptor/tls/receptor.key

View File

@@ -5,7 +5,6 @@
import dateutil import dateutil
import functools import functools
import html import html
import itertools
import logging import logging
import re import re
import requests import requests
@@ -21,10 +20,9 @@ from urllib3.exceptions import ConnectTimeoutError
# Django # Django
from django.conf import settings from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.db.models import Q, Sum, Count from django.db.models import Q, Sum
from django.db import IntegrityError, ProgrammingError, transaction, connection from django.db import IntegrityError, ProgrammingError, transaction, connection
from django.db.models.fields.related import ManyToManyField, ForeignKey from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.db.models.functions import Trunc
from django.shortcuts import get_object_or_404 from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe from django.utils.safestring import mark_safe
from django.utils.timezone import now from django.utils.timezone import now
@@ -49,6 +47,9 @@ from rest_framework import status
from rest_framework_yaml.parsers import YAMLParser from rest_framework_yaml.parsers import YAMLParser
from rest_framework_yaml.renderers import YAMLRenderer from rest_framework_yaml.renderers import YAMLRenderer
# QSStats
import qsstats
# ANSIConv # ANSIConv
import ansiconv import ansiconv
@@ -282,50 +283,30 @@ class DashboardJobsGraphView(APIView):
 success_query = success_query.filter(instance_of=models.ProjectUpdate)
 failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
-end = now()
-interval = 'day'
+success_qss = qsstats.QuerySetStats(success_query, 'finished')
+failed_qss = qsstats.QuerySetStats(failed_query, 'finished')
+start_date = now()
 if period == 'month':
-    start = end - dateutil.relativedelta.relativedelta(months=1)
+    end_date = start_date - dateutil.relativedelta.relativedelta(months=1)
+    interval = 'days'
 elif period == 'two_weeks':
-    start = end - dateutil.relativedelta.relativedelta(weeks=2)
+    end_date = start_date - dateutil.relativedelta.relativedelta(weeks=2)
+    interval = 'days'
 elif period == 'week':
-    start = end - dateutil.relativedelta.relativedelta(weeks=1)
+    end_date = start_date - dateutil.relativedelta.relativedelta(weeks=1)
+    interval = 'days'
 elif period == 'day':
-    start = end - dateutil.relativedelta.relativedelta(days=1)
-    interval = 'hour'
+    end_date = start_date - dateutil.relativedelta.relativedelta(days=1)
+    interval = 'hours'
 else:
     return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
 dashboard_data = {"jobs": {"successful": [], "failed": []}}
-succ_list = dashboard_data['jobs']['successful']
-fail_list = dashboard_data['jobs']['failed']
-qs_s = (
-    success_query.filter(finished__range=(start, end))
-    .annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
-    .order_by()
-    .values('d')
-    .annotate(agg=Count('id', distinct=True))
-)
-data_s = {item['d']: item['agg'] for item in qs_s}
-qs_f = (
-    failed_query.filter(finished__range=(start, end))
-    .annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
-    .order_by()
-    .values('d')
-    .annotate(agg=Count('id', distinct=True))
-)
-data_f = {item['d']: item['agg'] for item in qs_f}
-start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
-for d in itertools.count():
-    date = start_date + dateutil.relativedelta.relativedelta(days=d)
-    if date > end:
-        break
-    succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
-    fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
+for element in success_qss.time_series(end_date, start_date, interval=interval):
+    dashboard_data['jobs']['successful'].append([time.mktime(element[0].timetuple()), element[1]])
+for element in failed_qss.time_series(end_date, start_date, interval=interval):
+    dashboard_data['jobs']['failed'].append([time.mktime(element[0].timetuple()), element[1]])
 return Response(dashboard_data)
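The removed (left-hand) side of this hunk buckets finished jobs with Django's Trunc/Count aggregation and then fills empty days in Python, while the incoming (right-hand) side hands bucketing back to django-qsstats-magic. As a minimal sketch of the gap-filling idea only, here is a pure-Python version on plain datetimes; the function name daily_series and the job_finish_times input are illustrative, not part of the diff:

import time
from collections import Counter
from datetime import datetime, timedelta

def daily_series(job_finish_times, start, end):
    """Bucket finish timestamps per day and fill missing days with zero counts."""
    counts = Counter(ts.date() for ts in job_finish_times if start <= ts <= end)
    series = []
    day = start.date()
    while day <= end.date():
        point = datetime(day.year, day.month, day.day)
        series.append([time.mktime(point.timetuple()), counts.get(day, 0)])
        day += timedelta(days=1)
    return series

if __name__ == '__main__':
    end = datetime.now()
    start = end - timedelta(weeks=1)
    finished = [end - timedelta(days=2), end - timedelta(days=2), end - timedelta(hours=3)]
    print(daily_series(finished, start, end))  # one [epoch, count] pair per day, zeros included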

View File

@@ -238,9 +238,7 @@ def instance_info(since, include_hostnames=False, **kwargs):
info = {} info = {}
# Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance # Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
active_tasks = models.UnifiedJob.objects.filter(status__in=['running', 'waiting']).only('task_impact', 'controller_node', 'execution_node') active_tasks = models.UnifiedJob.objects.filter(status__in=['running', 'waiting']).only('task_impact', 'controller_node', 'execution_node')
tm_instances = TaskManagerInstances( tm_instances = TaskManagerInstances(active_tasks, instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
active_tasks, instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled', 'node_type']
)
for tm_instance in tm_instances.instances_by_hostname.values(): for tm_instance in tm_instances.instances_by_hostname.values():
instance = tm_instance.obj instance = tm_instance.obj
instance_info = { instance_info = {
@@ -253,7 +251,6 @@ def instance_info(since, include_hostnames=False, **kwargs):
'enabled': instance.enabled, 'enabled': instance.enabled,
'consumed_capacity': tm_instance.consumed_capacity, 'consumed_capacity': tm_instance.consumed_capacity,
'remaining_capacity': instance.capacity - tm_instance.consumed_capacity, 'remaining_capacity': instance.capacity - tm_instance.consumed_capacity,
'node_type': instance.node_type,
} }
if include_hostnames is True: if include_hostnames is True:
instance_info['hostname'] = instance.hostname instance_info['hostname'] = instance.hostname

View File

@@ -57,7 +57,6 @@ def metrics():
[ [
'hostname', 'hostname',
'instance_uuid', 'instance_uuid',
'node_type',
], ],
registry=REGISTRY, registry=REGISTRY,
) )
@@ -85,7 +84,6 @@ def metrics():
[ [
'hostname', 'hostname',
'instance_uuid', 'instance_uuid',
'node_type',
], ],
registry=REGISTRY, registry=REGISTRY,
) )
@@ -113,7 +111,6 @@ def metrics():
[ [
'hostname', 'hostname',
'instance_uuid', 'instance_uuid',
'node_type',
], ],
registry=REGISTRY, registry=REGISTRY,
) )
@@ -123,7 +120,6 @@ def metrics():
[ [
'hostname', 'hostname',
'instance_uuid', 'instance_uuid',
'node_type',
], ],
registry=REGISTRY, registry=REGISTRY,
) )
@@ -184,13 +180,12 @@ def metrics():
instance_data = instance_info(None, include_hostnames=True) instance_data = instance_info(None, include_hostnames=True)
for uuid, info in instance_data.items(): for uuid, info in instance_data.items():
hostname = info['hostname'] hostname = info['hostname']
node_type = info['node_type'] INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['capacity'])
INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['capacity'])
INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['cpu']) INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['cpu'])
INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['memory']) INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['memory'])
INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['consumed_capacity']) INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['consumed_capacity'])
INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['remaining_capacity']) INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['remaining_capacity'])
INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).info( INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid).info(
{ {
'enabled': str(instance_data[uuid]['enabled']), 'enabled': str(instance_data[uuid]['enabled']),
'managed_by_policy': str(instance_data[uuid]['managed_by_policy']), 'managed_by_policy': str(instance_data[uuid]['managed_by_policy']),
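The metrics hunks above drop the node_type label from the instance gauges, which is why every .labels() call site changes in lockstep: a prometheus_client gauge only accepts exactly the label names it was declared with. A small runnable sketch (hypothetical metric name, assuming the prometheus_client package is installed) illustrates that constraint:

from prometheus_client import CollectorRegistry, Gauge, generate_latest

registry = CollectorRegistry()

# Declared without node_type: .labels() must be called with exactly these names.
INSTANCE_CAPACITY = Gauge(
    'awx_instance_capacity_demo',  # hypothetical metric name for illustration
    'Capacity of each node in the cluster',
    ['hostname', 'instance_uuid'],
    registry=registry,
)

INSTANCE_CAPACITY.labels(hostname='node1.example.org', instance_uuid='00000000-0000-0000-0000-000000000000').set(57)
# Passing node_type=... here would raise ValueError, which is why the label has to be
# removed from both the Gauge definition and every .labels() call in the diff.

print(generate_latest(registry).decode())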

View File

@@ -5,9 +5,7 @@ import logging
from django.conf import settings from django.conf import settings
from django.apps import apps from django.apps import apps
from awx.main.consumers import emit_channel_notification from awx.main.consumers import emit_channel_notification
from awx.main.utils import is_testing
root_key = 'awx_metrics' root_key = 'awx_metrics'
logger = logging.getLogger('awx.main.analytics') logger = logging.getLogger('awx.main.analytics')
@@ -165,7 +163,7 @@ class Metrics:
Instance = apps.get_model('main', 'Instance') Instance = apps.get_model('main', 'Instance')
if instance_name: if instance_name:
self.instance_name = instance_name self.instance_name = instance_name
elif is_testing(): elif settings.IS_TESTING():
self.instance_name = "awx_testing" self.instance_name = "awx_testing"
else: else:
self.instance_name = Instance.objects.my_hostname() self.instance_name = Instance.objects.my_hostname()

View File

@@ -9,10 +9,16 @@ aim_inputs = {
'fields': [ 'fields': [
{ {
'id': 'url', 'id': 'url',
'label': _('CyberArk AIM URL'), 'label': _('CyberArk CCP URL'),
'type': 'string', 'type': 'string',
'format': 'url', 'format': 'url',
}, },
{
'id': 'webservice_id',
'label': _('Web Service ID'),
'type': 'string',
'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
},
{ {
'id': 'app_id', 'id': 'app_id',
'label': _('Application ID'), 'label': _('Application ID'),
@@ -64,10 +70,13 @@ def aim_backend(**kwargs):
client_cert = kwargs.get('client_cert', None) client_cert = kwargs.get('client_cert', None)
client_key = kwargs.get('client_key', None) client_key = kwargs.get('client_key', None)
verify = kwargs['verify'] verify = kwargs['verify']
webservice_id = kwargs['webservice_id']
app_id = kwargs['app_id'] app_id = kwargs['app_id']
object_query = kwargs['object_query'] object_query = kwargs['object_query']
object_query_format = kwargs['object_query_format'] object_query_format = kwargs['object_query_format']
reason = kwargs.get('reason', None) reason = kwargs.get('reason', None)
if webservice_id == '':
webservice_id = 'AIMWebService'
query_params = { query_params = {
'AppId': app_id, 'AppId': app_id,
@@ -78,7 +87,7 @@ def aim_backend(**kwargs):
query_params['reason'] = reason query_params['reason'] = reason
request_qs = '?' + urlencode(query_params, quote_via=quote) request_qs = '?' + urlencode(query_params, quote_via=quote)
request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts'])) request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
with CertFiles(client_cert, client_key) as cert: with CertFiles(client_cert, client_key) as cert:
res = requests.get( res = requests.get(
@@ -92,4 +101,4 @@ def aim_backend(**kwargs):
return res.json()['Content'] return res.json()['Content']
aim_plugin = CredentialPlugin('CyberArk AIM Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend) aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
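The new webservice_id input only changes the path segment of the CCP request: when it is blank the plugin falls back to AIMWebService, preserving the old behavior. A runnable sketch of the URL construction, standard library only; the host and field values are made up, and the Query/QueryFormat keys are assumed from the plugin's unchanged inputs rather than shown in this hunk:

from urllib.parse import quote, urlencode, urljoin

def build_ccp_url(url, app_id, object_query, object_query_format, webservice_id='', reason=None):
    """Sketch of the lookup's request building: default the service path, then append the query."""
    if webservice_id == '':
        webservice_id = 'AIMWebService'  # backwards-compatible default
    query_params = {
        'AppId': app_id,
        'Query': object_query,          # assumed key names
        'QueryFormat': object_query_format,
    }
    if reason:
        query_params['reason'] = reason
    request_qs = '?' + urlencode(query_params, quote_via=quote)
    request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
    return request_url + request_qs

# Example: a custom Web Service ID replaces the AIMWebService path segment.
print(build_ccp_url('https://ccp.example.com', 'MyApp', 'Safe=Test;Object=secret', 'Exact', webservice_id='MyCCP'))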

View File

@@ -466,7 +466,7 @@ class AutoscalePool(WorkerPool):
task_name = 'unknown' task_name = 'unknown'
if isinstance(body, dict): if isinstance(body, dict):
task_name = body.get('task') task_name = body.get('task')
logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}') logger.warn(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
return super(AutoscalePool, self).write(preferred_queue, body) return super(AutoscalePool, self).write(preferred_queue, body)
except Exception: except Exception:
for conn in connections.all(): for conn in connections.all():

View File

@@ -1,13 +1,14 @@
import inspect import inspect
import logging import logging
import sys
import json import json
import time import time
from uuid import uuid4 from uuid import uuid4
from django.conf import settings
from django_guid import get_guid from django_guid import get_guid
from . import pg_bus_conn from . import pg_bus_conn
from awx.main.utils import is_testing
logger = logging.getLogger('awx.main.dispatch') logger = logging.getLogger('awx.main.dispatch')
@@ -92,7 +93,7 @@ class task:
obj.update(**kw) obj.update(**kw)
if callable(queue): if callable(queue):
queue = queue() queue = queue()
if not is_testing(): if not settings.IS_TESTING(sys.argv):
with pg_bus_conn() as conn: with pg_bus_conn() as conn:
conn.notify(queue, json.dumps(obj)) conn.notify(queue, json.dumps(obj))
return (obj, queue) return (obj, queue)

View File

@@ -233,12 +233,11 @@ class Instance(HasPolicyEditsMixin, BaseModel):
 if not isinstance(vargs.get('grace_period'), int):
     vargs['grace_period'] = 60  # grace period of 60 minutes, need to set because CLI default will not take effect
 if 'exclude_strings' not in vargs and vargs.get('file_pattern'):
-    active_job_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
-    if self.node_type == 'execution':
-        active_job_qs = active_job_qs.filter(execution_node=self.hostname)
-    else:
-        active_job_qs = active_job_qs.filter(controller_node=self.hostname)
-    active_pks = list(active_job_qs.values_list('pk', flat=True))
+    active_pks = list(
+        UnifiedJob.objects.filter(
+            (models.Q(execution_node=self.hostname) | models.Q(controller_node=self.hostname)) & models.Q(status__in=('running', 'waiting'))
+        ).values_list('pk', flat=True)
+    )
     if active_pks:
         vargs['exclude_strings'] = [JOB_FOLDER_PREFIX % job_id for job_id in active_pks]
 if 'remove_images' in vargs or 'image_prune' in vargs:
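The incoming side of this hunk collapses the node-type branch into one Q filter: any running or waiting job that names this host as either execution_node or controller_node keeps its job folder out of cleanup. A tiny runnable sketch of how the exclusion list is derived, with plain dicts standing in for UnifiedJob rows; the 'awx_%s_' prefix is an assumption matching the 'awx_<pk>_' expectation in the test removed further down:

JOB_FOLDER_PREFIX = 'awx_%s_'  # assumed value, inferred from the removed test's 'awx_<pk>_' assertion

def cleanup_exclude_strings(active_jobs, hostname):
    """Collect folder-name prefixes for jobs this host is still responsible for."""
    active_pks = [
        job['pk']
        for job in active_jobs
        if job['status'] in ('running', 'waiting') and hostname in (job.get('execution_node'), job.get('controller_node'))
    ]
    return [JOB_FOLDER_PREFIX % pk for pk in active_pks]

jobs = [
    {'pk': 41, 'status': 'running', 'execution_node': 'node1'},
    {'pk': 42, 'status': 'successful', 'execution_node': 'node1'},
    {'pk': 43, 'status': 'waiting', 'controller_node': 'node1'},
]
print(cleanup_exclude_strings(jobs, 'node1'))  # ['awx_41_', 'awx_43_']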

View File

@@ -567,6 +567,17 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
# Use .job_host_summaries.all() to get jobs affecting this host. # Use .job_host_summaries.all() to get jobs affecting this host.
# Use .job_events.all() to get events affecting this host. # Use .job_events.all() to get events affecting this host.
'''
We don't use timestamp, but we may in the future.
'''
def update_ansible_facts(self, module, facts, timestamp=None):
if module == "ansible":
self.ansible_facts.update(facts)
else:
self.ansible_facts[module] = facts
self.save()
def get_effective_host_name(self): def get_effective_host_name(self):
""" """
Return the name of the host that will be used in actual ansible Return the name of the host that will be used in actual ansible

View File

@@ -44,7 +44,7 @@ from awx.main.models.notifications import (
NotificationTemplate, NotificationTemplate,
JobNotificationMixin, JobNotificationMixin,
) )
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
from awx.main.models.mixins import ( from awx.main.models.mixins import (
ResourceMixin, ResourceMixin,
@@ -857,11 +857,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
return host_queryset.iterator() return host_queryset.iterator()
return host_queryset return host_queryset
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True) def start_job_fact_cache(self, destination, modification_times, timeout=None):
def start_job_fact_cache(self, destination, log_data, timeout=None):
self.log_lifecycle("start_job_fact_cache") self.log_lifecycle("start_job_fact_cache")
log_data['job_id'] = self.id
log_data['written_ct'] = 0
os.makedirs(destination, mode=0o700) os.makedirs(destination, mode=0o700)
if timeout is None: if timeout is None:
@@ -872,8 +869,6 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout) hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
else: else:
hosts = self._get_inventory_hosts() hosts = self._get_inventory_hosts()
last_filepath_written = None
for host in hosts: for host in hosts:
filepath = os.sep.join(map(str, [destination, host.name])) filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination): if not os.path.realpath(filepath).startswith(destination):
@@ -883,38 +878,23 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
with codecs.open(filepath, 'w', encoding='utf-8') as f: with codecs.open(filepath, 'w', encoding='utf-8') as f:
os.chmod(f.name, 0o600) os.chmod(f.name, 0o600)
json.dump(host.ansible_facts, f) json.dump(host.ansible_facts, f)
log_data['written_ct'] += 1
last_filepath_written = filepath
except IOError: except IOError:
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name))) system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue continue
# make note of the time we wrote the last file so we can check if any file changed later # make note of the time we wrote the file so we can check if it changed later
if last_filepath_written: modification_times[filepath] = os.path.getmtime(filepath)
return os.path.getmtime(last_filepath_written)
return None
@log_excess_runtime( def finish_job_fact_cache(self, destination, modification_times):
logger,
debug_cutoff=0.01,
msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
add_log_data=True,
)
def finish_job_fact_cache(self, destination, facts_write_time, log_data):
self.log_lifecycle("finish_job_fact_cache") self.log_lifecycle("finish_job_fact_cache")
log_data['job_id'] = self.id
log_data['updated_ct'] = 0
log_data['unmodified_ct'] = 0
log_data['cleared_ct'] = 0
hosts_to_update = []
for host in self._get_inventory_hosts(): for host in self._get_inventory_hosts():
filepath = os.sep.join(map(str, [destination, host.name])) filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination): if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name))) system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue continue
if os.path.exists(filepath): if os.path.exists(filepath):
# If the file changed since we wrote the last facts file, pre-playbook run... # If the file changed since we wrote it pre-playbook run...
modified = os.path.getmtime(filepath) modified = os.path.getmtime(filepath)
if (not facts_write_time) or modified > facts_write_time: if modified > modification_times.get(filepath, 0):
with codecs.open(filepath, 'r', encoding='utf-8') as f: with codecs.open(filepath, 'r', encoding='utf-8') as f:
try: try:
ansible_facts = json.load(f) ansible_facts = json.load(f)
@@ -922,7 +902,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
continue continue
host.ansible_facts = ansible_facts host.ansible_facts = ansible_facts
host.ansible_facts_modified = now() host.ansible_facts_modified = now()
hosts_to_update.append(host) host.save(update_fields=['ansible_facts', 'ansible_facts_modified'])
system_tracking_logger.info( system_tracking_logger.info(
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)), 'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
extra=dict( extra=dict(
@@ -933,21 +913,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
job_id=self.id, job_id=self.id,
), ),
) )
log_data['updated_ct'] += 1
else:
log_data['unmodified_ct'] += 1
else: else:
# if the file goes missing, ansible removed it (likely via clear_facts) # if the file goes missing, ansible removed it (likely via clear_facts)
host.ansible_facts = {} host.ansible_facts = {}
host.ansible_facts_modified = now() host.ansible_facts_modified = now()
hosts_to_update.append(host)
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name))) system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
log_data['cleared_ct'] += 1 host.save()
if len(hosts_to_update) > 100:
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
hosts_to_update = []
if hosts_to_update:
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
class LaunchTimeConfigBase(BaseModel): class LaunchTimeConfigBase(BaseModel):
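The fact-cache change above swaps a single facts_write_time watermark for a per-file modification_times dict: the start hook records each file's mtime right after writing it, and the finish hook only reloads hosts whose file is newer than the recorded time, clearing facts when the file has disappeared. A self-contained sketch of that round trip, using a temp directory and plain dicts instead of Host rows (start_fact_cache and finish_fact_cache are illustrative names, not the model methods):

import json
import os
import tempfile

def start_fact_cache(destination, hosts, modification_times):
    os.makedirs(destination, mode=0o700, exist_ok=True)
    for name, facts in hosts.items():
        filepath = os.path.join(destination, name)
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(facts, f)
        # remember when each file was written so we can tell later whether Ansible touched it
        modification_times[filepath] = os.path.getmtime(filepath)

def finish_fact_cache(destination, hosts, modification_times):
    for name in hosts:
        filepath = os.path.join(destination, name)
        if os.path.exists(filepath):
            if os.path.getmtime(filepath) > modification_times.get(filepath, 0):
                with open(filepath, 'r', encoding='utf-8') as f:
                    hosts[name] = json.load(f)  # file changed during the run: take the new facts
        else:
            hosts[name] = {}  # file removed (e.g. clear_facts): wipe cached facts

if __name__ == '__main__':
    hosts = {'host1': {'a': 1}, 'host2': {'b': 2}}
    times = {}
    with tempfile.TemporaryDirectory() as tmp:
        cache = os.path.join(tmp, 'facts')
        start_fact_cache(cache, hosts, times)
        # simulate Ansible rewriting one file with new facts and a newer mtime
        path = os.path.join(cache, 'host1')
        with open(path, 'w', encoding='utf-8') as f:
            json.dump({'a': 99}, f)
        os.utime(path, (times[path] + 3600, times[path] + 3600))
        finish_fact_cache(cache, hosts, times)
    print(hosts)  # {'host1': {'a': 99}, 'host2': {'b': 2}}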

View File

@@ -39,7 +39,7 @@ from awx.main.utils import (
ScheduleTaskManager, ScheduleTaskManager,
ScheduleWorkflowManager, ScheduleWorkflowManager,
) )
from awx.main.utils.common import task_manager_bulk_reschedule, is_testing from awx.main.utils.common import task_manager_bulk_reschedule
from awx.main.signals import disable_activity_stream from awx.main.signals import disable_activity_stream
from awx.main.constants import ACTIVE_STATES from awx.main.constants import ACTIVE_STATES
from awx.main.scheduler.dependency_graph import DependencyGraph from awx.main.scheduler.dependency_graph import DependencyGraph
@@ -97,7 +97,7 @@ class TaskBase:
self.all_tasks = [t for t in qs] self.all_tasks = [t for t in qs]
def record_aggregate_metrics(self, *args): def record_aggregate_metrics(self, *args):
if not is_testing(): if not settings.IS_TESTING():
# increment task_manager_schedule_calls regardless if the other # increment task_manager_schedule_calls regardless if the other
# metrics are recorded # metrics are recorded
s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1) s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)

View File

@@ -426,7 +426,7 @@ class BaseTask(object):
""" """
instance.log_lifecycle("post_run") instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir): def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
""" """
Hook for any steps to run after job/task is marked as complete. Hook for any steps to run after job/task is marked as complete.
""" """
@@ -469,6 +469,7 @@ class BaseTask(object):
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running") self.instance.websocket_emit_status("running")
status, rc = 'error', None status, rc = 'error', None
fact_modification_times = {}
self.runner_callback.event_ct = 0 self.runner_callback.event_ct = 0
''' '''
@@ -497,6 +498,14 @@ class BaseTask(object):
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH): if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH) raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value # May have to serialize the value
private_data_files, ssh_key_data = self.build_private_data_files(self.instance, private_data_dir) private_data_files, ssh_key_data = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs) passwords = self.build_passwords(self.instance, kwargs)
@@ -637,7 +646,7 @@ class BaseTask(object):
self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed') self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed')
try: try:
self.final_run_hook(self.instance, status, private_data_dir) self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
except Exception: except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format)) logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
@@ -1057,19 +1066,12 @@ class RunJob(SourceControlMixin, BaseTask):
# ran inside of the event saving code # ran inside of the event saving code
update_smart_memberships_for_inventory(job.inventory) update_smart_memberships_for_inventory(job.inventory)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if job.use_fact_cache:
self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))
def build_project_dir(self, job, private_data_dir): def build_project_dir(self, job, private_data_dir):
self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch) self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
def post_run_hook(self, job, status): def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
super(RunJob, self).post_run_hook(job, status) super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
job.refresh_from_db(fields=['job_env']) if not private_data_dir:
private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
# If there's no private data dir, that means we didn't get into the # If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in # actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method # the pre_run_hook method
@@ -1077,11 +1079,9 @@ class RunJob(SourceControlMixin, BaseTask):
if job.use_fact_cache: if job.use_fact_cache:
job.finish_job_fact_cache( job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
self.facts_write_time, fact_modification_times,
) )
def final_run_hook(self, job, status, private_data_dir):
super(RunJob, self).final_run_hook(job, status, private_data_dir)
try: try:
inventory = job.inventory inventory = job.inventory
except Inventory.DoesNotExist: except Inventory.DoesNotExist:

View File

@@ -1,7 +1,7 @@
import pytest import pytest
from unittest import mock from unittest import mock
from awx.main.models import AdHocCommand, InventoryUpdate, JobTemplate, Job from awx.main.models import AdHocCommand, InventoryUpdate, JobTemplate
from awx.main.models.activity_stream import ActivityStream from awx.main.models.activity_stream import ActivityStream
from awx.main.models.ha import Instance, InstanceGroup from awx.main.models.ha import Instance, InstanceGroup
from awx.main.tasks.system import apply_cluster_membership_policies from awx.main.tasks.system import apply_cluster_membership_policies
@@ -15,24 +15,6 @@ def test_default_tower_instance_group(default_instance_group, job_factory):
assert default_instance_group in job_factory().preferred_instance_groups assert default_instance_group in job_factory().preferred_instance_groups
@pytest.mark.django_db
@pytest.mark.parametrize('node_type', ('execution', 'control'))
@pytest.mark.parametrize('active', (True, False))
def test_get_cleanup_task_kwargs_active_jobs(node_type, active):
instance = Instance.objects.create(hostname='foobar', node_type=node_type)
job_kwargs = dict()
job_kwargs['controller_node' if node_type == 'control' else 'execution_node'] = instance.hostname
job_kwargs['status'] = 'running' if active else 'successful'
job = Job.objects.create(**job_kwargs)
kwargs = instance.get_cleanup_task_kwargs()
if active:
assert kwargs['exclude_strings'] == [f'awx_{job.pk}_']
else:
assert 'exclude_strings' not in kwargs
@pytest.mark.django_db @pytest.mark.django_db
class TestPolicyTaskScheduling: class TestPolicyTaskScheduling:
"""Tests make assertions about when the policy task gets scheduled""" """Tests make assertions about when the policy task gets scheduled"""

View File

@@ -36,14 +36,15 @@ def job(mocker, hosts, inventory):
def test_start_job_fact_cache(hosts, job, inventory, tmpdir): def test_start_job_fact_cache(hosts, job, inventory, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0) modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
for host in hosts: for host in hosts:
filepath = os.path.join(fact_cache, host.name) filepath = os.path.join(fact_cache, host.name)
assert os.path.exists(filepath) assert os.path.exists(filepath)
with open(filepath, 'r') as f: with open(filepath, 'r') as f:
assert f.read() == json.dumps(host.ansible_facts) assert f.read() == json.dumps(host.ansible_facts)
assert os.path.getmtime(filepath) <= last_modified assert filepath in modified_times
def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker): def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
@@ -57,16 +58,18 @@ def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
) )
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
job.start_job_fact_cache(fact_cache, timeout=0) job.start_job_fact_cache(fact_cache, {}, 0)
# a file called "foo" should _not_ be written outside the facts dir # a file called "foo" should _not_ be written outside the facts dir
assert os.listdir(os.path.join(fact_cache, '..')) == ['facts'] assert os.listdir(os.path.join(fact_cache, '..')) == ['facts']
def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker, tmpdir): def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0) modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update') for h in hosts:
h.save = mocker.Mock()
ansible_facts_new = {"foo": "bar"} ansible_facts_new = {"foo": "bar"}
filepath = os.path.join(fact_cache, hosts[1].name) filepath = os.path.join(fact_cache, hosts[1].name)
@@ -80,20 +83,23 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
new_modification_time = time.time() + 3600 new_modification_time = time.time() + 3600
os.utime(filepath, (new_modification_time, new_modification_time)) os.utime(filepath, (new_modification_time, new_modification_time))
job.finish_job_fact_cache(fact_cache, last_modified) job.finish_job_fact_cache(fact_cache, modified_times)
for host in (hosts[0], hosts[2], hosts[3]): for host in (hosts[0], hosts[2], hosts[3]):
host.save.assert_not_called()
assert host.ansible_facts == {"a": 1, "b": 2} assert host.ansible_facts == {"a": 1, "b": 2}
assert host.ansible_facts_modified is None assert host.ansible_facts_modified is None
assert hosts[1].ansible_facts == ansible_facts_new assert hosts[1].ansible_facts == ansible_facts_new
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified']) hosts[1].save.assert_called_once_with(update_fields=['ansible_facts', 'ansible_facts_modified'])
def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir): def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0) modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update') for h in hosts:
h.save = mocker.Mock()
for h in hosts: for h in hosts:
filepath = os.path.join(fact_cache, h.name) filepath = os.path.join(fact_cache, h.name)
@@ -103,22 +109,26 @@ def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpd
new_modification_time = time.time() + 3600 new_modification_time = time.time() + 3600
os.utime(filepath, (new_modification_time, new_modification_time)) os.utime(filepath, (new_modification_time, new_modification_time))
job.finish_job_fact_cache(fact_cache, last_modified) job.finish_job_fact_cache(fact_cache, modified_times)
bulk_update.assert_not_called() for h in hosts:
h.save.assert_not_called()
def test_finish_job_fact_cache_clear(job, hosts, inventory, mocker, tmpdir): def test_finish_job_fact_cache_clear(job, hosts, inventory, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0) modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update') for h in hosts:
h.save = mocker.Mock()
os.remove(os.path.join(fact_cache, hosts[1].name)) os.remove(os.path.join(fact_cache, hosts[1].name))
job.finish_job_fact_cache(fact_cache, last_modified) job.finish_job_fact_cache(fact_cache, modified_times)
for host in (hosts[0], hosts[2], hosts[3]): for host in (hosts[0], hosts[2], hosts[3]):
host.save.assert_not_called()
assert host.ansible_facts == {"a": 1, "b": 2} assert host.ansible_facts == {"a": 1, "b": 2}
assert host.ansible_facts_modified is None assert host.ansible_facts_modified is None
assert hosts[1].ansible_facts == {} assert hosts[1].ansible_facts == {}
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified']) hosts[1].save.assert_called_once_with()

View File

@@ -11,12 +11,11 @@ import os
import subprocess import subprocess
import re import re
import stat import stat
import sys
import urllib.parse import urllib.parse
import threading import threading
import contextlib import contextlib
import tempfile import tempfile
import functools from functools import reduce, wraps
# Django # Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
@@ -74,7 +73,6 @@ __all__ = [
'NullablePromptPseudoField', 'NullablePromptPseudoField',
'model_instance_diff', 'model_instance_diff',
'parse_yaml_or_json', 'parse_yaml_or_json',
'is_testing',
'RequireDebugTrueOrTest', 'RequireDebugTrueOrTest',
'has_model_field_prefetched', 'has_model_field_prefetched',
'set_environ', 'set_environ',
@@ -90,7 +88,6 @@ __all__ = [
'deepmerge', 'deepmerge',
'get_event_partition_epoch', 'get_event_partition_epoch',
'cleanup_new_process', 'cleanup_new_process',
'log_excess_runtime',
] ]
@@ -147,19 +144,6 @@ def underscore_to_camelcase(s):
return ''.join(x.capitalize() or '_' for x in s.split('_')) return ''.join(x.capitalize() or '_' for x in s.split('_'))
@functools.cache
def is_testing(argv=None):
'''Return True if running django or py.test unit tests.'''
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
return True
elif len(argv) >= 2 and argv[1] == 'test':
return True
return False
class RequireDebugTrueOrTest(logging.Filter): class RequireDebugTrueOrTest(logging.Filter):
""" """
Logging filter to output when in DEBUG mode or running tests. Logging filter to output when in DEBUG mode or running tests.
@@ -168,7 +152,7 @@ class RequireDebugTrueOrTest(logging.Filter):
def filter(self, record): def filter(self, record):
from django.conf import settings from django.conf import settings
return settings.DEBUG or is_testing() return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError): class IllegalArgumentError(ValueError):
@@ -190,7 +174,7 @@ def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
cache = cache or get_memoize_cache() cache = cache or get_memoize_cache()
def memoize_decorator(f): def memoize_decorator(f):
@functools.wraps(f) @wraps(f)
def _memoizer(*args, **kwargs): def _memoizer(*args, **kwargs):
if track_function: if track_function:
cache_dict_key = slugify('%r %r' % (args, kwargs)) cache_dict_key = slugify('%r %r' % (args, kwargs))
@@ -1008,7 +992,7 @@ def getattrd(obj, name, default=NoDefaultProvided):
""" """
try: try:
return functools.reduce(getattr, name.split("."), obj) return reduce(getattr, name.split("."), obj)
except AttributeError: except AttributeError:
if default != NoDefaultProvided: if default != NoDefaultProvided:
return default return default
@@ -1204,7 +1188,7 @@ def cleanup_new_process(func):
Cleanup django connection, cache connection, before executing new thread or processes entry point, func. Cleanup django connection, cache connection, before executing new thread or processes entry point, func.
""" """
@functools.wraps(func) @wraps(func)
def wrapper_cleanup_new_process(*args, **kwargs): def wrapper_cleanup_new_process(*args, **kwargs):
from awx.conf.settings import SettingsWrapper # noqa from awx.conf.settings import SettingsWrapper # noqa
@@ -1216,30 +1200,15 @@ def cleanup_new_process(func):
 return wrapper_cleanup_new_process
-def log_excess_runtime(func_logger, cutoff=5.0, debug_cutoff=5.0, msg=None, add_log_data=False):
+def log_excess_runtime(func_logger, cutoff=5.0):
     def log_excess_runtime_decorator(func):
-        @functools.wraps(func)
+        @wraps(func)
         def _new_func(*args, **kwargs):
             start_time = time.time()
-            log_data = {'name': repr(func.__name__)}
-            if add_log_data:
-                return_value = func(*args, log_data=log_data, **kwargs)
-            else:
-                return_value = func(*args, **kwargs)
-            log_data['delta'] = time.time() - start_time
-            if isinstance(return_value, dict):
-                log_data.update(return_value)
-            if msg is None:
-                record_msg = 'Running {name} took {delta:.2f}s'
-            else:
-                record_msg = msg
-            if log_data['delta'] > cutoff:
-                func_logger.info(record_msg.format(**log_data))
-            elif log_data['delta'] > debug_cutoff:
-                func_logger.debug(record_msg.format(**log_data))
+            return_value = func(*args, **kwargs)
+            delta = time.time() - start_time
+            if delta > cutoff:
+                logger.info(f'Running {func.__name__!r} took {delta:.2f}s')
             return return_value
         return _new_func
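The incoming side of this hunk reduces log_excess_runtime to a plain runtime threshold: time the wrapped call and emit one log line when it exceeds cutoff, without the log_data/msg plumbing of the removed version. A standalone sketch of that simpler form; unlike the diff, it logs through the passed-in func_logger rather than a module-level logger, and the logger name is illustrative:

import logging
import time
from functools import wraps

logger = logging.getLogger('runtime_demo')

def log_excess_runtime(func_logger, cutoff=5.0):
    """Log when a wrapped callable takes longer than `cutoff` seconds."""
    def log_excess_runtime_decorator(func):
        @wraps(func)
        def _new_func(*args, **kwargs):
            start_time = time.time()
            return_value = func(*args, **kwargs)
            delta = time.time() - start_time
            if delta > cutoff:
                func_logger.info(f'Running {func.__name__!r} took {delta:.2f}s')
            return return_value

        return _new_func

    return log_excess_runtime_decorator

@log_excess_runtime(logger, cutoff=0.1)
def slow():
    time.sleep(0.2)

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    slow()  # logs something like: Running 'slow' took 0.20s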

View File

@@ -10,6 +10,28 @@ import socket
from datetime import timedelta from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def is_testing(argv=None):
import sys
'''Return True if running django or py.test unit tests.'''
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
return True
elif len(argv) >= 2 and argv[1] == 'test':
return True
return False
def IS_TESTING(argv=None):
return is_testing(argv)
if "pytest" in sys.modules: if "pytest" in sys.modules:
from unittest import mock from unittest import mock
@@ -18,13 +40,9 @@ if "pytest" in sys.modules:
else: else:
import ldap import ldap
DEBUG = True DEBUG = True
SQL_DEBUG = DEBUG SQL_DEBUG = DEBUG
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# FIXME: it would be nice to cycle back around and allow this to be # FIXME: it would be nice to cycle back around and allow this to be
# BigAutoField going forward, but we'd have to be explicit about our # BigAutoField going forward, but we'd have to be explicit about our
# existing models. # existing models.

View File

@@ -465,7 +465,7 @@
}, },
"created": "2020-05-18T21:53:35.370730Z", "created": "2020-05-18T21:53:35.370730Z",
"modified": "2020-05-18T21:54:05.436400Z", "modified": "2020-05-18T21:54:05.436400Z",
"name": "CyberArk AIM Central Credential Provider Lookup", "name": "CyberArk Central Credential Provider Lookup",
"description": "", "description": "",
"kind": "external", "kind": "external",
"namespace": "aim", "namespace": "aim",

View File

@@ -70,6 +70,7 @@ const getStdOutValue = (hostEvent) => {
function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) { function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
const [hostStatus, setHostStatus] = useState(null); const [hostStatus, setHostStatus] = useState(null);
const [activeTabKey, setActiveTabKey] = useState(0); const [activeTabKey, setActiveTabKey] = useState(0);
useEffect(() => { useEffect(() => {
setHostStatus(processEventStatus(hostEvent)); setHostStatus(processEventStatus(hostEvent));
}, [setHostStatus, hostEvent]); }, [setHostStatus, hostEvent]);
@@ -107,11 +108,11 @@ function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
style={{ alignItems: 'center', marginTop: '20px' }} style={{ alignItems: 'center', marginTop: '20px' }}
gutter="sm" gutter="sm"
> >
<Detail label={t`Host`} value={hostEvent.event_data?.host} /> <Detail label={t`Host`} value={hostEvent.host_name} />
{hostEvent.summary_fields?.host?.description ? ( {hostEvent.summary_fields.host?.description ? (
<Detail <Detail
label={t`Description`} label={t`Description`}
value={hostEvent.summary_fields?.host?.description} value={hostEvent.summary_fields.host.description}
/> />
) : null} ) : null}
{hostStatus ? ( {hostStatus ? (
@@ -124,9 +125,12 @@ function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
<Detail label={t`Task`} value={hostEvent.task} /> <Detail label={t`Task`} value={hostEvent.task} />
<Detail <Detail
label={t`Module`} label={t`Module`}
value={hostEvent.event_data?.task_action || t`No result found`} value={hostEvent.event_data.task_action || t`No result found`}
/>
<Detail
label={t`Command`}
value={hostEvent?.event_data?.res?.cmd}
/> />
<Detail label={t`Command`} value={hostEvent.event_data?.res?.cmd} />
</DetailList> </DetailList>
</Tab> </Tab>
<Tab <Tab

View File

@@ -52,47 +52,6 @@ const hostEvent = {
}, },
}; };
const partialHostEvent = {
changed: true,
event: 'runner_on_ok',
event_data: {
host: 'foo',
play: 'all',
playbook: 'run_command.yml',
res: {
ansible_loop_var: 'item',
changed: true,
item: '1',
msg: 'This is a debug message: 1',
stdout:
' total used free shared buff/cache available\nMem: 7973 3005 960 30 4007 4582\nSwap: 1023 0 1023',
stderr: 'problems',
cmd: ['free', '-m'],
stderr_lines: [],
stdout_lines: [
' total used free shared buff/cache available',
'Mem: 7973 3005 960 30 4007 4582',
'Swap: 1023 0 1023',
],
},
task: 'command',
task_action: 'command',
},
event_display: 'Host OK',
event_level: 3,
failed: false,
host: 1,
id: 123,
job: 4,
play: 'all',
playbook: 'run_command.yml',
stdout: `stdout: "changed: [localhost] => {"changed": true, "cmd": ["free", "-m"], "delta": "0:00:01.479609", "end": "2019-09-10 14:21:45.469533", "rc": 0, "start": "2019-09-10 14:21:43.989924", "stderr": "", "stderr_lines": [], "stdout": " total used free shared buff/cache available\nMem: 7973 3005 960 30 4007 4582\nSwap: 1023 0 1023", "stdout_lines": [" total used free shared buff/cache available", "Mem: 7973 3005 960 30 4007 4582", "Swap: 1023 0 1023"]}"
`,
task: 'command',
type: 'job_event',
url: '/api/v2/job_events/123/',
};
/* /*
Some libraries return a list of string in stdout Some libraries return a list of string in stdout
Example: https://github.com/ansible-collections/cisco.ios/blob/main/plugins/modules/ios_command.py#L124-L128 Example: https://github.com/ansible-collections/cisco.ios/blob/main/plugins/modules/ios_command.py#L124-L128
@@ -175,13 +134,6 @@ describe('HostEventModal', () => {
expect(wrapper).toHaveLength(1); expect(wrapper).toHaveLength(1);
}); });
test('renders successfully with partial data', () => {
const wrapper = shallow(
<HostEventModal hostEvent={partialHostEvent} onClose={() => {}} />
);
expect(wrapper).toHaveLength(1);
});
test('should render all tabs', () => { test('should render all tabs', () => {
const wrapper = shallow( const wrapper = shallow(
<HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen /> <HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen />

View File

@@ -52,7 +52,7 @@ options:
- The credential type being created. - The credential type being created.
- Can be a built-in credential type such as "Machine", or a custom credential type such as "My Credential Type" - Can be a built-in credential type such as "Machine", or a custom credential type such as "My Credential Type"
- Choices include Amazon Web Services, Ansible Galaxy/Automation Hub API Token, Centrify Vault Credential Provider Lookup, - Choices include Amazon Web Services, Ansible Galaxy/Automation Hub API Token, Centrify Vault Credential Provider Lookup,
Container Registry, CyberArk AIM Central Credential Provider Lookup, CyberArk Conjur Secrets Manager Lookup, Google Compute Engine, Container Registry, CyberArk Central Credential Provider Lookup, CyberArk Conjur Secret Lookup, Google Compute Engine,
GitHub Personal Access Token, GitLab Personal Access Token, GPG Public Key, HashiCorp Vault Secret Lookup, HashiCorp Vault Signed SSH, GitHub Personal Access Token, GitLab Personal Access Token, GPG Public Key, HashiCorp Vault Secret Lookup, HashiCorp Vault Signed SSH,
Insights, Machine, Microsoft Azure Key Vault, Microsoft Azure Resource Manager, Network, OpenShift or Kubernetes API Insights, Machine, Microsoft Azure Key Vault, Microsoft Azure Resource Manager, Network, OpenShift or Kubernetes API
Bearer Token, OpenStack, Red Hat Ansible Automation Platform, Red Hat Satellite 6, Red Hat Virtualization, Source Control, Bearer Token, OpenStack, Red Hat Ansible Automation Platform, Red Hat Satellite 6, Red Hat Virtualization, Source Control,

View File

@@ -84,7 +84,7 @@ options:
type: str
execution_environment:
description:
- Execution Environment to use for the job template. - Execution Environment to use for the JT.
type: str
custom_virtualenv:
description:

View File

@@ -208,29 +208,6 @@ options:
description:
- Limit to act on, applied as a prompt, if job template prompts for limit
type: str
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook, if job template prompts for forks
type: int
job_slice_count:
description:
- The number of jobs to slice into at runtime, if job template prompts for job slices. Will cause the Job Template to launch a workflow if value is greater than 1.
type: int
default: '1'
timeout:
description:
- Maximum time in seconds to wait for a job to finish (server-side), if job template prompts for timeout.
type: int
execution_environment:
description:
- Name of Execution Environment to be applied to job as launch-time prompts.
type: dict
suboptions:
name:
description:
- Name of Execution Environment to be applied to job as launch-time prompts.
- Uniqueness is not handled rigorously.
type: str
diff_mode:
description:
- Run diff mode, applied as a prompt, if job template prompts for diff mode
@@ -321,6 +298,7 @@ options:
related:
description:
- Related items to this workflow node.
- Must include credentials, failure_nodes, always_nodes, success_nodes, even if empty.
type: dict
suboptions:
always_nodes:
@@ -364,46 +342,6 @@ options:
description:
- Name Credentials to be applied to job as launch-time prompts.
elements: str
organization:
description:
- Name of key for use in model for organizational reference
type: dict
suboptions:
name:
description:
- The organization of the credentials exists in.
type: str
labels:
description:
- Labels to be applied to job as launch-time prompts.
- List of Label names.
- Uniqueness is not handled rigorously.
type: list
suboptions:
name:
description:
- Name Labels to be applied to job as launch-time prompts.
elements: str
organization:
description:
- Name of key for use in model for organizational reference
type: dict
suboptions:
name:
description:
- The organization of the label node exists in.
type: str
instance_groups:
description:
- Instance groups to be applied to job as launch-time prompts.
- List of Instance group names.
- Uniqueness is not handled rigorously.
type: list
suboptions:
name:
description:
- Name of Instance groups to be applied to job as launch-time prompts.
elements: str
destroy_current_nodes:
description:
- Set in order to destroy current workflow_nodes on the workflow.
@@ -536,21 +474,11 @@ EXAMPLES = '''
name: Default
name: job template 2
type: job_template
execution_environment:
name: My EE
related:
credentials: success_nodes: []
- name: cyberark failure_nodes: []
organization: always_nodes: []
name: Default credentials: []
instance_groups:
- name: SunCavanaugh Cloud
- name: default
labels:
- name: Custom Label
- name: Another Custom Label
organization:
name: Default
register: result
'''
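Taken together with the node option documentation above, a node that uses the launch-time prompt fields would look roughly like the sketch below. This assumes the workflow_job_template module's workflow_nodes parameter and a collection version that still exposes forks, job_slice_count, timeout, execution_environment, labels, and instance_groups on nodes; the workflow name and node identifier are placeholders, while the nested values mirror the EXAMPLES block and integration tests in this diff.

```yaml
- name: Create a workflow with a prompt-heavy node (sketch; requires node prompt support)
  workflow_job_template:
    name: example-workflow                  # placeholder
    organization: Default
    workflow_nodes:
      - identifier: node101                 # placeholder identifier
        unified_job_template:
          name: job template 2
          organization:
            name: Default
          type: job_template
        forks: 12
        job_slice_count: 2
        timeout: 23
        execution_environment:
          name: My EE
        related:
          credentials:
            - name: cyberark
              organization:
                name: Default
          labels:
            - name: Custom Label
          instance_groups:
            - name: default
          success_nodes: []
          failure_nodes: []
          always_nodes: []
```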
@@ -619,9 +547,6 @@ def create_workflow_nodes(module, response, workflow_nodes, workflow_id):
'limit',
'diff_mode',
'verbosity',
'forks',
'job_slice_count',
'timeout',
'all_parents_must_converge',
'state',
):
@@ -630,10 +555,6 @@ def create_workflow_nodes(module, response, workflow_nodes, workflow_id):
workflow_node_fields[field_name] = field_val
if workflow_node['identifier']:
search_fields = {'identifier': workflow_node['identifier']}
if 'execution_environment' in workflow_node:
workflow_node_fields['execution_environment'] = module.get_one(
'execution_environments', name_or_id=workflow_node['execution_environment']['name']
)['id']
# Set Search fields
search_fields['workflow_job_template'] = workflow_node_fields['workflow_job_template'] = workflow_id
@@ -720,26 +641,15 @@ def create_workflow_nodes_association(module, response, workflow_nodes, workflow
# Get id's for association fields
association_fields = {}
for association in ( for association in ('always_nodes', 'success_nodes', 'failure_nodes', 'credentials'):
'always_nodes',
'success_nodes',
'failure_nodes',
'credentials',
'labels',
'instance_groups',
):
# Extract out information if it exists
# Test if it is defined, else move to next association.
prompt_lookup = ['credentials', 'labels', 'instance_groups']
if association in workflow_node['related']:
id_list = []
lookup_data = {}
for sub_name in workflow_node['related'][association]:
if association in prompt_lookup: if association == 'credentials':
endpoint = association endpoint = 'credentials'
if 'organization' in sub_name: lookup_data = {'name': sub_name['name']}
lookup_data['organization'] = module.resolve_name_to_id('organizations', sub_name['organization']['name'])
lookup_data['name'] = sub_name['name']
else:
endpoint = 'workflow_job_template_nodes'
lookup_data = {'identifier': sub_name['identifier']}
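The association loop above resolves each entry of a node's related block against a different endpoint: node links are looked up by identifier on workflow_job_template_nodes, while prompt-style associations (credentials, plus labels and instance_groups on the side of the diff that adds them) are looked up by name, optionally scoped by organization. A minimal related block this loop would accept looks like the sketch below; the identifier and names are placeholders.

```yaml
related:
  success_nodes:
    - identifier: node201                   # resolved via the workflow_job_template_nodes endpoint
  failure_nodes: []
  always_nodes: []
  credentials:
    - name: cyberark                        # resolved via the credentials endpoint by name
      organization:                         # organization scoping only applies where the lookup supports it
        name: Default
```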

View File

@@ -14,7 +14,7 @@
credential:
description: Credential for Testing Source
name: "{{ src_cred_name }}"
credential_type: CyberArk AIM Central Credential Provider Lookup credential_type: CyberArk Central Credential Provider Lookup
inputs:
url: "https://cyberark.example.com"
app_id: "My-App-ID"
@@ -58,7 +58,7 @@
credential:
description: Credential for Testing Source Change
name: "{{ src_cred_name }}-2"
credential_type: CyberArk AIM Central Credential Provider Lookup credential_type: CyberArk Central Credential Provider Lookup
inputs:
url: "https://cyberark-prod.example.com"
app_id: "My-App-ID"
@@ -92,7 +92,7 @@
credential:
name: "{{ src_cred_name }}"
organization: Default
credential_type: CyberArk AIM Central Credential Provider Lookup credential_type: CyberArk Central Credential Provider Lookup
state: absent
register: result
@@ -100,7 +100,7 @@
credential:
name: "{{ src_cred_name }}-2"
organization: Default
credential_type: CyberArk AIM Central Credential Provider Lookup credential_type: CyberArk Central Credential Provider Lookup
state: absent
register: result

View File

@@ -729,24 +729,6 @@
organization:
name: Default
type: workflow_job_template
forks: 12
job_slice_count: 2
timeout: 23
execution_environment:
name: "{{ ee1 }}"
related:
credentials:
- name: "{{ scm_cred_name }}"
organization:
name: Default
instance_groups:
- name: "{{ ig1 }}"
- name: "{{ ig2 }}"
labels:
- name: "{{ label1 }}"
- name: "{{ label2 }}"
organization:
name: "{{ org_name }}"
register: result
- name: Delete copied workflow job template

View File

@@ -52,7 +52,6 @@ html_static_path = ['_static']
rst_epilog = '''
.. |prog| replace:: awx
.. |at| replace:: automation controller .. |at| replace:: Ansible Tower
.. |At| replace:: Automation controller .. |RHAT| replace:: Red Hat Ansible Tower
.. |RHAT| replace:: Red Hat Ansible Automation Platform controller
'''

View File

@@ -197,10 +197,8 @@ def parse_resource(client, skip_deprecated=False):
if hasattr(client, 'v2'):
for k in client.v2.json.keys():
if k in ('dashboard', 'config'): if k in ('dashboard',):
# - the Dashboard API is deprecated and not supported # the Dashboard API is deprecated and not supported
# - the Config command is already dealt with by the
# CustomCommand section above
continue
# argparse aliases are *only* supported in Python3 (not 2.7)

View File

@@ -0,0 +1,24 @@
Copyright (c) 2010, Matt Croydon, Mikhail Korobov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the tastypie nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MATT CROYDON BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,22 +1,25 @@
# Dependency Management
The `requirements.txt` file is generated from `requirements.in` and `requirements_git.txt`, using `pip-tools` and `pip-compile`. The `requirements.txt` file is generated from `requirements.in`, using `pip-tools` `pip-compile`.
## How To Use
Commands should be run in the awx container from inside the `./requirements` directory of the awx repository. Commands should be run from inside the `./requirements` directory of the awx repository.
### Upgrading or Adding Select Libraries
If you need to add or upgrade one targeted library, then modify `requirements.in`,
then run the script:
`./updater.sh run` `./updater.sh`
NOTE: `./updater.sh` uses /usr/bin/python3.6, to match the current python version
(3.6) used to build releases.
#### Upgrading Unpinned Dependency
If you require a new version of a dependency that does not have a pinned version
for a fix or feature, pin a minimum version in `requirements.in` and run `./updater.sh run`. For example, for a fix or feature, pin a minimum version and run `./updater.sh`. For example,
replace the line `asgi-amqp` with `asgi-amqp>=1.1.4`, and consider leaving a
note.

View File

@@ -10,7 +10,7 @@ cryptography>=36.0.2,<37.0.0 # Until paramiko fixes https://github.com/paramiko/
Cython<3 # Since the bump to PyYAML 5.4.1 this is now a mandatory dep Cython<3 # Since the bump to PyYAML 5.4.1 this is now a mandatory dep
daphne daphne
distro distro
django==3.2.16 # see UPGRADE BLOCKERs https://github.com/ansible/awx/security/dependabot/67 django==3.2.13 # see UPGRADE BLOCKERs
django-auth-ldap django-auth-ldap
django-cors-headers>=3.5.0 django-cors-headers>=3.5.0
django-crum django-crum
@@ -19,6 +19,7 @@ django-guid==3.2.1
django-oauth-toolkit==1.4.1 django-oauth-toolkit==1.4.1
django-polymorphic django-polymorphic
django-pglocks django-pglocks
django-qsstats-magic
django-redis django-redis
django-solo django-solo
django-split-settings django-split-settings

View File

@@ -86,7 +86,7 @@ defusedxml==0.6.0
# social-auth-core # social-auth-core
distro==1.5.0 distro==1.5.0
# via -r /awx_devel/requirements/requirements.in # via -r /awx_devel/requirements/requirements.in
django==3.2.16 django==3.2.13
# via # via
# -r /awx_devel/requirements/requirements.in # -r /awx_devel/requirements/requirements.in
# channels # channels
@@ -115,6 +115,9 @@ django-pglocks==1.0.4
# via -r /awx_devel/requirements/requirements.in # via -r /awx_devel/requirements/requirements.in
django-polymorphic==3.1.0 django-polymorphic==3.1.0
# via -r /awx_devel/requirements/requirements.in # via -r /awx_devel/requirements/requirements.in
django-qsstats-magic==1.1.0
# via -r /awx_devel/requirements/requirements.in
# via -r /awx_devel/requirements/requirements_git.txt
django-redis==4.5.0 django-redis==4.5.0
# via -r /awx_devel/requirements/requirements.in # via -r /awx_devel/requirements/requirements.in
django-solo==2.0.0 django-solo==2.0.0

View File

@@ -33,47 +33,11 @@ generate_requirements() {
main() { main() {
base_dir=$(pwd) base_dir=$(pwd)
_tmp="$(mktemp -d --suffix .awx-requirements XXXX -p /tmp)"
_tmp=$(python -c "import tempfile; print(tempfile.mkdtemp(suffix='.awx-requirements', dir='/tmp'))")
trap _cleanup INT TERM EXIT trap _cleanup INT TERM EXIT
case $1 in if [ "$1" = "upgrade" ]; then
"run")
NEEDS_HELP=0
;;
"upgrade")
NEEDS_HELP=0
pip_compile="${pip_compile} --upgrade" pip_compile="${pip_compile} --upgrade"
;;
"help")
NEEDS_HELP=1
;;
*)
echo ""
echo "ERROR: Parameter $1 not valid"
echo ""
NEEDS_HELP=1
;;
esac
if [[ "$NEEDS_HELP" == "1" ]] ; then
echo "This script generates requirements.txt from requirements.in and requirements_git.in"
echo "It should be run from within the awx container"
echo ""
echo "Usage: $0 [run|upgrade]"
echo ""
echo "Commands:"
echo "help Print this message"
echo "run Run the process only upgrading pinned libraries from requirements.in"
echo "upgrade Upgrade all libraries to latest while respecting pinnings"
echo ""
exit
fi
if [[ ! -d /awx_devel ]] ; then
echo "This script should be run inside the awx container"
exit
fi fi
cp -vf requirements.txt "${_tmp}" cp -vf requirements.txt "${_tmp}"

View File

@@ -13,7 +13,6 @@ receptor_image: quay.io/ansible/receptor:devel
# Keys for signing work
receptor_rsa_bits: 4096
receptor_work_sign_reconfigure: false
sign_work: no # currently defaults to no because openssl version mismatch causes "unknown block type PRIVATE KEY"
work_sign_key_dir: '../_sources/receptor'
work_sign_private_keyfile: "{{ work_sign_key_dir }}/work_private_key.pem"
work_sign_public_keyfile: "{{ work_sign_key_dir }}/work_public_key.pem"
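Only one side of this diff keeps the sign_work toggle; where it exists, enabling work signing for the docker-compose development environment is just a matter of overriding the defaults above. A minimal sketch, reusing the variable names shown:

```yaml
sign_work: yes                              # overrides the 'no' default noted above
receptor_work_sign_reconfigure: false       # keep existing keys unless regeneration is explicitly wanted
receptor_rsa_bits: 4096
```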

View File

@@ -86,13 +86,11 @@
command: openssl genrsa -out {{ work_sign_private_keyfile }} {{ receptor_rsa_bits }} command: openssl genrsa -out {{ work_sign_private_keyfile }} {{ receptor_rsa_bits }}
args: args:
creates: "{{ work_sign_private_keyfile }}" creates: "{{ work_sign_private_keyfile }}"
when: sign_work | bool
- name: Generate public RSA key for signing work - name: Generate public RSA key for signing work
command: openssl rsa -in {{ work_sign_private_keyfile }} -out {{ work_sign_public_keyfile }} -outform PEM -pubout command: openssl rsa -in {{ work_sign_private_keyfile }} -out {{ work_sign_public_keyfile }} -outform PEM -pubout
args: args:
creates: "{{ work_sign_public_keyfile }}" creates: "{{ work_sign_public_keyfile }}"
when: sign_work | bool
- name: Include LDAP tasks if enabled - name: Include LDAP tasks if enabled
include_tasks: ldap.yml include_tasks: ldap.yml
@@ -130,8 +128,6 @@
src: "receptor-hop.conf.j2" src: "receptor-hop.conf.j2"
dest: "{{ sources_dest }}/receptor/receptor-hop.conf" dest: "{{ sources_dest }}/receptor/receptor-hop.conf"
mode: '0600' mode: '0600'
when:
- execution_node_count | int > 0
- name: Render Receptor Worker Config(s) - name: Render Receptor Worker Config(s)
template: template:

View File

@@ -43,10 +43,8 @@ services:
- "../../docker-compose/_sources/SECRET_KEY:/etc/tower/SECRET_KEY" - "../../docker-compose/_sources/SECRET_KEY:/etc/tower/SECRET_KEY"
- "../../docker-compose/_sources/receptor/receptor-awx-{{ loop.index }}.conf:/etc/receptor/receptor.conf" - "../../docker-compose/_sources/receptor/receptor-awx-{{ loop.index }}.conf:/etc/receptor/receptor.conf"
- "../../docker-compose/_sources/receptor/receptor-awx-{{ loop.index }}.conf.lock:/etc/receptor/receptor.conf.lock" - "../../docker-compose/_sources/receptor/receptor-awx-{{ loop.index }}.conf.lock:/etc/receptor/receptor.conf.lock"
{% if sign_work|bool %}
- "../../docker-compose/_sources/receptor/work_public_key.pem:/etc/receptor/work_public_key.pem" - "../../docker-compose/_sources/receptor/work_public_key.pem:/etc/receptor/work_public_key.pem"
- "../../docker-compose/_sources/receptor/work_private_key.pem:/etc/receptor/work_private_key.pem" - "../../docker-compose/_sources/receptor/work_private_key.pem:/etc/receptor/work_private_key.pem"
{% endif %}
# - "../../docker-compose/_sources/certs:/etc/receptor/certs" # TODO: optionally generate certs # - "../../docker-compose/_sources/certs:/etc/receptor/certs" # TODO: optionally generate certs
- "/sys/fs/cgroup:/sys/fs/cgroup" - "/sys/fs/cgroup:/sys/fs/cgroup"
- "~/.kube/config:/var/lib/awx/.kube/config" - "~/.kube/config:/var/lib/awx/.kube/config"

View File

@@ -11,16 +11,12 @@
- tcp-listener: - tcp-listener:
port: 2222 port: 2222
{% if sign_work|bool %}
- work-signing:
privatekey: /etc/receptor/work_private_key.pem
tokenexpiration: 1m
{% endif %}
{% if sign_work|bool %}
- work-verification:
publickey: /etc/receptor/work_public_key.pem
{% endif %}
{% for i in range(item | int + 1, control_plane_node_count | int + 1) %} {% for i in range(item | int + 1, control_plane_node_count | int + 1) %}
- tcp-peer: - tcp-peer:
@@ -44,7 +40,7 @@
command: ansible-runner command: ansible-runner
params: worker params: worker
allowruntimeparams: true allowruntimeparams: true
verifysignature: {{ sign_work }} verifysignature: true
- work-kubernetes: - work-kubernetes:
worktype: kubernetes-runtime-auth worktype: kubernetes-runtime-auth
@@ -52,7 +48,7 @@
allowruntimeauth: true allowruntimeauth: true
allowruntimepod: true allowruntimepod: true
allowruntimeparams: true allowruntimeparams: true
verifysignature: {{ sign_work }} verifysignature: true
- work-kubernetes: - work-kubernetes:
worktype: kubernetes-incluster-auth worktype: kubernetes-incluster-auth
@@ -60,4 +56,4 @@
allowruntimeauth: true allowruntimeauth: true
allowruntimepod: true allowruntimepod: true
allowruntimeparams: true allowruntimeparams: true
verifysignature: {{ sign_work }} verifysignature: true
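Rendered with signing enabled, the control-node receptor.conf ends up with the signing, verification, and work-command stanzas sketched below. This is an illustration of the template output, not part of the diff, and the worktype name is assumed rather than visible in the hunk above:

```yaml
- work-signing:
    privatekey: /etc/receptor/work_private_key.pem
    tokenexpiration: 1m

- work-verification:
    publickey: /etc/receptor/work_public_key.pem

- work-command:
    worktype: local                         # assumed worktype; not shown in this hunk
    command: ansible-runner
    params: worker
    allowruntimeparams: true
    verifysignature: true                   # '{{ sign_work }}' rendered with signing enabled
```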

View File

@@ -16,7 +16,7 @@
command: ansible-runner command: ansible-runner
params: worker params: worker
allowruntimeparams: true allowruntimeparams: true
verifysignature: {{ sign_work }} verifysignature: true
- control-service: - control-service:
service: control service: control