Merge branch 'devel' into bump_ipython_version

This commit is contained in:
Bianca Henderson 2021-03-31 10:49:24 -04:00 committed by GitHub
commit 730741e978
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
83 changed files with 1774 additions and 475 deletions

View File

@ -23,7 +23,7 @@ VENV_BASE ?= /var/lib/awx/venv/
SCL_PREFIX ?=
CELERY_SCHEDULE_FILE ?= /var/lib/awx/beat.db
DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
DEV_DOCKER_TAG_BASE ?= quay.io/awx
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
# Python packages to install only from source (not from binary wheels)
@ -272,12 +272,12 @@ reports:
mkdir -p $@
black: reports
command -v black >/dev/null 2>&1 || { echo "could not find black on your PATH, you may need to \`pip install black\`, or set AWX_IGNORE_BLACK=1" && exit 1; }
(set -o pipefail && $@ $(BLACK_ARGS) awx awxkit awx_collection | tee reports/$@.report)
@command -v black >/dev/null 2>&1 || { echo "could not find black on your PATH, you may need to \`pip install black\`, or set AWX_IGNORE_BLACK=1" && exit 1; }
@(set -o pipefail && $@ $(BLACK_ARGS) awx awxkit awx_collection | tee reports/$@.report)
.git/hooks/pre-commit:
echo "[ -z \$$AWX_IGNORE_BLACK ] && (black --check \`git diff --cached --name-only | grep -E '\.py$\'\` || (echo 'To fix this, run \`make black\` to auto-format your code prior to commit, or set AWX_IGNORE_BLACK=1' && exit 1))" > .git/hooks/pre-commit
chmod +x .git/hooks/pre-commit
@echo "[ -z \$$AWX_IGNORE_BLACK ] && (black --check \`git diff --cached --name-only --diff-filter=AM | grep -E '\.py$\'\` || (echo 'To fix this, run \`make black\` to auto-format your code prior to commit, or set AWX_IGNORE_BLACK=1' && exit 1))" > .git/hooks/pre-commit
@chmod +x .git/hooks/pre-commit
genschema: reports
$(MAKE) swagger PYTEST_ARGS="--genschema --create-db "

View File

@ -129,6 +129,18 @@ class PrometheusJSONRenderer(renderers.JSONRenderer):
parsed_metrics = text_string_to_metric_families(data)
data = {}
for family in parsed_metrics:
data[family.name] = {}
data[family.name]['help_text'] = family.documentation
data[family.name]['type'] = family.type
data[family.name]['samples'] = []
for sample in family.samples:
data[sample[0]] = {"labels": sample[1], "value": sample[2]}
sample_dict = {"labels": sample[1], "value": sample[2]}
if family.type == 'histogram':
if sample[0].endswith("_sum"):
sample_dict['sample_type'] = "sum"
elif sample[0].endswith("_count"):
sample_dict['sample_type'] = "count"
elif sample[0].endswith("_bucket"):
sample_dict['sample_type'] = "bucket"
data[family.name]['samples'].append(sample_dict)
return super(PrometheusJSONRenderer, self).render(data, accepted_media_type, renderer_context)

View File

@ -0,0 +1 @@
Query parameters can be used to filter the response, e.g., `?subsystemonly=1&metric=callback_receiver_events_insert_db&node=awx-1`

View File

@ -3043,6 +3043,8 @@ class WorkflowJobTemplateNodeCreateApproval(RetrieveAPIView):
return Response(data, status=status.HTTP_201_CREATED)
def check_permissions(self, request):
if not request.user.is_authenticated:
raise PermissionDenied()
obj = self.get_object().workflow_job_template
if request.method == 'POST':
if not request.user.can_access(models.WorkflowJobTemplate, 'change', obj, request.data):

View File

@ -14,6 +14,7 @@ from rest_framework.exceptions import PermissionDenied
# AWX
# from awx.main.analytics import collectors
import awx.main.analytics.subsystem_metrics as s_metrics
from awx.main.analytics.metrics import metrics
from awx.api import renderers
@ -33,5 +34,10 @@ class MetricsView(APIView):
def get(self, request):
''' Show Metrics Details '''
if request.user.is_superuser or request.user.is_system_auditor:
return Response(metrics().decode('UTF-8'))
metrics_to_show = ''
if not request.query_params.get('subsystemonly', "0") == "1":
metrics_to_show += metrics().decode('UTF-8')
if not request.query_params.get('dbonly', "0") == "1":
metrics_to_show += s_metrics.metrics(request)
return Response(metrics_to_show)
raise PermissionDenied()

View File

@ -24,7 +24,7 @@ from awx.api.generics import APIView
from awx.conf.registry import settings_registry
from awx.main.analytics import all_collectors
from awx.main.ha import is_ha_environment
from awx.main.utils import get_awx_version, get_ansible_version, get_custom_venv_choices, to_python_boolean
from awx.main.utils import get_awx_version, get_custom_venv_choices, to_python_boolean
from awx.main.utils.licensing import validate_entitlement_manifest
from awx.api.versioning import reverse, drf_reverse
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
@ -279,7 +279,6 @@ class ApiV2ConfigView(APIView):
time_zone=settings.TIME_ZONE,
license_info=license_data,
version=get_awx_version(),
ansible_version=get_ansible_version(),
eula=render_to_string("eula.md") if license_data.get('license_type', 'UNLICENSED') != 'open' else '',
analytics_status=pendo_state,
analytics_collectors=all_collectors(),

View File

@ -0,0 +1,14 @@
# Python
import logging
# AWX
from awx.main.analytics.subsystem_metrics import Metrics
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename
logger = logging.getLogger('awx.main.scheduler')
@task(queue=get_local_queuename)
def send_subsystem_metrics():
    # Dispatcher task run on the local queue: publish this node's subsystem
    # metrics to the rest of the cluster (delegates to Metrics.send_metrics).
    Metrics().send_metrics()

View File

@ -11,7 +11,7 @@ from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from awx.conf.license import get_license
from awx.main.utils import get_awx_version, get_ansible_version, get_custom_venv_choices, camelcase_to_underscore
from awx.main.utils import get_awx_version, get_custom_venv_choices, camelcase_to_underscore
from awx.main import models
from django.contrib.sessions.models import Session
from awx.main.analytics import register
@ -33,7 +33,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
'''
@register('config', '1.2', description=_('General platform configuration.'))
@register('config', '1.3', description=_('General platform configuration.'))
def config(since, **kwargs):
license_info = get_license()
install_type = 'traditional'
@ -52,7 +52,6 @@ def config(since, **kwargs):
'instance_uuid': settings.SYSTEM_UUID,
'tower_url_base': settings.TOWER_URL_BASE,
'tower_version': get_awx_version(),
'ansible_version': get_ansible_version(),
'license_type': license_info.get('license_type', 'UNLICENSED'),
'free_instances': license_info.get('free_instances', 0),
'total_licensed_instances': license_info.get('instance_count', 0),

View File

@ -1,8 +1,8 @@
from django.conf import settings
from prometheus_client import REGISTRY, PROCESS_COLLECTOR, PLATFORM_COLLECTOR, GC_COLLECTOR, Gauge, Info, generate_latest
from prometheus_client import PROCESS_COLLECTOR, PLATFORM_COLLECTOR, GC_COLLECTOR, CollectorRegistry, Gauge, Info, generate_latest
from awx.conf.license import get_license
from awx.main.utils import get_awx_version, get_ansible_version
from awx.main.utils import get_awx_version
from awx.main.analytics.collectors import (
counts,
instance_info,
@ -11,115 +11,123 @@ from awx.main.analytics.collectors import (
)
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.unregister(PLATFORM_COLLECTOR)
REGISTRY.unregister(GC_COLLECTOR)
SYSTEM_INFO = Info('awx_system', 'AWX System Information')
ORG_COUNT = Gauge('awx_organizations_total', 'Number of organizations')
USER_COUNT = Gauge('awx_users_total', 'Number of users')
TEAM_COUNT = Gauge('awx_teams_total', 'Number of teams')
INV_COUNT = Gauge('awx_inventories_total', 'Number of inventories')
PROJ_COUNT = Gauge('awx_projects_total', 'Number of projects')
JT_COUNT = Gauge('awx_job_templates_total', 'Number of job templates')
WFJT_COUNT = Gauge('awx_workflow_job_templates_total', 'Number of workflow job templates')
HOST_COUNT = Gauge(
'awx_hosts_total',
'Number of hosts',
[
'type',
],
)
SCHEDULE_COUNT = Gauge('awx_schedules_total', 'Number of schedules')
INV_SCRIPT_COUNT = Gauge('awx_inventory_scripts_total', 'Number of invetory scripts')
USER_SESSIONS = Gauge(
'awx_sessions_total',
'Number of sessions',
[
'type',
],
)
CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs')
RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the Tower system')
PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the Tower system')
STATUS = Gauge(
'awx_status_total',
'Status of Job launched',
[
'status',
],
)
INSTANCE_CAPACITY = Gauge(
'awx_instance_capacity',
'Capacity of each node in a Tower system',
[
'hostname',
'instance_uuid',
],
)
INSTANCE_CPU = Gauge(
'awx_instance_cpu',
'CPU cores on each node in a Tower system',
[
'hostname',
'instance_uuid',
],
)
INSTANCE_MEMORY = Gauge(
'awx_instance_memory',
'RAM (Kb) on each node in a Tower system',
[
'hostname',
'instance_uuid',
],
)
INSTANCE_INFO = Info(
'awx_instance',
'Info about each node in a Tower system',
[
'hostname',
'instance_uuid',
],
)
INSTANCE_LAUNCH_TYPE = Gauge(
'awx_instance_launch_type_total',
'Type of Job launched',
[
'node',
'launch_type',
],
)
INSTANCE_STATUS = Gauge(
'awx_instance_status_total',
'Status of Job launched',
[
'node',
'status',
],
)
INSTANCE_CONSUMED_CAPACITY = Gauge(
'awx_instance_consumed_capacity',
'Consumed capacity of each node in a Tower system',
[
'hostname',
'instance_uuid',
],
)
INSTANCE_REMAINING_CAPACITY = Gauge(
'awx_instance_remaining_capacity',
'Remaining capacity of each node in a Tower system',
[
'hostname',
'instance_uuid',
],
)
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license')
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license')
def metrics():
REGISTRY = CollectorRegistry()
SYSTEM_INFO = Info('awx_system', 'AWX System Information', registry=REGISTRY)
ORG_COUNT = Gauge('awx_organizations_total', 'Number of organizations', registry=REGISTRY)
USER_COUNT = Gauge('awx_users_total', 'Number of users', registry=REGISTRY)
TEAM_COUNT = Gauge('awx_teams_total', 'Number of teams', registry=REGISTRY)
INV_COUNT = Gauge('awx_inventories_total', 'Number of inventories', registry=REGISTRY)
PROJ_COUNT = Gauge('awx_projects_total', 'Number of projects', registry=REGISTRY)
JT_COUNT = Gauge('awx_job_templates_total', 'Number of job templates', registry=REGISTRY)
WFJT_COUNT = Gauge('awx_workflow_job_templates_total', 'Number of workflow job templates', registry=REGISTRY)
HOST_COUNT = Gauge(
'awx_hosts_total',
'Number of hosts',
[
'type',
],
registry=REGISTRY,
)
SCHEDULE_COUNT = Gauge('awx_schedules_total', 'Number of schedules', registry=REGISTRY)
INV_SCRIPT_COUNT = Gauge('awx_inventory_scripts_total', 'Number of invetory scripts', registry=REGISTRY)
USER_SESSIONS = Gauge(
'awx_sessions_total',
'Number of sessions',
[
'type',
],
registry=REGISTRY,
)
CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs', registry=REGISTRY)
RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the Tower system', registry=REGISTRY)
PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the Tower system', registry=REGISTRY)
STATUS = Gauge(
'awx_status_total',
'Status of Job launched',
[
'status',
],
registry=REGISTRY,
)
INSTANCE_CAPACITY = Gauge(
'awx_instance_capacity',
'Capacity of each node in a Tower system',
[
'hostname',
'instance_uuid',
],
registry=REGISTRY,
)
INSTANCE_CPU = Gauge(
'awx_instance_cpu',
'CPU cores on each node in a Tower system',
[
'hostname',
'instance_uuid',
],
registry=REGISTRY,
)
INSTANCE_MEMORY = Gauge(
'awx_instance_memory',
'RAM (Kb) on each node in a Tower system',
[
'hostname',
'instance_uuid',
],
registry=REGISTRY,
)
INSTANCE_INFO = Info(
'awx_instance',
'Info about each node in a Tower system',
[
'hostname',
'instance_uuid',
],
registry=REGISTRY,
)
INSTANCE_LAUNCH_TYPE = Gauge(
'awx_instance_launch_type_total',
'Type of Job launched',
[
'node',
'launch_type',
],
registry=REGISTRY,
)
INSTANCE_STATUS = Gauge(
'awx_instance_status_total',
'Status of Job launched',
[
'node',
'status',
],
registry=REGISTRY,
)
INSTANCE_CONSUMED_CAPACITY = Gauge(
'awx_instance_consumed_capacity',
'Consumed capacity of each node in a Tower system',
[
'hostname',
'instance_uuid',
],
registry=REGISTRY,
)
INSTANCE_REMAINING_CAPACITY = Gauge(
'awx_instance_remaining_capacity',
'Remaining capacity of each node in a Tower system',
[
'hostname',
'instance_uuid',
],
registry=REGISTRY,
)
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)
license_info = get_license()
SYSTEM_INFO.info(
{
@ -127,7 +135,6 @@ def metrics():
'insights_analytics': str(settings.INSIGHTS_TRACKING_STATE),
'tower_url_base': settings.TOWER_URL_BASE,
'tower_version': get_awx_version(),
'ansible_version': get_ansible_version(),
'license_type': license_info.get('license_type', 'UNLICENSED'),
'license_expiry': str(license_info.get('time_remaining', 0)),
'pendo_tracking': settings.PENDO_TRACKING_STATE,
@ -197,7 +204,7 @@ def metrics():
for status, value in statuses.items():
INSTANCE_STATUS.labels(node=node, status=status).set(value)
return generate_latest()
return generate_latest(registry=REGISTRY)
__all__ = ['metrics']

View File

@ -0,0 +1,304 @@
import redis
import json
import time
import logging
from django.conf import settings
from django.apps import apps
from awx.main.consumers import emit_channel_notification
root_key = 'awx_metrics'
logger = logging.getLogger('awx.main.wsbroadcast')
class BaseM:
    """Base gauge-style metric backed by a field in a shared Redis hash.

    Keeps an in-memory ``current_value`` accumulator; subclasses decide how
    that value is persisted (``store_value``) and decoded (``decode_value``).
    """

    def __init__(self, field, help_text):
        self.field = field
        self.help_text = help_text
        self.current_value = 0

    def clear_value(self, conn):
        # Reset both the persisted Redis field and the local accumulator.
        conn.hset(root_key, self.field, 0)
        self.current_value = 0

    def inc(self, value):
        self.current_value += value

    def set(self, value):
        self.current_value = value

    def decode(self, conn):
        # Fetch the raw Redis value and convert via the subclass decoder.
        raw = conn.hget(root_key, self.field)
        return self.decode_value(raw)

    def to_prometheus(self, instance_data):
        # Render as a Prometheus gauge: one sample line per cluster node.
        header = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} gauge\n"
        samples = ''.join(f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n' for instance in instance_data)
        return header + samples
class FloatM(BaseM):
    """Float counter metric; deltas are merged into Redis with HINCRBYFLOAT."""

    def decode_value(self, value):
        # A missing Redis field decodes to 0.0.
        return float(value) if value is not None else 0.0

    def store_value(self, conn):
        # Add the pending delta to the Redis field, then reset the accumulator.
        conn.hincrbyfloat(root_key, self.field, self.current_value)
        self.current_value = 0
class IntM(BaseM):
    """Integer counter metric; deltas are merged into Redis with HINCRBY."""

    def decode_value(self, value):
        # A missing Redis field decodes to 0.
        return int(value) if value is not None else 0

    def store_value(self, conn):
        # Add the pending delta to the Redis field, then reset the accumulator.
        conn.hincrby(root_key, self.field, self.current_value)
        self.current_value = 0
class SetIntM(BaseM):
    """Integer gauge written with HSET: last value wins, not incremented."""

    def decode_value(self, value):
        return int(value) if value is not None else 0

    def store_value(self, conn):
        # Only write when set() has been called since the previous store;
        # current_value is reset to None afterwards to mark "unchanged".
        if self.current_value is not None:
            conn.hset(root_key, self.field, self.current_value)
            self.current_value = None
class SetFloatM(SetIntM):
    """Float variant of SetIntM: stored via HSET, decoded as float."""

    def decode_value(self, value):
        # Return 0.0 (not int 0) when the field is missing so the decoded
        # type is consistently float, matching FloatM.decode_value.
        if value is not None:
            return float(value)
        else:
            return 0.0
class HistogramM(BaseM):
    """Prometheus-style histogram persisted as a set of integer counters.

    Each bucket upper bound, plus the +Inf count and the running sum, is
    stored as its own IntM field in the shared Redis hash, named
    ``<field>_<bucket>``, ``<field>_inf`` and ``<field>_sum`` respectively.
    """

    def __init__(self, field, help_text, buckets):
        self.buckets = buckets
        self.buckets_to_keys = {}
        for b in buckets:
            self.buckets_to_keys[b] = IntM(field + '_' + str(b), '')
        self.inf = IntM(field + '_inf', '')
        self.sum = IntM(field + '_sum', '')
        super(HistogramM, self).__init__(field, help_text)

    def clear_value(self, conn):
        # Clear every backing counter (buckets, inf, sum) and the base field.
        conn.hset(root_key, self.field, 0)
        self.inf.clear_value(conn)
        self.sum.clear_value(conn)
        for b in self.buckets_to_keys.values():
            b.clear_value(conn)
        super(HistogramM, self).clear_value(conn)

    def observe(self, value):
        # Only the first bucket whose bound covers the value is incremented
        # (non-cumulative storage); cumulative Prometheus bucket counts are
        # reconstructed later in to_prometheus().
        for b in self.buckets:
            if value <= b:
                self.buckets_to_keys[b].inc(1)
                break
        self.sum.inc(value)
        self.inf.inc(1)

    def decode(self, conn):
        # 'counts' holds the per-bucket (non-cumulative) values in bucket order.
        values = {'counts': []}
        for b in self.buckets_to_keys:
            values['counts'].append(self.buckets_to_keys[b].decode(conn))
        values['sum'] = self.sum.decode(conn)
        values['inf'] = self.inf.decode(conn)
        return values

    def store_value(self, conn):
        # Flush every backing counter's pending delta to Redis.
        for b in self.buckets:
            self.buckets_to_keys[b].store_value(conn)
        self.sum.store_value(conn)
        self.inf.store_value(conn)

    def to_prometheus(self, instance_data):
        output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} histogram\n"
        for instance in instance_data:
            # Prometheus histogram buckets are cumulative: sum the stored
            # per-bucket counts up to and including bucket i.
            for i, b in enumerate(self.buckets):
                output_text += f'{self.field}_bucket{{le="{b}",node="{instance}"}} {sum(instance_data[instance][self.field]["counts"][0:i+1])}\n'
            output_text += f'{self.field}_bucket{{le="+Inf",node="{instance}"}} {instance_data[instance][self.field]["inf"]}\n'
            output_text += f'{self.field}_count{{node="{instance}"}} {instance_data[instance][self.field]["inf"]}\n'
            output_text += f'{self.field}_sum{{node="{instance}"}} {instance_data[instance][self.field]["sum"]}\n'
        return output_text
class Metrics:
    """Collects AWX subsystem metrics locally, batches them to Redis, and
    exchanges them with other cluster nodes via channel notifications.

    Local metric updates accumulate in memory and are flushed to Redis by
    pipe_execute(); a copy of this node's metrics is periodically broadcast
    (send_metrics) and received copies from peers are stored under
    per-instance Redis keys (store_metrics).
    """

    def __init__(self, auto_pipe_execute=True):
        self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline()
        self.conn = redis.Redis.from_url(settings.BROKER_URL)
        self.last_pipe_execute = time.time()
        # track if metrics have been modified since last saved to redis
        # start with True so that we get an initial save to redis
        self.metrics_have_changed = True
        self.pipe_execute_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SAVE_TO_REDIS
        self.send_metrics_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SEND_METRICS
        # auto pipe execute will commit transaction of metric data to redis
        # at a regular interval (pipe_execute_interval). If set to False,
        # the calling function should call .pipe_execute() explicitly
        self.auto_pipe_execute = auto_pipe_execute
        Instance = apps.get_model('main', 'Instance')
        self.instance_name = Instance.objects.me().hostname

        # metric name, help_text
        METRICSLIST = [
            SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'),
            IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
            IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
            IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
            FloatM('callback_receiver_events_insert_db_seconds', 'Time spent saving events to database'),
            IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
            HistogramM(
                'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
            ),
            FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
            IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
            FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
        ]
        # turn metric list into dictionary with the metric name as a key
        self.METRICS = {}
        for m in METRICSLIST:
            self.METRICS[m.field] = m

        # track last time metrics were sent to other nodes
        self.previous_send_metrics = SetFloatM('send_metrics_time', 'Timestamp of previous send_metrics call')

    def clear_values(self):
        # Zero all persisted metric fields and release the send_metrics lock.
        for m in self.METRICS.values():
            m.clear_value(self.conn)
        self.metrics_have_changed = True
        self.conn.delete(root_key + "_lock")

    def inc(self, field, value):
        # Increment a counter; no-op for 0 so unchanged metrics do not
        # trigger a Redis flush.
        if value != 0:
            self.METRICS[field].inc(value)
            self.metrics_have_changed = True
            if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
                self.pipe_execute()

    def set(self, field, value):
        # Overwrite a gauge-style metric's value.
        self.METRICS[field].set(value)
        self.metrics_have_changed = True
        if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
            self.pipe_execute()

    def observe(self, field, value):
        # Record one observation into a histogram metric.
        self.METRICS[field].observe(value)
        self.metrics_have_changed = True
        if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
            self.pipe_execute()

    def serialize_local_metrics(self):
        # JSON representation of this node's metrics, suitable for broadcast.
        data = self.load_local_metrics()
        return json.dumps(data)

    def load_local_metrics(self):
        # generate python dictionary of key values from metrics stored in redis
        data = {}
        for field in self.METRICS:
            data[field] = self.METRICS[field].decode(self.conn)
        return data

    def store_metrics(self, data_json):
        # called when receiving metrics from other instances
        data = json.loads(data_json)
        if self.instance_name != data['instance']:
            logger.debug(f"{self.instance_name} received subsystem metrics from {data['instance']}")
        self.conn.set(root_key + "_instance_" + data['instance'], data['metrics'])

    def should_pipe_execute(self):
        # Flush only when something changed AND the save interval has elapsed.
        if self.metrics_have_changed is False:
            return False
        if time.time() - self.last_pipe_execute > self.pipe_execute_interval:
            return True
        else:
            return False

    def pipe_execute(self):
        # Commit all accumulated metric deltas to Redis in one pipeline
        # transaction, record how long that took, then (possibly) broadcast.
        if self.metrics_have_changed is True:
            duration_to_save = time.perf_counter()
            for m in self.METRICS:
                self.METRICS[m].store_value(self.pipe)
            self.pipe.execute()
            self.last_pipe_execute = time.time()
            self.metrics_have_changed = False
            duration_to_save = time.perf_counter() - duration_to_save
            self.METRICS['subsystem_metrics_pipe_execute_seconds'].inc(duration_to_save)
            self.METRICS['subsystem_metrics_pipe_execute_calls'].inc(1)
            duration_to_save = time.perf_counter()
            self.send_metrics()
            duration_to_save = time.perf_counter() - duration_to_save
            self.METRICS['subsystem_metrics_send_metrics_seconds'].inc(duration_to_save)

    def send_metrics(self):
        # more than one thread could be calling this at the same time, so should
        # get acquire redis lock before sending metrics
        lock = self.conn.lock(root_key + '_lock', thread_local=False)
        if not lock.acquire(blocking=False):
            return
        try:
            current_time = time.time()
            if current_time - self.previous_send_metrics.decode(self.conn) > self.send_metrics_interval:
                payload = {
                    'instance': self.instance_name,
                    'metrics': self.serialize_local_metrics(),
                }
                # store a local copy as well
                self.store_metrics(json.dumps(payload))
                emit_channel_notification("metrics", payload)
                self.previous_send_metrics.set(current_time)
                self.previous_send_metrics.store_value(self.conn)
        finally:
            lock.release()

    def load_other_metrics(self, request):
        # data received from other nodes are stored in their own keys
        # e.g., awx_metrics_instance_awx-1, awx_metrics_instance_awx-2
        # this method looks for keys with "_instance_" in the name and loads the data
        # also filters data based on request query params
        # if additional filtering is added, update metrics_view.md
        instances_filter = request.query_params.getlist("node")
        # get a sorted list of instance names
        instance_names = [self.instance_name]
        for m in self.conn.scan_iter(root_key + '_instance_*'):
            instance_names.append(m.decode('UTF-8').split('_instance_')[1])
        instance_names.sort()
        # load data, including data from the this local instance
        instance_data = {}
        for instance in instance_names:
            if len(instances_filter) == 0 or instance in instances_filter:
                instance_data_from_redis = self.conn.get(root_key + '_instance_' + instance)
                # data from other instances may not be available. That is OK.
                if instance_data_from_redis:
                    instance_data[instance] = json.loads(instance_data_from_redis.decode('UTF-8'))
        return instance_data

    def generate_metrics(self, request):
        # takes the api request, filters, and generates prometheus data
        # if additional filtering is added, update metrics_view.md
        instance_data = self.load_other_metrics(request)
        metrics_filter = request.query_params.getlist("metric")
        output_text = ''
        if instance_data:
            for field in self.METRICS:
                if len(metrics_filter) == 0 or field in metrics_filter:
                    output_text += self.METRICS[field].to_prometheus(instance_data)
        return output_text
def metrics(request):
    """Render subsystem metrics for the cluster as Prometheus text.

    Honors the request's ``node`` and ``metric`` filter query parameters
    (see Metrics.generate_metrics).
    """
    return Metrics().generate_metrics(request)

View File

@ -186,7 +186,7 @@ register(
default=None,
queryset=ExecutionEnvironment.objects.all(),
label=_('Global default execution environment'),
help_text=_('.'),
help_text=_('The Execution Environment to be used when one has not been configured for a job template.'),
category=_('System'),
category_slug='system',
)

View File

@ -13,7 +13,6 @@ from channels.generic.websocket import AsyncJsonWebsocketConsumer
from channels.layers import get_channel_layer
from channels.db import database_sync_to_async
logger = logging.getLogger('awx.main.consumers')
XRF_KEY = '_auth_user_xrf'

View File

@ -20,7 +20,7 @@ from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, Inv
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail
from awx.main.utils.profiling import AWXProfiler
import awx.main.analytics.subsystem_metrics as s_metrics
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@ -46,16 +46,22 @@ class CallbackBrokerWorker(BaseWorker):
self.buff = {}
self.pid = os.getpid()
self.redis = redis.Redis.from_url(settings.BROKER_URL)
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
self.queue_pop = 0
self.queue_name = settings.CALLBACK_QUEUE
self.prof = AWXProfiler("CallbackBrokerWorker")
for key in self.redis.keys('awx_callback_receiver_statistics_*'):
self.redis.delete(key)
def read(self, queue):
try:
res = self.redis.blpop(settings.CALLBACK_QUEUE, timeout=1)
res = self.redis.blpop(self.queue_name, timeout=1)
if res is None:
return {'event': 'FLUSH'}
self.total += 1
self.queue_pop += 1
self.subsystem_metrics.inc('callback_receiver_events_popped_redis', 1)
self.subsystem_metrics.inc('callback_receiver_events_in_memory', 1)
return json.loads(res[1])
except redis.exceptions.RedisError:
logger.exception("encountered an error communicating with redis")
@ -64,8 +70,19 @@ class CallbackBrokerWorker(BaseWorker):
logger.exception("failed to decode JSON message from redis")
finally:
self.record_statistics()
self.record_read_metrics()
return {'event': 'FLUSH'}
def record_read_metrics(self):
if self.queue_pop == 0:
return
if self.subsystem_metrics.should_pipe_execute() is True:
queue_size = self.redis.llen(self.queue_name)
self.subsystem_metrics.set('callback_receiver_events_queue_size_redis', queue_size)
self.subsystem_metrics.pipe_execute()
self.queue_pop = 0
def record_statistics(self):
# buffer stat recording to once per (by default) 5s
if time.time() - self.last_stats > settings.JOB_EVENT_STATISTICS_INTERVAL:
@ -99,27 +116,44 @@ class CallbackBrokerWorker(BaseWorker):
def flush(self, force=False):
now = tz_now()
if force or (time.time() - self.last_flush) > settings.JOB_EVENT_BUFFER_SECONDS or any([len(events) >= 1000 for events in self.buff.values()]):
bulk_events_saved = 0
singular_events_saved = 0
metrics_events_batch_save_errors = 0
for cls, events in self.buff.items():
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
for e in events:
if not e.created:
e.created = now
e.modified = now
duration_to_save = time.perf_counter()
try:
cls.objects.bulk_create(events)
bulk_events_saved += len(events)
except Exception:
# if an exception occurs, we should re-attempt to save the
# events one-by-one, because something in the list is
# broken/stale
metrics_events_batch_save_errors += 1
for e in events:
try:
e.save()
singular_events_saved += 1
except Exception:
logger.exception('Database Error Saving Job Event')
duration_to_save = time.perf_counter() - duration_to_save
for e in events:
emit_event_detail(e)
self.buff = {}
self.last_flush = time.time()
# only update metrics if we saved events
if (bulk_events_saved + singular_events_saved) > 0:
self.subsystem_metrics.inc('callback_receiver_batch_events_errors', metrics_events_batch_save_errors)
self.subsystem_metrics.inc('callback_receiver_events_insert_db_seconds', duration_to_save)
self.subsystem_metrics.inc('callback_receiver_events_insert_db', bulk_events_saved + singular_events_saved)
self.subsystem_metrics.observe('callback_receiver_batch_events_insert_db', bulk_events_saved)
self.subsystem_metrics.inc('callback_receiver_events_in_memory', -(bulk_events_saved + singular_events_saved))
if self.subsystem_metrics.should_pipe_execute() is True:
self.subsystem_metrics.pipe_execute()
def perform_work(self, body):
try:
@ -169,6 +203,7 @@ class CallbackBrokerWorker(BaseWorker):
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
finally:
self.subsystem_metrics.inc('callback_receiver_events_in_memory', -1)
GuidMiddleware.set_guid('')
return

View File

@ -68,12 +68,12 @@ class Command(BaseCommand):
print('Demo Credential, Inventory, and Job Template added.')
changed = True
default_ee = settings.AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE
ee, created = ExecutionEnvironment.objects.get_or_create(name='Default EE', defaults={'image': default_ee, 'managed_by_tower': True})
for ee in reversed(settings.DEFAULT_EXECUTION_ENVIRONMENTS):
_, created = ExecutionEnvironment.objects.get_or_create(name=ee['name'], defaults={'image': ee['image'], 'managed_by_tower': True})
if created:
changed = True
print('Default Execution Environment registered.')
print('Default Execution Environment(s) registered.')
if changed:
print('(changed: True)')

View File

@ -29,6 +29,7 @@ from awx.main.utils.safe_yaml import sanitize_jinja
# other AWX imports
from awx.main.models.rbac import batch_role_ancestor_rebuilding
from awx.main.utils import ignore_inventory_computed_fields, get_licenser
from awx.main.utils.execution_environments import get_execution_environment_default
from awx.main.signals import disable_activity_stream
from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV
from awx.main.utils.pglock import advisory_lock
@ -90,7 +91,7 @@ class AnsibleInventoryLoader(object):
bargs.extend(['-v', '{0}:{0}:Z'.format(self.source)])
for key, value in STANDARD_INVENTORY_UPDATE_ENV.items():
bargs.extend(['-e', '{0}={1}'.format(key, value)])
bargs.extend([settings.AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE])
bargs.extend([get_execution_environment_default().image])
bargs.extend(['ansible-inventory', '-i', self.source])
bargs.extend(['--playbook-dir', functioning_dir(self.source)])
if self.verbosity:

View File

@ -0,0 +1,18 @@
# Generated by Django 2.2.16 on 2021-03-29 15:30
from django.db import migrations
import django.db.models.expressions
class Migration(migrations.Migration):
    # Changes Schedule's default ordering to next_run descending with NULLs
    # last, using id as a tiebreaker (mirrors Schedule.Meta.ordering).

    dependencies = [
        ('main', '0134_unifiedjob_ansible_version'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='schedule',
            options={'ordering': [django.db.models.expressions.OrderBy(django.db.models.expressions.F('next_run'), descending=True, nulls_last=True), 'id']},
        ),
    ]

View File

@ -115,6 +115,6 @@ def kubernetes_bearer_token(cred, env, private_data_dir):
with os.fdopen(handle, 'w') as f:
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
f.write(cred.get_input('ssl_ca_cert'))
env['K8S_AUTH_SSL_CA_CERT'] = path
env['K8S_AUTH_SSL_CA_CERT'] = os.path.join('/runner', os.path.basename(path))
else:
env['K8S_AUTH_VERIFY_SSL'] = 'False'

View File

@ -1227,6 +1227,10 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
null=True,
)
@property
def is_container_group_task(self):
return bool(self.instance_group and self.instance_group.is_container_group)
def _get_parent_field_name(self):
return 'inventory_source'

View File

@ -21,6 +21,7 @@ from django.utils.translation import ugettext_lazy as _
from awx.main.models.base import prevent_search
from awx.main.models.rbac import Role, RoleAncestorEntry, get_roles_on_resource
from awx.main.utils import parse_yaml_or_json, get_custom_venv_choices, get_licenser, polymorphic
from awx.main.utils.execution_environments import get_execution_environment_default
from awx.main.utils.encryption import decrypt_value, get_encryption_key, is_encrypted
from awx.main.utils.polymorphic import build_polymorphic_ctypes_map
from awx.main.fields import JSONField, AskForField
@ -461,13 +462,6 @@ class ExecutionEnvironmentMixin(models.Model):
help_text=_('The container image to be used for execution.'),
)
def get_execution_environment_default(self):
from awx.main.models.execution_environments import ExecutionEnvironment
if settings.DEFAULT_EXECUTION_ENVIRONMENT is not None:
return settings.DEFAULT_EXECUTION_ENVIRONMENT
return ExecutionEnvironment.objects.filter(organization=None, managed_by_tower=True).first()
def resolve_execution_environment(self):
"""
Return the execution environment that should be used when creating a new job.
@ -482,7 +476,7 @@ class ExecutionEnvironmentMixin(models.Model):
if self.inventory.organization.default_environment is not None:
return self.inventory.organization.default_environment
return self.get_execution_environment_default()
return get_execution_environment_default()
class CustomVirtualEnvMixin(models.Model):

View File

@ -63,7 +63,7 @@ class ScheduleManager(ScheduleFilterMethods, models.Manager):
class Schedule(PrimordialModel, LaunchTimeConfig):
class Meta:
app_label = 'main'
ordering = ['-next_run']
ordering = [models.F('next_run').desc(nulls_last=True), 'id']
unique_together = ('unified_job_template', 'name')
objects = ScheduleManager()

View File

@ -8,7 +8,7 @@ import redis
# Django
from django.conf import settings
import awx.main.analytics.subsystem_metrics as s_metrics
__all__ = ['CallbackQueueDispatcher']
@ -28,6 +28,7 @@ class CallbackQueueDispatcher(object):
self.queue = getattr(settings, 'CALLBACK_QUEUE', '')
self.logger = logging.getLogger('awx.main.queue.CallbackQueueDispatcher')
self.connection = redis.Redis.from_url(settings.BROKER_URL)
self.subsystem_metrics = s_metrics.Metrics()
def dispatch(self, obj):
self.connection.rpush(self.queue, json.dumps(obj, cls=AnsibleJSONEncoder))

View File

@ -97,6 +97,7 @@ from awx.main.utils import (
deepmerge,
parse_yaml_or_json,
)
from awx.main.utils.execution_environments import get_execution_environment_default
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
@ -107,6 +108,7 @@ from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
@ -170,6 +172,7 @@ def dispatch_startup():
cluster_node_heartbeat()
if Instance.objects.me().is_controller():
awx_isolated_heartbeat()
Metrics().clear_values()
# Update Tower's rsyslog.conf file based on loggins settings in the db
reconfigure_rsyslog()
@ -1804,13 +1807,14 @@ class RunJob(BaseTask):
logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
sync_needs.append(source_update_tag)
elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
git_repo = git.Repo(project_path)
try:
git_repo = git.Repo(project_path)
if job_revision == git_repo.head.commit.hexsha:
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
else:
sync_needs.append(source_update_tag)
except (ValueError, BadGitName):
except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
else:
@ -2104,7 +2108,7 @@ class RunProjectUpdate(BaseTask):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d['Password:\s*?$'] = 'scm_password' # noqa
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
@ -2503,7 +2507,7 @@ class RunInventoryUpdate(BaseTask):
args.append(container_location)
args.append('--output')
args.append(os.path.join('/runner', 'artifacts', 'output.json'))
args.append(os.path.join('/runner', 'artifacts', str(inventory_update.id), 'output.json'))
if os.path.isdir(source_location):
playbook_dir = container_location
@ -3008,7 +3012,7 @@ class AWXReceptorJob:
return self._run_internal(receptor_ctl)
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None:
if self.unit_id is not None and not settings.AWX_CONTAINER_GROUP_KEEP_POD:
receptor_ctl.simple_command(f"work release {self.unit_id}")
def _run_internal(self, receptor_ctl):
@ -3124,11 +3128,23 @@ class AWXReceptorJob:
@property
def pod_definition(self):
if self.task:
ee = self.task.instance.resolve_execution_environment()
else:
ee = get_execution_environment_default()
default_pod_spec = {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {"namespace": settings.AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE},
"spec": {"containers": [{"image": settings.AWX_CONTAINER_GROUP_DEFAULT_IMAGE, "name": 'worker', "args": ['ansible-runner', 'worker']}]},
"spec": {
"containers": [
{
"image": ee.image,
"name": 'worker',
}
],
},
}
pod_spec_override = {}

View File

@ -56,24 +56,28 @@ def test_metrics_counts(organization_factory, job_template_factory, workflow_job
assert EXPECTED_VALUES[name] == value
def get_metrics_view_db_only():
    """Return the metrics endpoint URL restricted to database-only metrics."""
    url = reverse('api:metrics_view')
    return url + '?dbonly=1'
@pytest.mark.django_db
def test_metrics_permissions(get, admin, org_admin, alice, bob, organization):
    # Superusers may read the metrics endpoint; everyone else is forbidden.
    assert get(reverse('api:metrics_view'), user=admin).status_code == 200
    assert get(reverse('api:metrics_view'), user=org_admin).status_code == 403
    assert get(reverse('api:metrics_view'), user=alice).status_code == 403
    assert get(reverse('api:metrics_view'), user=bob).status_code == 403
    # The ?dbonly=1 variant follows the same access rules.
    assert get(get_metrics_view_db_only(), user=admin).status_code == 200
    assert get(get_metrics_view_db_only(), user=org_admin).status_code == 403
    assert get(get_metrics_view_db_only(), user=alice).status_code == 403
    assert get(get_metrics_view_db_only(), user=bob).status_code == 403
    # Organization-level auditor membership is NOT sufficient.
    organization.auditor_role.members.add(bob)
    assert get(reverse('api:metrics_view'), user=bob).status_code == 403
    assert get(get_metrics_view_db_only(), user=bob).status_code == 403
    # System auditors are allowed to read both endpoints.
    Role.singleton('system_auditor').members.add(bob)
    bob.is_system_auditor = True
    assert get(reverse('api:metrics_view'), user=bob).status_code == 200
    assert get(get_metrics_view_db_only(), user=bob).status_code == 200
@pytest.mark.django_db
def test_metrics_http_methods(get, post, patch, put, options, admin):
    """Only GET and OPTIONS are permitted on both metrics endpoints."""
    expectations = [
        (get, 200),
        (put, 405),
        (patch, 405),
        (post, 405),
        (options, 200),
    ]
    for url in (reverse('api:metrics_view'), get_metrics_view_db_only()):
        for method, expected_status in expectations:
            assert method(url, user=admin).status_code == expected_status

View File

@ -140,7 +140,7 @@ def test_delete_instance_group_jobs_running(delete, instance_group_jobs_running,
@pytest.mark.django_db
def test_delete_rename_tower_instance_group_prevented(delete, options, tower_instance_group, instance_group, user, patch):
def test_delete_rename_tower_instance_group_prevented(delete, options, tower_instance_group, instance_group, user, patch, execution_environment):
url = reverse("api:instance_group_detail", kwargs={'pk': tower_instance_group.pk})
super_user = user('bob', True)

View File

@ -829,5 +829,5 @@ def slice_job_factory(slice_jt_factory):
@pytest.fixture
def execution_environment(organization):
return ExecutionEnvironment.objects.create(name="test-ee", description="test-ee", organization=organization)
def execution_environment():
return ExecutionEnvironment.objects.create(name="test-ee", description="test-ee", managed_by_tower=True)

View File

@ -1,10 +1,11 @@
import subprocess
import base64
from collections import namedtuple
from unittest import mock # noqa
import pytest
from awx.main.scheduler.kubernetes import PodManager
from awx.main.tasks import AWXReceptorJob
from awx.main.utils import (
create_temporary_fifo,
)
@ -34,7 +35,7 @@ def test_containerized_job(containerized_job):
@pytest.mark.django_db
def test_kubectl_ssl_verification(containerized_job):
def test_kubectl_ssl_verification(containerized_job, execution_environment):
cred = containerized_job.instance_group.credential
cred.inputs['verify_ssl'] = True
key_material = subprocess.run('openssl genrsa 2> /dev/null', shell=True, check=True, stdout=subprocess.PIPE)
@ -46,6 +47,8 @@ def test_kubectl_ssl_verification(containerized_job):
cert = subprocess.run(cmd.strip(), shell=True, check=True, stdout=subprocess.PIPE)
cred.inputs['ssl_ca_cert'] = cert.stdout
cred.save()
pm = PodManager(containerized_job)
ca_data = pm.kube_config['clusters'][0]['cluster']['certificate-authority-data']
RunJob = namedtuple('RunJob', ['instance', 'build_execution_environment_params'])
rj = RunJob(instance=containerized_job, build_execution_environment_params=lambda x: {})
receptor_job = AWXReceptorJob(rj, runner_params={'settings': {}})
ca_data = receptor_job.kube_config['clusters'][0]['cluster']['certificate-authority-data']
assert cert.stdout == base64.b64decode(ca_data.encode())

View File

@ -1,49 +0,0 @@
import pytest
from django.conf import settings
from awx.main.models import (
InstanceGroup,
Job,
JobTemplate,
Project,
Inventory,
)
from awx.main.scheduler.kubernetes import PodManager
@pytest.fixture
def container_group():
    """An in-memory (unsaved) InstanceGroup acting as a container group."""
    return InstanceGroup(name='container-group', id=1)
@pytest.fixture
def job(container_group):
    """A minimal, unsaved Job assigned to the container_group fixture."""
    template = JobTemplate(id=1, name='foo')
    return Job(
        pk=1,
        id=1,
        project=Project(),
        instance_group=container_group,
        inventory=Inventory(),
        job_template=template,
    )
def test_default_pod_spec(job):
    """Without an override, the pod image comes from the Django settings."""
    pod = PodManager(job).pod_definition
    image = pod['spec']['containers'][0]['image']
    assert image == settings.AWX_CONTAINER_GROUP_DEFAULT_IMAGE
def test_custom_pod_spec(job):
    # An instance group's pod_spec_override should take precedence over the
    # default pod spec when PodManager builds the pod definition.
    # NOTE(review): leading whitespace inside the YAML literal was lost in
    # extraction; the nesting below is reconstructed — confirm against the
    # original file.
    job.instance_group.pod_spec_override = """
spec:
  containers:
    - image: my-custom-image
"""
    custom_image = PodManager(job).pod_definition['spec']['containers'][0]['image']
    assert custom_image == 'my-custom-image'
def test_pod_manager_namespace_property(job):
    # With no override, the namespace falls back to the configured default.
    pm = PodManager(job)
    assert pm.namespace == settings.AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE
    # A pod_spec_override may supply its own namespace, which takes precedence.
    # NOTE(review): YAML indentation reconstructed after extraction mangling —
    # confirm against the original file.
    job.instance_group.pod_spec_override = """
metadata:
  namespace: my-namespace
"""
    assert PodManager(job).namespace == 'my-namespace'

View File

@ -1003,7 +1003,8 @@ class TestJobCredentials(TestJobExecution):
if verify:
assert env['K8S_AUTH_VERIFY_SSL'] == 'True'
cert = open(env['K8S_AUTH_SSL_CA_CERT'], 'r').read()
local_path = os.path.join(private_data_dir, os.path.basename(env['K8S_AUTH_SSL_CA_CERT']))
cert = open(local_path, 'r').read()
assert cert == 'CERTDATA'
else:
assert env['K8S_AUTH_VERIFY_SSL'] == 'False'

View File

@ -44,7 +44,6 @@ __all__ = [
'underscore_to_camelcase',
'memoize',
'memoize_delete',
'get_ansible_version',
'get_licenser',
'get_awx_http_client_headers',
'get_awx_version',
@ -192,20 +191,6 @@ def memoize_delete(function_name):
return cache.delete(function_name)
@memoize()
def get_ansible_version():
    """
    Return the version string of the installed ``ansible`` CLI.

    Falls back to 'unknown' when the version cannot be determined
    (e.g. the ansible binary is missing or produces unexpected output).
    """
    try:
        proc = subprocess.Popen(['ansible', '--version'], stdout=subprocess.PIPE)
        stdout = proc.communicate()[0]
        # The first output line looks like "ansible X.Y.Z"; drop the name.
        first_line = smart_str(stdout).split('\n')[0]
        return first_line.replace('ansible', '').strip()
    except Exception:
        # Best-effort: any failure maps to 'unknown' rather than raising.
        return 'unknown'
def get_awx_version():
"""
Return AWX version as reported by setuptools.

View File

@ -0,0 +1,9 @@
from django.conf import settings
from awx.main.models.execution_environments import ExecutionEnvironment
def get_execution_environment_default():
    """
    Return the default execution environment.

    The explicitly configured DEFAULT_EXECUTION_ENVIRONMENT setting wins when
    set; otherwise fall back to the first globally-scoped (no organization),
    Tower-managed ExecutionEnvironment, or None if none exists.
    """
    configured = settings.DEFAULT_EXECUTION_ENVIRONMENT
    if configured is not None:
        return configured
    global_managed = ExecutionEnvironment.objects.filter(organization=None, managed_by_tower=True)
    return global_managed.first()

View File

@ -5,6 +5,7 @@
import logging
import sys
import traceback
from datetime import datetime
# Django
from django.conf import settings
@ -34,7 +35,8 @@ class RSysLogHandler(logging.handlers.SysLogHandler):
# because the alternative is blocking the
# socket.send() in the Python process, which we definitely don't
# want to do)
msg = f'{record.asctime} ERROR rsyslogd was unresponsive: '
dt = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
msg = f'{dt} ERROR rsyslogd was unresponsive: '
exc = traceback.format_exc()
try:
msg += exc.splitlines()[-1]

View File

@ -15,7 +15,7 @@ from awx.main.analytics.broadcast_websocket import (
BroadcastWebsocketStats,
BroadcastWebsocketStatsManager,
)
import awx.main.analytics.subsystem_metrics as s_metrics
logger = logging.getLogger('awx.main.wsbroadcast')
@ -68,6 +68,7 @@ class WebsocketTask:
self.protocol = protocol
self.verify_ssl = verify_ssl
self.channel_layer = None
self.subsystem_metrics = s_metrics.Metrics()
async def run_loop(self, websocket: aiohttp.ClientWebSocketResponse):
raise RuntimeError("Implement me")
@ -144,9 +145,10 @@ class BroadcastWebsocketTask(WebsocketTask):
logmsg = "{} {}".format(logmsg, payload)
logger.warn(logmsg)
continue
(group, message) = unwrap_broadcast_msg(payload)
if group == "metrics":
self.subsystem_metrics.store_metrics(message)
continue
await self.channel_layer.group_send(group, {"type": "internal.message", "text": message})

View File

@ -68,17 +68,12 @@ DATABASES = {
# the K8S cluster where awx itself is running)
IS_K8S = False
# TODO: remove this setting in favor of a default execution environment
AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE = 'quay.io/ansible/awx-ee'
AWX_CONTAINER_GROUP_KEEP_POD = False
AWX_CONTAINER_GROUP_K8S_API_TIMEOUT = 10
AWX_CONTAINER_GROUP_POD_LAUNCH_RETRIES = 100
AWX_CONTAINER_GROUP_POD_LAUNCH_RETRY_DELAY = 5
AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE = os.getenv('MY_POD_NAMESPACE', 'default')
# TODO: remove this setting in favor of a default execution environment
AWX_CONTAINER_GROUP_DEFAULT_IMAGE = AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
#
@ -182,8 +177,15 @@ REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST']
PROXY_IP_ALLOWED_LIST = []
CUSTOM_VENV_PATHS = []
# Warning: this is a placeholder for a configure tower-in-tower setting
# This should not be set via a file.
DEFAULT_EXECUTION_ENVIRONMENT = None
# This list is used for creating default EEs when running awx-manage create_preload_data.
# Should be ordered from highest to lowest precedence.
DEFAULT_EXECUTION_ENVIRONMENTS = [{'name': 'AWX EE 0.1.1', 'image': 'quay.io/ansible/awx-ee:0.1.1'}]
# Note: This setting may be overridden by database settings.
STDOUT_MAX_BYTES_DISPLAY = 1048576
@ -224,6 +226,15 @@ JOB_EVENT_MAX_QUEUE_SIZE = 10000
# The number of job events to migrate per-transaction when moving from int -> bigint
JOB_EVENT_MIGRATION_CHUNK_SIZE = 1000000
# Histogram buckets for the callback_receiver_batch_events_insert_db metric
SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS = [10, 50, 150, 350, 650, 2000]
# Interval in seconds for sending local metrics to other nodes
SUBSYSTEM_METRICS_INTERVAL_SEND_METRICS = 3
# Interval in seconds for saving local metrics to redis
SUBSYSTEM_METRICS_INTERVAL_SAVE_TO_REDIS = 2
# The maximum allowed jobs to start on a given task manager cycle
START_TASK_LIMIT = 100
@ -427,6 +438,7 @@ CELERYBEAT_SCHEDULE = {
'gather_analytics': {'task': 'awx.main.tasks.gather_analytics', 'schedule': timedelta(minutes=5)},
'task_manager': {'task': 'awx.main.scheduler.tasks.run_task_manager', 'schedule': timedelta(seconds=20), 'options': {'expires': 20}},
'k8s_reaper': {'task': 'awx.main.tasks.awx_k8s_reaper', 'schedule': timedelta(seconds=60), 'options': {'expires': 50}},
'send_subsystem_metrics': {'task': 'awx.main.analytics.analytics_tasks.send_subsystem_metrics', 'schedule': timedelta(seconds=20)},
# 'isolated_heartbeat': set up at the end of production.py and development.py
}

View File

@ -77,7 +77,8 @@
"resizeOrientation",
"src",
"theme",
"gridColumns"
"gridColumns",
"rows"
],
"ignore": ["Ansible", "Tower", "JSON", "YAML", "lg"],
"ignoreComponent": [

View File

@ -5,6 +5,16 @@ class ExecutionEnvironments extends Base {
super(http);
this.baseUrl = '/api/v2/execution_environments/';
}
readUnifiedJobTemplates(id, params) {
return this.http.get(`${this.baseUrl}${id}/unified_job_templates/`, {
params,
});
}
readUnifiedJobTemplateOptions(id) {
return this.http.options(`${this.baseUrl}${id}/unified_job_templates/`);
}
}
export default ExecutionEnvironments;

View File

@ -36,10 +36,8 @@ class Organizations extends InstanceGroupsMixin(NotificationsMixin(Base)) {
});
}
readExecutionEnvironmentsOptions(id, params) {
return this.http.options(`${this.baseUrl}${id}/execution_environments/`, {
params,
});
readExecutionEnvironmentsOptions(id) {
return this.http.options(`${this.baseUrl}${id}/execution_environments/`);
}
createUser(id, data) {

View File

@ -2,17 +2,12 @@ import React from 'react';
import PropTypes from 'prop-types';
import { withI18n } from '@lingui/react';
import { t } from '@lingui/macro';
import {
AboutModal,
TextContent,
TextList,
TextListItem,
} from '@patternfly/react-core';
import { AboutModal } from '@patternfly/react-core';
import { BrandName } from '../../variables';
import brandLogoImg from './brand-logo.svg';
function About({ ansible_version, version, isOpen, onClose, i18n }) {
function About({ version, isOpen, onClose, i18n }) {
const createSpeechBubble = () => {
let text = `${BrandName} ${version}`;
let top = '';
@ -52,27 +47,17 @@ function About({ ansible_version, version, isOpen, onClose, i18n }) {
|| ||
`}
</pre>
<TextContent>
<TextList component="dl">
<TextListItem component="dt">
{i18n._(t`Ansible Version`)}
</TextListItem>
<TextListItem component="dd">{ansible_version}</TextListItem>
</TextList>
</TextContent>
</AboutModal>
);
}
About.propTypes = {
ansible_version: PropTypes.string,
isOpen: PropTypes.bool,
onClose: PropTypes.func.isRequired,
version: PropTypes.string,
};
About.defaultProps = {
ansible_version: null,
isOpen: false,
version: null,
};

View File

@ -204,7 +204,6 @@ function AppContainer({ i18n, navRouteConfig = [], children }) {
{isReady && <ConfigProvider value={config}>{children}</ConfigProvider>}
</Page>
<About
ansible_version={config?.ansible_version}
version={config?.version}
isOpen={isAboutModalOpen}
onClose={handleAboutModalClose}

View File

@ -10,13 +10,11 @@ import AppContainer from './AppContainer';
jest.mock('../../api');
describe('<AppContainer />', () => {
const ansible_version = '111';
const version = '222';
beforeEach(() => {
ConfigAPI.read.mockResolvedValue({
data: {
ansible_version,
version,
},
});
@ -93,7 +91,6 @@ describe('<AppContainer />', () => {
// check about modal content
const content = await waitForElement(wrapper, aboutModalContent);
expect(content.find('dd').text()).toContain(ansible_version);
expect(content.find('pre').text()).toContain(`< AWX ${version} >`);
// close about modal

View File

@ -1,5 +1,5 @@
import React, { useEffect, useRef, useCallback } from 'react';
import { oneOf, bool, number, string, func } from 'prop-types';
import { oneOf, bool, number, string, func, oneOfType } from 'prop-types';
import ReactAce from 'react-ace';
import 'ace-builds/src-noconflict/mode-json';
import 'ace-builds/src-noconflict/mode-javascript';
@ -77,6 +77,13 @@ function CodeEditor({
className,
i18n,
}) {
if (rows && typeof rows !== 'number' && rows !== 'auto') {
// eslint-disable-next-line no-console
console.warning(
`CodeEditor: Unexpected value for 'rows': ${rows}; expected number or 'auto'`
);
}
const wrapper = useRef(null);
const editor = useRef(null);
@ -117,7 +124,8 @@ function CodeEditor({
jinja2: 'django',
};
const numRows = fullHeight ? value.split('\n').length : rows;
const numRows = rows === 'auto' ? value.split('\n').length : rows;
const height = fullHeight ? '50vh' : `${numRows * LINE_HEIGHT + PADDING}px`;
return (
<>
@ -132,7 +140,7 @@ function CodeEditor({
editorProps={{ $blockScrolling: true }}
fontSize={16}
width="100%"
height={`${numRows * LINE_HEIGHT + PADDING}px`}
height={height}
hasErrors={hasErrors}
setOptions={{
readOnly,
@ -178,7 +186,7 @@ CodeEditor.propTypes = {
readOnly: bool,
hasErrors: bool,
fullHeight: bool,
rows: number,
rows: oneOfType([number, string]),
className: string,
};
CodeEditor.defaultProps = {

View File

@ -1,8 +1,16 @@
import 'styled-components/macro';
import React, { useState, useEffect } from 'react';
import { node, number, oneOfType, shape, string, arrayOf } from 'prop-types';
import { Trans, withI18n } from '@lingui/react';
import { Split, SplitItem, TextListItemVariants } from '@patternfly/react-core';
import { withI18n } from '@lingui/react';
import { t } from '@lingui/macro';
import {
Split,
SplitItem,
TextListItemVariants,
Button,
Modal,
} from '@patternfly/react-core';
import { ExpandArrowsAltIcon } from '@patternfly/react-icons';
import { DetailName, DetailValue } from '../DetailList';
import MultiButtonToggle from '../MultiButtonToggle';
import Popover from '../Popover';
@ -29,13 +37,14 @@ function getValueAsMode(value, mode) {
return mode === YAML_MODE ? jsonToYaml(value) : yamlToJson(value);
}
function VariablesDetail({ dataCy, helpText, value, label, rows, fullHeight }) {
function VariablesDetail({ dataCy, helpText, value, label, rows, i18n }) {
const [mode, setMode] = useState(
isJsonObject(value) || isJsonString(value) ? JSON_MODE : YAML_MODE
);
const [currentValue, setCurrentValue] = useState(
isJsonObject(value) ? JSON.stringify(value, null, 2) : value || '---'
);
const [isExpanded, setIsExpanded] = useState(false);
const [error, setError] = useState(null);
useEffect(() => {
@ -60,7 +69,112 @@ function VariablesDetail({ dataCy, helpText, value, label, rows, fullHeight }) {
fullWidth
css="grid-column: 1 / -1"
>
<Split hasGutter>
<ModeToggle
label={label}
helpText={helpText}
dataCy={dataCy}
mode={mode}
setMode={setMode}
currentValue={currentValue}
setCurrentValue={setCurrentValue}
setError={setError}
onExpand={() => setIsExpanded(true)}
i18n={i18n}
/>
</DetailName>
<DetailValue
data-cy={valueCy}
component={TextListItemVariants.dd}
fullWidth
css="grid-column: 1 / -1; margin-top: -20px"
>
<CodeEditor
mode={mode}
value={currentValue}
readOnly
rows={rows}
css="margin-top: 10px"
/>
{error && (
<div
css="color: var(--pf-global--danger-color--100);
font-size: var(--pf-global--FontSize--sm"
>
{i18n._(t`Error:`)} {error.message}
</div>
)}
</DetailValue>
<Modal
variant="xlarge"
title={label}
isOpen={isExpanded}
onClose={() => setIsExpanded(false)}
actions={[
<Button
aria-label={i18n._(t`Done`)}
key="select"
variant="primary"
onClick={() => setIsExpanded(false)}
ouiaId={`${dataCy}-unexpand`}
>
{i18n._(t`Done`)}
</Button>,
]}
>
<div className="pf-c-form">
<ModeToggle
label={label}
helpText={helpText}
dataCy={dataCy}
mode={mode}
setMode={setMode}
currentValue={currentValue}
setCurrentValue={setCurrentValue}
setError={setError}
i18n={i18n}
/>
<CodeEditor
mode={mode}
value={currentValue}
readOnly
rows={rows}
fullHeight
css="margin-top: 10px"
/>
</div>
</Modal>
</>
);
}
VariablesDetail.propTypes = {
value: oneOfType([shape({}), arrayOf(string), string]).isRequired,
label: node.isRequired,
rows: oneOfType([number, string]),
dataCy: string,
helpText: string,
};
VariablesDetail.defaultProps = {
rows: null,
dataCy: '',
helpText: '',
};
function ModeToggle({
label,
helpText,
dataCy,
currentValue,
setCurrentValue,
mode,
setMode,
setError,
onExpand,
i18n,
}) {
return (
<Split hasGutter>
<SplitItem isFilled>
<Split hasGutter css="align-items: baseline">
<SplitItem>
<div className="pf-c-form__label">
<span
@ -92,44 +206,21 @@ function VariablesDetail({ dataCy, helpText, value, label, rows, fullHeight }) {
/>
</SplitItem>
</Split>
</DetailName>
<DetailValue
data-cy={valueCy}
component={TextListItemVariants.dd}
fullWidth
css="grid-column: 1 / -1; margin-top: -20px"
>
<CodeEditor
mode={mode}
value={currentValue}
readOnly
rows={rows}
fullHeight={fullHeight}
css="margin-top: 10px"
/>
{error && (
<div
css="color: var(--pf-global--danger-color--100);
font-size: var(--pf-global--FontSize--sm"
</SplitItem>
{onExpand && (
<SplitItem>
<Button
variant="plain"
aria-label={i18n._(t`Expand input`)}
onClick={onExpand}
ouiaId={`${dataCy}-expand`}
>
<Trans>Error:</Trans> {error.message}
</div>
)}
</DetailValue>
</>
<ExpandArrowsAltIcon />
</Button>
</SplitItem>
)}
</Split>
);
}
VariablesDetail.propTypes = {
value: oneOfType([shape({}), arrayOf(string), string]).isRequired,
label: node.isRequired,
rows: number,
dataCy: string,
helpText: string,
};
VariablesDetail.defaultProps = {
rows: null,
dataCy: '',
helpText: '',
};
export default withI18n()(VariablesDetail);

View File

@ -4,7 +4,8 @@ import { withI18n } from '@lingui/react';
import { t } from '@lingui/macro';
import { useField } from 'formik';
import styled from 'styled-components';
import { Split, SplitItem } from '@patternfly/react-core';
import { Split, SplitItem, Button, Modal } from '@patternfly/react-core';
import { ExpandArrowsAltIcon } from '@patternfly/react-icons';
import { CheckboxField } from '../FormField';
import MultiButtonToggle from '../MultiButtonToggle';
import { yamlToJson, jsonToYaml, isJsonString } from '../../util/yaml';
@ -20,6 +21,7 @@ const FieldHeader = styled.div`
const StyledCheckboxField = styled(CheckboxField)`
--pf-c-check__label--FontSize: var(--pf-c-form__label--FontSize);
margin-left: auto;
`;
function VariablesField({
@ -31,10 +33,92 @@ function VariablesField({
promptId,
tooltip,
}) {
const [field, meta, helpers] = useField(name);
const [field, meta] = useField(name);
const [mode, setMode] = useState(
isJsonString(field.value) ? JSON_MODE : YAML_MODE
);
const [isExpanded, setIsExpanded] = useState(false);
return (
<>
<VariablesFieldInternals
i18n={i18n}
id={id}
name={name}
label={label}
readOnly={readOnly}
promptId={promptId}
tooltip={tooltip}
onExpand={() => setIsExpanded(true)}
mode={mode}
setMode={setMode}
/>
<Modal
variant="xlarge"
title={label}
isOpen={isExpanded}
onClose={() => setIsExpanded(false)}
actions={[
<Button
aria-label={i18n._(t`Done`)}
key="select"
variant="primary"
onClick={() => setIsExpanded(false)}
ouiaId={`${id}-variables-unexpand`}
>
{i18n._(t`Done`)}
</Button>,
]}
>
<div className="pf-c-form">
<VariablesFieldInternals
i18n={i18n}
id={`${id}-expanded`}
name={name}
label={label}
readOnly={readOnly}
promptId={promptId}
tooltip={tooltip}
fullHeight
mode={mode}
setMode={setMode}
/>
</div>
</Modal>
{meta.error ? (
<div className="pf-c-form__helper-text pf-m-error" aria-live="polite">
{meta.error}
</div>
) : null}
</>
);
}
VariablesField.propTypes = {
id: string.isRequired,
name: string.isRequired,
label: string.isRequired,
readOnly: bool,
promptId: string,
};
VariablesField.defaultProps = {
readOnly: false,
promptId: null,
};
function VariablesFieldInternals({
i18n,
id,
name,
label,
readOnly,
promptId,
tooltip,
fullHeight,
mode,
setMode,
onExpand,
}) {
const [field, meta, helpers] = useField(name);
return (
<div className="pf-c-form__group">
@ -75,6 +159,16 @@ function VariablesField({
name="ask_variables_on_launch"
/>
)}
{onExpand && (
<Button
variant="plain"
aria-label={i18n._(t`Expand input`)}
onClick={onExpand}
ouiaId={`${id}-variables-expand`}
>
<ExpandArrowsAltIcon />
</Button>
)}
</FieldHeader>
<CodeEditor
mode={mode}
@ -83,26 +177,11 @@ function VariablesField({
onChange={newVal => {
helpers.setValue(newVal);
}}
fullHeight={fullHeight}
hasErrors={!!meta.error}
/>
{meta.error ? (
<div className="pf-c-form__helper-text pf-m-error" aria-live="polite">
{meta.error}
</div>
) : null}
</div>
);
}
VariablesField.propTypes = {
id: string.isRequired,
name: string.isRequired,
label: string.isRequired,
readOnly: bool,
promptId: string,
};
VariablesField.defaultProps = {
readOnly: false,
promptId: null,
};
export default withI18n()(VariablesField);

View File

@ -32,7 +32,7 @@ describe('VariablesField', () => {
</Formik>
);
const buttons = wrapper.find('Button');
expect(buttons).toHaveLength(2);
expect(buttons).toHaveLength(3);
expect(buttons.at(0).prop('variant')).toEqual('primary');
expect(buttons.at(1).prop('variant')).toEqual('secondary');
await act(async () => {
@ -136,4 +136,27 @@ describe('VariablesField', () => {
expect(wrapper.find('CodeEditor').prop('mode')).toEqual('javascript');
});
it('should open modal when expanded', async () => {
const value = '---';
const wrapper = mountWithContexts(
<Formik initialValues={{ variables: value }} onSubmit={jest.fn()}>
{formik => (
<form onSubmit={formik.handleSubmit}>
<VariablesField id="the-field" name="variables" label="Variables" />
<button type="submit" id="submit">
Submit
</button>
</form>
)}
</Formik>
);
expect(wrapper.find('Modal').prop('isOpen')).toEqual(false);
wrapper.find('Button[variant="plain"]').invoke('onClick')();
wrapper.update();
expect(wrapper.find('Modal').prop('isOpen')).toEqual(true);
expect(wrapper.find('Modal CodeEditor')).toHaveLength(1);
});
});

View File

@ -14,15 +14,7 @@ import { DetailName, DetailValue } from './Detail';
import CodeEditor from '../CodeEditor';
import Popover from '../Popover';
function CodeDetail({
value,
label,
mode,
rows,
fullHeight,
helpText,
dataCy,
}) {
function CodeDetail({ value, label, mode, rows, helpText, dataCy }) {
const labelCy = dataCy ? `${dataCy}-label` : null;
const valueCy = dataCy ? `${dataCy}-value` : null;
@ -57,7 +49,6 @@ function CodeDetail({
value={value}
readOnly
rows={rows}
fullHeight={fullHeight}
css="margin-top: 10px"
/>
</DetailValue>
@ -69,7 +60,7 @@ CodeDetail.propTypes = {
label: node.isRequired,
dataCy: string,
helpText: string,
rows: number,
rows: oneOfType(number, string),
mode: oneOf(['javascript', 'yaml', 'jinja2']).isRequired,
};
CodeDetail.defaultProps = {

View File

@ -25,6 +25,7 @@ function ExecutionEnvironmentLookup({
globallyAvailable,
i18n,
isDefaultEnvironment,
isGlobalDefaultEnvironment,
isDisabled,
onBlur,
onChange,
@ -154,17 +155,26 @@ function ExecutionEnvironmentLookup({
</>
);
const renderLabel = (
globalDefaultEnvironment,
defaultExecutionEnvironment
) => {
if (globalDefaultEnvironment) {
return i18n._(t`Global Default Execution Environment`);
}
if (defaultExecutionEnvironment) {
return i18n._(t`Default Execution Environment`);
}
return i18n._(t`Execution Environment`);
};
return (
<FormGroup
fieldId="execution-environment-lookup"
label={
isDefaultEnvironment
? i18n._(t`Default Execution Environment`)
: i18n._(t`Execution Environment`)
}
label={renderLabel(isGlobalDefaultEnvironment, isDefaultEnvironment)}
labelIcon={popoverContent && <Popover content={popoverContent} />}
>
{isDisabled ? (
{tooltip ? (
<Tooltip content={tooltip}>{renderLookup()}</Tooltip>
) : (
renderLookup()
@ -180,6 +190,7 @@ ExecutionEnvironmentLookup.propTypes = {
popoverContent: string,
onChange: func.isRequired,
isDefaultEnvironment: bool,
isGlobalDefaultEnvironment: bool,
projectId: oneOfType([number, string]),
organizationId: oneOfType([number, string]),
};
@ -187,6 +198,7 @@ ExecutionEnvironmentLookup.propTypes = {
ExecutionEnvironmentLookup.defaultProps = {
popoverContent: '',
isDefaultEnvironment: false,
isGlobalDefaultEnvironment: false,
value: null,
projectId: null,
organizationId: null,

View File

@ -20,6 +20,7 @@ import ContentLoading from '../../components/ContentLoading';
import ExecutionEnvironmentDetails from './ExecutionEnvironmentDetails';
import ExecutionEnvironmentEdit from './ExecutionEnvironmentEdit';
import ExecutionEnvironmentTemplateList from './ExecutionEnvironmentTemplate';
function ExecutionEnvironment({ i18n, setBreadcrumb }) {
const { id } = useParams();
@ -64,6 +65,11 @@ function ExecutionEnvironment({ i18n, setBreadcrumb }) {
link: `/execution_environments/${id}/details`,
id: 0,
},
{
name: i18n._(t`Templates`),
link: `/execution_environments/${id}/templates`,
id: 1,
},
];
if (!isLoading && contentError) {
@ -114,6 +120,11 @@ function ExecutionEnvironment({ i18n, setBreadcrumb }) {
executionEnvironment={executionEnvironment}
/>
</Route>
<Route path="/execution_environments/:id/templates">
<ExecutionEnvironmentTemplateList
executionEnvironment={executionEnvironment}
/>
</Route>
</>
)}
</Switch>

View File

@ -64,6 +64,11 @@ function ExecutionEnvironmentDetails({ executionEnvironment, i18n }) {
value={description}
dataCy="execution-environment-detail-description"
/>
<Detail
label={i18n._(t`Managed by Tower`)}
value={managedByTower ? i18n._(t`True`) : i18n._(t`False`)}
dataCy="execution-environment-managed-by-tower"
/>
<Detail
label={i18n._(t`Organization`)}
value={
@ -79,6 +84,7 @@ function ExecutionEnvironmentDetails({ executionEnvironment, i18n }) {
}
dataCy="execution-environment-detail-organization"
/>
<Detail
label={i18n._(t`Pull`)}
value={pull === '' ? i18n._(t`Missing`) : toTitleCase(pull)}
@ -110,27 +116,31 @@ function ExecutionEnvironmentDetails({ executionEnvironment, i18n }) {
</DetailList>
{!managedByTower && (
<CardActionsRow>
<Button
ouiaId="execution-environment-detail-edit-button"
aria-label={i18n._(t`edit`)}
component={Link}
to={`/execution_environments/${id}/edit`}
>
{i18n._(t`Edit`)}
</Button>
<DeleteButton
name={image}
modalTitle={i18n._(t`Delete Execution Environment`)}
onConfirm={deleteExecutionEnvironment}
isDisabled={isLoading}
ouiaId="delete-button"
deleteDetailsRequests={deleteDetailsRequests}
deleteMessage={i18n._(
t`This execution environment is currently being used by other resources. Are you sure you want to delete it?`
)}
>
{i18n._(t`Delete`)}
</DeleteButton>
{summary_fields.user_capabilities?.edit && (
<Button
ouiaId="execution-environment-detail-edit-button"
aria-label={i18n._(t`edit`)}
component={Link}
to={`/execution_environments/${id}/edit`}
>
{i18n._(t`Edit`)}
</Button>
)}
{summary_fields.user_capabilities?.delete && (
<DeleteButton
name={image}
modalTitle={i18n._(t`Delete Execution Environment`)}
onConfirm={deleteExecutionEnvironment}
isDisabled={isLoading}
ouiaId="delete-button"
deleteDetailsRequests={deleteDetailsRequests}
deleteMessage={i18n._(
t`This execution environment is currently being used by other resources. Are you sure you want to delete it?`
)}
>
{i18n._(t`Delete`)}
</DeleteButton>
)}
</CardActionsRow>
)}

View File

@ -2,7 +2,10 @@ import React from 'react';
import { act } from 'react-dom/test-utils';
import { createMemoryHistory } from 'history';
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
import {
mountWithContexts,
waitForElement,
} from '../../../../testUtils/enzymeHelpers';
import { ExecutionEnvironmentsAPI } from '../../../api';
import ExecutionEnvironmentDetails from './ExecutionEnvironmentDetails';
@ -22,6 +25,11 @@ const executionEnvironment = {
credential: '/api/v2/credentials/4/',
},
summary_fields: {
user_capabilities: {
edit: true,
delete: true,
copy: true,
},
credential: {
id: 4,
name: 'Container Registry',
@ -73,6 +81,9 @@ describe('<ExecutionEnvironmentDetails/>', () => {
expect(
wrapper.find('Detail[label="Credential"]').prop('value').props.children
).toEqual(executionEnvironment.summary_fields.credential.name);
expect(
wrapper.find('Detail[label="Managed by Tower"]').prop('value')
).toEqual('False');
const dates = wrapper.find('UserDateDetail');
expect(dates).toHaveLength(2);
expect(dates.at(0).prop('date')).toEqual(executionEnvironment.created);
@ -167,6 +178,9 @@ describe('<ExecutionEnvironmentDetails/>', () => {
expect(
wrapper.find('Detail[label="Credential"]').prop('value').props.children
).toEqual(executionEnvironment.summary_fields.credential.name);
expect(
wrapper.find('Detail[label="Managed by Tower"]').prop('value')
).toEqual('True');
const dates = wrapper.find('UserDateDetail');
expect(dates).toHaveLength(2);
expect(dates.at(0).prop('date')).toEqual(executionEnvironment.created);
@ -175,6 +189,7 @@ describe('<ExecutionEnvironmentDetails/>', () => {
expect(wrapper.find('Button[aria-label="Delete"]')).toHaveLength(0);
});
test('should have proper number of delete detail requests', async () => {
const history = createMemoryHistory({
initialEntries: ['/execution_environments/42/details'],
@ -193,4 +208,71 @@ describe('<ExecutionEnvironmentDetails/>', () => {
wrapper.find('DeleteButton').prop('deleteDetailsRequests')
).toHaveLength(4);
});
test('should show edit button for users with edit permission', async () => {
await act(async () => {
wrapper = mountWithContexts(
<ExecutionEnvironmentDetails
executionEnvironment={executionEnvironment}
/>
);
});
const editButton = await waitForElement(
wrapper,
'ExecutionEnvironmentDetails Button[aria-label="edit"]'
);
expect(editButton.text()).toEqual('Edit');
expect(editButton.prop('to')).toBe('/execution_environments/17/edit');
});
test('should hide edit button for users without edit permission', async () => {
await act(async () => {
wrapper = mountWithContexts(
<ExecutionEnvironmentDetails
executionEnvironment={{
...executionEnvironment,
summary_fields: { user_capabilities: { edit: false } },
}}
/>
);
});
await waitForElement(wrapper, 'ExecutionEnvironmentDetails');
expect(
wrapper.find('ExecutionEnvironmentDetails Button[aria-label="edit"]')
.length
).toBe(0);
});
test('should show delete button for users with delete permission', async () => {
await act(async () => {
wrapper = mountWithContexts(
<ExecutionEnvironmentDetails
executionEnvironment={executionEnvironment}
/>
);
});
const deleteButton = await waitForElement(
wrapper,
'ExecutionEnvironmentDetails Button[aria-label="Delete"]'
);
expect(deleteButton.text()).toEqual('Delete');
});
test('should hide delete button for users without delete permission', async () => {
await act(async () => {
wrapper = mountWithContexts(
<ExecutionEnvironmentDetails
executionEnvironment={{
...executionEnvironment,
summary_fields: { user_capabilities: { delete: false } },
}}
/>
);
});
await waitForElement(wrapper, 'ExecutionEnvironmentDetails');
expect(
wrapper.find('ExecutionEnvironmentDetails Button[aria-label="Delete"]')
.length
).toBe(0);
});
});

View File

@ -0,0 +1,139 @@
import React, { useEffect, useCallback } from 'react';
import { useLocation } from 'react-router-dom';
import { withI18n } from '@lingui/react';
import { t } from '@lingui/macro';
import { Card } from '@patternfly/react-core';
import { ExecutionEnvironmentsAPI } from '../../../api';
import { getQSConfig, parseQueryString } from '../../../util/qs';
import useRequest from '../../../util/useRequest';
import DatalistToolbar from '../../../components/DataListToolbar';
import PaginatedDataList from '../../../components/PaginatedDataList';
import ExecutionEnvironmentTemplateListItem from './ExecutionEnvironmentTemplateListItem';
const QS_CONFIG = getQSConfig(
'execution_environments',
{
page: 1,
page_size: 20,
order_by: 'name',
type: 'job_template,workflow_job_template',
},
['id', 'page', 'page_size']
);
function ExecutionEnvironmentTemplateList({ i18n, executionEnvironment }) {
const { id } = executionEnvironment;
const location = useLocation();
const {
error: contentError,
isLoading,
request: fetchTemplates,
result: {
templates,
templatesCount,
relatedSearchableKeys,
searchableKeys,
},
} = useRequest(
useCallback(async () => {
const params = parseQueryString(QS_CONFIG, location.search);
const [response, responseActions] = await Promise.all([
ExecutionEnvironmentsAPI.readUnifiedJobTemplates(id, params),
ExecutionEnvironmentsAPI.readUnifiedJobTemplateOptions(id),
]);
return {
templates: response.data.results,
templatesCount: response.data.count,
actions: responseActions.data.actions,
relatedSearchableKeys: (
responseActions?.data?.related_search_fields || []
).map(val => val.slice(0, -8)),
searchableKeys: Object.keys(
responseActions.data.actions?.GET || {}
).filter(key => responseActions.data.actions?.GET[key].filterable),
};
}, [location, id]),
{
templates: [],
templatesCount: 0,
actions: {},
relatedSearchableKeys: [],
searchableKeys: [],
}
);
useEffect(() => {
fetchTemplates();
}, [fetchTemplates]);
return (
<>
<Card>
<PaginatedDataList
contentError={contentError}
hasContentLoading={isLoading}
items={templates}
itemCount={templatesCount}
pluralizedItemName={i18n._(t`Templates`)}
qsConfig={QS_CONFIG}
toolbarSearchableKeys={searchableKeys}
toolbarRelatedSearchableKeys={relatedSearchableKeys}
toolbarSearchColumns={[
{
name: i18n._(t`Name`),
key: 'name__icontains',
isDefault: true,
},
{
name: i18n._(t`Type`),
key: 'or__type',
options: [
[`job_template`, i18n._(t`Job Template`)],
[`workflow_job_template`, i18n._(t`Workflow Template`)],
],
},
{
name: i18n._(t`Created By (Username)`),
key: 'created_by__username__icontains',
},
{
name: i18n._(t`Modified By (Username)`),
key: 'modified_by__username__icontains',
},
]}
toolbarSortColumns={[
{
name: i18n._(t`Name`),
key: 'name',
},
{
name: i18n._(t`Created`),
key: 'created',
},
{
name: i18n._(t`Modified`),
key: 'modified',
},
]}
renderToolbar={props => (
<DatalistToolbar {...props} qsConfig={QS_CONFIG} />
)}
renderItem={template => (
<ExecutionEnvironmentTemplateListItem
key={template.id}
template={template}
detailUrl={`/templates/${template.type}/${template.id}/details`}
/>
)}
/>
</Card>
</>
);
}
export default withI18n()(ExecutionEnvironmentTemplateList);

View File

@ -0,0 +1,116 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import {
mountWithContexts,
waitForElement,
} from '../../../../testUtils/enzymeHelpers';
import { ExecutionEnvironmentsAPI } from '../../../api';
import ExecutionEnvironmentTemplateList from './ExecutionEnvironmentTemplateList';
jest.mock('../../../api/');
const templates = {
data: {
count: 3,
results: [
{
id: 1,
type: 'job_template',
name: 'Foo',
url: '/api/v2/job_templates/1/',
related: {
execution_environment: '/api/v2/execution_environments/1/',
},
},
{
id: 2,
type: 'workflow_job_template',
name: 'Bar',
url: '/api/v2/workflow_job_templates/2/',
related: {
execution_environment: '/api/v2/execution_environments/1/',
},
},
{
id: 3,
type: 'job_template',
name: 'Fuzz',
url: '/api/v2/job_templates/3/',
related: {
execution_environment: '/api/v2/execution_environments/1/',
},
},
],
},
};
const mockExecutionEnvironment = {
id: 1,
name: 'Default EE',
};
const options = { data: { actions: { GET: {} } } };
describe('<ExecutionEnvironmentTemplateList/>', () => {
let wrapper;
test('should mount successfully', async () => {
await act(async () => {
wrapper = mountWithContexts(
<ExecutionEnvironmentTemplateList
executionEnvironment={mockExecutionEnvironment}
/>
);
});
await waitForElement(
wrapper,
'ExecutionEnvironmentTemplateList',
el => el.length > 0
);
});
test('should have data fetched and render 3 rows', async () => {
ExecutionEnvironmentsAPI.readUnifiedJobTemplates.mockResolvedValue(
templates
);
ExecutionEnvironmentsAPI.readUnifiedJobTemplateOptions.mockResolvedValue(
options
);
await act(async () => {
wrapper = mountWithContexts(
<ExecutionEnvironmentTemplateList
executionEnvironment={mockExecutionEnvironment}
/>
);
});
await waitForElement(
wrapper,
'ExecutionEnvironmentTemplateList',
el => el.length > 0
);
expect(wrapper.find('ExecutionEnvironmentTemplateListItem').length).toBe(3);
expect(ExecutionEnvironmentsAPI.readUnifiedJobTemplates).toBeCalled();
expect(ExecutionEnvironmentsAPI.readUnifiedJobTemplateOptions).toBeCalled();
});
test('should not render add button', async () => {
await act(async () => {
wrapper = mountWithContexts(
<ExecutionEnvironmentTemplateList
executionEnvironment={mockExecutionEnvironment}
/>
);
});
waitForElement(
wrapper,
'ExecutionEnvironmentTemplateList',
el => el.length > 0
);
expect(wrapper.find('ToolbarAddButton').length).toBe(0);
});
});

View File

@ -0,0 +1,43 @@
import React from 'react';
import { withI18n } from '@lingui/react';
import { t } from '@lingui/macro';
import { Link } from 'react-router-dom';
import {
DataListItem,
DataListItemRow,
DataListItemCells,
} from '@patternfly/react-core';
import DataListCell from '../../../components/DataListCell';
function ExecutionEnvironmentTemplateListItem({ template, detailUrl, i18n }) {
return (
<DataListItem
key={template.id}
aria-labelledby={`check-action-${template.id}`}
id={`${template.id}`}
>
<DataListItemRow>
<DataListItemCells
dataListCells={[
<DataListCell key="name" aria-label={i18n._(t`Name`)}>
<Link to={`${detailUrl}`}>
<b>{template.name}</b>
</Link>
</DataListCell>,
<DataListCell
key="template-type"
aria-label={i18n._(t`Template type`)}
>
{template.type === 'job_template'
? i18n._(t`Job Template`)
: i18n._(t`Workflow Job Template`)}
</DataListCell>,
]}
/>
</DataListItemRow>
</DataListItem>
);
}
export default withI18n()(ExecutionEnvironmentTemplateListItem);

View File

@ -0,0 +1,48 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
import ExecutionEnvironmentTemplateListItem from './ExecutionEnvironmentTemplateListItem';
describe('<ExecutionEnvironmentTemplateListItem/>', () => {
let wrapper;
const template = {
id: 1,
name: 'Foo',
type: 'job_template',
};
test('should mount successfully', async () => {
await act(async () => {
wrapper = mountWithContexts(
<ExecutionEnvironmentTemplateListItem
template={template}
detailUrl={`/templates/${template.type}/${template.id}/details`}
/>
);
});
expect(wrapper.find('ExecutionEnvironmentTemplateListItem').length).toBe(1);
expect(wrapper.find('DataListCell[aria-label="Name"]').text()).toBe(
template.name
);
expect(
wrapper.find('DataListCell[aria-label="Template type"]').text()
).toBe('Job Template');
});
test('should distinguish template types', async () => {
await act(async () => {
wrapper = mountWithContexts(
<ExecutionEnvironmentTemplateListItem
template={{ ...template, type: 'workflow_job_template' }}
detailUrl={`/templates/${template.type}/${template.id}/details`}
/>
);
});
expect(wrapper.find('ExecutionEnvironmentTemplateListItem').length).toBe(1);
expect(
wrapper.find('DataListCell[aria-label="Template type"]').text()
).toBe('Workflow Job Template');
});
});

View File

@ -0,0 +1 @@
export { default } from './ExecutionEnvironmentTemplateList';

View File

@ -36,7 +36,7 @@ function HostFacts({ i18n, host }) {
return (
<CardBody>
<DetailList gutter="sm">
<VariablesDetail label={i18n._(t`Facts`)} fullHeight value={facts} />
<VariablesDetail label={i18n._(t`Facts`)} rows="auto" value={facts} />
</DetailList>
</CardBody>
);

View File

@ -72,11 +72,12 @@ describe('<InventoryGroupDetail />', () => {
});
test('should open delete modal and then call api to delete the group', async () => {
expect(wrapper.find('Modal').length).toBe(1); // variables modal already mounted
await act(async () => {
wrapper.find('button[aria-label="Delete"]').simulate('click');
});
await waitForElement(wrapper, 'Modal', el => el.length === 1);
expect(wrapper.find('Modal').length).toBe(1);
wrapper.update();
expect(wrapper.find('Modal').length).toBe(2);
await act(async () => {
wrapper.find('Radio[id="radio-delete"]').invoke('onChange')();
});

View File

@ -35,7 +35,7 @@ function InventoryHostFacts({ i18n, host }) {
return (
<CardBody>
<DetailList gutter="sm">
<VariablesDetail label={i18n._(t`Facts`)} fullHeight value={result} />
<VariablesDetail label={i18n._(t`Facts`)} rows="auto" value={result} />
</DetailList>
</CardBody>
);

View File

@ -122,20 +122,14 @@ const SCMSubForm = ({ autoPopulateProject, i18n }) => {
onSelect={(event, value) => {
setIsOpen(false);
value = value.trim();
if (!value.endsWith('/')) {
value += '/';
}
sourcePathHelpers.setValue(value);
}}
aria-label={i18n._(t`Select source path`)}
placeholder={i18n._(t`Select source path`)}
createText={i18n._(t`Set source path to`)}
isCreatable
onCreateOption={value => {
value.trim();
if (!value.endsWith('/')) {
value += '/';
}
setSourcePath([...sourcePath, value]);
}}
>

View File

@ -98,7 +98,7 @@ describe('<SCMSubForm />', () => {
});
wrapper.update();
expect(wrapper.find('Select#source_path').prop('selections')).toEqual(
'bar/'
'bar'
);
await act(async () => {
@ -138,7 +138,7 @@ describe('<SCMSubForm />', () => {
customWrapper.find('Select').invoke('onSelect')({}, 'newPath');
});
customWrapper.update();
expect(customWrapper.find('Select').prop('selections')).toBe('newPath/');
expect(customWrapper.find('Select').prop('selections')).toBe('newPath');
});
test('Update on project update should be disabled', async () => {
const customInitialValues = {

View File

@ -38,7 +38,7 @@ function OrganizationExecEnvList({ i18n, organization }) {
const [response, responseActions] = await Promise.all([
OrganizationsAPI.readExecutionEnvironments(id, params),
OrganizationsAPI.readExecutionEnvironmentsOptions(id, params),
OrganizationsAPI.readExecutionEnvironmentsOptions(id),
]);
return {

View File

@ -9,7 +9,7 @@ import ContentError from '../../../../components/ContentError';
import ContentLoading from '../../../../components/ContentLoading';
import { DetailList } from '../../../../components/DetailList';
import RoutedTabs from '../../../../components/RoutedTabs';
import { SettingsAPI } from '../../../../api';
import { SettingsAPI, ExecutionEnvironmentsAPI } from '../../../../api';
import useRequest from '../../../../util/useRequest';
import { useConfig } from '../../../../contexts/Config';
import { useSettings } from '../../../../contexts/Settings';
@ -23,7 +23,15 @@ function MiscSystemDetail({ i18n }) {
const { isLoading, error, request, result: system } = useRequest(
useCallback(async () => {
const { data } = await SettingsAPI.readCategory('all');
let DEFAULT_EXECUTION_ENVIRONMENT = '';
if (data.DEFAULT_EXECUTION_ENVIRONMENT) {
const {
data: { name },
} = await ExecutionEnvironmentsAPI.readDetail(
data.DEFAULT_EXECUTION_ENVIRONMENT
);
DEFAULT_EXECUTION_ENVIRONMENT = name;
}
const {
OAUTH2_PROVIDER: {
ACCESS_TOKEN_EXPIRE_SECONDS,
@ -49,19 +57,17 @@ function MiscSystemDetail({ i18n }) {
'SESSION_COOKIE_AGE',
'TOWER_URL_BASE'
);
const systemData = {
...pluckedSystemData,
ACCESS_TOKEN_EXPIRE_SECONDS,
REFRESH_TOKEN_EXPIRE_SECONDS,
AUTHORIZATION_CODE_EXPIRE_SECONDS,
DEFAULT_EXECUTION_ENVIRONMENT,
};
const {
OAUTH2_PROVIDER: OAUTH2_PROVIDER_OPTIONS,
...options
} = allOptions;
const systemOptions = {
...options,
ACCESS_TOKEN_EXPIRE_SECONDS: {
@ -80,7 +86,6 @@ function MiscSystemDetail({ i18n }) {
label: i18n._(t`Authorization Code Expiration`),
},
};
const mergedData = {};
Object.keys(systemData).forEach(key => {
mergedData[key] = systemOptions[key];

View File

@ -5,7 +5,7 @@ import {
waitForElement,
} from '../../../../../testUtils/enzymeHelpers';
import { SettingsProvider } from '../../../../contexts/Settings';
import { SettingsAPI } from '../../../../api';
import { SettingsAPI, ExecutionEnvironmentsAPI } from '../../../../api';
import {
assertDetail,
assertVariableDetail,
@ -14,13 +14,14 @@ import mockAllOptions from '../../shared/data.allSettingOptions.json';
import MiscSystemDetail from './MiscSystemDetail';
jest.mock('../../../../api/models/Settings');
jest.mock('../../../../api/models/ExecutionEnvironments');
SettingsAPI.readCategory.mockResolvedValue({
data: {
ALLOW_OAUTH2_FOR_EXTERNAL_USERS: false,
AUTH_BASIC_ENABLED: true,
AUTOMATION_ANALYTICS_GATHER_INTERVAL: 14400,
AUTOMATION_ANALYTICS_URL: 'https://example.com',
CUSTOM_VENV_PATHS: [],
INSIGHTS_TRACKING_STATE: false,
LOGIN_REDIRECT_OVERRIDE: 'https://redirect.com',
MANAGE_ORGANIZATION_AUTH: true,
@ -36,6 +37,16 @@ SettingsAPI.readCategory.mockResolvedValue({
SESSIONS_PER_USER: -1,
SESSION_COOKIE_AGE: 30000000000,
TOWER_URL_BASE: 'https://towerhost',
DEFAULT_EXECUTION_ENVIRONMENT: 1,
},
});
ExecutionEnvironmentsAPI.readDetail.mockResolvedValue({
data: {
id: 1,
name: 'Foo',
image: 'quay.io/ansible/awx-ee',
pull: 'missing',
},
});
@ -110,6 +121,33 @@ describe('<MiscSystemDetail />', () => {
assertDetail(wrapper, 'Red Hat customer username', 'mock name');
assertDetail(wrapper, 'Refresh Token Expiration', '3 seconds');
assertVariableDetail(wrapper, 'Remote Host Headers', '[]');
assertDetail(wrapper, 'Global default execution environment', 'Foo');
});
test('should render execution environment as not configured', async () => {
ExecutionEnvironmentsAPI.readDetail.mockResolvedValue({
data: {},
});
let newWrapper;
await act(async () => {
newWrapper = mountWithContexts(
<SettingsProvider
value={{
...mockAllOptions.actions,
DEFAULT_EXECUTION_ENVIRONMENT: null,
}}
>
<MiscSystemDetail />
</SettingsProvider>
);
});
await waitForElement(newWrapper, 'ContentLoading', el => el.length === 0);
assertDetail(
newWrapper,
'Global default execution environment',
'Not configured'
);
});
test('should hide edit button from non-superusers', async () => {

View File

@ -9,6 +9,7 @@ import ContentError from '../../../../components/ContentError';
import ContentLoading from '../../../../components/ContentLoading';
import { FormSubmitError } from '../../../../components/FormField';
import { FormColumnLayout } from '../../../../components/FormLayout';
import { ExecutionEnvironmentLookup } from '../../../../components/Lookup';
import { useSettings } from '../../../../contexts/Settings';
import {
BooleanField,
@ -20,7 +21,7 @@ import {
} from '../../shared';
import useModal from '../../../../util/useModal';
import useRequest from '../../../../util/useRequest';
import { SettingsAPI } from '../../../../api';
import { SettingsAPI, ExecutionEnvironmentsAPI } from '../../../../api';
import { pluck, formatJson } from '../../shared/settingUtils';
function MiscSystemEdit({ i18n }) {
@ -44,7 +45,6 @@ function MiscSystemEdit({ i18n }) {
'AUTH_BASIC_ENABLED',
'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
'AUTOMATION_ANALYTICS_URL',
'CUSTOM_VENV_PATHS',
'INSIGHTS_TRACKING_STATE',
'LOGIN_REDIRECT_OVERRIDE',
'MANAGE_ORGANIZATION_AUTH',
@ -55,7 +55,8 @@ function MiscSystemEdit({ i18n }) {
'REMOTE_HOST_HEADERS',
'SESSIONS_PER_USER',
'SESSION_COOKIE_AGE',
'TOWER_URL_BASE'
'TOWER_URL_BASE',
'DEFAULT_EXECUTION_ENVIRONMENT'
);
const systemData = {
@ -128,6 +129,7 @@ function MiscSystemEdit({ i18n }) {
AUTHORIZATION_CODE_EXPIRE_SECONDS,
...formData
} = form;
await submitForm({
...formData,
REMOTE_HOST_HEADERS: formatJson(formData.REMOTE_HOST_HEADERS),
@ -136,6 +138,8 @@ function MiscSystemEdit({ i18n }) {
REFRESH_TOKEN_EXPIRE_SECONDS,
AUTHORIZATION_CODE_EXPIRE_SECONDS,
},
DEFAULT_EXECUTION_ENVIRONMENT:
formData.DEFAULT_EXECUTION_ENVIRONMENT?.id || null,
});
};
@ -178,16 +182,73 @@ function MiscSystemEdit({ i18n }) {
return acc;
}, {});
const executionEnvironmentId =
system?.DEFAULT_EXECUTION_ENVIRONMENT?.value || null;
const {
isLoading: isLoadingExecutionEnvironment,
error: errorExecutionEnvironment,
request: fetchExecutionEnvironment,
result: executionEnvironment,
} = useRequest(
useCallback(async () => {
if (!executionEnvironmentId) {
return '';
}
const { data } = await ExecutionEnvironmentsAPI.readDetail(
executionEnvironmentId
);
return data;
}, [executionEnvironmentId])
);
useEffect(() => {
fetchExecutionEnvironment();
}, [fetchExecutionEnvironment]);
return (
<CardBody>
{isLoading && <ContentLoading />}
{!isLoading && error && <ContentError error={error} />}
{!isLoading && system && (
<Formik initialValues={initialValues(system)} onSubmit={handleSubmit}>
{(isLoading || isLoadingExecutionEnvironment) && <ContentLoading />}
{!(isLoading || isLoadingExecutionEnvironment) && error && (
<ContentError error={error || errorExecutionEnvironment} />
)}
{!(isLoading || isLoadingExecutionEnvironment) && system && (
<Formik
initialValues={{
...initialValues(system),
DEFAULT_EXECUTION_ENVIRONMENT: executionEnvironment
? { id: executionEnvironment.id, name: executionEnvironment.name }
: null,
}}
onSubmit={handleSubmit}
>
{formik => {
return (
<Form autoComplete="off" onSubmit={formik.handleSubmit}>
<FormColumnLayout>
<ExecutionEnvironmentLookup
helperTextInvalid={
formik.errors.DEFAULT_EXECUTION_ENVIRONMENT
}
isValid={
!formik.touched.DEFAULT_EXECUTION_ENVIRONMENT ||
!formik.errors.DEFAULT_EXECUTION_ENVIRONMENT
}
onBlur={() =>
formik.setFieldTouched('DEFAULT_EXECUTION_ENVIRONMENT')
}
value={formik.values.DEFAULT_EXECUTION_ENVIRONMENT}
onChange={value =>
formik.setFieldValue(
'DEFAULT_EXECUTION_ENVIRONMENT',
value
)
}
popoverContent={i18n._(
t`The Execution Environment to be used when one has not been configured for a job template.`
)}
isGlobalDefaultEnvironment
/>
<InputField
name="TOWER_URL_BASE"
config={system.TOWER_URL_BASE}

View File

@ -8,14 +8,55 @@ import {
import mockAllOptions from '../../shared/data.allSettingOptions.json';
import mockAllSettings from '../../shared/data.allSettings.json';
import { SettingsProvider } from '../../../../contexts/Settings';
import { SettingsAPI } from '../../../../api';
import { SettingsAPI, ExecutionEnvironmentsAPI } from '../../../../api';
import MiscSystemEdit from './MiscSystemEdit';
jest.mock('../../../../api/models/Settings');
jest.mock('../../../../api/models/ExecutionEnvironments');
SettingsAPI.updateAll.mockResolvedValue({});
SettingsAPI.readCategory.mockResolvedValue({
data: mockAllSettings,
});
const mockExecutionEnvironment = [
{
id: 1,
name: 'Default EE',
description: '',
image: 'quay.io/ansible/awx-ee',
},
];
ExecutionEnvironmentsAPI.read.mockResolvedValue({
data: {
results: mockExecutionEnvironment,
count: 1,
},
});
const systemData = {
ALLOW_OAUTH2_FOR_EXTERNAL_USERS: false,
AUTH_BASIC_ENABLED: true,
AUTOMATION_ANALYTICS_GATHER_INTERVAL: 14400,
AUTOMATION_ANALYTICS_URL: 'https://example.com',
DEFAULT_EXECUTION_ENVIRONMENT: 1,
INSIGHTS_TRACKING_STATE: false,
LOGIN_REDIRECT_OVERRIDE: '',
MANAGE_ORGANIZATION_AUTH: true,
OAUTH2_PROVIDER: {
ACCESS_TOKEN_EXPIRE_SECONDS: 31536000000,
AUTHORIZATION_CODE_EXPIRE_SECONDS: 600,
REFRESH_TOKEN_EXPIRE_SECONDS: 2628000,
},
ORG_ADMINS_CAN_SEE_ALL_USERS: true,
REDHAT_PASSWORD: '',
REDHAT_USERNAME: '',
REMOTE_HOST_HEADERS: ['REMOTE_ADDR', 'REMOTE_HOST'],
SESSIONS_PER_USER: -1,
SESSION_COOKIE_AGE: 1800,
TOWER_URL_BASE: 'https://localhost:3000',
};
describe('<MiscSystemEdit />', () => {
let wrapper;
let history;
@ -42,10 +83,40 @@ describe('<MiscSystemEdit />', () => {
await waitForElement(wrapper, 'ContentLoading', el => el.length === 0);
});
test('initially renders without crashing', () => {
test('initially renders without crashing', async () => {
expect(wrapper.find('MiscSystemEdit').length).toBe(1);
});
test('save button should call updateAll', async () => {
expect(wrapper.find('MiscSystemEdit').length).toBe(1);
wrapper.find('ExecutionEnvironmentLookup').invoke('onChange')({
id: 1,
name: 'Foo',
});
wrapper.update();
await act(async () => {
wrapper.find('button[aria-label="Save"]').simulate('click');
});
wrapper.update();
expect(SettingsAPI.updateAll).toHaveBeenCalledWith(systemData);
});
test('should remove execution environment', async () => {
expect(wrapper.find('MiscSystemEdit').length).toBe(1);
wrapper.find('ExecutionEnvironmentLookup').invoke('onChange')(null);
wrapper.update();
await act(async () => {
wrapper.find('button[aria-label="Save"]').simulate('click');
});
expect(SettingsAPI.updateAll).toHaveBeenCalledWith({
...systemData,
DEFAULT_EXECUTION_ENVIRONMENT: null,
});
});
test('should successfully send default values to api on form revert all', async () => {
expect(SettingsAPI.updateAll).toHaveBeenCalledTimes(0);
expect(wrapper.find('RevertAllAlert')).toHaveLength(0);

View File

@ -88,6 +88,8 @@ export default withI18n()(
);
break;
case 'choice':
case 'field':
case 'string':
detail = (
<Detail
alwaysVisible
@ -110,18 +112,6 @@ export default withI18n()(
/>
);
break;
case 'string':
detail = (
<Detail
alwaysVisible
dataCy={id}
helpText={helpText}
isNotConfigured={!value}
label={label}
value={!value ? i18n._(t`Not configured`) : value}
/>
);
break;
default:
detail = null;
}

View File

@ -286,7 +286,7 @@ const ObjectField = withI18n()(({ i18n, name, config, isRequired = false }) => {
>
<CodeEditor
{...field}
fullHeight
rows="auto"
id={name}
mode="javascript"
onChange={value => {

View File

@ -2944,7 +2944,15 @@
"child": {
"type": "field"
}
}
},
"DEFAULT_EXECUTION_ENVIRONMENT": {
"type": "field",
"label": "Global default execution environment",
"help_text": "The Execution Environment to be used when one has not been configured for a job template.",
"category": "System",
"category_slug": "system",
"defined_in_file": false
}
},
"PUT": {
"ACTIVITY_STREAM_ENABLED": {
@ -7049,6 +7057,15 @@
"read_only": false
}
},
"DEFAULT_EXECUTION_ENVIRONMENT": {
"type": "field",
"required": false,
"label": "Global default execution environment",
"help_text": "The Execution Environment to be used when one has not been configured for a job template.",
"category": "System",
"category_slug": "system",
"default": null
},
"SOCIAL_AUTH_SAML_TEAM_ATTR": {
"type": "nested object",
"required": false,

View File

@ -303,5 +303,6 @@
"applications":{"fields":["name"],"adj_list":[["organization","organizations"]]},
"users":{"fields":["username"],"adj_list":[]},
"instances":{"fields":["hostname"],"adj_list":[]}
}
},
"DEFAULT_EXECUTION_ENVIRONMENT": 1
}

View File

@ -371,6 +371,7 @@ function JobTemplateDetail({ i18n, template }) {
value={extra_vars}
rows={4}
label={i18n._(t`Variables`)}
dataCy={`jt-details-${template.id}`}
/>
</DetailList>
<CardActionsRow>

View File

@ -72,6 +72,8 @@ Notable releases of the `awx.awx` collection:
The following notes are changes that may require changes to playbooks:
- When a project is created, it will wait for the update/sync to finish by default; this can be turned off with the `wait` parameter, if desired.
- When using the wait parameter with project update, if the project did not undergo a revision update, the result will be
'not changed'
- Creating a "scan" type job template is no longer supported.
- Specifying a custom certificate via the `TOWER_CERTIFICATE` environment variable no longer works.
- Type changes of variable fields:

View File

@ -7,6 +7,7 @@ from ansible.module_utils.urls import Request, SSLValidationError, ConnectionErr
from ansible.module_utils.six import PY2
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.six.moves.http_cookiejar import CookieJar
from distutils.version import LooseVersion as Version
import time
from json import loads, dumps
@ -259,10 +260,22 @@ class TowerAPIModule(TowerModule):
tower_type = response.info().getheader('X-API-Product-Name', None)
tower_version = response.info().getheader('X-API-Product-Version', None)
parsed_collection_version = Version(self._COLLECTION_VERSION).version
parsed_tower_version = Version(tower_version).version
if tower_type == 'AWX':
collection_compare_ver = parsed_collection_version[0]
tower_compare_ver = parsed_tower_version[0]
else:
collection_compare_ver = "{0}.{1}".format(parsed_collection_version[0], parsed_collection_version[1])
tower_compare_ver = '{0}.{1}'.format(parsed_tower_version[0], parsed_tower_version[1])
if self._COLLECTION_TYPE not in self.collection_to_version or self.collection_to_version[self._COLLECTION_TYPE] != tower_type:
self.warn("You are using the {0} version of this collection but connecting to {1}".format(self._COLLECTION_TYPE, tower_type))
elif self._COLLECTION_VERSION != tower_version:
self.warn("You are running collection version {0} but connecting to tower version {1}".format(self._COLLECTION_VERSION, tower_version))
elif collection_compare_ver != tower_compare_ver:
self.warn(
"You are running collection version {0} but connecting to {2} version {1}".format(self._COLLECTION_VERSION, tower_version, tower_type)
)
self.version_checked = True
response_body = ''

View File

@ -34,6 +34,7 @@ options:
wait:
description:
- Wait for the project to update.
- If scm revision has not changed module will return not changed.
default: True
type: bool
interval:
@ -109,6 +110,9 @@ def main():
if project is None:
module.fail_json(msg="Unable to find project")
if wait:
scm_revision_original = project['scm_revision']
# Update the project
result = module.post_endpoint(project['related']['update'])
@ -126,7 +130,12 @@ def main():
start = time.time()
# Invoke wait function
module.wait_on_url(url=result['json']['url'], object_name=module.get_item_name(project), object_type='Project Update', timeout=timeout, interval=interval)
result = module.wait_on_url(
url=result['json']['url'], object_name=module.get_item_name(project), object_type='Project Update', timeout=timeout, interval=interval
)
scm_revision_new = result['json']['scm_revision']
if scm_revision_new == scm_revision_original:
module.json_output['changed'] = False
module.exit_json(**module.json_output)

View File

@ -16,7 +16,7 @@ from requests.models import Response, PreparedRequest
import pytest
from awx.main.tests.functional.conftest import _request
from awx.main.models import Organization, Project, Inventory, JobTemplate, Credential, CredentialType
from awx.main.models import Organization, Project, Inventory, JobTemplate, Credential, CredentialType, ExecutionEnvironment
from django.db import transaction
@ -261,3 +261,8 @@ def silence_warning():
"""Warnings use global variable, same as deprecations."""
with mock.patch('ansible.module_utils.basic.AnsibleModule.warn') as this_mock:
yield this_mock
@pytest.fixture
def execution_environment():
return ExecutionEnvironment.objects.create(name="test-ee", description="test-ee", managed_by_tower=True)

View File

@ -157,7 +157,7 @@ def determine_state(module_id, endpoint, module, parameter, api_option, module_o
return 'OK'
def test_completeness(collection_import, request, admin_user, job_template):
def test_completeness(collection_import, request, admin_user, job_template, execution_environment):
option_comparison = {}
# Load a list of existing module files from disk
base_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))

View File

@ -9,9 +9,18 @@ from awx.main.models import Organization, Team, Project, Inventory
from requests.models import Response
from unittest import mock
awx_name = 'AWX'
tower_name = 'Red Hat Ansible Tower'
ping_version = '1.2.3'
def getheader(self, header_name, default):
mock_headers = {'X-API-Product-Name': 'not-junk', 'X-API-Product-Version': '1.2.3'}
def getTowerheader(self, header_name, default):
mock_headers = {'X-API-Product-Name': tower_name, 'X-API-Product-Version': ping_version}
return mock_headers.get(header_name, default)
def getAWXheader(self, header_name, default):
mock_headers = {'X-API-Product-Name': awx_name, 'X-API-Product-Version': ping_version}
return mock_headers.get(header_name, default)
@ -23,9 +32,17 @@ def status(self):
return 200
def mock_ping_response(self, method, url, **kwargs):
def mock_tower_ping_response(self, method, url, **kwargs):
r = Response()
r.getheader = getheader.__get__(r)
r.getheader = getTowerheader.__get__(r)
r.read = read.__get__(r)
r.status = status.__get__(r)
return r
def mock_awx_ping_response(self, method, url, **kwargs):
r = Response()
r.getheader = getAWXheader.__get__(r)
r.read = read.__get__(r)
r.status = status.__get__(r)
return r
@ -36,13 +53,62 @@ def test_version_warning(collection_import, silence_warning):
cli_data = {'ANSIBLE_MODULE_ARGS': {}}
testargs = ['module_file2.py', json.dumps(cli_data)]
with mock.patch.object(sys, 'argv', testargs):
with mock.patch('ansible.module_utils.urls.Request.open', new=mock_ping_response):
with mock.patch('ansible.module_utils.urls.Request.open', new=mock_awx_ping_response):
my_module = TowerAPIModule(argument_spec=dict())
my_module._COLLECTION_VERSION = "2.0.0"
my_module._COLLECTION_TYPE = "awx"
my_module.get_endpoint('ping')
silence_warning.assert_called_once_with(
'You are running collection version {0} but connecting to {1} version {2}'.format(my_module._COLLECTION_VERSION, awx_name, ping_version)
)
def test_version_warning_strictness_awx(collection_import, silence_warning):
TowerAPIModule = collection_import('plugins.module_utils.tower_api').TowerAPIModule
cli_data = {'ANSIBLE_MODULE_ARGS': {}}
testargs = ['module_file2.py', json.dumps(cli_data)]
# Compare 1.0.0 to 1.2.3 (major matches)
with mock.patch.object(sys, 'argv', testargs):
with mock.patch('ansible.module_utils.urls.Request.open', new=mock_awx_ping_response):
my_module = TowerAPIModule(argument_spec=dict())
my_module._COLLECTION_VERSION = "1.0.0"
my_module._COLLECTION_TYPE = "not-junk"
my_module.collection_to_version['not-junk'] = 'not-junk'
my_module._COLLECTION_TYPE = "awx"
my_module.get_endpoint('ping')
silence_warning.assert_called_once_with('You are running collection version 1.0.0 but connecting to tower version 1.2.3')
silence_warning.assert_not_called()
# Compare 1.2.0 to 1.2.3 (major matches minor does not count)
with mock.patch.object(sys, 'argv', testargs):
with mock.patch('ansible.module_utils.urls.Request.open', new=mock_awx_ping_response):
my_module = TowerAPIModule(argument_spec=dict())
my_module._COLLECTION_VERSION = "1.2.0"
my_module._COLLECTION_TYPE = "awx"
my_module.get_endpoint('ping')
silence_warning.assert_not_called()
def test_version_warning_strictness_tower(collection_import, silence_warning):
TowerAPIModule = collection_import('plugins.module_utils.tower_api').TowerAPIModule
cli_data = {'ANSIBLE_MODULE_ARGS': {}}
testargs = ['module_file2.py', json.dumps(cli_data)]
# Compare 1.2.0 to 1.2.3 (major/minor matches)
with mock.patch.object(sys, 'argv', testargs):
with mock.patch('ansible.module_utils.urls.Request.open', new=mock_tower_ping_response):
my_module = TowerAPIModule(argument_spec=dict())
my_module._COLLECTION_VERSION = "1.2.0"
my_module._COLLECTION_TYPE = "tower"
my_module.get_endpoint('ping')
silence_warning.assert_not_called()
# Compare 1.0.0 to 1.2.3 (major/minor fail to match)
with mock.patch.object(sys, 'argv', testargs):
with mock.patch('ansible.module_utils.urls.Request.open', new=mock_tower_ping_response):
my_module = TowerAPIModule(argument_spec=dict())
my_module._COLLECTION_VERSION = "1.0.0"
my_module._COLLECTION_TYPE = "tower"
my_module.get_endpoint('ping')
silence_warning.assert_called_once_with(
'You are running collection version {0} but connecting to {1} version {2}'.format(my_module._COLLECTION_VERSION, tower_name, ping_version)
)
def test_type_warning(collection_import, silence_warning):
@ -50,13 +116,14 @@ def test_type_warning(collection_import, silence_warning):
cli_data = {'ANSIBLE_MODULE_ARGS': {}}
testargs = ['module_file2.py', json.dumps(cli_data)]
with mock.patch.object(sys, 'argv', testargs):
with mock.patch('ansible.module_utils.urls.Request.open', new=mock_ping_response):
with mock.patch('ansible.module_utils.urls.Request.open', new=mock_awx_ping_response):
my_module = TowerAPIModule(argument_spec={})
my_module._COLLECTION_VERSION = "1.2.3"
my_module._COLLECTION_TYPE = "junk"
my_module.collection_to_version['junk'] = 'junk'
my_module._COLLECTION_VERSION = ping_version
my_module._COLLECTION_TYPE = "tower"
my_module.get_endpoint('ping')
silence_warning.assert_called_once_with('You are using the junk version of this collection but connecting to not-junk')
silence_warning.assert_called_once_with(
'You are using the {0} version of this collection but connecting to {1}'.format(my_module._COLLECTION_TYPE, awx_name)
)
def test_duplicate_config(collection_import, silence_warning):

View File

@ -1,25 +1,9 @@
---
- name: get tower host variable
shell: tower-cli config host | cut -d ' ' -f2
register: host
- name: get tower username variable
shell: tower-cli config username | cut -d ' ' -f2
register: username
- name: get tower password variable
shell: tower-cli config password | cut -d ' ' -f2
register: password
- name: Fetch project_base_dir
uri:
url: "{{ host.stdout }}/api/v2/config/"
user: "{{ username.stdout }}"
password: "{{ password.stdout }}"
validate_certs: false
return_content: true
force_basic_auth: true
register: awx_config
- name: Load the UI settings
set_fact:
project_base_dir: "{{ tower_settings.project_base_dir }}"
vars:
tower_settings: "{{ lookup('awx.awx.tower_api', 'config/') }}"
- tower_inventory:
name: localhost
@ -43,16 +27,29 @@
-----END EC PRIVATE KEY-----
organization: Default
- name: Disable process isolation
command: tower-cli setting modify AWX_PROOT_ENABLED false
- block:
- name: Add a path to a setting
tower_settings:
name: AWX_ISOLATION_SHOW_PATHS
value: "[{{ project_base_dir }}]"
- name: Create a directory for manual project
vars:
project_base_dir: "{{ awx_config.json.project_base_dir }}"
command: tower-cli ad_hoc launch --wait --inventory localhost
--credential dummy --module-name command
--module-args "mkdir -p {{ project_base_dir }}/{{ project_dir_name }}"
tower_ad_hoc_command:
credential: dummy
inventory: localhost
job_type: run
module_args: "mkdir -p {{ project_base_dir }}/{{ project_dir_name }}"
module_name: command
wait: true
always:
- name: enable process isolation
command: tower-cli setting modify AWX_PROOT_ENABLED true
- name: Delete path from setting
tower_settings:
name: AWX_ISOLATION_SHOW_PATHS
value: []
- name: Delete dummy credential
tower_credential:
name: dummy
kind: ssh
state: absent

View File

@ -53,6 +53,7 @@
- assert:
that:
- result is successful
- result is not changed
- name: Delete the test project 1
tower_project:

View File

@ -127,7 +127,7 @@ py.test awx_collection/test/awx/
## Running Integration Tests
The integration tests require a virtualenv with `ansible` >= 2.9 and `tower_cli`.
The integration tests require a virtualenv with `ansible` >= 2.9 and `awxkit`.
The collection must first be installed, which can be done using `make install_collection`.
You also need a configuration file, as described in the running section.

View File

@ -139,7 +139,7 @@ class UnifiedJob(HasStatus, base.Base):
"""
self.get()
job_args = self.job_args
expected_prefix = '/tmp/awx_{}'.format(self.id)
expected_prefix = '/tmp/pdd_wrapper_{}'.format(self.id)
for arg1, arg2 in zip(job_args[:-1], job_args[1:]):
if arg1 == '-v':
if ':' in arg2:

View File

@ -66,7 +66,7 @@ In the root of awx-operator:
```
$ ansible-playbook ansible/instantiate-awx-deployment.yml \
-e development_mode=yes \
-e tower_image=gcr.io/ansible-tower-engineering/awx_kube_devel:devel \
-e tower_image=quay.io/awx/awx_kube_devel:devel \
-e tower_image_pull_policy=Always \
-e tower_ingress_type=ingress
```
@ -81,7 +81,7 @@ In the root of the AWX repo:
```
$ make awx-kube-dev-build
$ docker push gcr.io/ansible-tower-engineering/awx_kube_devel:${COMPOSE_TAG}
$ docker push quay.io/awx/awx_kube_devel:${COMPOSE_TAG}
```
In the root of awx-operator:
@ -89,7 +89,7 @@ In the root of awx-operator:
```
$ ansible-playbook ansible/instantiate-awx-deployment.yml \
-e development_mode=yes \
-e tower_image=gcr.io/ansible-tower-engineering/awx_kube_devel:${COMPOSE_TAG} \
-e tower_image=quay.io/awx/awx_kube_devel:${COMPOSE_TAG} \
-e tower_image_pull_policy=Always \
-e tower_ingress_type=ingress
```

View File

@ -1,5 +1,5 @@
aiohttp
ansible-runner>=1.4.7
ansible-runner==2.0.0a1
ansiconv==1.0.0 # UPGRADE BLOCKER: from 2013, consider replacing instead of upgrading
asciichartpy
autobahn>=20.12.3 # CVE-2020-35678

View File

@ -4,10 +4,9 @@ aiohttp==3.6.2
# via -r /awx_devel/requirements/requirements.in
aioredis==1.3.1
# via channels-redis
#ansible-runner==1.4.7
ansible-runner==2.0.0a1
# via
# -r /awx_devel/requirements/requirements.in
# -r /awx_devel/requirements/requirements_git.txt
ansiconv==1.0.0
# via -r /awx_devel/requirements/requirements.in
asciichartpy==1.5.25

View File

@ -1,3 +1,2 @@
git+https://github.com/ansible/system-certifi.git@devel#egg=certifi
git+git://github.com/ansible/ansible-runner@devel#egg=ansible-runner
git+https://github.com/project-receptor/receptor.git@0.9.6#egg=receptorctl&subdirectory=receptorctl

View File

@ -1,5 +1,5 @@
---
version: '2'
version: '2.1'
services:
{% for i in range(cluster_node_count|int) %}
{% set container_postfix = loop.index %}
@ -87,8 +87,11 @@ services:
- "awx_db:/var/lib/postgresql/data"
volumes:
awx_db:
name: tools_awx_db
{% for i in range(cluster_node_count|int) -%}
{% set container_postfix = loop.index %}
receptor_{{ container_postfix }}:
name: tools_receptor_{{ container_postfix }}
redis_socket_{{ container_postfix }}:
name: tools_redis_socket_{{ container_postfix }}
{% endfor -%}