Compare commits
22 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | dd9160135d |  |
|  | ad96a92fa7 |  |
|  | ca8085fe7e |  |
|  | b076cb00a9 |  |
|  | ee9eac15dc |  |
|  | 3f2f7b75a6 |  |
|  | b71645f3b1 |  |
|  | eb300252b8 |  |
|  | 2e2cd7f2de |  |
|  | 727278aaa3 |  |
|  | 81825ab755 |  |
|  | 7f2a1b6b03 |  |
|  | 1b56d94d30 |  |
|  | e1e32c971c |  |
|  | a4a2fabc01 |  |
|  | b7b7bfa520 |  |
|  | 887604317e |  |
|  | d35d8b6ed7 |  |
|  | ec28eff7f7 |  |
|  | a5d17539c6 |  |
|  | a49d894cf1 |  |
|  | b3466d4449 |  |
.github/workflows/ci.yml (4 changes, vendored)

```diff
@@ -127,10 +127,6 @@ jobs:
       - name: Run sanity tests
         run: make test_collection_sanity
-        env:
-          # needed due to cgroupsv2. This is fixed, but a stable release
-          # with the fix has not been made yet.
-          ANSIBLE_TEST_PREFER_PODMAN: 1

   collection-integration:
     name: awx_collection integration
```
.github/workflows/devel_images.yml (1 change, vendored)

```diff
@@ -3,6 +3,7 @@ name: Build/Push Development Images
 env:
   LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
+  workflow_dispatch:
   push:
     branches:
       - devel
```
.vscode/launch.json (new file, 113 lines, vendored)

```json
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "run_ws_heartbeat",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["run_ws_heartbeat"],
      "django": true,
      "preLaunchTask": "stop awx-ws-heartbeat",
      "postDebugTask": "start awx-ws-heartbeat"
    },
    {
      "name": "run_cache_clear",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["run_cache_clear"],
      "django": true,
      "preLaunchTask": "stop awx-cache-clear",
      "postDebugTask": "start awx-cache-clear"
    },
    {
      "name": "run_callback_receiver",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["run_callback_receiver"],
      "django": true,
      "preLaunchTask": "stop awx-receiver",
      "postDebugTask": "start awx-receiver"
    },
    {
      "name": "run_dispatcher",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["run_dispatcher"],
      "django": true,
      "preLaunchTask": "stop awx-dispatcher",
      "postDebugTask": "start awx-dispatcher"
    },
    {
      "name": "run_rsyslog_configurer",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["run_rsyslog_configurer"],
      "django": true,
      "preLaunchTask": "stop awx-rsyslog-configurer",
      "postDebugTask": "start awx-rsyslog-configurer"
    },
    {
      "name": "run_cache_clear",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["run_cache_clear"],
      "django": true,
      "preLaunchTask": "stop awx-cache-clear",
      "postDebugTask": "start awx-cache-clear"
    },
    {
      "name": "run_wsrelay",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["run_wsrelay"],
      "django": true,
      "preLaunchTask": "stop awx-wsrelay",
      "postDebugTask": "start awx-wsrelay"
    },
    {
      "name": "daphne",
      "type": "debugpy",
      "request": "launch",
      "program": "/var/lib/awx/venv/awx/bin/daphne",
      "args": ["-b", "127.0.0.1", "-p", "8051", "awx.asgi:channel_layer"],
      "django": true,
      "preLaunchTask": "stop awx-daphne",
      "postDebugTask": "start awx-daphne"
    },
    {
      "name": "runserver(uwsgi alternative)",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["runserver", "127.0.0.1:8052"],
      "django": true,
      "preLaunchTask": "stop awx-uwsgi",
      "postDebugTask": "start awx-uwsgi"
    },
    {
      "name": "runserver_plus(uwsgi alternative)",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["runserver_plus", "127.0.0.1:8052"],
      "django": true,
      "preLaunchTask": "stop awx-uwsgi and install Werkzeug",
      "postDebugTask": "start awx-uwsgi"
    },
    {
      "name": "shell_plus",
      "type": "debugpy",
      "request": "launch",
      "program": "manage.py",
      "args": ["shell_plus"],
      "django": true,
    },
  ]
}
```
.vscode/tasks.json (new file, 100 lines, vendored)

```json
{
  "version": "2.0.0",
  "tasks": [
    {
      "label": "start awx-cache-clear",
      "type": "shell",
      "command": "supervisorctl start tower-processes:awx-cache-clear"
    },
    {
      "label": "stop awx-cache-clear",
      "type": "shell",
      "command": "supervisorctl stop tower-processes:awx-cache-clear"
    },
    {
      "label": "start awx-daphne",
      "type": "shell",
      "command": "supervisorctl start tower-processes:awx-daphne"
    },
    {
      "label": "stop awx-daphne",
      "type": "shell",
      "command": "supervisorctl stop tower-processes:awx-daphne"
    },
    {
      "label": "start awx-dispatcher",
      "type": "shell",
      "command": "supervisorctl start tower-processes:awx-dispatcher"
    },
    {
      "label": "stop awx-dispatcher",
      "type": "shell",
      "command": "supervisorctl stop tower-processes:awx-dispatcher"
    },
    {
      "label": "start awx-receiver",
      "type": "shell",
      "command": "supervisorctl start tower-processes:awx-receiver"
    },
    {
      "label": "stop awx-receiver",
      "type": "shell",
      "command": "supervisorctl stop tower-processes:awx-receiver"
    },
    {
      "label": "start awx-rsyslog-configurer",
      "type": "shell",
      "command": "supervisorctl start tower-processes:awx-rsyslog-configurer"
    },
    {
      "label": "stop awx-rsyslog-configurer",
      "type": "shell",
      "command": "supervisorctl stop tower-processes:awx-rsyslog-configurer"
    },
    {
      "label": "start awx-rsyslogd",
      "type": "shell",
      "command": "supervisorctl start tower-processes:awx-rsyslogd"
    },
    {
      "label": "stop awx-rsyslogd",
      "type": "shell",
      "command": "supervisorctl stop tower-processes:awx-rsyslogd"
    },
    {
      "label": "start awx-uwsgi",
      "type": "shell",
      "command": "supervisorctl start tower-processes:awx-uwsgi"
    },
    {
      "label": "stop awx-uwsgi",
      "type": "shell",
      "command": "supervisorctl stop tower-processes:awx-uwsgi"
    },
    {
      "label": "stop awx-uwsgi and install Werkzeug",
      "type": "shell",
      "command": "pip install Werkzeug; supervisorctl stop tower-processes:awx-uwsgi"
    },
    {
      "label": "start awx-ws-heartbeat",
      "type": "shell",
      "command": "supervisorctl start tower-processes:awx-ws-heartbeat"
    },
    {
      "label": "stop awx-ws-heartbeat",
      "type": "shell",
      "command": "supervisorctl stop tower-processes:awx-ws-heartbeat"
    },
    {
      "label": "start awx-wsrelay",
      "type": "shell",
      "command": "supervisorctl start tower-processes:awx-wsrelay"
    },
    {
      "label": "stop awx-wsrelay",
      "type": "shell",
      "command": "supervisorctl stop tower-processes:awx-wsrelay"
    }
  ]
}
```
Makefile (9 changes)

```diff
@@ -216,8 +216,6 @@ collectstatic:
 	fi; \
 	$(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1

-DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*
-
 uwsgi: collectstatic
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
@@ -225,7 +223,7 @@ uwsgi: collectstatic
 	uwsgi /etc/tower/uwsgi.ini

 awx-autoreload:
-	@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"
+	@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx

 daphne:
 	@if [ "$(VENV_BASE)" ]; then \
@@ -305,7 +303,7 @@ swagger: reports
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
 	fi; \
-	(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs --release=$(VERSION_TARGET) | tee reports/$@.report)
+	(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)

 check: black

@@ -631,9 +629,6 @@ clean-elk:
 	docker rm tools_elasticsearch_1
 	docker rm tools_kibana_1

-psql-container:
-	docker run -it --net tools_default --rm postgres:12 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
-
 VERSION:
 	@echo "awx: $(VERSION)"
```
```diff
@@ -154,10 +154,12 @@ def manage():
     from django.conf import settings
     from django.core.management import execute_from_command_line

-    # enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
+    # enforce the postgres version is a minimum of 12 (we need this for partitioning); if not, then terminate program with exit code of 1
+    # In the future if we require a feature of a version of postgres > 12 this should be updated to reflect that.
+    # The return of connection.pg_version is something like 12013
    if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development':
        if (connection.pg_version // 10000) < 12:
-            sys.stderr.write("Postgres version 12 is required\n")
+            sys.stderr.write("At a minimum, postgres version 12 is required\n")
            sys.exit(1)

    if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'):  # pragma: no cover
```
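For reference, Django's `connection.pg_version` follows PostgreSQL's `server_version_num` convention of `major * 10000 + minor` on version 10 and later, so floor division by 10000 recovers the major version. A minimal sketch of the arithmetic (standalone, no database connection needed):

```python
# Minimal sketch of the version gate's arithmetic, assuming the
# server_version_num encoding (major * 10000 + minor on PostgreSQL 10+).
def pg_major_version(pg_version: int) -> int:
    return pg_version // 10000

assert pg_major_version(120013) == 12  # PostgreSQL 12.13 passes the >= 12 check
assert pg_major_version(110019) == 11  # PostgreSQL 11.19 would trigger sys.exit(1)
```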
```diff
@@ -1,6 +1,7 @@
 # Python
 import contextlib
 import logging
+import psycopg
 import threading
 import time
 import os
@@ -13,7 +14,7 @@ from django.conf import settings, UserSettingsHolder
 from django.core.cache import cache as django_cache
 from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
 from django.db import transaction, connection
-from django.db.utils import Error as DBError, ProgrammingError
+from django.db.utils import DatabaseError, ProgrammingError
 from django.utils.functional import cached_property

 # Django REST Framework
@@ -80,18 +81,26 @@ def _ctit_db_wrapper(trans_safe=False):
             logger.debug('Obtaining database settings in spite of broken transaction.')
             transaction.set_rollback(False)
         yield
-    except DBError as exc:
+    except ProgrammingError as e:
+        # Exception raised for programming errors
+        # Examples may be table not found or already exists,
+        # this generally means we can't fetch Tower configuration
+        # because the database hasn't actually finished migrating yet;
+        # this is usually a sign that a service in a container (such as ws_broadcast)
+        # has come up *before* the database has finished migrating, and
+        # especially that the conf.settings table doesn't exist yet
+        # syntax error in the SQL statement, wrong number of parameters specified, etc.
         if trans_safe:
-            level = logger.warning
-            if isinstance(exc, ProgrammingError):
-                if 'relation' in str(exc) and 'does not exist' in str(exc):
-                    # this generally means we can't fetch Tower configuration
-                    # because the database hasn't actually finished migrating yet;
-                    # this is usually a sign that a service in a container (such as ws_broadcast)
-                    # has come up *before* the database has finished migrating, and
-                    # especially that the conf.settings table doesn't exist yet
-                    level = logger.debug
-            level(f'Database settings are not available, using defaults. error: {str(exc)}')
+            logger.debug(f'Database settings are not available, using defaults. error: {str(e)}')
         else:
             logger.exception('Error modifying something related to database settings.')
+    except DatabaseError as e:
+        if trans_safe:
+            cause = e.__cause__
+            if cause and hasattr(cause, 'sqlstate'):
+                sqlstate = cause.sqlstate
+                sqlstate_str = psycopg.errors.lookup(sqlstate)
+                logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
+        else:
+            logger.exception('Error modifying something related to database settings.')
     finally:
```
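The new `DatabaseError` branch leans on psycopg 3's error machinery: the server's five-character SQLSTATE survives on the chained driver exception, and `psycopg.errors.lookup()` maps it back to a named exception class. A small sketch of the lookup, assuming psycopg 3 is installed ('42P01' is the SQLSTATE for an undefined table):

```python
import psycopg

# lookup() resolves a SQLSTATE code to the psycopg exception class,
# which gives logs a readable name instead of a bare code.
exc_class = psycopg.errors.lookup('42P01')
print(exc_class.__name__)   # UndefinedTable
print(exc_class.sqlstate)   # 42P01
```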
```diff
@@ -419,7 +419,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_created
             resolved_action,
             resolved_role,
             -- '-' operator listed here:
-            -- https://www.postgresql.org/docs/12/functions-json.html
+            -- https://www.postgresql.org/docs/15/functions-json.html
             -- note that operator is only supported by jsonb objects
             -- https://www.postgresql.org/docs/current/datatype-json.html
             (CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats,
```
```diff
@@ -14,7 +14,7 @@ __all__ = [
     'STANDARD_INVENTORY_UPDATE_ENV',
 ]

-CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights')
+CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'terraform')
 PRIVILEGE_ESCALATION_METHODS = [
     ('sudo', _('Sudo')),
     ('su', _('Su')),
```
New file (59 lines), a Django migration adding the 'terraform' source choice:

```python
# Generated by Django 4.2.6 on 2024-02-15 20:51

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0189_inbound_hop_nodes'),
    ]

    operations = [
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('constructed', 'Template additional groups and hostvars at runtime'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                    ('terraform', 'Terraform State'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('constructed', 'Template additional groups and hostvars at runtime'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                    ('terraform', 'Terraform State'),
                ],
                default=None,
                max_length=32,
            ),
        ),
    ]
```
```diff
@@ -925,6 +925,7 @@ class InventorySourceOptions(BaseModel):
         ('rhv', _('Red Hat Virtualization')),
         ('controller', _('Red Hat Ansible Automation Platform')),
         ('insights', _('Red Hat Insights')),
+        ('terraform', _('Terraform State')),
     ]

     # From the options of the Django management base command
@@ -1630,6 +1631,20 @@ class satellite6(PluginFileInjector):
         return ret


+class terraform(PluginFileInjector):
+    plugin_name = 'terraform_state'
+    base_injector = 'managed'
+    namespace = 'cloud'
+    collection = 'terraform'
+    use_fqcn = True
+
+    def inventory_as_dict(self, inventory_update, private_data_dir):
+        env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, None)
+        ret = super().inventory_as_dict(inventory_update, private_data_dir)
+        ret['backend_config_files'] = env["TF_BACKEND_CONFIG_FILE"]
+        return ret
+
+
 class controller(PluginFileInjector):
     plugin_name = 'tower'  # TODO: relying on routing for now, update after EEs pick up revised collection
     base_injector = 'template'
```
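With `use_fqcn = True`, namespace `cloud`, and collection `terraform`, the injector resolves to the `cloud.terraform.terraform_state` plugin (the proper-name test further down asserts exactly this), and `inventory_as_dict` points `backend_config_files` at the file that the managed credential injector exposes through `TF_BACKEND_CONFIG_FILE`. Roughly, the generated plugin config would take this shape (a sketch with a placeholder path; any other keys come from the base class):

```python
# Hypothetical shape of the generated inventory plugin configuration;
# the path is a placeholder for the credential-injected backend config file.
plugin_config = {
    'plugin': 'cloud.terraform.terraform_state',
    'backend_config_files': '/runner/env/tf_backend_config',  # placeholder
}
```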
```diff
@@ -6,6 +6,7 @@ import itertools
 import json
 import logging
 import os
+import psycopg
 from io import StringIO
 from contextlib import redirect_stdout
 import shutil
@@ -416,7 +417,7 @@ def handle_removed_image(remove_images=None):

 @task(queue=get_task_queuename)
 def cleanup_images_and_files():
-    _cleanup_images_and_files()
+    _cleanup_images_and_files(image_prune=True)


 @task(queue=get_task_queuename)
@@ -630,10 +631,18 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
                     logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))

             except DatabaseError as e:
-                if 'did not affect any rows' in str(e):
-                    logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
+                cause = e.__cause__
+                if cause and hasattr(cause, 'sqlstate'):
+                    sqlstate = cause.sqlstate
+                    sqlstate_str = psycopg.errors.lookup(sqlstate)
+                    logger.debug('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
+
+                    if sqlstate == psycopg.errors.NoData:
+                        logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
+                    else:
+                        logger.exception("Error marking {} as lost.".format(other_inst.hostname))
                 else:
-                    logger.exception('Error marking {} as lost'.format(other_inst.hostname))
+                    logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname))

     # Run local reaper
     if worker_tasks is not None:
@@ -788,10 +797,19 @@ def update_inventory_computed_fields(inventory_id):
     try:
         i.update_computed_fields()
     except DatabaseError as e:
-        if 'did not affect any rows' in str(e):
-            logger.debug('Exiting duplicate update_inventory_computed_fields task.')
-            return
-        raise
+        # https://github.com/django/django/blob/eff21d8e7a1cb297aedf1c702668b590a1b618f3/django/db/models/base.py#L1105
+        # django raises DatabaseError("Forced update did not affect any rows.")
+
+        # if sqlstate is set then there was a database error and otherwise will re-raise that error
+        cause = e.__cause__
+        if cause and hasattr(cause, 'sqlstate'):
+            sqlstate = cause.sqlstate
+            sqlstate_str = psycopg.errors.lookup(sqlstate)
+            logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
+            raise
+
+        # otherwise
+        logger.debug('Exiting duplicate update_inventory_computed_fields task.')


 def update_smart_memberships_for_inventory(smart_inventory):
```
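Both handlers depend on Python's exception chaining: when Django wraps a driver failure, the original psycopg exception rides along as `__cause__`, carrying its `sqlstate`. A self-contained illustration of the pattern being inspected (stand-in classes, no database required):

```python
# DriverError stands in for a psycopg exception, WrapperError for
# django.db.DatabaseError; `raise ... from ...` is what sets __cause__.
class DriverError(Exception):
    sqlstate = '02000'  # SQLSTATE for "no data"

class WrapperError(Exception):
    pass

try:
    try:
        raise DriverError('no data')
    except DriverError as inner:
        raise WrapperError('wrapped') from inner
except WrapperError as e:
    cause = e.__cause__
    print(getattr(cause, 'sqlstate', None))  # 02000
```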
awx/main/tests/data/inventory/plugins/terraform/env.json (new file, 3 lines)

```json
{
  "TF_BACKEND_CONFIG_FILE": "{{ file_reference }}"
}
```
```diff
@@ -1,13 +1,8 @@
 from awx.main.tests.functional.conftest import *  # noqa
+import os
+import pytest


-def pytest_addoption(parser):
-    parser.addoption("--release", action="store", help="a release version number, e.g., 3.3.0")
-
-
-def pytest_generate_tests(metafunc):
-    # This is called for every test. Only get/set command line arguments
-    # if the argument is specified in the list of test "fixturenames".
-    option_value = metafunc.config.option.release
-    if 'release' in metafunc.fixturenames and option_value is not None:
-        metafunc.parametrize("release", [option_value])
+@pytest.fixture()
+def release():
+    return os.environ.get('VERSION_TARGET', '')
```
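This pairs with the Makefile change above that dropped `--release=$(VERSION_TARGET)` from the swagger target: the release string now arrives through the environment rather than a pytest command-line option. A hypothetical consumer of the new fixture:

```python
import pytest

# Hypothetical test using the new fixture; set VERSION_TARGET=3.3.0
# in the environment to exercise the non-empty path.
def test_release_looks_like_a_version(release):
    if not release:
        pytest.skip('VERSION_TARGET not set')
    assert release[0].isdigit()  # e.g. '3.3.0'
```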
```diff
@@ -3,15 +3,19 @@ import pytest
 from unittest import mock
 import urllib.parse
 from unittest.mock import PropertyMock
+import importlib

 # Django
 from django.urls import resolve
 from django.http import Http404
+from django.apps import apps
 from django.core.handlers.exception import response_for_exception
 from django.contrib.auth.models import User
 from django.core.serializers.json import DjangoJSONEncoder
 from django.db.backends.sqlite3.base import SQLiteCursorWrapper

+from django.db.models.signals import post_migrate
+
 # AWX
 from awx.main.models.projects import Project
 from awx.main.models.ha import Instance
@@ -41,10 +45,19 @@ from awx.main.models.workflow import WorkflowJobTemplate
 from awx.main.models.ad_hoc_commands import AdHocCommand
 from awx.main.models.oauth import OAuth2Application as Application
 from awx.main.models.execution_environments import ExecutionEnvironment
+from awx.main.utils import is_testing

 __SWAGGER_REQUESTS__ = {}


+# HACK: the dab_resource_registry app required ServiceID in migrations which checks do not run
+dab_rr_initial = importlib.import_module('ansible_base.resource_registry.migrations.0001_initial')
+
+
+if is_testing():
+    post_migrate.connect(lambda **kwargs: dab_rr_initial.create_service_id(apps, None))
+
+
 @pytest.fixture(scope="session")
 def swagger_autogen(requests=__SWAGGER_REQUESTS__):
     return requests
```
```diff
@@ -193,6 +193,7 @@ class TestInventorySourceInjectors:
             ('satellite6', 'theforeman.foreman.foreman'),
             ('insights', 'redhatinsights.insights.insights'),
             ('controller', 'awx.awx.tower'),
+            ('terraform', 'cloud.terraform.terraform_state'),
         ],
     )
     def test_plugin_proper_names(self, source, proper_name):
```
```diff
@@ -107,6 +107,7 @@ def read_content(private_data_dir, raw_env, inventory_update):
         for filename in os.listdir(os.path.join(private_data_dir, subdir)):
             filename_list.append(os.path.join(subdir, filename))
     filename_list = sorted(filename_list, key=lambda fn: inverse_env.get(os.path.join(private_data_dir, fn), [fn])[0])
+    inventory_content = ""
     for filename in filename_list:
         if filename in ('args', 'project'):
             continue  # Ansible runner
@@ -130,6 +131,7 @@ def read_content(private_data_dir, raw_env, inventory_update):
             dir_contents[abs_file_path] = f.read()
         # Declare a reference to inventory plugin file if it exists
         if abs_file_path.endswith('.yml') and 'plugin: ' in dir_contents[abs_file_path]:
+            inventory_content = dir_contents[abs_file_path]
             referenced_paths.add(abs_file_path)  # used as inventory file
         elif cache_file_regex.match(abs_file_path):
             file_aliases[abs_file_path] = 'cache_file'
@@ -157,7 +159,11 @@ def read_content(private_data_dir, raw_env, inventory_update):
     content = {}
     for abs_file_path, file_content in dir_contents.items():
         # assert that all files laid down are used
-        if abs_file_path not in referenced_paths and abs_file_path not in ignore_files:
+        if (
+            abs_file_path not in referenced_paths
+            and to_container_path(abs_file_path, private_data_dir) not in inventory_content
+            and abs_file_path not in ignore_files
+        ):
             raise AssertionError(
                 "File {} is not referenced. References and files:\n{}\n{}".format(abs_file_path, json.dumps(env, indent=4), json.dumps(dir_contents, indent=4))
             )
```
```diff
@@ -411,14 +411,14 @@ def test_project_delete(delete, organization, admin_user):


 @pytest.mark.parametrize(
-    'order_by, expected_names, expected_ids',
+    'order_by, expected_names',
     [
-        ('name', ['alice project', 'bob project', 'shared project'], [1, 2, 3]),
-        ('-name', ['shared project', 'bob project', 'alice project'], [3, 2, 1]),
+        ('name', ['alice project', 'bob project', 'shared project']),
+        ('-name', ['shared project', 'bob project', 'alice project']),
     ],
 )
 @pytest.mark.django_db
-def test_project_list_ordering_by_name(get, order_by, expected_names, expected_ids, organization_factory):
+def test_project_list_ordering_by_name(get, order_by, expected_names, organization_factory):
     'ensure sorted order of project list is maintained correctly when the requested order is invalid or not applicable'
     objects = organization_factory(
         'org1',
@@ -426,13 +426,11 @@ def test_project_list_ordering_by_name(get, order_by, expected_names, expected_i
         superusers=['admin'],
     )
     project_names = []
-    project_ids = []
     # TODO: ask for an order by here that doesn't apply
     results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results']
     for x in range(len(results)):
         project_names.append(results[x]['name'])
-        project_ids.append(results[x]['id'])
-    assert project_names == expected_names and project_ids == expected_ids
+    assert project_names == expected_names


 @pytest.mark.parametrize('order_by', ('name', '-name'))
@@ -450,7 +448,8 @@ def test_project_list_ordering_with_duplicate_names(get, order_by, organization_
     for x in range(3):
         results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results']
         project_ids[x] = [proj['id'] for proj in results]
-    assert project_ids[0] == project_ids[1] == project_ids[2] == [1, 2, 3, 4, 5]
+    assert project_ids[0] == project_ids[1] == project_ids[2]
+    assert project_ids[0] == sorted(project_ids[0])


 @pytest.mark.django_db
```
```diff
@@ -1,11 +1,6 @@
 # Python
-from unittest import mock
 import uuid

-# patch python-ldap
-with mock.patch('__main__.__builtins__.dir', return_value=[]):
-    import ldap  # NOQA
-
 # Load development settings for base variables.
 from awx.settings.development import *  # NOQA
```
awx/main/tests/unit/tasks/test_system.py (new file, 64 lines)

```python
import pytest
from unittest.mock import MagicMock, patch
from awx.main.tasks.system import update_inventory_computed_fields
from awx.main.models import Inventory
from django.db import DatabaseError


@pytest.fixture
def mock_logger():
    with patch("awx.main.tasks.system.logger") as logger:
        yield logger


@pytest.fixture
def mock_inventory():
    return MagicMock(spec=Inventory)


def test_update_inventory_computed_fields_existing_inventory(mock_logger, mock_inventory):
    # Mocking the Inventory.objects.filter method to return a non-empty queryset
    with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
        mock_filter.return_value.exists.return_value = True
        mock_filter.return_value.__getitem__.return_value = mock_inventory

        # Mocking the update_computed_fields method
        with patch.object(mock_inventory, "update_computed_fields") as mock_update_computed_fields:
            update_inventory_computed_fields(1)

            # Assertions
            mock_filter.assert_called_once_with(id=1)
            mock_update_computed_fields.assert_called_once()

    # You can add more assertions based on your specific requirements


def test_update_inventory_computed_fields_missing_inventory(mock_logger):
    # Mocking the Inventory.objects.filter method to return an empty queryset
    with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
        mock_filter.return_value.exists.return_value = False

        update_inventory_computed_fields(1)

        # Assertions
        mock_filter.assert_called_once_with(id=1)
        mock_logger.error.assert_called_once_with("Update Inventory Computed Fields failed due to missing inventory: 1")


def test_update_inventory_computed_fields_database_error_nosqlstate(mock_logger, mock_inventory):
    # Mocking the Inventory.objects.filter method to return a non-empty queryset
    with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
        mock_filter.return_value.exists.return_value = True
        mock_filter.return_value.__getitem__.return_value = mock_inventory

        # Mocking the update_computed_fields method
        with patch.object(mock_inventory, "update_computed_fields") as mock_update_computed_fields:
            # Simulating the update_computed_fields method to explicitly raise a DatabaseError
            mock_update_computed_fields.side_effect = DatabaseError("Some error")

            update_inventory_computed_fields(1)

            # Assertions
            mock_filter.assert_called_once_with(id=1)
            mock_update_computed_fields.assert_called_once()
            mock_inventory.update_computed_fields.assert_called_once()
```
```diff
@@ -121,6 +121,10 @@ def test_get_model_for_valid_type(model_type, model_class):
     assert common.get_model_for_type(model_type) == model_class


+def test_is_testing():
+    assert common.is_testing() is True
+
+
 @pytest.mark.parametrize("model_type,model_class", [(name, cls) for cls, name in TEST_MODELS])
 def test_get_capacity_type(model_type, model_class):
     if model_type in ('job', 'ad_hoc_command', 'inventory_update', 'job_template'):
```
```diff
@@ -7,6 +7,7 @@ import json
 import yaml
 import logging
 import time
+import psycopg
 import os
 import subprocess
 import re
@@ -23,7 +24,7 @@ from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
 from django.utils.dateparse import parse_datetime
 from django.utils.translation import gettext_lazy as _
 from django.utils.functional import cached_property
-from django.db import connection, transaction, ProgrammingError, IntegrityError
+from django.db import connection, DatabaseError, transaction, ProgrammingError, IntegrityError
 from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
 from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
 from django.db.models.query import QuerySet
@@ -136,7 +137,7 @@ def underscore_to_camelcase(s):
 @functools.cache
 def is_testing(argv=None):
     '''Return True if running django or py.test unit tests.'''
-    if 'PYTEST_CURRENT_TEST' in os.environ.keys():
+    if os.environ.get('DJANGO_SETTINGS_MODULE') == 'awx.main.tests.settings_for_test':
         return True
     argv = sys.argv if argv is None else argv
     if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
@@ -1155,11 +1156,26 @@ def create_partition(tblname, start=None):
                 f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} '
                 f'FOR VALUES FROM (\'{start_timestamp}\') TO (\'{end_timestamp}\');'
             )

     except (ProgrammingError, IntegrityError) as e:
-        if 'already exists' in str(e):
-            logger.info(f'Caught known error due to partition creation race: {e}')
-        else:
-            raise
+        cause = e.__cause__
+        if cause and hasattr(cause, 'sqlstate'):
+            # 42P07 = DuplicateTable
+            sqlstate = cause.sqlstate
+            sqlstate_str = psycopg.errors.lookup(sqlstate)
+
+            if psycopg.errors.DuplicateTable == sqlstate:
+                logger.info(f'Caught known error due to partition creation race: {e}')
+            else:
+                logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
+                raise
+    except DatabaseError as e:
+        cause = e.__cause__
+        if cause and hasattr(cause, 'sqlstate'):
+            sqlstate = cause.sqlstate
+            sqlstate_str = psycopg.errors.lookup(sqlstate)
+            logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
+        raise


 def cleanup_new_process(func):
```
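The race the first handler tolerates is two workers creating the same partition concurrently, which PostgreSQL reports as SQLSTATE 42P07. A standalone check of that mapping, assuming psycopg 3:

```python
import psycopg

# '42P07' is duplicate_table; lookup() returns the matching exception
# class, and the class itself records its SQLSTATE.
assert psycopg.errors.lookup('42P07').__name__ == 'DuplicateTable'
assert psycopg.errors.DuplicateTable.sqlstate == '42P07'
```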
awx/resource_api.py (new file, 22 lines)

```python
from ansible_base.resource_registry.registry import ParentResource, ResourceConfig, ServiceAPIConfig, SharedResource
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType

from awx.main import models


class APIConfig(ServiceAPIConfig):
    service_type = "awx"


RESOURCE_LIST = (
    ResourceConfig(
        models.Organization,
        shared_resource=SharedResource(serializer=OrganizationType, is_provider=False),
    ),
    ResourceConfig(models.User, shared_resource=SharedResource(serializer=UserType, is_provider=False), name_field="username"),
    ResourceConfig(
        models.Team,
        shared_resource=SharedResource(serializer=TeamType, is_provider=False),
        parent_resources=[ParentResource(model=models.Organization, field_name="organization")],
    ),
)
```
```diff
@@ -353,8 +353,11 @@ INSTALLED_APPS = [
     'awx.sso',
     'solo',
     'ansible_base.rest_filters',
+    'ansible_base.jwt_consumer',
+    'ansible_base.resource_registry',
 ]


 INTERNAL_IPS = ('127.0.0.1',)

 MAX_PAGE_SIZE = 200
@@ -362,6 +365,7 @@ REST_FRAMEWORK = {
     'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination',
     'PAGE_SIZE': 25,
     'DEFAULT_AUTHENTICATION_CLASSES': (
+        'ansible_base.jwt_consumer.awx.auth.AwxJWTAuthentication',
         'awx.api.authentication.LoggedOAuth2Authentication',
         'awx.api.authentication.SessionAuthentication',
         'awx.api.authentication.LoggedBasicAuthentication',
@@ -755,6 +759,14 @@ SATELLITE6_INSTANCE_ID_VAR = 'foreman_id,foreman.id'
 INSIGHTS_INSTANCE_ID_VAR = 'insights_id'
 INSIGHTS_EXCLUDE_EMPTY_GROUPS = False

+# ----------------
+# -- Terraform State --
+# ----------------
+# TERRAFORM_ENABLED_VAR =
+# TERRAFORM_ENABLED_VALUE =
+TERRAFORM_INSTANCE_ID_VAR = 'id'
+TERRAFORM_EXCLUDE_EMPTY_GROUPS = True
+
 # ---------------------
 # ----- Custom -----
 # ---------------------
@@ -1108,6 +1120,7 @@ METRICS_SUBSYSTEM_CONFIG = {
 # django-ansible-base
 ANSIBLE_BASE_TEAM_MODEL = 'main.Team'
 ANSIBLE_BASE_ORGANIZATION_MODEL = 'main.Organization'
+ANSIBLE_BASE_RESOURCE_CONFIG_MODULE = 'awx.resource_api'

 from ansible_base.lib import dynamic_config  # noqa: E402
```
```diff
@@ -72,6 +72,8 @@ AWX_CALLBACK_PROFILE = True
 # Allows user to trigger task managers directly for debugging and profiling purposes.
 # Only works in combination with settings.SETTINGS_MODULE == 'awx.settings.development'
 AWX_DISABLE_TASK_MANAGERS = False

+# Needed for launching runserver in debug mode
+
 # ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================

 # Store a snapshot of default settings at this point before loading any
```
awx/ui/package-lock.json (46 changes, generated)

```diff
@@ -13,7 +13,7 @@
         "@patternfly/react-table": "4.113.0",
         "ace-builds": "^1.10.1",
         "ansi-to-html": "0.7.2",
-        "axios": "0.27.2",
+        "axios": "^1.6.7",
         "d3": "7.6.1",
         "dagre": "^0.8.4",
         "dompurify": "2.4.0",
@@ -5940,12 +5940,13 @@
       }
     },
     "node_modules/axios": {
-      "version": "0.27.2",
-      "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz",
-      "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==",
+      "version": "1.6.7",
+      "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz",
+      "integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==",
       "dependencies": {
-        "follow-redirects": "^1.14.9",
-        "form-data": "^4.0.0"
+        "follow-redirects": "^1.15.4",
+        "form-data": "^4.0.0",
+        "proxy-from-env": "^1.1.0"
       }
     },
     "node_modules/axios/node_modules/form-data": {
@@ -10387,9 +10388,9 @@
       }
     },
     "node_modules/follow-redirects": {
-      "version": "1.15.1",
-      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz",
-      "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==",
+      "version": "1.15.5",
+      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz",
+      "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==",
       "funding": [
         {
           "type": "individual",
@@ -18349,6 +18350,11 @@
         "node": ">= 0.10"
       }
     },
+    "node_modules/proxy-from-env": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
+      "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
+    },
     "node_modules/pseudolocale": {
       "version": "1.2.0",
       "resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz",
@@ -26915,12 +26921,13 @@
       "dev": true
     },
     "axios": {
-      "version": "0.27.2",
-      "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz",
-      "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==",
+      "version": "1.6.7",
+      "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz",
+      "integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==",
       "requires": {
-        "follow-redirects": "^1.14.9",
-        "form-data": "^4.0.0"
+        "follow-redirects": "^1.15.4",
+        "form-data": "^4.0.0",
+        "proxy-from-env": "^1.1.0"
       },
       "dependencies": {
         "form-data": {
@@ -30371,9 +30378,9 @@
       }
     },
     "follow-redirects": {
-      "version": "1.15.1",
-      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz",
-      "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA=="
+      "version": "1.15.5",
+      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz",
+      "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw=="
     },
     "fork-ts-checker-webpack-plugin": {
       "version": "6.5.2",
@@ -36325,6 +36332,11 @@
         }
       }
     },
+    "proxy-from-env": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
+      "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
+    },
     "pseudolocale": {
       "version": "1.2.0",
       "resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz",
```
```diff
@@ -13,7 +13,7 @@
     "@patternfly/react-table": "4.113.0",
     "ace-builds": "^1.10.1",
     "ansi-to-html": "0.7.2",
-    "axios": "0.27.2",
+    "axios": "^1.6.7",
     "d3": "7.6.1",
     "dagre": "^0.8.4",
     "dompurify": "2.4.0",
```
```diff
@@ -21,6 +21,8 @@ const ansibleDocUrls = {
     'https://docs.ansible.com/ansible/latest/collections/community/vmware/vmware_vm_inventory_inventory.html',
   constructed:
     'https://docs.ansible.com/ansible/latest/collections/ansible/builtin/constructed_inventory.html',
+  terraform:
+    'https://github.com/ansible-collections/cloud.terraform/blob/stable-statefile-inventory/plugins/inventory/terraform_state.py',
 };

 const getInventoryHelpTextStrings = () => ({
@@ -119,10 +121,10 @@ const getInventoryHelpTextStrings = () => ({
           <br />
           {value && (
             <div>
-              {t`If you want the Inventory Source to update on
-              launch and on project update, click on Update on launch, and also go to`}
+              {t`If you want the Inventory Source to update on launch , click on Update on Launch,
+              and also go to `}
               <Link to={`/projects/${value.id}/details`}> {value.name} </Link>
-              {t`and click on Update Revision on Launch`}
+              {t`and click on Update Revision on Launch.`}
             </div>
           )}
         </>
@@ -138,8 +140,8 @@ const getInventoryHelpTextStrings = () => ({
           <br />
           {value && (
             <div>
-              {t`If you want the Inventory Source to update on
-              launch and on project update, click on Update on launch, and also go to`}
+              {t`If you want the Inventory Source to update on launch , click on Update on Launch,
+              and also go to `}
               <Link to={`/projects/${value.id}/details`}> {value.name} </Link>
               {t`and click on Update Revision on Launch`}
             </div>
```
```diff
@@ -23,6 +23,7 @@ import {
   SCMSubForm,
   SatelliteSubForm,
   ControllerSubForm,
+  TerraformSubForm,
   VMwareSubForm,
   VirtualizationSubForm,
 } from './InventorySourceSubForms';
@@ -214,6 +215,14 @@ const InventorySourceFormFields = ({
           }
         />
       ),
+      terraform: (
+        <TerraformSubForm
+          autoPopulateCredential={
+            !source?.id || source?.source !== 'terraform'
+          }
+          sourceOptions={sourceOptions}
+        />
+      ),
       vmware: (
         <VMwareSubForm
           autoPopulateCredential={
```
```diff
@@ -38,6 +38,7 @@ describe('<InventorySourceForm />', () => {
         ['openstack', 'OpenStack'],
         ['rhv', 'Red Hat Virtualization'],
         ['controller', 'Red Hat Ansible Automation Platform'],
+        ['terraform', 'Terraform State'],
       ],
     },
   },
```
New file (59 lines), the TerraformSubForm component:

```jsx
import React, { useCallback } from 'react';
import { useField, useFormikContext } from 'formik';
import { t } from '@lingui/macro';
import getDocsBaseUrl from 'util/getDocsBaseUrl';
import { useConfig } from 'contexts/Config';
import CredentialLookup from 'components/Lookup/CredentialLookup';
import { required } from 'util/validators';
import {
  OptionsField,
  VerbosityField,
  EnabledVarField,
  EnabledValueField,
  HostFilterField,
  SourceVarsField,
} from './SharedFields';
import getHelpText from '../Inventory.helptext';

const TerraformSubForm = ({ autoPopulateCredential }) => {
  const helpText = getHelpText();
  const { setFieldValue, setFieldTouched } = useFormikContext();
  const [credentialField, credentialMeta, credentialHelpers] =
    useField('credential');
  const config = useConfig();
  const handleCredentialUpdate = useCallback(
    (value) => {
      setFieldValue('credential', value);
      setFieldTouched('credential', true, false);
    },
    [setFieldValue, setFieldTouched]
  );
  const docsBaseUrl = getDocsBaseUrl(config);

  return (
    <>
      <CredentialLookup
        credentialTypeNamespace="terraform"
        label={t`Credential`}
        helperTextInvalid={credentialMeta.error}
        isValid={!credentialMeta.touched || !credentialMeta.error}
        onBlur={() => credentialHelpers.setTouched()}
        onChange={handleCredentialUpdate}
        value={credentialField.value}
        required
        autoPopulate={autoPopulateCredential}
        validate={required(t`Select a value for this field`)}
      />
      <VerbosityField />
      <HostFilterField />
      <EnabledVarField />
      <EnabledValueField />
      <OptionsField />
      <SourceVarsField
        popoverContent={helpText.sourceVars(docsBaseUrl, 'terraform')}
      />
    </>
  );
};

export default TerraformSubForm;
```
@@ -0,0 +1,70 @@
+import React from 'react';
+import { act } from 'react-dom/test-utils';
+import { Formik } from 'formik';
+import { CredentialsAPI } from 'api';
+import { mountWithContexts } from '../../../../../testUtils/enzymeHelpers';
+import TerraformSubForm from './TerraformSubForm';
+
+jest.mock('../../../../api');
+
+const initialValues = {
+  credential: null,
+  overwrite: false,
+  overwrite_vars: false,
+  source_path: '',
+  source_project: null,
+  source_script: null,
+  source_vars: '---\n',
+  update_cache_timeout: 0,
+  update_on_launch: true,
+  verbosity: 1,
+};
+
+const mockSourceOptions = {
+  actions: {
+    POST: {},
+  },
+};
+
+describe('<TerraformSubForm />', () => {
+  let wrapper;
+
+  beforeEach(async () => {
+    CredentialsAPI.read.mockResolvedValue({
+      data: { count: 0, results: [] },
+    });
+    await act(async () => {
+      wrapper = mountWithContexts(
+        <Formik initialValues={initialValues}>
+          <TerraformSubForm sourceOptions={mockSourceOptions} />
+        </Formik>
+      );
+    });
+  });
+
+  afterAll(() => {
+    jest.clearAllMocks();
+  });
+
+  test('should render subform fields', () => {
+    expect(wrapper.find('FormGroup[label="Credential"]')).toHaveLength(1);
+    expect(wrapper.find('FormGroup[label="Verbosity"]')).toHaveLength(1);
+    expect(wrapper.find('FormGroup[label="Update options"]')).toHaveLength(1);
+    expect(
+      wrapper.find('FormGroup[label="Cache timeout (seconds)"]')
+    ).toHaveLength(1);
+    expect(
+      wrapper.find('VariablesField[label="Source variables"]')
+    ).toHaveLength(1);
+  });
+
+  test('should make expected api calls', () => {
+    expect(CredentialsAPI.read).toHaveBeenCalledTimes(1);
+    expect(CredentialsAPI.read).toHaveBeenCalledWith({
+      credential_type__namespace: 'terraform',
+      order_by: 'name',
+      page: 1,
+      page_size: 5,
+    });
+  });
+});
@@ -6,5 +6,6 @@ export { default as OpenStackSubForm } from './OpenStackSubForm';
 export { default as SCMSubForm } from './SCMSubForm';
 export { default as SatelliteSubForm } from './SatelliteSubForm';
 export { default as ControllerSubForm } from './ControllerSubForm';
+export { default as TerraformSubForm } from './TerraformSubForm';
 export { default as VMwareSubForm } from './VMwareSubForm';
 export { default as VirtualizationSubForm } from './VirtualizationSubForm';
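Taken together, the UI commits above register "Terraform State" as a selectable inventory source backed by a credential in the `terraform` namespace. As a rough, unofficial sketch of what the new subform ultimately submits, the snippet below creates a comparable inventory source through the AWX REST API; the host, token, and object IDs are placeholders, and the field names follow the existing `/api/v2/inventory_sources/` contract rather than anything quoted from these commits.

```python
import requests

AWX_HOST = "https://awx.example.com"            # placeholder host
HEADERS = {"Authorization": "Bearer REDACTED"}  # placeholder OAuth2 token

# Roughly what the new subform submits when saved (IDs are placeholders).
resp = requests.post(
    f"{AWX_HOST}/api/v2/inventory_sources/",
    headers=HEADERS,
    json={
        "name": "Hosts from Terraform state",
        "inventory": 1,          # target inventory ID
        "source": "terraform",   # the source type these commits add
        "credential": 2,         # a credential from the "terraform" namespace
        "update_on_launch": True,
        "verbosity": 1,
    },
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["id"])
```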
@@ -4,6 +4,8 @@
 from django.conf import settings
 from django.urls import re_path, include

+from ansible_base.resource_registry.urls import urlpatterns as resource_api_urls
+
 from awx.main.views import handle_400, handle_403, handle_404, handle_500, handle_csp_violation, handle_login_redirect

@@ -11,6 +13,7 @@ urlpatterns = [
     re_path(r'', include('awx.ui.urls', namespace='ui')),
     re_path(r'^ui_next/.*', include('awx.ui_next.urls', namespace='ui_next')),
     re_path(r'^api/', include('awx.api.urls', namespace='api')),
+    re_path(r'^api/v2/', include(resource_api_urls)),
     re_path(r'^sso/', include('awx.sso.urls', namespace='sso')),
     re_path(r'^sso/', include('social_django.urls', namespace='social')),
     re_path(r'^(?:api/)?400.html$', handle_400),
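For readers unfamiliar with the pattern: passing a list of patterns to Django's `include()` mounts them under the enclosing prefix, which is how the django-ansible-base resource registry endpoints end up under `/api/v2/` next to AWX's own API. A minimal, self-contained sketch of the same mechanism follows; the view and route names are illustrative, not AWX's actual modules.

```python
# urls.py - minimal sketch of mounting an imported urlpatterns list
from django.http import JsonResponse
from django.urls import include, path, re_path


def ping(request):
    return JsonResponse({"ok": True})


# Stand-in for ansible_base.resource_registry.urls.urlpatterns
resource_api_urls = [
    path("resources/ping/", ping),  # illustrative route name
]

urlpatterns = [
    # Everything in resource_api_urls is now served under /api/v2/
    re_path(r"^api/v2/", include(resource_api_urls)),
]
```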
Binary image diffs (documentation screenshots): one image changed 70 KiB to 123 KiB, one new 44 KiB image added, and three images changed 132 KiB to 52 KiB, 55 KiB to 50 KiB, and 49 KiB to 45 KiB.
@@ -10,14 +10,15 @@ Secret Management System

 Users and admins upload machine and cloud credentials so that automation can access machines and external services on their behalf. By default, sensitive credential values (such as SSH passwords, SSH private keys, API tokens for cloud services) are stored in the database after being encrypted. With external credentials backed by credential plugins, you can map credential fields (like a password or an SSH Private key) to values stored in a :term:`secret management system` instead of providing them to AWX directly. AWX provides a secret management system that include integrations for:

-- Centrify Vault Credential Provider Lookup
-- CyberArk Central Credential Provider Lookup (CCP)
-- CyberArk Conjur Secrets Manager Lookup
-- HashiCorp Vault Key-Value Store (KV)
-- HashiCorp Vault SSH Secrets Engine
-- Microsoft Azure Key Management System (KMS)
-- Thycotic DevOps Secrets Vault
-- Thycotic Secret Server
+- :ref:`ug_credentials_aws_lookup`
+- :ref:`ug_credentials_centrify`
+- :ref:`ug_credentials_cyberarkccp`
+- :ref:`ug_credentials_cyberarkconjur`
+- :ref:`ug_credentials_hashivault` (KV)
+- :ref:`ug_credentials_hashivaultssh`
+- :ref:`ug_credentials_azurekeyvault` (KMS)
+- :ref:`ug_credentials_thycoticvault`
+- :ref:`ug_credentials_thycoticserver`

 These external secret values will be fetched prior to running a playbook that needs them. For more information on specifying these credentials in the User Interface, see :ref:`ug_credentials`.
@@ -49,11 +50,92 @@ Use the AWX User Interface to configure and use each of the supported 3-party se

 .. image:: ../common/images/credentials-link-credential-prompt.png
    :alt: Credential section of the external secret management system dialog

-4. Select the credential you want to link to, and click **Next**. This takes you to the **Metadata** tab of the input source. This example shows the Metadata prompt for HashiVault Secret Lookup. Metadata is specific to the input source you select. See the :ref:`ug_metadata_creds_inputs` table for details.
+4. Select the credential you want to link to, and click **Next**. This takes you to the **Metadata** tab of the input source. Metadata is specific to the input source you select:
+
+.. list-table::
+   :widths: 10 10 25
+   :width: 1400px
+   :header-rows: 1
+
+   * - Input Source
+     - Metadata
+     - Description
+   * - *AWS Secrets Manager*
+     - AWS Secrets Manager Region (required)
+     - The region where the secrets manager is located.
+   * -
+     - AWS Secret Name (Required)
+     - Specify the AWS secret name that was generated by the AWS access key.
+   * - *Centrify Vault Credential Provider Lookup*
+     - Account Name (Required)
+     - Name of the system account or domain associated with Centrify Vault.
+   * -
+     - System Name
+     - Specify the name used by the Centrify portal.
+   * - *CyberArk Central Credential Provider Lookup*
+     - Object Query (Required)
+     - Lookup query for the object.
+   * -
+     - Object Query Format
+     - Select ``Exact`` for a specific secret name, or ``Regexp`` for a secret that has a dynamically generated name.
+   * -
+     - Object Property
+     - Specifies the name of the property to return (e.g., ``UserName``, ``Address``, etc.) other than the default of ``Content``.
+   * -
+     - Reason
+     - If required per the object's policy, supply a reason for checking out the secret, as CyberArk logs those.
+   * - *CyberArk Conjur Secrets Lookup*
+     - Secret Identifier
+     - The identifier for the secret.
+   * -
+     - Secret Version
+     - Specify a version of the secret, if necessary, otherwise, leave it empty to use the latest version.
+   * - *HashiVault Secret Lookup*
+     - Name of Secret Backend
+     - Specify the name of the KV backend to use. Leave it blank to use the first path segment of the **Path to Secret** field instead.
+   * -
+     - Path to Secret (required)
+     - Specify the path to where the secret information is stored; for example, ``/path/username``.
+   * -
+     - Key Name (required)
+     - Specify the name of the key to look up the secret information.
+   * -
+     - Secret Version (V2 Only)
+     - Specify a version if necessary, otherwise, leave it empty to use the latest version.
+   * - *HashiCorp Signed SSH*
+     - Unsigned Public Key (required)
+     - Specify the public key of the cert you want to get signed. It needs to be present in the authorized keys file of the target host(s).
+   * -
+     - Path to Secret (required)
+     - Specify the path to where the secret information is stored; for example, ``/path/username``.
+   * -
+     - Role Name (required)
+     - A role is a collection of SSH settings and parameters that are stored in Hashi vault. Typically, you can specify a couple of them with different privileges, timeouts, etc. So you could have a role that is allowed to get a cert signed for root, and other less privileged ones, for example.
+   * -
+     - Valid Principals
+     - Specify a user (or users) other than the default, that you are requesting vault to authorize the cert for the stored key. Hashi vault has a default user for whom it signs (e.g., ec2-user).
+   * - *Azure KMS*
+     - Secret Name (required)
+     - The actual name of the secret as it is referenced in Azure's Key vault app.
+   * -
+     - Secret Version
+     - Specify a version of the secret, if necessary, otherwise, leave it empty to use the latest version.
+   * - *Thycotic DevOps Secrets Vault*
+     - Secret Path (required)
+     - Specify the path to where the secret information is stored (e.g., /path/username).
+   * - *Thycotic Secret Server*
+     - Secret ID (required)
+     - The identifier for the secret.
+   * -
+     - Secret Field
+     - Specify the field to be used from the secret.
+
+This example shows the Metadata prompt for HashiVault Secret Lookup.

 .. image:: ../common/images/credentials-link-metadata-prompt.png
    :alt: Metadata section of the external secret management system dialog


 5. Click **Test** to verify connection to the secret management system. If the lookup is unsuccessful, an error message like this one displays:

 .. image:: ../common/images/credentials-link-metadata-test-error.png
@@ -65,133 +147,37 @@ Use the AWX User Interface to configure and use each of the supported 3-party se

 8. Click **Save** when done.

-.. _ug_metadata_creds_inputs:
-
-Metadata for credential input sources
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-**Centrify Vault Credential Provider Lookup**
-
-.. list-table::
-   :widths: 25 50
-   :header-rows: 1
-
-   * - Metadata
-     - Description
-   * - Account Name (Required)
-     - Name of the system account or domain associated with Centrify Vault.
-   * - System Name
-     - Specify the name used by the Centrify portal.
-
-**CyberArk Central Credential Provider Lookup**
-
-.. list-table::
-   :widths: 25 50
-   :header-rows: 1
-
-   * - Metadata
-     - Description
-   * - Object Query (Required)
-     - Lookup query for the object.
-   * - Object Query Format
-     - Select ``Exact`` for a specific secret name, or ``Regexp`` for a secret that has a dynamically generated name.
-   * - Object Property
-     - Specifies the name of the property to return (e.g., ``UserName``, ``Address``, etc.) other than the default of ``Content``.
-   * - Reason
-     - If required per the object's policy, supply a reason for checking out the secret, as CyberArk logs those.
-
-**CyberArk Conjur Secrets Lookup**
-
-.. list-table::
-   :widths: 25 50
-   :header-rows: 1
-
-   * - Metadata
-     - Description
-   * - Secret Identifier
-     - The identifier for the secret.
-   * - Secret Version
-     - Specify a version of the secret, if necessary, otherwise, leave it empty to use the latest version.
-
-**HashiVault Secret Lookup**
-
-.. list-table::
-   :widths: 25 50
-   :header-rows: 1
-
-   * - Metadata
-     - Description
-   * - Name of Secret Backend
-     - Specify the name of the KV backend to use. Leave it blank to use the first path segment of the **Path to Secret** field instead.
-   * - Path to Secret (required)
-     - Specify the path to where the secret information is stored; for example, ``/path/username``.
-   * - Key Name (required)
-     - Specify the name of the key to look up the secret information.
-   * - Secret Version (V2 Only)
-     - Specify a version if necessary, otherwise, leave it empty to use the latest version.
-
-**HashiCorp Signed SSH**
-
-.. list-table::
-   :widths: 25 50
-   :header-rows: 1
-
-   * - Metadata
-     - Description
-   * - Unsigned Public Key (required)
-     - Specify the public key of the cert you want to get signed. It needs to be present in the authorized keys file of the target host(s).
-   * - Path to Secret (required)
-     - Specify the path to where the secret information is stored; for example, ``/path/username``.
-   * - Role Name (required)
-     - A role is a collection of SSH settings and parameters that are stored in Hashi vault. Typically, you can specify a couple of them with different privileges, timeouts, etc. So you could have a role that is allowed to get a cert signed for root, and other less privileged ones, for example.
-   * - Valid Principals
-     - Specify a user (or users) other than the default, that you are requesting vault to authorize the cert for the stored key. Hashi vault has a default user for whom it signs (e.g., ec2-user).
-
-**Azure KMS**
-
-.. list-table::
-   :widths: 25 50
-   :header-rows: 1
-
-   * - Metadata
-     - Description
-   * - Secret Name (required)
-     - The actual name of the secret as it is referenced in Azure's Key vault app.
-   * - Secret Version
-     - Specify a version of the secret, if necessary, otherwise, leave it empty to use the latest version.
-
-**Thycotic DevOps Secrets Vault**
-
-.. list-table::
-   :widths: 25 50
-   :header-rows: 1
-
-   * - Metadata
-     - Description
-   * - Secret Path (required)
-     - Specify the path to where the secret information is stored (e.g., /path/username).
-
-**Thycotic Secret Server**
-
-.. list-table::
-   :widths: 25 50
-   :header-rows: 1
-
-   * - Metadata
-     - Description
-   * - Secret ID (required)
-     - The identifier for the secret.
-   * - Secret Field
-     - Specify the field to be used from the secret.
+.. _ug_credentials_aws_lookup:
+
+AWS Secrets Manager Lookup
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. index::
+   pair: credential types; AWS
+
+This plugin allows AWS to be used as a credential input source to pull secrets from AWS SecretsManager. `AWS Secrets Manager <https://aws.amazon.com/secrets-manager/>`_ provides similar service to :ref:`ug_credentials_azurekeyvault`, and the AWS collection provides a lookup plugin for it.
+
+When **AWS Secrets Manager lookup** is selected for **Credential Type**, provide the following attributes to properly configure your lookup:
+
+- **AWS Access Key** (required): provide the access key used for communicating with AWS' key management system
+- **AWS Secret Key** (required): provide the secret as obtained by the AWS IAM console
+
+Below shows an example of a configured AWS Secret Manager credential.
+
+.. image:: ../common/images/credentials-create-aws-secret-credential.png
+   :width: 1400px
+   :alt: Example new AWS Secret Manager credential lookup dialog

 .. _ug_credentials_centrify:

 Centrify Vault Credential Provider Lookup
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. index::
    pair: credential types; Centrify

-You need the Centrify Vault web service running to store secrets in order for this integration to work. When **Centrify Vault Credential Provider Lookup** is selected for **Credential Type**, provide the following metadata to properly configure your lookup:
+You need the Centrify Vault web service running to store secrets in order for this integration to work. When **Centrify Vault Credential Provider Lookup** is selected for **Credential Type**, provide the following attributes to properly configure your lookup:

 - **Centrify Tenant URL** (required): provide the URL used for communicating with Centrify's secret management system
 - **Centrify API User** (required): provide the username
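To make the new AWS Secrets Manager Lookup section above concrete: at lookup time the plugin is essentially performing a `GetSecretValue` call scoped by the region and secret-name metadata. A hedged boto3 sketch follows; the keys and names are placeholders, and this illustrates the underlying AWS API rather than the plugin's actual implementation.

```python
import boto3

# Values the lookup receives as credential inputs and metadata (placeholders)
region = "us-east-1"              # "AWS Secrets Manager Region" metadata
secret_name = "prod/db/password"  # "AWS Secret Name" metadata

client = boto3.client(
    "secretsmanager",
    region_name=region,
    aws_access_key_id="AKIA...",       # "AWS Access Key" input (placeholder)
    aws_secret_access_key="REDACTED",  # "AWS Secret Key" input (placeholder)
)

# Fetch the secret value the way the plugin would
secret = client.get_secret_value(SecretId=secret_name)["SecretString"]
```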
@@ -208,12 +194,12 @@ Below shows an example of a configured CyberArk AIM credential.

 .. _ug_credentials_cyberarkccp:

 CyberArk Central Credential Provider (CCP) Lookup
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. index::
    single: CyberArk CCP
    pair: credential; CyberArk CCP

-You need the CyberArk Central Credential Provider web service running to store secrets in order for this integration to work. When **CyberArk Central Credential Provider Lookup** is selected for **Credential Type**, provide the following metadata to properly configure your lookup:
+You need the CyberArk Central Credential Provider web service running to store secrets in order for this integration to work. When **CyberArk Central Credential Provider Lookup** is selected for **Credential Type**, provide the following attributes to properly configure your lookup:

 - **CyberArk CCP URL** (required): provide the URL used for communicating with CyberArk CCP's secret management system; must include URL scheme (http, https, etc.)
 - **Web Service ID**: optionally specify the identifier for the web service; leaving it blank defaults to AIMWebService
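For orientation, CCP exposes checked-out secrets over a REST endpoint named after the Web Service ID (AIMWebService by default). The sketch below shows the general shape of such a request with `requests`; the host, AppID, query, and certificate paths are placeholders, the parameter names are assumptions based on CyberArk's published REST interface, and real deployments typically require client-certificate authentication.

```python
import requests

CCP_URL = "https://ccp.example.com"  # placeholder "CyberArk CCP URL"

resp = requests.get(
    f"{CCP_URL}/AIMWebService/api/Accounts",  # default "Web Service ID"
    params={
        "AppID": "awx-demo",                      # application identity (placeholder)
        "Query": "Safe=Prod;Object=db-password",  # "Object Query" metadata
        "Reason": "AWX job run",                  # logged by CyberArk when policy requires it
    },
    # Placeholder client certificate; deployment-specific
    cert=("/etc/pki/awx-client.crt", "/etc/pki/awx-client.key"),
    timeout=30,
)
resp.raise_for_status()
secret = resp.json().get("Content")  # default property; e.g. "UserName" is also returned
```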
@@ -230,14 +216,14 @@ Below shows an example of a configured CyberArk CCP credential.

 .. _ug_credentials_cyberarkconjur:

 CyberArk Conjur Secrets Manager Lookup
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. index::
    single: CyberArk Conjur
    pair: credential; CyberArk Conjur

 With a Conjur Cloud tenant available to target, configure the CyberArk Conjur Secrets Lookup external management system credential plugin as documented.

-When **CyberArk Conjur Secrets Manager Lookup** is selected for **Credential Type**, provide the following metadata to properly configure your lookup:
+When **CyberArk Conjur Secrets Manager Lookup** is selected for **Credential Type**, provide the following attributes to properly configure your lookup:

 - **Conjur URL** (required): provide the URL used for communicating with CyberArk Conjur's secret management system; must include URL scheme (http, https, etc.)
 - **API Key** (required): provide the key given by your Conjur admin
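Under the hood, a Conjur lookup is a two-step exchange: trade the host or user API key for a short-lived access token, then read the variable by identifier. A hedged `requests` sketch of that flow follows; the account, login, and variable names are placeholders, and the exact token header format may vary by Conjur version.

```python
import base64
from urllib.parse import quote

import requests

CONJUR_URL = "https://conjur.example.com"  # placeholder "Conjur URL"
ACCOUNT = "myorg"                          # placeholder account
LOGIN = "host/awx"                         # placeholder login
API_KEY = "REDACTED"                       # "API Key" input

# Step 1: authenticate, receiving a short-lived access token
token = requests.post(
    f"{CONJUR_URL}/authn/{ACCOUNT}/{quote(LOGIN, safe='')}/authenticate",
    data=API_KEY,
    timeout=30,
).text

# Step 2: fetch the secret by its identifier ("Secret Identifier" metadata)
auth = base64.b64encode(token.encode()).decode()
secret = requests.get(
    f"{CONJUR_URL}/secrets/{ACCOUNT}/variable/{quote('prod/db/password', safe='')}",
    headers={"Authorization": f'Token token="{auth}"'},
    timeout=30,
).text
```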
@@ -253,12 +239,12 @@ Below shows an example of a configured CyberArk Conjur credential.

 .. _ug_credentials_hashivault:

 HashiCorp Vault Secret Lookup
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. index::
    single: HashiCorp Secret Lookup
    pair: credential; HashiCorp KV

-When **HashiCorp Vault Secret Lookup** is selected for **Credential Type**, provide the following metadata to properly configure your lookup:
+When **HashiCorp Vault Secret Lookup** is selected for **Credential Type**, provide the following attributes to properly configure your lookup:

 - **Server URL** (required): provide the URL used for communicating with HashiCorp Vault's secret management system
 - **Token**: specify the access token used to authenticate HashiCorp's server
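Functionally, this lookup resolves to a KV read against the configured mount, path, and key. A minimal sketch with the `hvac` client, assuming a KV v2 engine; URLs, paths, and the key name are placeholders.

```python
import hvac

client = hvac.Client(
    url="https://vault.example.com:8200",  # "Server URL" input
    token="REDACTED",                      # "Token" input
)

# KV v2 read: mount_point maps to "Name of Secret Backend",
# path to "Path to Secret", and the dict key to "Key Name".
resp = client.secrets.kv.v2.read_secret_version(
    path="path/username",
    mount_point="secret",
)
value = resp["data"]["data"]["username"]
```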
@@ -291,7 +277,7 @@ Below shows an example of a configured HashiCorp Vault Secret Lookup credential

 .. image:: ../common/images/credentials-create-hashicorp-kv-credential.png
    :alt: Example new HashiCorp Vault Secret lookup dialog

-To test the lookup, create another credential that uses the HashiCorp Vault lookup. The example below shows the metadata for a machine credential configured to look up HashiCorp Vault secret credentials:
+To test the lookup, create another credential that uses the HashiCorp Vault lookup. The example below shows the attributes for a machine credential configured to look up HashiCorp Vault secret credentials:

 .. image:: ../common/images/credentials-machine-test-hashicorp-metadata.png
    :alt: Example machine credential lookup metadata for HashiCorp Vault.
@@ -300,12 +286,12 @@ To test the lookup, create another credential that uses the HashiCorp Vault look

 .. _ug_credentials_hashivaultssh:

 HashiCorp Vault Signed SSH
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. index::
    single: HashiCorp SSH Secrets Engine
    pair: credential; HashiCorp SSH Secrets Engine

-When **HashiCorp Vault Signed SSH** is selected for **Credential Type**, provide the following metadata to properly configure your lookup:
+When **HashiCorp Vault Signed SSH** is selected for **Credential Type**, provide the following attributes to properly configure your lookup:

 - **Server URL** (required): provide the URL used for communicating with HashiCorp Signed SSH's secret management system
 - **Token**: specify the access token used to authenticate HashiCorp's server
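For the signed-SSH flow, the plugin effectively asks Vault's SSH CA to sign the supplied public key against a role. A hedged `hvac` sketch using the generic write call (the mount point, role, and file paths are placeholders, and the response shape follows Vault's documented sign endpoint):

```python
import hvac

client = hvac.Client(url="https://vault.example.com:8200", token="REDACTED")

# "Unsigned Public Key" metadata: must also be present in the
# authorized keys file of the target host(s)
with open("/home/user/.ssh/id_rsa.pub") as f:
    public_key = f.read()

# Generic write against <mount>/sign/<role>; mount and role are placeholders
# (they correspond to the "Path to Secret" and "Role Name" metadata).
result = client.write(
    "ssh-client-signer/sign/my-role",
    public_key=public_key,
    valid_principals="ec2-user",  # "Valid Principals" metadata
)
signed_cert = result["data"]["signed_key"]
```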
@@ -335,13 +321,13 @@ Below shows an example of a configured HashiCorp SSH Secrets Engine credential.

 .. _ug_credentials_azurekeyvault:

 Microsoft Azure Key Vault
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. index::
    single: MS Azure KMS
    pair: credential; MS Azure KMS
    triple: credential; Azure; KMS

-When **Microsoft Azure Key Vault** is selected for **Credential Type**, provide the following metadata to properly configure your lookup:
+When **Microsoft Azure Key Vault** is selected for **Credential Type**, provide the following attributes to properly configure your lookup:

 - **Vault URL (DNS Name)** (required): provide the URL used for communicating with MS Azure's key management system
 - **Client ID** (required): provide the identifier as obtained by the Azure Active Directory
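The same lookup expressed with Azure's Python SDK, for orientation: authenticate as a service principal, then fetch the named secret (optionally pinned to a version). The vault name and IDs below are placeholders, and the tenant-ID and client-secret inputs are assumed from the usual service-principal flow rather than quoted from the truncated list above.

```python
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient

credential = ClientSecretCredential(
    tenant_id="00000000-0000-0000-0000-000000000000",  # assumed input (placeholder)
    client_id="11111111-1111-1111-1111-111111111111",  # "Client ID" input
    client_secret="REDACTED",                          # assumed input (placeholder)
)

client = SecretClient(
    vault_url="https://myvault.vault.azure.net",  # "Vault URL (DNS Name)" input
    credential=credential,
)

# "Secret Name" metadata; omit the version to get the latest ("Secret Version")
secret = client.get_secret("db-password")
print(secret.value)
```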
@@ -357,12 +343,12 @@ Below shows an example of a configured Microsoft Azure KMS credential.

 .. _ug_credentials_thycoticvault:

 Thycotic DevOps Secrets Vault
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. index::
    single: Thycotic DevOps Secrets Vault
    pair: credential; Thycotic DevOps Secrets Vault

-When **Thycotic DevOps Secrets Vault** is selected for **Credential Type**, provide the following metadata to properly configure your lookup:
+When **Thycotic DevOps Secrets Vault** is selected for **Credential Type**, provide the following attributes to properly configure your lookup:

 - **Tenant** (required): provide the URL used for communicating with Thycotic's secret management system
 - **Top-level Domain (TLD)** : provide the top-level domain designation (e.g., com, edu, org) associated with the secret vault you want to integrate
@@ -379,12 +365,12 @@ Below shows an example of a configured Thycotic DevOps Secrets Vault credential.

 .. _ug_credentials_thycoticserver:

 Thycotic Secret Server
-^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~
 .. index::
    single: Thycotic Secret Server
    pair: credential; Thycotic Secret Server

-When **Thycotic Secrets Server** is selected for **Credential Type**, provide the following metadata to properly configure your lookup:
+When **Thycotic Secrets Server** is selected for **Credential Type**, provide the following attributes to properly configure your lookup:

 - **Secret Server URL** (required): provide the URL used for communicating with the Thycotic Secrets Server management system
 - **Username** (required): specify the authenticated user for this service
@@ -128,7 +128,7 @@ The following credential types are supported with AWX:

 .. contents::
    :local:

-The credential types associated with Centrify, CyberArk, HashiCorp Vault, Microsoft Azure Key Management System (KMS), and Thycotic are part of the credential plugins capability that allows an external system to lookup your secrets information. See the :ref:`ug_credential_plugins` section for further detail.
+The credential types associated with AWS Secrets Manager, Centrify, CyberArk, HashiCorp Vault, Microsoft Azure Key Management System (KMS), and Thycotic are part of the credential plugins capability that allows an external system to lookup your secrets information. See the :ref:`ug_credential_plugins` section for further detail.


 .. _ug_credentials_aws:
@@ -166,6 +166,10 @@ AWX provides support for EC2 STS tokens (sometimes referred to as IAM STS creden

 To use implicit IAM role credentials, do not attach AWS cloud credentials in AWX when relying on IAM roles to access the AWS API. While it may seem to make sense to attach your AWS cloud credential to your job template, doing so will force the use of your AWS credentials and will not "fall through" to use your IAM role credentials (this is due to the use of the boto library.)

+AWS Secrets Manager
+^^^^^^^^^^^^^^^^^^^^^
+This is considered part of the secret management capability. See :ref:`ug_credentials_aws_lookup` for more detail.
+
+
 Ansible Galaxy/Automation Hub API Token
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
20
licenses/ui/proxy-from-env.txt
Normal file
@@ -0,0 +1,20 @@
+The MIT License
+
+Copyright (C) 2016-2018 Rob Wu <rob@robwu.nl>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -20,9 +20,13 @@ jupyter
 # matplotlib - Caused issues when bumping to setuptools 58
 backports.tempfile # support in unit tests for py32+ tempfile.TemporaryDirectory
 git+https://github.com/artefactual-labs/mockldap.git@master#egg=mockldap
-sdb
-remote-pdb
 gprof2dot
 atomicwrites==1.4.0
 flake8
 yamllint
+pip>=21.3 # PEP 660 – Editable installs for pyproject.toml based builds (wheel based)
+
+# python debuggers
+debugpy
+remote-pdb
+sdb
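Since the debuggers now live together in this file, one usage note: `remote-pdb` is useful precisely because AWX services run under supervisor inside containers, where an ordinary `pdb` prompt has no attached TTY. A quick illustration of dropping a breakpoint you can attach to over TCP (the host and port are arbitrary placeholders):

```python
# Somewhere in code running inside the dev container:
from remote_pdb import RemotePdb

# Execution pauses here and listens for a connection on the given port
RemotePdb("0.0.0.0", 4444).set_trace()
```

You would then attach from inside the container with something like `nc localhost 4444`.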
@@ -5,4 +5,4 @@ git+https://github.com/ansible/ansible-runner.git@devel#egg=ansible-runner
 # specifically need https://github.com/robgolding/django-radius/pull/27
 git+https://github.com/ansible/django-radius.git@develop#egg=django-radius
 git+https://github.com/ansible/python3-saml.git@devel#egg=python3-saml
-django-ansible-base @ git+https://github.com/ansible/django-ansible-base@devel#egg=django-ansible-base[rest_filters,jwt_consumer]
+django-ansible-base @ git+https://github.com/ansible/django-ansible-base@devel#egg=django-ansible-base[rest_filters,jwt_consumer,resource_registry]
@@ -249,7 +249,7 @@ RUN for dir in \
     /var/lib/awx/.local/share/containers/storage \
     /var/run/awx-rsyslog \
     /var/log/nginx \
-    /var/lib/postgresql \
+    /var/lib/pgsql \
     /var/run/supervisor \
     /var/run/awx-receptor \
     /var/lib/nginx ; \
@@ -300,7 +300,6 @@ RUN ln -sf /dev/stdout /var/log/nginx/access.log && \
 {% endif %}

 ENV HOME="/var/lib/awx"
-ENV PATH="/usr/pgsql-12/bin:${PATH}"

 {% if build_dev|bool %}
 ENV PATH="/var/lib/awx/venv/awx/bin/:${PATH}"
@@ -36,7 +36,7 @@ stderr_logfile_maxbytes=0

 {% if kube_dev | bool %}
 [program:awx-autoreload]
-command = /awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx 'supervisorctl -c /etc/supervisord_rsyslog.conf restart tower-processes:*'
+command = /awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx
 autostart = true
 autorestart = true
 stopasgroup=true
@@ -58,7 +58,7 @@ stderr_logfile_maxbytes=0

 {% if kube_dev | bool %}
 [program:awx-autoreload]
-command = /awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx 'supervisorctl -c /etc/supervisord_task.conf restart tower-processes:*'
+command = /awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx
 autostart = true
 autorestart = true
 stopasgroup=true
@@ -91,7 +91,7 @@ stderr_logfile_maxbytes=0

 {% if kube_dev | bool %}
 [program:awx-autoreload]
-command = /awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx 'supervisorctl -c /etc/supervisord_web.conf restart tower-processes:*'
+command = /awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx
 autostart = true
 autorestart = true
 stopasgroup=true
@@ -207,17 +207,16 @@ services:
   #   context: ../../docker-compose
   #   dockerfile: Dockerfile-logstash
   postgres:
-    image: postgres:12
+    image: quay.io/sclorg/postgresql-15-c9s
     container_name: tools_postgres_1
     # additional logging settings for postgres can be found https://www.postgresql.org/docs/current/runtime-config-logging.html
-    command: postgres -c log_destination=stderr -c log_min_messages=info -c log_min_duration_statement={{ pg_log_min_duration_statement|default(1000) }} -c max_connections={{ pg_max_connections|default(1024) }}
+    command: run-postgresql -c log_destination=stderr -c log_min_messages=info -c log_min_duration_statement={{ pg_log_min_duration_statement|default(1000) }} -c max_connections={{ pg_max_connections|default(1024) }}
     environment:
-      POSTGRES_HOST_AUTH_METHOD: trust
-      POSTGRES_USER: {{ pg_username }}
-      POSTGRES_DB: {{ pg_database }}
-      POSTGRES_PASSWORD: {{ pg_password }}
+      POSTGRESQL_USER: {{ pg_username }}
+      POSTGRESQL_DATABASE: {{ pg_database }}
+      POSTGRESQL_PASSWORD: {{ pg_password }}
     volumes:
-      - "awx_db:/var/lib/postgresql/data"
+      - "awx_db_15:/var/lib/pgsql/data"
     networks:
       - awx
     ports:
@@ -305,8 +304,9 @@ services:
 {% endif %}

 volumes:
-  awx_db:
-    name: tools_awx_db
+  {# For the postgres 15 db upgrade we changed the mount name because 15 can't load a 12 DB #}
+  awx_db_15:
+    name: tools_awx_db_15
 {% for i in range(control_plane_node_count|int) -%}
 {% set container_postfix = loop.index %}
   redis_socket_{{ container_postfix }}:
@@ -29,6 +29,10 @@ http {
         server localhost:8050;
     }

+    upstream runserver {
+        server localhost:8052;
+    }
+
     upstream daphne {
         server localhost:8051;
     }
@@ -38,4 +38,13 @@ location {{ ingress_path }} {
     uwsgi_read_timeout 120s;
     uwsgi_pass uwsgi;
     include /etc/nginx/uwsgi_params;
+    error_page 502 = @fallback;
+}
+
+# Enable scenario where we shutdown uwsgi and launching runserver for debugging purposes
+location @fallback {
+    # Add trailing / if missing
+    rewrite ^(.*)$http_host(.*[^/])$ $1$http_host$2/ permanent;
+    proxy_pass http://runserver;
+    proxy_set_header Host $http_host;
 }
@@ -1,8 +1,8 @@
 #!/bin/env bash

-if [ $# -lt 2 ]; then
+if [ $# -lt 1 ]; then
     echo "Usage:"
-    echo "  autoreload directory command"
+    echo "  autoreload directory"
     exit 1
 fi

@@ -13,8 +13,14 @@ inotifywait -mrq -e create,delete,attrib,close_write,move --exclude '(/awx_devel
     since_last=$((this_reload-last_reload))
     if [[ "$file" =~ ^[^.].*\.py$ ]] && [[ "$since_last" -gt 1 ]]; then
         echo "File changed: $file"
-        echo "Running command: $2"
-        eval $2
+        if [ -n "$SUPERVISOR_CONFIG_PATH" ]; then
+            supervisorctl_command="supervisorctl -c $SUPERVISOR_CONFIG_PATH"
+        else
+            supervisorctl_command="supervisorctl"
+        fi
+        tower_processes=`$supervisorctl_command status tower-processes:* | grep -v STOPPED | awk '{print $1}' | tr '\n' ' '`
+        echo echo "Running command: $supervisorctl_command restart $tower_processes"
+        eval $supervisorctl_command restart $tower_processes
         last_reload=`date +%s`
     fi
 done
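The reworked script, in short: debounce filesystem events to at most one reload per second, ask supervisorctl which tower-processes are currently running, and restart only those, with the supervisor config path now coming from the `SUPERVISOR_CONFIG_PATH` environment variable instead of a command argument. For readers who prefer the logic in Python, here is a rough equivalent of the debounce-and-restart loop using the third-party `watchdog` library; this is illustrative only, the real tooling is the bash script above.

```python
import subprocess
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class ReloadHandler(FileSystemEventHandler):
    def __init__(self):
        self.last_reload = 0.0

    def on_any_event(self, event):
        # Only react to .py files, at most once per second (the debounce)
        if event.src_path.endswith(".py") and time.time() - self.last_reload > 1:
            # supervisorctl expands the group wildcard itself
            subprocess.run(
                ["supervisorctl", "restart", "tower-processes:*"],
                check=False,
            )
            self.last_reload = time.time()


observer = Observer()
observer.schedule(ReloadHandler(), "/awx_devel/awx", recursive=True)
observer.start()
try:
    while True:
        time.sleep(1)
finally:
    observer.stop()
    observer.join()
```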