Mirror of https://github.com/ansible/awx.git, synced 2026-02-07 04:28:23 -03:30
Compare commits
47 Commits
Commit SHAs:
445d892050, 35a576f2dd, 7838641215, ab5cc2e69c, 5a63533967, b549ae1efa, bd0089fd35, 40d18e95c2,
191a0f7f2a, 852bb0717c, 98bfe3f43f, 53a7b7818e, e7c7454a3a, 63e82aa4a3, fc1b74aa68, ea455df9f4,
8e2a5ed8ae, 1d7e54bd39, 83df056f71, 48edb15a03, 8ddc19a927, b021ad7b28, b8ba2feecd, 8cfb704f86,
efcac860de, 6c5590e0e6, 0edcd688a2, b8c48f7d50, 07e30a3d5f, cb5a8aa194, 8b49f910c7, a4f808df34,
82abd18927, 5e9d514e5e, 4a34ee1f1e, 3624fe2cac, 0f96d9aca2, 989b80e771, cc64be937d, 94183d602c,
ac4ef141bf, 86f6b54eec, bd8108b27c, aed96fb365, fe2da52eec, 974465e46a, c736986023
.github/pr_labeler.yml (vendored, 4 changes)
@@ -15,5 +15,5 @@
"dependencies":
  - any: ["awx/ui/package.json"]
  - any: ["awx/requirements/*.txt"]
  - any: ["awx/requirements/requirements.in"]
  - any: ["requirements/*.txt"]
  - any: ["requirements/requirements.in"]
.github/workflows/devel_images.yml (vendored, 7 changes)
@@ -48,8 +48,11 @@ jobs:
          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build

      - name: Push image
      - name: Push development images
        run: |
          docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
          docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
          docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}

      - name: Push AWX k8s image, only for upstream and feature branches
        run: docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
        if: endsWith(github.repository, '/awx')
.github/workflows/label_issue.yml (vendored, 2 changes)
@@ -17,7 +17,7 @@ jobs:
    steps:
      - name: Label Issue
        uses: github/issue-labeler@v2.4.1
        uses: github/issue-labeler@v3.1
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          not-before: 2021-12-07T07:00:00Z
Makefile (12 changes)
@@ -1,6 +1,7 @@
-include awx/ui_next/Makefile

PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
SHELL := bash
DOCKER_COMPOSE ?= docker-compose
OFFICIAL ?= no
NODE ?= node
@@ -27,6 +28,8 @@ COLLECTION_TEMPLATE_VERSION ?= false
# NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH)
MAIN_NODE_TYPE ?= hybrid
# If set to true docker-compose will also start a pgbouncer instance and use it
PGBOUNCER ?= false
# If set to true docker-compose will also start a keycloak instance
KEYCLOAK ?= false
# If set to true docker-compose will also start an ldap instance
@@ -37,6 +40,8 @@ SPLUNK ?= false
PROMETHEUS ?= false
# If set to true docker-compose will also start a grafana instance
GRAFANA ?= false
# If set to true docker-compose will also start a hashicorp vault instance
VAULT ?= false
# If set to true docker-compose will also start a tacacs+ instance
TACACS ?= false

@@ -520,15 +525,20 @@ docker-compose-sources: .git/hooks/pre-commit
        -e control_plane_node_count=$(CONTROL_PLANE_NODE_COUNT) \
        -e execution_node_count=$(EXECUTION_NODE_COUNT) \
        -e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \
        -e enable_pgbouncer=$(PGBOUNCER) \
        -e enable_keycloak=$(KEYCLOAK) \
        -e enable_ldap=$(LDAP) \
        -e enable_splunk=$(SPLUNK) \
        -e enable_prometheus=$(PROMETHEUS) \
        -e enable_grafana=$(GRAFANA) \
        -e enable_vault=$(VAULT) \
        -e enable_tacacs=$(TACACS) \
        $(EXTRA_SOURCES_ANSIBLE_OPTS)

docker-compose: awx/projects docker-compose-sources
        ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
        ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
        -e enable_vault=$(VAULT);
        $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans

docker-compose-credential-plugins: awx/projects docker-compose-sources
@@ -580,7 +590,7 @@ docker-clean:
        -$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)

docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
        docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
        docker volume rm -f tools_awx_db tools_vault_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)

docker-refresh: docker-clean docker-compose
@@ -232,7 +232,8 @@ class APIView(views.APIView):
        response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
        time_started = getattr(self, 'time_started', None)
        response['X-API-Product-Version'] = get_awx_version()
        if request.user.is_authenticated:
            response['X-API-Product-Version'] = get_awx_version()
            response['X-API-Product-Name'] = server_product_name()

        response['X-API-Node'] = settings.CLUSTER_HOST_ID
@@ -1629,8 +1629,8 @@ class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
        fields = ('*', 'host_status_counts', 'playbook_counts')

    def get_playbook_counts(self, obj):
        task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
        play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
        task_count = obj.get_event_queryset().filter(event='playbook_on_task_start').count()
        play_count = obj.get_event_queryset().filter(event='playbook_on_play_start').count()

        data = {'play_count': play_count, 'task_count': task_count}
@@ -12,7 +12,7 @@ receptor_work_commands:
custom_worksign_public_keyfile: receptor/work_public_key.pem
custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key
custom_ca_certfile: receptor/tls/ca/receptor-ca.crt
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
receptor_protocol: 'tcp'
receptor_listener: true
receptor_port: {{ instance.listener_port }}
@@ -30,7 +30,7 @@ from awx.api.views import (
    OAuth2TokenList,
    ApplicationOAuth2TokenList,
    OAuth2ApplicationDetail,
    # HostMetricSummaryMonthlyList,  # It will be enabled in future version of the AWX
    HostMetricSummaryMonthlyList,
)

from awx.api.views.bulk import (
@@ -123,8 +123,7 @@ v2_urls = [
    re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
    re_path(r'^hosts/', include(host_urls)),
    re_path(r'^host_metrics/', include(host_metric_urls)),
    # It will be enabled in future version of the AWX
    # re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
    re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
    re_path(r'^groups/', include(group_urls)),
    re_path(r'^inventory_sources/', include(inventory_source_urls)),
    re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -1564,16 +1564,15 @@ class HostMetricDetail(RetrieveDestroyAPIView):
        return Response(status=status.HTTP_204_NO_CONTENT)


# It will be enabled in future version of the AWX
# class HostMetricSummaryMonthlyList(ListAPIView):
#     name = _("Host Metrics Summary Monthly")
#     model = models.HostMetricSummaryMonthly
#     serializer_class = serializers.HostMetricSummaryMonthlySerializer
#     permission_classes = (IsSystemAdminOrAuditor,)
#     search_fields = ('date',)
#
#     def get_queryset(self):
#         return self.model.objects.all()
class HostMetricSummaryMonthlyList(ListAPIView):
    name = _("Host Metrics Summary Monthly")
    model = models.HostMetricSummaryMonthly
    serializer_class = serializers.HostMetricSummaryMonthlySerializer
    permission_classes = (IsSystemAdminOrAuditor,)
    search_fields = ('date',)

    def get_queryset(self):
        return self.model.objects.all()


class HostList(HostRelatedSearchMixin, ListCreateAPIView):
@@ -1,5 +1,7 @@
from collections import OrderedDict

from django.utils.translation import gettext_lazy as _

from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.reverse import reverse
@@ -18,6 +20,9 @@ from awx.api import (


class BulkView(APIView):
    name = _('Bulk')
    swagger_topic = 'Bulk'

    permission_classes = [IsAuthenticated]
    renderer_classes = [
        renderers.BrowsableAPIRenderer,
@@ -107,8 +107,7 @@ class ApiVersionRootView(APIView):
        data['groups'] = reverse('api:group_list', request=request)
        data['hosts'] = reverse('api:host_list', request=request)
        data['host_metrics'] = reverse('api:host_metric_list', request=request)
        # It will be enabled in future version of the AWX
        # data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
        data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
        data['job_templates'] = reverse('api:job_template_list', request=request)
        data['jobs'] = reverse('api:job_list', request=request)
        data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
@@ -14,7 +14,7 @@ class ConfConfig(AppConfig):
    def ready(self):
        self.module.autodiscover()

        if not set(sys.argv) & {'migrate', 'check_migrations'}:
        if not set(sys.argv) & {'migrate', 'check_migrations', 'showmigrations'}:
            from .settings import SettingsWrapper

            SettingsWrapper.initialize()
awx/main/cache.py (new file, 87 lines)
@@ -0,0 +1,87 @@
import functools

from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.core.cache.backends.redis import RedisCache

from redis.exceptions import ConnectionError, ResponseError, TimeoutError
import socket

# This list comes from what django-redis ignores and the behavior we are trying
# to retain while dropping the dependency on django-redis.
IGNORED_EXCEPTIONS = (TimeoutError, ResponseError, ConnectionError, socket.timeout)

CONNECTION_INTERRUPTED_SENTINEL = object()


def optionally_ignore_exceptions(func=None, return_value=None):
    if func is None:
        return functools.partial(optionally_ignore_exceptions, return_value=return_value)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except IGNORED_EXCEPTIONS as e:
            if settings.DJANGO_REDIS_IGNORE_EXCEPTIONS:
                return return_value
            raise e.__cause__ or e

    return wrapper


class AWXRedisCache(RedisCache):
    """
    We just want to wrap the upstream RedisCache class so that we can ignore
    the exceptions that it raises when the cache is unavailable.
    """

    @optionally_ignore_exceptions
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        return super().add(key, value, timeout, version)

    @optionally_ignore_exceptions(return_value=CONNECTION_INTERRUPTED_SENTINEL)
    def _get(self, key, default=None, version=None):
        return super().get(key, default, version)

    def get(self, key, default=None, version=None):
        value = self._get(key, default, version)
        if value is CONNECTION_INTERRUPTED_SENTINEL:
            return default
        return value

    @optionally_ignore_exceptions
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        return super().set(key, value, timeout, version)

    @optionally_ignore_exceptions
    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        return super().touch(key, timeout, version)

    @optionally_ignore_exceptions
    def delete(self, key, version=None):
        return super().delete(key, version)

    @optionally_ignore_exceptions
    def get_many(self, keys, version=None):
        return super().get_many(keys, version)

    @optionally_ignore_exceptions
    def has_key(self, key, version=None):
        return super().has_key(key, version)

    @optionally_ignore_exceptions
    def incr(self, key, delta=1, version=None):
        return super().incr(key, delta, version)

    @optionally_ignore_exceptions
    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        return super().set_many(data, timeout, version)

    @optionally_ignore_exceptions
    def delete_many(self, keys, version=None):
        return super().delete_many(keys, version)

    @optionally_ignore_exceptions
    def clear(self):
        return super().clear()
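For orientation, a backend like this is normally wired in through Django's CACHES setting; that wiring is not part of this diff, so the following is a minimal sketch that assumes the class is importable as awx.main.cache.AWXRedisCache and uses an illustrative Redis LOCATION:

# Hypothetical settings sketch, not taken from this diff
CACHES = {
    'default': {
        'BACKEND': 'awx.main.cache.AWXRedisCache',  # wrapper that tolerates Redis outages
        'LOCATION': 'redis://localhost:6379/1',  # assumed location, adjust for the deployment
    }
}
# The wrapper checks settings.DJANGO_REDIS_IGNORE_EXCEPTIONS (seen in the code above):
# when True, cache operations return their default/fallback value instead of raising
# while Redis is unreachable.
DJANGO_REDIS_IGNORE_EXCEPTIONS = True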
@@ -94,6 +94,20 @@ register(
    category_slug='system',
)

register(
    'CSRF_TRUSTED_ORIGINS',
    default=[],
    field_class=fields.StringListField,
    label=_('CSRF Trusted Origins List'),
    help_text=_(
        "If the service is behind a reverse proxy/load balancer, use this setting "
        "to configure the schema://addresses from which the service should trust "
        "Origin header values. "
    ),
    category=_('System'),
    category_slug='system',
)

register(
    'LICENSE',
    field_class=fields.DictField,
@@ -848,6 +862,15 @@ register(
    category_slug='system',
)

register(
    'HOST_METRIC_SUMMARY_TASK_LAST_TS',
    field_class=fields.DateTimeField,
    label=_('Last computing date of HostMetricSummaryMonthly'),
    allow_null=True,
    category=_('System'),
    category_slug='system',
)

register(
    'AWX_CLEANUP_PATHS',
    field_class=fields.BooleanField,
@@ -265,6 +265,8 @@ def kv_backend(**kwargs):

    if secret_key:
        try:
            if (secret_key != 'data') and (secret_key not in json['data']) and ('data' in json['data']):
                return json['data']['data'][secret_key]
            return json['data'][secret_key]
        except KeyError:
            raise RuntimeError('{} is not present at {}'.format(secret_key, secret_path))
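For context on the extra nested lookup above: a Vault KV version 2 read returns the stored key/value pairs one level deeper than KV v1, under data.data next to data.metadata, which is what the ('data' in json['data']) check accounts for. An illustrative, trimmed response shape:

# Illustrative KV v2 read response; the values are made up
response_json = {
    'data': {
        'data': {'secret_key': 'secret_value'},  # actual secrets live here in KV v2
        'metadata': {'version': 2},
    }
}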
@@ -55,6 +55,23 @@ class PubSub(object):
        with self.conn.cursor() as cur:
            cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))

    @staticmethod
    def current_notifies(conn):
        """
        Altered version of .notifies method from psycopg library
        This removes the outer while True loop so that we only process
        queued notifications
        """
        with conn.lock:
            try:
                ns = conn.wait(psycopg.generators.notifies(conn.pgconn))
            except psycopg.errors._NO_TRACEBACK as ex:
                raise ex.with_traceback(None)
        enc = psycopg._encodings.pgconn_encoding(conn.pgconn)
        for pgn in ns:
            n = psycopg.connection.Notify(pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid)
            yield n

    def events(self, select_timeout=5, yield_timeouts=False):
        if not self.conn.autocommit:
            raise RuntimeError('Listening for events can only be done in autocommit mode')
@@ -64,7 +81,7 @@
            if yield_timeouts:
                yield None
        else:
            notification_generator = self.conn.notifies()
            notification_generator = self.current_notifies(self.conn)
            for notification in notification_generator:
                yield notification
@@ -417,16 +417,16 @@ class AutoscalePool(WorkerPool):
            # the task manager to never do more work
            current_task = w.current_task
            if current_task and isinstance(current_task, dict):
                endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
                endings = ('tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager')
                current_task_name = current_task.get('task', '')
                if any(current_task_name.endswith(e) for e in endings):
                if current_task_name.endswith(endings):
                    if 'started' not in current_task:
                        w.managed_tasks[current_task['uuid']]['started'] = time.time()
                    age = time.time() - current_task['started']
                    w.managed_tasks[current_task['uuid']]['age'] = age
                    if age > self.task_manager_timeout:
                        logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
                        os.kill(w.pid, signal.SIGTERM)
                        logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGUSR1 to {w.pid}')
                        os.kill(w.pid, signal.SIGUSR1)

        for m in orphaned:
            # if all the workers are dead, spawn at least one
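The list-to-tuple change above is what makes the simplified check work: str.endswith accepts a tuple of suffixes natively, so the any(...) generator is no longer needed. A quick illustration:

name = 'awx.main.tasks.task_manager'
endings = ('tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager')
name.endswith(endings)  # True: endswith checks each suffix in the tuple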
@@ -121,10 +121,9 @@ class AWXConsumerBase(object):
        if time.time() - self.last_stats > 1:  # buffer stat recording to once per second
            try:
                self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
                self.last_stats = time.time()
            except Exception:
                logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
                self.last_stats = time.time()
            self.last_stats = time.time()

    def run(self, *args, **kwargs):
        signal.signal(signal.SIGINT, self.stop)
@@ -175,9 +174,12 @@ class AWXConsumerPG(AWXConsumerBase):

            # record subsystem metrics for the dispatcher
            if current_time - self.last_metrics_gather > 20:
                self.pool.produce_subsystem_metrics(self.subsystem_metrics)
                self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
                self.subsystem_metrics.pipe_execute()
                try:
                    self.pool.produce_subsystem_metrics(self.subsystem_metrics)
                    self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
                    self.subsystem_metrics.pipe_execute()
                except Exception:
                    logger.exception(f"encountered an error trying to store {self.name} metrics")
                self.listen_cumulative_time = 0.0
                self.last_metrics_gather = current_time

@@ -250,8 +252,8 @@ class BaseWorker(object):
                break
            except QueueEmpty:
                continue
            except Exception as e:
                logger.error("Exception on worker {}, restarting: ".format(idx) + str(e))
            except Exception:
                logger.exception("Exception on worker {}, reconnecting: ".format(idx))
                continue
            try:
                for conn in db.connections.all():
@@ -17,6 +17,6 @@ class Command(BaseCommand):
        months_ago = options.get('months-ago') or None

        if not months_ago:
            months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
            months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)

        HostMetric.cleanup_task(months_ago)
@@ -0,0 +1,9 @@
from django.core.management.base import BaseCommand
from awx.main.tasks.host_metrics import HostMetricSummaryMonthlyTask


class Command(BaseCommand):
    help = 'Computing of HostMetricSummaryMonthly'

    def handle(self, *args, **options):
        HostMetricSummaryMonthlyTask().execute()
@@ -9,13 +9,11 @@ from django.db import migrations, models
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
import taggit.managers
import awx.main.fields


class Migration(migrations.Migration):
    dependencies = [
        ('taggit', '0002_auto_20150616_2121'),
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
@@ -184,12 +182,6 @@ class Migration(migrations.Migration):
|
||||
null=True,
|
||||
),
|
||||
),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
],
|
||||
options={
|
||||
'ordering': ('kind', 'name'),
|
||||
@@ -529,12 +521,6 @@ class Migration(migrations.Migration):
|
||||
null=True,
|
||||
),
|
||||
),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
('users', models.ManyToManyField(related_name='organizations', to=settings.AUTH_USER_MODEL, blank=True)),
|
||||
],
|
||||
options={
|
||||
@@ -589,12 +575,6 @@ class Migration(migrations.Migration):
|
||||
null=True,
|
||||
),
|
||||
),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
migrations.CreateModel(
|
||||
@@ -644,12 +624,6 @@ class Migration(migrations.Migration):
|
||||
null=True,
|
||||
),
|
||||
),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
],
|
||||
options={
|
||||
'ordering': ['-next_run'],
|
||||
@@ -687,12 +661,6 @@ class Migration(migrations.Migration):
|
||||
),
|
||||
),
|
||||
('organization', models.ForeignKey(related_name='teams', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True)),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
('users', models.ManyToManyField(related_name='teams', to=settings.AUTH_USER_MODEL, blank=True)),
|
||||
],
|
||||
options={
|
||||
@@ -1267,13 +1235,6 @@ class Migration(migrations.Migration):
|
||||
null=True,
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='tags',
|
||||
field=taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='created_by',
|
||||
@@ -1319,13 +1280,6 @@ class Migration(migrations.Migration):
|
||||
name='schedule',
|
||||
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to='main.Schedule', null=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='tags',
|
||||
field=taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='unified_job_template',
|
||||
@@ -1370,13 +1324,6 @@ class Migration(migrations.Migration):
|
||||
help_text='Organization containing this inventory.',
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='inventory',
|
||||
name='tags',
|
||||
field=taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='host',
|
||||
name='inventory',
|
||||
@@ -1407,13 +1354,6 @@ class Migration(migrations.Migration):
|
||||
null=True,
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='host',
|
||||
name='tags',
|
||||
field=taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='group',
|
||||
name='hosts',
|
||||
@@ -1441,13 +1381,6 @@ class Migration(migrations.Migration):
|
||||
name='parents',
|
||||
field=models.ManyToManyField(related_name='children', to='main.Group', blank=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='group',
|
||||
name='tags',
|
||||
field=taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='custominventoryscript',
|
||||
name='organization',
|
||||
@@ -1459,13 +1392,6 @@ class Migration(migrations.Migration):
|
||||
null=True,
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='custominventoryscript',
|
||||
name='tags',
|
||||
field=taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='credential',
|
||||
name='team',
|
||||
|
||||
@@ -12,8 +12,6 @@ import django.db.models.deletion
|
||||
from django.conf import settings
|
||||
from django.utils.timezone import now
|
||||
|
||||
import taggit.managers
|
||||
|
||||
|
||||
def create_system_job_templates(apps, schema_editor):
|
||||
"""
|
||||
@@ -125,7 +123,6 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
dependencies = [
|
||||
('taggit', '0002_auto_20150616_2121'),
|
||||
('contenttypes', '0002_remove_content_type_name'),
|
||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||
('main', '0001_initial'),
|
||||
@@ -256,12 +253,6 @@ class Migration(migrations.Migration):
|
||||
'organization',
|
||||
models.ForeignKey(related_name='notification_templates', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True),
|
||||
),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
migrations.AddField(
|
||||
@@ -721,12 +712,6 @@ class Migration(migrations.Migration):
|
||||
help_text='Organization this label belongs to.',
|
||||
),
|
||||
),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
],
|
||||
options={
|
||||
'ordering': ('organization', 'name'),
|
||||
|
||||
@@ -5,7 +5,6 @@ from __future__ import unicode_literals
|
||||
# Django
|
||||
from django.db import connection, migrations, models, OperationalError, ProgrammingError
|
||||
from django.conf import settings
|
||||
import taggit.managers
|
||||
|
||||
# AWX
|
||||
import awx.main.fields
|
||||
@@ -317,10 +316,6 @@ class Migration(migrations.Migration):
|
||||
model_name='permission',
|
||||
name='project',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='permission',
|
||||
name='tags',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='permission',
|
||||
name='team',
|
||||
@@ -510,12 +505,6 @@ class Migration(migrations.Migration):
|
||||
null=True,
|
||||
),
|
||||
),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
],
|
||||
options={
|
||||
'ordering': ('kind', 'name'),
|
||||
|
||||
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
|
||||
from django.conf import settings
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
import taggit.managers
|
||||
|
||||
# AWX
|
||||
import awx.main.fields
|
||||
@@ -20,7 +19,6 @@ def setup_tower_managed_defaults(apps, schema_editor):
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||
('taggit', '0002_auto_20150616_2121'),
|
||||
('main', '0066_v350_inventorysource_custom_virtualenv'),
|
||||
]
|
||||
|
||||
@@ -60,12 +58,6 @@ class Migration(migrations.Migration):
|
||||
'source_credential',
|
||||
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target_input_sources', to='main.Credential'),
|
||||
),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
(
|
||||
'target_credential',
|
||||
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='input_sources', to='main.Credential'),
|
||||
|
||||
@@ -4,12 +4,10 @@ from django.conf import settings
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
import django.db.models.expressions
|
||||
import taggit.managers
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('taggit', '0003_taggeditem_add_unique_index'),
|
||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||
('main', '0123_drop_hg_support'),
|
||||
]
|
||||
@@ -69,12 +67,6 @@ class Migration(migrations.Migration):
|
||||
to='main.Organization',
|
||||
),
|
||||
),
|
||||
(
|
||||
'tags',
|
||||
taggit.managers.TaggableManager(
|
||||
blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'
|
||||
),
|
||||
),
|
||||
],
|
||||
options={
|
||||
'ordering': (django.db.models.expressions.OrderBy(django.db.models.expressions.F('organization_id'), nulls_first=True), 'image'),
|
||||
|
||||
awx/main/migrations/0186_drop_django_taggit.py (new file, 27 lines)
@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations


def delete_taggit_contenttypes(apps, schema_editor):
    ContentType = apps.get_model('contenttypes', 'ContentType')
    ContentType.objects.filter(app_label='taggit').delete()


def delete_taggit_migration_records(apps, schema_editor):
    recorder = migrations.recorder.MigrationRecorder(connection=schema_editor.connection)
    recorder.migration_qs.filter(app='taggit').delete()


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0185_move_JSONBlob_to_JSONField'),
    ]

    operations = [
        migrations.RunSQL("DROP TABLE IF EXISTS taggit_tag CASCADE;"),
        migrations.RunSQL("DROP TABLE IF EXISTS taggit_taggeditem CASCADE;"),
        migrations.RunPython(delete_taggit_contenttypes),
        migrations.RunPython(delete_taggit_migration_records),
    ]
@@ -7,9 +7,6 @@ from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.utils.translation import gettext_lazy as _
from django.utils.timezone import now

# Django-Taggit
from taggit.managers import TaggableManager

# Django-CRUM
from crum import get_current_user

@@ -301,8 +298,6 @@ class PrimordialModel(HasEditsMixin, CreatedModifiedModel):
        on_delete=models.SET_NULL,
    )

    tags = TaggableManager(blank=True)

    def __init__(self, *args, **kwargs):
        r = super(PrimordialModel, self).__init__(*args, **kwargs)
        if self.pk:
@@ -899,18 +899,18 @@ class HostMetric(models.Model):

            last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)

            logger.info(f'Cleanup [HostMetric]: soft-deleting records last automated before {last_automation_before}')
            logger.info(f'cleanup_host_metrics: soft-deleting records last automated before {last_automation_before}')
            HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
                deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
            )
            settings.CLEANUP_HOST_METRICS_LAST_TS = now()
        except (TypeError, ValueError):
            logger.error(f"Cleanup [HostMetric]: months_ago({months_ago}) has to be a positive integer value")
            logger.error(f"cleanup_host_metrics: months_ago({months_ago}) has to be a positive integer value")


class HostMetricSummaryMonthly(models.Model):
    """
    HostMetric summaries computed by scheduled task <TODO> monthly
    HostMetric summaries computed by scheduled task 'awx.main.tasks.system.host_metric_summary_monthly' monthly
    """

    date = models.DateField(unique=True)
@@ -661,7 +661,11 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio

    @property
    def event_processing_finished(self):
        return True
        return True  # workflow jobs do not have events

    @property
    def has_unpartitioned_events(self):
        return False  # workflow jobs do not have events

    def _get_parent_field_name(self):
        if self.job_template_id:
@@ -914,7 +918,11 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):

    @property
    def event_processing_finished(self):
        return True
        return True  # approval jobs do not have events

    @property
    def has_unpartitioned_events(self):
        return False  # approval jobs do not have events

    def send_approval_notification(self, approval_status):
        from awx.main.tasks.system import send_notifications  # avoid circular import
@@ -3,8 +3,6 @@

from django.db.models.signals import pre_save, post_save, pre_delete, m2m_changed

from taggit.managers import TaggableManager


class ActivityStreamRegistrar(object):
    def __init__(self):
@@ -21,8 +19,6 @@ class ActivityStreamRegistrar(object):
        pre_delete.connect(activity_stream_delete, sender=model, dispatch_uid=str(self.__class__) + str(model) + "_delete")

        for m2mfield in model._meta.many_to_many:
            if isinstance(m2mfield, TaggableManager):
                continue  # Special case for taggit app
            try:
                m2m_attr = getattr(model, m2mfield.name)
                m2m_changed.connect(
@@ -25,7 +25,6 @@ from awx.main.models import (
    InventoryUpdate,
    Job,
    Project,
    ProjectUpdate,
    UnifiedJob,
    WorkflowApproval,
    WorkflowJob,
@@ -102,27 +101,33 @@ class TaskBase:
|
||||
|
||||
def record_aggregate_metrics(self, *args):
|
||||
if not is_testing():
|
||||
# increment task_manager_schedule_calls regardless if the other
|
||||
# metrics are recorded
|
||||
s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
|
||||
# Only record metrics if the last time recording was more
|
||||
# than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
|
||||
# Prevents a short-duration task manager that runs directly after a
|
||||
# long task manager to override useful metrics.
|
||||
current_time = time.time()
|
||||
time_last_recorded = current_time - self.subsystem_metrics.decode(f"{self.prefix}_recorded_timestamp")
|
||||
if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
|
||||
logger.debug(f"recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
|
||||
self.subsystem_metrics.set(f"{self.prefix}_recorded_timestamp", current_time)
|
||||
self.subsystem_metrics.pipe_execute()
|
||||
else:
|
||||
logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
|
||||
try:
|
||||
# increment task_manager_schedule_calls regardless if the other
|
||||
# metrics are recorded
|
||||
s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
|
||||
# Only record metrics if the last time recording was more
|
||||
# than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
|
||||
# Prevents a short-duration task manager that runs directly after a
|
||||
# long task manager to override useful metrics.
|
||||
current_time = time.time()
|
||||
time_last_recorded = current_time - self.subsystem_metrics.decode(f"{self.prefix}_recorded_timestamp")
|
||||
if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
|
||||
logger.debug(f"recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
|
||||
self.subsystem_metrics.set(f"{self.prefix}_recorded_timestamp", current_time)
|
||||
self.subsystem_metrics.pipe_execute()
|
||||
else:
|
||||
logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
|
||||
except Exception:
|
||||
logger.exception(f"Error saving metrics for {self.prefix}")
|
||||
|
||||
def record_aggregate_metrics_and_exit(self, *args):
|
||||
self.record_aggregate_metrics()
|
||||
sys.exit(1)
|
||||
|
||||
def schedule(self):
|
||||
# Always be able to restore the original signal handler if we finish
|
||||
original_sigusr1 = signal.getsignal(signal.SIGUSR1)
|
||||
|
||||
# Lock
|
||||
with task_manager_bulk_reschedule():
|
||||
with advisory_lock(f"{self.prefix}_lock", wait=False) as acquired:
|
||||
@@ -131,9 +136,14 @@ class TaskBase:
|
||||
logger.debug(f"Not running {self.prefix} scheduler, another task holds lock")
|
||||
return
|
||||
logger.debug(f"Starting {self.prefix} Scheduler")
|
||||
# if sigterm due to timeout, still record metrics
|
||||
signal.signal(signal.SIGTERM, self.record_aggregate_metrics_and_exit)
|
||||
self._schedule()
|
||||
# if sigusr1 due to timeout, still record metrics
|
||||
signal.signal(signal.SIGUSR1, self.record_aggregate_metrics_and_exit)
|
||||
try:
|
||||
self._schedule()
|
||||
finally:
|
||||
# Reset the signal handler back to the default just in case anything
|
||||
# else uses the same signal for other purposes
|
||||
signal.signal(signal.SIGUSR1, original_sigusr1)
|
||||
commit_start = time.time()
|
||||
|
||||
if self.prefix == "task_manager":
|
||||
@@ -154,7 +164,6 @@ class WorkflowManager(TaskBase):
|
||||
logger.warning("Workflow manager has reached time out while processing running workflows, exiting loop early")
|
||||
ScheduleWorkflowManager().schedule()
|
||||
# Do not process any more workflow jobs. Stop here.
|
||||
# Maybe we should schedule another WorkflowManager run
|
||||
break
|
||||
dag = WorkflowDAG(workflow_job)
|
||||
status_changed = False
|
||||
@@ -169,8 +178,8 @@ class WorkflowManager(TaskBase):
|
||||
workflow_job.save(update_fields=['status', 'start_args'])
|
||||
status_changed = True
|
||||
else:
|
||||
workflow_nodes = dag.mark_dnr_nodes()
|
||||
WorkflowJobNode.objects.bulk_update(workflow_nodes, ['do_not_run'])
|
||||
dnr_nodes = dag.mark_dnr_nodes()
|
||||
WorkflowJobNode.objects.bulk_update(dnr_nodes, ['do_not_run'])
|
||||
# If workflow is now done, we do special things to mark it as done.
|
||||
is_done = dag.is_workflow_done()
|
||||
if is_done:
|
||||
@@ -250,6 +259,7 @@ class WorkflowManager(TaskBase):
|
||||
job.status = 'failed'
|
||||
job.save(update_fields=['status', 'job_explanation'])
|
||||
job.websocket_emit_status('failed')
|
||||
ScheduleWorkflowManager().schedule()
|
||||
|
||||
# TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
|
||||
# emit_websocket_notification('/socket.io/jobs', '', dict(id=))
|
||||
@@ -270,184 +280,115 @@ class WorkflowManager(TaskBase):
|
||||
class DependencyManager(TaskBase):
|
||||
def __init__(self):
|
||||
super().__init__(prefix="dependency_manager")
|
||||
self.all_projects = {}
|
||||
self.all_inventory_sources = {}
|
||||
|
||||
def create_project_update(self, task, project_id=None):
|
||||
if project_id is None:
|
||||
project_id = task.project_id
|
||||
project_task = Project.objects.get(id=project_id).create_project_update(_eager_fields=dict(launch_type='dependency'))
|
||||
|
||||
# Project created 1 seconds behind
|
||||
project_task.created = task.created - timedelta(seconds=1)
|
||||
project_task.status = 'pending'
|
||||
project_task.save()
|
||||
logger.debug('Spawned {} as dependency of {}'.format(project_task.log_format, task.log_format))
|
||||
return project_task
|
||||
|
||||
def create_inventory_update(self, task, inventory_source_task):
|
||||
inventory_task = InventorySource.objects.get(id=inventory_source_task.id).create_inventory_update(_eager_fields=dict(launch_type='dependency'))
|
||||
|
||||
inventory_task.created = task.created - timedelta(seconds=2)
|
||||
inventory_task.status = 'pending'
|
||||
inventory_task.save()
|
||||
logger.debug('Spawned {} as dependency of {}'.format(inventory_task.log_format, task.log_format))
|
||||
|
||||
return inventory_task
|
||||
|
||||
def add_dependencies(self, task, dependencies):
|
||||
with disable_activity_stream():
|
||||
task.dependent_jobs.add(*dependencies)
|
||||
|
||||
def get_inventory_source_tasks(self):
|
||||
def cache_projects_and_sources(self, task_list):
|
||||
project_ids = set()
|
||||
inventory_ids = set()
|
||||
for task in self.all_tasks:
|
||||
for task in task_list:
|
||||
if isinstance(task, Job):
|
||||
inventory_ids.add(task.inventory_id)
|
||||
self.all_inventory_sources = [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
|
||||
if task.project_id:
|
||||
project_ids.add(task.project_id)
|
||||
if task.inventory_id:
|
||||
inventory_ids.add(task.inventory_id)
|
||||
elif isinstance(task, InventoryUpdate):
|
||||
if task.inventory_source and task.inventory_source.source_project_id:
|
||||
project_ids.add(task.inventory_source.source_project_id)
|
||||
|
||||
def get_latest_inventory_update(self, inventory_source):
|
||||
latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("-created")
|
||||
if not latest_inventory_update.exists():
|
||||
return None
|
||||
return latest_inventory_update.first()
|
||||
for proj in Project.objects.filter(id__in=project_ids, scm_update_on_launch=True):
|
||||
self.all_projects[proj.id] = proj
|
||||
|
||||
def should_update_inventory_source(self, job, latest_inventory_update):
|
||||
now = tz_now()
|
||||
for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True):
|
||||
self.all_inventory_sources.setdefault(invsrc.inventory_id, [])
|
||||
self.all_inventory_sources[invsrc.inventory_id].append(invsrc)
|
||||
|
||||
if latest_inventory_update is None:
|
||||
@staticmethod
|
||||
def should_update_again(update, cache_timeout):
|
||||
'''
|
||||
If it has never updated, we need to update
|
||||
If there is already an update in progress then we do not need to a new create one
|
||||
If the last update failed, we always need to try and update again
|
||||
If current time is more than cache_timeout after last update, then we need a new one
|
||||
'''
|
||||
if (update is None) or (update.status in ['failed', 'canceled', 'error']):
|
||||
return True
|
||||
'''
|
||||
If there's already a inventory update utilizing this job that's about to run
|
||||
then we don't need to create one
|
||||
'''
|
||||
if latest_inventory_update.status in ['waiting', 'pending', 'running']:
|
||||
if update.status in ['waiting', 'pending', 'running']:
|
||||
return False
|
||||
|
||||
timeout_seconds = timedelta(seconds=latest_inventory_update.inventory_source.update_cache_timeout)
|
||||
if (latest_inventory_update.finished + timeout_seconds) < now:
|
||||
return True
|
||||
if latest_inventory_update.inventory_source.update_on_launch is True and latest_inventory_update.status in ['failed', 'canceled', 'error']:
|
||||
return True
|
||||
return False
|
||||
return bool(((update.finished + timedelta(seconds=cache_timeout))) < tz_now())
|
||||
|
||||
def get_latest_project_update(self, project_id):
|
||||
latest_project_update = ProjectUpdate.objects.filter(project=project_id, job_type='check').order_by("-created")
|
||||
if not latest_project_update.exists():
|
||||
return None
|
||||
return latest_project_update.first()
|
||||
|
||||
def should_update_related_project(self, job, latest_project_update):
|
||||
now = tz_now()
|
||||
|
||||
if latest_project_update is None:
|
||||
return True
|
||||
|
||||
if latest_project_update.status in ['failed', 'canceled']:
|
||||
return True
|
||||
|
||||
'''
|
||||
If there's already a project update utilizing this job that's about to run
|
||||
then we don't need to create one
|
||||
'''
|
||||
if latest_project_update.status in ['waiting', 'pending', 'running']:
|
||||
return False
|
||||
|
||||
'''
|
||||
If the latest project update has a created time == job_created_time-1
|
||||
then consider the project update found. This is so we don't enter an infinite loop
|
||||
of updating the project when cache timeout is 0.
|
||||
'''
|
||||
if (
|
||||
latest_project_update.project.scm_update_cache_timeout == 0
|
||||
and latest_project_update.launch_type == 'dependency'
|
||||
and latest_project_update.created == job.created - timedelta(seconds=1)
|
||||
):
|
||||
return False
|
||||
'''
|
||||
Normal Cache Timeout Logic
|
||||
'''
|
||||
timeout_seconds = timedelta(seconds=latest_project_update.project.scm_update_cache_timeout)
|
||||
if (latest_project_update.finished + timeout_seconds) < now:
|
||||
return True
|
||||
return False
|
||||
def get_or_create_project_update(self, project_id):
|
||||
project = self.all_projects.get(project_id, None)
|
||||
if project is not None:
|
||||
latest_project_update = project.project_updates.filter(job_type='check').order_by("-created").first()
|
||||
if self.should_update_again(latest_project_update, project.scm_update_cache_timeout):
|
||||
project_task = project.create_project_update(_eager_fields=dict(launch_type='dependency'))
|
||||
project_task.signal_start()
|
||||
return [project_task]
|
||||
else:
|
||||
return [latest_project_update]
|
||||
return []
|
||||
|
||||
def gen_dep_for_job(self, task):
|
||||
created_dependencies = []
|
||||
dependencies = []
|
||||
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
|
||||
if task.project is not None and task.project.scm_update_on_launch is True:
|
||||
latest_project_update = self.get_latest_project_update(task.project_id)
|
||||
if self.should_update_related_project(task, latest_project_update):
|
||||
latest_project_update = self.create_project_update(task)
|
||||
created_dependencies.append(latest_project_update)
|
||||
dependencies.append(latest_project_update)
|
||||
dependencies = self.get_or_create_project_update(task.project_id)
|
||||
|
||||
# Inventory created 2 seconds behind job
|
||||
try:
|
||||
start_args = json.loads(decrypt_field(task, field_name="start_args"))
|
||||
except ValueError:
|
||||
start_args = dict()
|
||||
# generator for inventory sources related to this task
|
||||
task_inv_sources = (invsrc for invsrc in self.all_inventory_sources if invsrc.inventory_id == task.inventory_id)
|
||||
for inventory_source in task_inv_sources:
|
||||
# generator for update-on-launch inventory sources related to this task
|
||||
for inventory_source in self.all_inventory_sources.get(task.inventory_id, []):
|
||||
if "inventory_sources_already_updated" in start_args and inventory_source.id in start_args['inventory_sources_already_updated']:
|
||||
continue
|
||||
if not inventory_source.update_on_launch:
|
||||
continue
|
||||
latest_inventory_update = self.get_latest_inventory_update(inventory_source)
|
||||
if self.should_update_inventory_source(task, latest_inventory_update):
|
||||
inventory_task = self.create_inventory_update(task, inventory_source)
|
||||
created_dependencies.append(inventory_task)
|
||||
latest_inventory_update = inventory_source.inventory_updates.order_by("-created").first()
|
||||
if self.should_update_again(latest_inventory_update, inventory_source.update_cache_timeout):
|
||||
inventory_task = inventory_source.create_inventory_update(_eager_fields=dict(launch_type='dependency'))
|
||||
inventory_task.signal_start()
|
||||
dependencies.append(inventory_task)
|
||||
else:
|
||||
dependencies.append(latest_inventory_update)
|
||||
|
||||
if dependencies:
|
||||
self.add_dependencies(task, dependencies)
|
||||
|
||||
return created_dependencies
|
||||
return dependencies
|
||||
|
||||
def gen_dep_for_inventory_update(self, inventory_task):
|
||||
created_dependencies = []
|
||||
if inventory_task.source == "scm":
|
||||
invsrc = inventory_task.inventory_source
|
||||
if not invsrc.source_project.scm_update_on_launch:
|
||||
return created_dependencies
|
||||
|
||||
latest_src_project_update = self.get_latest_project_update(invsrc.source_project_id)
|
||||
if self.should_update_related_project(inventory_task, latest_src_project_update):
|
||||
latest_src_project_update = self.create_project_update(inventory_task, project_id=invsrc.source_project_id)
|
||||
created_dependencies.append(latest_src_project_update)
|
||||
self.add_dependencies(inventory_task, [latest_src_project_update])
|
||||
latest_src_project_update.scm_inventory_updates.add(inventory_task)
|
||||
return created_dependencies
|
||||
if invsrc:
|
||||
return self.get_or_create_project_update(invsrc.source_project_id)
|
||||
return []
|
||||
|
||||
@timeit
|
||||
def generate_dependencies(self, undeped_tasks):
|
||||
created_dependencies = []
|
||||
dependencies = []
|
||||
self.cache_projects_and_sources(undeped_tasks)
|
||||
for task in undeped_tasks:
|
||||
task.log_lifecycle("acknowledged")
|
||||
if type(task) is Job:
|
||||
created_dependencies += self.gen_dep_for_job(task)
|
||||
job_deps = self.gen_dep_for_job(task)
|
||||
elif type(task) is InventoryUpdate:
|
||||
created_dependencies += self.gen_dep_for_inventory_update(task)
|
||||
job_deps = self.gen_dep_for_inventory_update(task)
|
||||
else:
|
||||
continue
|
||||
if job_deps:
|
||||
dependencies += job_deps
|
||||
with disable_activity_stream():
|
||||
task.dependent_jobs.add(*dependencies)
|
||||
logger.debug(f'Linked {[dep.log_format for dep in dependencies]} as dependencies of {task.log_format}')
|
||||
|
||||
UnifiedJob.objects.filter(pk__in=[task.pk for task in undeped_tasks]).update(dependencies_processed=True)
|
||||
|
||||
return created_dependencies
|
||||
|
||||
def process_tasks(self):
|
||||
deps = self.generate_dependencies(self.all_tasks)
|
||||
self.generate_dependencies(deps)
|
||||
self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(self.all_tasks) + len(deps))
|
||||
return dependencies
|
||||
|
||||
@timeit
|
||||
def _schedule(self):
|
||||
self.get_tasks(dict(status__in=["pending"], dependencies_processed=False))
|
||||
|
||||
if len(self.all_tasks) > 0:
|
||||
self.get_inventory_source_tasks()
|
||||
self.process_tasks()
|
||||
deps = self.generate_dependencies(self.all_tasks)
|
||||
undeped_deps = [dep for dep in deps if dep.dependencies_processed is False]
|
||||
self.generate_dependencies(undeped_deps)
|
||||
self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(self.all_tasks) + len(undeped_deps))
|
||||
ScheduleTaskManager().schedule()
|
||||
|
||||
|
||||
|
||||
@@ -1 +1 @@
from . import jobs, receptor, system  # noqa
from . import host_metrics, jobs, receptor, system  # noqa
205
awx/main/tasks/host_metrics.py
Normal file
205
awx/main/tasks/host_metrics.py
Normal file
@@ -0,0 +1,205 @@
|
||||
import datetime
|
||||
from dateutil.relativedelta import relativedelta
|
||||
import logging
|
||||
|
||||
from django.conf import settings
|
||||
from django.db.models import Count
|
||||
from django.db.models.functions import TruncMonth
|
||||
from django.utils.timezone import now
|
||||
from rest_framework.fields import DateTimeField
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
||||
from awx.conf.license import get_license
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.host_metric_summary_monthly')
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def host_metric_summary_monthly():
|
||||
"""Run cleanup host metrics summary monthly task each week"""
|
||||
if _is_run_threshold_reached(
|
||||
getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400
|
||||
):
|
||||
logger.info(f"Executing host_metric_summary_monthly, last ran at {getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', '---')}")
|
||||
HostMetricSummaryMonthlyTask().execute()
|
||||
logger.info("Finished host_metric_summary_monthly")
|
||||
|
||||
|
||||
def _is_run_threshold_reached(setting, threshold_seconds):
|
||||
last_time = DateTimeField().to_internal_value(setting) if setting else DateTimeField().to_internal_value('1970-01-01')
|
||||
|
||||
return (now() - last_time).total_seconds() > threshold_seconds
|
||||
|
||||
|
||||
class HostMetricSummaryMonthlyTask:
|
||||
"""
|
||||
This task computes last [threshold] months of HostMetricSummaryMonthly table
|
||||
[threshold] is setting CLEANUP_HOST_METRICS_HARD_THRESHOLD
|
||||
Each record in the table represents changes in HostMetric table in one month
|
||||
It always overrides all the months newer than <threshold>, never updates older months
|
||||
Algorithm:
|
||||
- hosts_added are HostMetric records with first_automation in given month
|
||||
- hosts_deleted are HostMetric records with deleted=True and last_deleted in given month
|
||||
- - HostMetrics soft-deleted before <threshold> also increases hosts_deleted in their last_deleted month
|
||||
- license_consumed is license_consumed(previous month) + hosts_added - hosts_deleted
|
||||
- - license_consumed for HostMetricSummaryMonthly.date < [threshold] is computed also from
|
||||
all HostMetrics.first_automation < [threshold]
|
||||
- license_capacity is set only for current month, and it's never updated (value taken from current subscription)
|
||||
"""
|
||||
|
||||
    def __init__(self):
        self.host_metrics = {}
        self.processed_month = self._get_first_month()
        self.existing_summaries = None
        self.existing_summaries_idx = 0
        self.existing_summaries_cnt = 0
        self.records_to_create = []
        self.records_to_update = []

    def execute(self):
        self._load_existing_summaries()
        self._load_hosts_added()
        self._load_hosts_deleted()

        # Get first month after last hard delete
        month = self._get_first_month()
        license_consumed = self._get_license_consumed_before(month)

        # Fill record for each month
        while month <= datetime.date.today().replace(day=1):
            summary = self._find_or_create_summary(month)
            # Update summary and update license_consumed by hosts added/removed this month
            self._update_summary(summary, month, license_consumed)
            license_consumed = summary.license_consumed

            month = month + relativedelta(months=1)

        # Create/Update stats
        HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create, batch_size=1000)
        HostMetricSummaryMonthly.objects.bulk_update(self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'], batch_size=1000)

        # Set timestamp of last run
        settings.HOST_METRIC_SUMMARY_TASK_LAST_TS = now()

    def _get_license_consumed_before(self, month):
        license_consumed = 0
        for metric_month, metric in self.host_metrics.items():
            if metric_month < month:
                hosts_added = metric.get('hosts_added', 0)
                hosts_deleted = metric.get('hosts_deleted', 0)
                license_consumed = license_consumed + hosts_added - hosts_deleted
            else:
                break
        return license_consumed

    def _load_existing_summaries(self):
        """Find all summaries newer than the host metrics delete threshold"""
        self.existing_summaries = HostMetricSummaryMonthly.objects.filter(date__gte=self._get_first_month()).order_by('date')
        self.existing_summaries_idx = 0
        self.existing_summaries_cnt = len(self.existing_summaries)

    def _load_hosts_added(self):
        """Aggregates hosts added each month, by the 'first_automation' timestamp"""
        #
        # -- SQL translation (for better code readability)
        # SELECT date_trunc('month', first_automation) as month,
        #        count(first_automation) AS hosts_added
        # FROM main_hostmetric
        # GROUP BY month
        # ORDER BY month;
        result = (
            HostMetric.objects.annotate(month=TruncMonth('first_automation'))
            .values('month')
            .annotate(hosts_added=Count('first_automation'))
            .values('month', 'hosts_added')
            .order_by('month')
        )

        for host_metric in list(result):
            month = host_metric['month']
            if month:
                beginning_of_month = datetime.date(month.year, month.month, 1)
                if self.host_metrics.get(beginning_of_month) is None:
                    self.host_metrics[beginning_of_month] = {}
                self.host_metrics[beginning_of_month]['hosts_added'] = host_metric['hosts_added']

    def _load_hosts_deleted(self):
        """
        Aggregates hosts deleted each month, by the 'last_deleted' timestamp.
        Host metrics have to be deleted NOW to be counted as deleted before
        (by intention - statistics can change retrospectively by re-automation of a previously deleted host)
        """
        #
        # -- SQL translation (for better code readability)
        # SELECT date_trunc('month', last_deleted) as month,
        #        count(last_deleted) AS hosts_deleted
        # FROM main_hostmetric
        # WHERE deleted = True
        # GROUP BY 1  # equal to "GROUP BY month"
        # ORDER BY month;
        result = (
            HostMetric.objects.annotate(month=TruncMonth('last_deleted'))
            .values('month')
            .annotate(hosts_deleted=Count('last_deleted'))
            .values('month', 'hosts_deleted')
            .filter(deleted=True)
            .order_by('month')
        )
        for host_metric in list(result):
            month = host_metric['month']
            if month:
                beginning_of_month = datetime.date(month.year, month.month, 1)
                if self.host_metrics.get(beginning_of_month) is None:
                    self.host_metrics[beginning_of_month] = {}
                self.host_metrics[beginning_of_month]['hosts_deleted'] = host_metric['hosts_deleted']

    def _find_or_create_summary(self, month):
        summary = self._find_summary(month)

        if not summary:
            summary = HostMetricSummaryMonthly(date=month)
            self.records_to_create.append(summary)
        else:
            self.records_to_update.append(summary)
        return summary

    def _find_summary(self, month):
        """
        Existing summaries are ordered by month ASC.
        This method is called with months in ascending order too => a single traversal is enough
        """
        summary = None
        while not summary and self.existing_summaries_idx < self.existing_summaries_cnt:
            tmp = self.existing_summaries[self.existing_summaries_idx]
            if tmp.date < month:
                self.existing_summaries_idx += 1
            elif tmp.date == month:
                summary = tmp
            elif tmp.date > month:
                break
        return summary

    def _update_summary(self, summary, month, license_consumed):
        """Updates the metric with hosts added and deleted and sets license info for the current month"""
        # Get month counts from host metrics, zero if not found
        hosts_added, hosts_deleted = 0, 0
        if metric := self.host_metrics.get(month, None):
            hosts_added = metric.get('hosts_added', 0)
            hosts_deleted = metric.get('hosts_deleted', 0)

        summary.license_consumed = license_consumed + hosts_added - hosts_deleted
        summary.hosts_added = hosts_added
        summary.hosts_deleted = hosts_deleted

        # Set subscription count for current month
        if month == datetime.date.today().replace(day=1):
            license_info = get_license()
            summary.license_capacity = license_info.get('instance_count', 0)
        return summary

    @staticmethod
    def _get_first_month():
        """Returns the first month after the host metrics hard delete threshold"""
        threshold = getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36)
        return datetime.date.today().replace(day=1) - relativedelta(months=int(threshold) - 1)
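

# Sketch (not part of the task API): because every run rebuilds all months newer than the hard
# delete threshold, the summary can also be recomputed manually, e.g. from a Django shell:
#
#   from awx.main.tasks.host_metrics import HostMetricSummaryMonthlyTask
#   HostMetricSummaryMonthlyTask().execute()
#
# or via the corresponding management command (awx-manage host_metric_summary_monthly).
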
@@ -639,7 +639,7 @@ class AWXReceptorJob:
|
||||
#
|
||||
RECEPTOR_CONFIG_STARTER = (
|
||||
{'local-only': None},
|
||||
{'log-level': 'info'},
|
||||
{'log-level': settings.RECEPTOR_LOG_LEVEL},
|
||||
{'node': {'firewallrules': [{'action': 'reject', 'tonode': settings.CLUSTER_HOST_ID, 'toservice': 'control'}]}},
|
||||
{'control-service': {'service': 'control', 'filename': '/var/run/receptor/receptor.sock', 'permissions': '0660'}},
|
||||
{'work-command': {'worktype': 'local', 'command': 'ansible-runner', 'params': 'worker', 'allowruntimeparams': True}},
|
||||
|
||||
@@ -316,13 +316,8 @@ def send_notifications(notification_list, job_id=None):
|
||||
@task(queue=get_task_queuename)
|
||||
def gather_analytics():
|
||||
from awx.conf.models import Setting
|
||||
from rest_framework.fields import DateTimeField
|
||||
|
||||
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
|
||||
last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
|
||||
gather_time = now()
|
||||
|
||||
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
|
||||
if is_run_threshold_reached(Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first(), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
|
||||
analytics.gather()
|
||||
|
||||
|
||||
@@ -381,16 +376,25 @@ def cleanup_images_and_files():
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cleanup_host_metrics():
|
||||
"""Run cleanup host metrics ~each month"""
|
||||
# TODO: move whole method to host_metrics in follow-up PR
|
||||
from awx.conf.models import Setting
|
||||
|
||||
if is_run_threshold_reached(
|
||||
Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first(), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
|
||||
):
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
|
||||
logger.info("Executing cleanup_host_metrics")
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
logger.info("Finished cleanup_host_metrics")
|
||||
|
||||
|
||||
def is_run_threshold_reached(setting, threshold_seconds):
|
||||
from rest_framework.fields import DateTimeField
|
||||
|
||||
last_cleanup = Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first()
|
||||
last_time = DateTimeField().to_internal_value(last_cleanup.value) if last_cleanup and last_cleanup.value else None
|
||||
last_time = DateTimeField().to_internal_value(setting.value) if setting and setting.value else DateTimeField().to_internal_value('1970-01-01')
|
||||
|
||||
cleanup_interval_secs = getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
|
||||
if not last_time or ((now() - last_time).total_seconds() > cleanup_interval_secs):
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
return (now() - last_time).total_seconds() > threshold_seconds
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@@ -839,10 +843,7 @@ def delete_inventory(inventory_id, user_id, retries=5):
|
||||
user = None
|
||||
with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
|
||||
try:
|
||||
i = Inventory.objects.get(id=inventory_id)
|
||||
for host in i.hosts.iterator():
|
||||
host.job_events_as_primary_host.update(host=None)
|
||||
i.delete()
|
||||
Inventory.objects.get(id=inventory_id).delete()
|
||||
emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
|
||||
logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
|
||||
except Inventory.DoesNotExist:
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
import json
|
||||
|
||||
from django.contrib.auth.models import User
|
||||
from django.core.exceptions import ValidationError
|
||||
|
||||
from unittest import mock
|
||||
|
||||
from awx.main.models import (
|
||||
Organization,
|
||||
@@ -20,6 +23,7 @@ from awx.main.models import (
|
||||
WorkflowJobNode,
|
||||
WorkflowJobTemplateNode,
|
||||
)
|
||||
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
||||
|
||||
# mk methods should create only a single object of a single type.
|
||||
# they should also have the option of being persisted or not.
|
||||
@@ -248,3 +252,42 @@ def mk_workflow_job_node(unified_job_template=None, success_nodes=None, failure_
|
||||
if persisted:
|
||||
workflow_node.save()
|
||||
return workflow_node
|
||||
|
||||
|
||||
def mk_host_metric(hostname, first_automation, last_automation=None, last_deleted=None, deleted=False, persisted=True):
|
||||
ok, idx = False, 1
|
||||
while not ok:
|
||||
try:
|
||||
with mock.patch("django.utils.timezone.now") as mock_now:
|
||||
mock_now.return_value = first_automation
|
||||
metric = HostMetric(
|
||||
hostname=hostname or f"host-{first_automation}-{idx}",
|
||||
first_automation=first_automation,
|
||||
last_automation=last_automation or first_automation,
|
||||
last_deleted=last_deleted,
|
||||
deleted=deleted,
|
||||
)
|
||||
metric.validate_unique()
|
||||
if persisted:
|
||||
metric.save()
|
||||
ok = True
|
||||
except ValidationError as e:
|
||||
# Repeat create for auto-generated hostname
|
||||
if not hostname and e.message_dict.get('hostname', None):
|
||||
idx += 1
|
||||
else:
|
||||
raise e
|
||||
|
||||
|
||||
def mk_host_metric_summary(date, license_consumed=0, license_capacity=0, hosts_added=0, hosts_deleted=0, indirectly_managed_hosts=0, persisted=True):
|
||||
summary = HostMetricSummaryMonthly(
|
||||
date=date,
|
||||
license_consumed=license_consumed,
|
||||
license_capacity=license_capacity,
|
||||
hosts_added=hosts_added,
|
||||
hosts_deleted=hosts_deleted,
|
||||
indirectly_managed_hosts=indirectly_managed_hosts,
|
||||
)
|
||||
if persisted:
|
||||
summary.save()
|
||||
return summary
|
||||
|
||||
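
# Example usage of the factories above (illustrative values only):
#   mk_host_metric("host_a", first_automation=timezone.now())
#   mk_host_metric_summary(datetime.date(2023, 1, 1), license_consumed=10, hosts_added=3, hosts_deleted=1)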
@@ -0,0 +1,382 @@
|
||||
import pytest
|
||||
import datetime
|
||||
from dateutil.relativedelta import relativedelta
|
||||
from django.conf import settings
|
||||
from django.utils import timezone
|
||||
|
||||
|
||||
from awx.main.management.commands.host_metric_summary_monthly import Command
|
||||
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
||||
from awx.main.tests.factories.fixtures import mk_host_metric, mk_host_metric_summary
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def threshold():
|
||||
return int(getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36))
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize("metrics_cnt", [0, 1, 2, 3])
|
||||
@pytest.mark.parametrize("mode", ["old_data", "actual_data", "all_data"])
|
||||
def test_summaries_counts(threshold, metrics_cnt, mode):
|
||||
assert HostMetricSummaryMonthly.objects.count() == 0
|
||||
|
||||
for idx in range(metrics_cnt):
|
||||
if mode in ("old_data", "all_data"):
    mk_host_metric(None, months_ago(threshold + idx, "dt"))
if mode in ("actual_data", "all_data"):
    mk_host_metric(None, months_ago(threshold - idx, "dt"))
|
||||
|
||||
Command().handle()
|
||||
|
||||
# Number of records is equal to host metrics' hard cleanup months
|
||||
assert HostMetricSummaryMonthly.objects.count() == threshold
|
||||
|
||||
# Records start with date in the month following to the threshold month
|
||||
date = months_ago(threshold - 1)
|
||||
for metric in list(HostMetricSummaryMonthly.objects.order_by('date').all()):
|
||||
assert metric.date == date
|
||||
date += relativedelta(months=1)
|
||||
|
||||
# Older records are untouched
|
||||
mk_host_metric_summary(date=months_ago(threshold + 10))
|
||||
Command().handle()
|
||||
|
||||
assert HostMetricSummaryMonthly.objects.count() == threshold + 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize("mode", ["old_data", "actual_data", "all_data"])
|
||||
def test_summary_values(threshold, mode):
|
||||
tester = {"old_data": MetricsTesterOldData(threshold), "actual_data": MetricsTesterActualData(threshold), "all_data": MetricsTesterCombinedData(threshold)}[
|
||||
mode
|
||||
]
|
||||
|
||||
for iteration in ["create_metrics", "add_old_summaries", "change_metrics", "delete_metrics", "add_metrics"]:
|
||||
getattr(tester, iteration)() # call method by string
|
||||
|
||||
# Operation is idempotent, repeat twice
|
||||
for _ in range(2):
|
||||
Command().handle()
|
||||
# call assert method by string
|
||||
getattr(tester, f"assert_{iteration}")()
|
||||
|
||||
|
||||
class MetricsTester:
|
||||
def __init__(self, threshold, ignore_asserts=False):
|
||||
self.threshold = threshold
|
||||
self.expected_summaries = {}
|
||||
self.ignore_asserts = ignore_asserts
|
||||
|
||||
def add_old_summaries(self):
|
||||
"""These records don't correspond with Host metrics"""
|
||||
mk_host_metric_summary(self.below(4), license_consumed=100, hosts_added=10, hosts_deleted=5)
|
||||
mk_host_metric_summary(self.below(3), license_consumed=105, hosts_added=20, hosts_deleted=10)
|
||||
mk_host_metric_summary(self.below(2), license_consumed=115, hosts_added=60, hosts_deleted=75)
|
||||
|
||||
def assert_add_old_summaries(self):
|
||||
"""Old summary records should be untouched"""
|
||||
self.expected_summaries[self.below(4)] = {"date": self.below(4), "license_consumed": 100, "hosts_added": 10, "hosts_deleted": 5}
|
||||
self.expected_summaries[self.below(3)] = {"date": self.below(3), "license_consumed": 105, "hosts_added": 20, "hosts_deleted": 10}
|
||||
self.expected_summaries[self.below(2)] = {"date": self.below(2), "license_consumed": 115, "hosts_added": 60, "hosts_deleted": 75}
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
def assert_host_metric_summaries(self):
|
||||
"""Ignore asserts when old/actual test object is used only as a helper for Combined test"""
|
||||
if self.ignore_asserts:
|
||||
return True
|
||||
|
||||
for summary in list(HostMetricSummaryMonthly.objects.order_by('date').all()):
|
||||
assert self.expected_summaries.get(summary.date, None) is not None
|
||||
|
||||
assert self.expected_summaries[summary.date] == {
|
||||
"date": summary.date,
|
||||
"license_consumed": summary.license_consumed,
|
||||
"hosts_added": summary.hosts_added,
|
||||
"hosts_deleted": summary.hosts_deleted,
|
||||
}
|
||||
|
||||
def below(self, months, fmt="date"):
|
||||
"""months below threshold, returns first date of that month"""
|
||||
date = months_ago(self.threshold + months)
|
||||
if fmt == "dt":
|
||||
return timezone.make_aware(datetime.datetime.combine(date, datetime.datetime.min.time()))
|
||||
else:
|
||||
return date
|
||||
|
||||
def above(self, months, fmt="date"):
|
||||
"""months above threshold, returns first date of that month"""
|
||||
date = months_ago(self.threshold - months)
|
||||
if fmt == "dt":
|
||||
return timezone.make_aware(datetime.datetime.combine(date, datetime.datetime.min.time()))
|
||||
else:
|
||||
return date
|
||||
|
||||
|
||||
class MetricsTesterOldData(MetricsTester):
|
||||
def create_metrics(self):
|
||||
"""Creates 7 host metrics older than delete threshold"""
|
||||
mk_host_metric("host_1", first_automation=self.below(3, "dt"))
|
||||
mk_host_metric("host_2", first_automation=self.below(2, "dt"))
|
||||
mk_host_metric("host_3", first_automation=self.below(2, "dt"), last_deleted=self.above(2, "dt"), deleted=False)
|
||||
mk_host_metric("host_4", first_automation=self.below(2, "dt"), last_deleted=self.above(2, "dt"), deleted=True)
|
||||
mk_host_metric("host_5", first_automation=self.below(2, "dt"), last_deleted=self.below(2, "dt"), deleted=True)
|
||||
mk_host_metric("host_6", first_automation=self.below(1, "dt"), last_deleted=self.below(1, "dt"), deleted=False)
|
||||
mk_host_metric("host_7", first_automation=self.below(1, "dt"))
|
||||
|
||||
def assert_create_metrics(self):
|
||||
"""
|
||||
Month 1 is computed from older host metrics,
|
||||
Month 2 has deletion (host_4)
|
||||
Other months are unchanged (same as month 2)
|
||||
"""
|
||||
self.expected_summaries = {
|
||||
self.above(1): {"date": self.above(1), "license_consumed": 6, "hosts_added": 0, "hosts_deleted": 0},
|
||||
self.above(2): {"date": self.above(2), "license_consumed": 5, "hosts_added": 0, "hosts_deleted": 1},
|
||||
}
|
||||
# no change in months 3+
|
||||
idx = 3
|
||||
month = self.above(idx)
|
||||
while month <= beginning_of_the_month():
|
||||
self.expected_summaries[self.above(idx)] = {"date": self.above(idx), "license_consumed": 5, "hosts_added": 0, "hosts_deleted": 0}
|
||||
month += relativedelta(months=1)
|
||||
idx += 1
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
def add_old_summaries(self):
|
||||
super().add_old_summaries()
|
||||
|
||||
def assert_add_old_summaries(self):
|
||||
super().assert_add_old_summaries()
|
||||
|
||||
@staticmethod
|
||||
def change_metrics():
|
||||
"""Hosts 1,2 soft deleted, host_4 automated again (undeleted)"""
|
||||
HostMetric.objects.filter(hostname='host_1').update(last_deleted=beginning_of_the_month("dt"), deleted=True)
|
||||
HostMetric.objects.filter(hostname='host_2').update(last_deleted=timezone.now(), deleted=True)
|
||||
HostMetric.objects.filter(hostname='host_4').update(deleted=False)
|
||||
|
||||
def assert_change_metrics(self):
|
||||
"""
|
||||
Summaries since month 2 were changed (host_4 restored == automated again)
|
||||
Current month has 2 deletions (host_1, host_2)
|
||||
"""
|
||||
self.expected_summaries[self.above(2)] |= {'hosts_deleted': 0}
|
||||
for idx in range(2, self.threshold):
|
||||
self.expected_summaries[self.above(idx)] |= {'license_consumed': 6}
|
||||
self.expected_summaries[beginning_of_the_month()] |= {'license_consumed': 4, 'hosts_deleted': 2}
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
@staticmethod
|
||||
def delete_metrics():
|
||||
"""Deletes metric deleted before the threshold"""
|
||||
HostMetric.objects.filter(hostname='host_5').delete()
|
||||
|
||||
def assert_delete_metrics(self):
|
||||
"""No change"""
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
@staticmethod
|
||||
def add_metrics():
|
||||
"""Adds new metrics"""
|
||||
mk_host_metric("host_24", first_automation=beginning_of_the_month("dt"))
|
||||
mk_host_metric("host_25", first_automation=beginning_of_the_month("dt")) # timezone.now())
|
||||
|
||||
def assert_add_metrics(self):
|
||||
"""Summary in current month is updated"""
|
||||
self.expected_summaries[beginning_of_the_month()]['license_consumed'] = 6
|
||||
self.expected_summaries[beginning_of_the_month()]['hosts_added'] = 2
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
|
||||
class MetricsTesterActualData(MetricsTester):
|
||||
def create_metrics(self):
|
||||
"""Creates 16 host metrics newer than delete threshold"""
|
||||
mk_host_metric("host_8", first_automation=self.above(1, "dt"))
|
||||
mk_host_metric("host_9", first_automation=self.above(1, "dt"), last_deleted=self.above(1, "dt"))
|
||||
mk_host_metric("host_10", first_automation=self.above(1, "dt"), last_deleted=self.above(1, "dt"), deleted=True)
|
||||
mk_host_metric("host_11", first_automation=self.above(1, "dt"), last_deleted=self.above(2, "dt"))
|
||||
mk_host_metric("host_12", first_automation=self.above(1, "dt"), last_deleted=self.above(2, "dt"), deleted=True)
|
||||
mk_host_metric("host_13", first_automation=self.above(2, "dt"))
|
||||
mk_host_metric("host_14", first_automation=self.above(2, "dt"), last_deleted=self.above(2, "dt"))
|
||||
mk_host_metric("host_15", first_automation=self.above(2, "dt"), last_deleted=self.above(2, "dt"), deleted=True)
|
||||
mk_host_metric("host_16", first_automation=self.above(2, "dt"), last_deleted=self.above(3, "dt"))
|
||||
mk_host_metric("host_17", first_automation=self.above(2, "dt"), last_deleted=self.above(3, "dt"), deleted=True)
|
||||
mk_host_metric("host_18", first_automation=self.above(4, "dt"))
|
||||
# the next one shouldn't happen in reality (deleted=True, last_deleted = NULL)
|
||||
mk_host_metric("host_19", first_automation=self.above(4, "dt"), deleted=True)
|
||||
mk_host_metric("host_20", first_automation=self.above(4, "dt"), last_deleted=self.above(4, "dt"))
|
||||
mk_host_metric("host_21", first_automation=self.above(4, "dt"), last_deleted=self.above(4, "dt"), deleted=True)
|
||||
mk_host_metric("host_22", first_automation=self.above(4, "dt"), last_deleted=self.above(5, "dt"))
|
||||
mk_host_metric("host_23", first_automation=self.above(4, "dt"), last_deleted=self.above(5, "dt"), deleted=True)
|
||||
|
||||
def assert_create_metrics(self):
|
||||
self.expected_summaries = {
|
||||
self.above(1): {"date": self.above(1), "license_consumed": 4, "hosts_added": 5, "hosts_deleted": 1},
|
||||
self.above(2): {"date": self.above(2), "license_consumed": 7, "hosts_added": 5, "hosts_deleted": 2},
|
||||
self.above(3): {"date": self.above(3), "license_consumed": 6, "hosts_added": 0, "hosts_deleted": 1},
|
||||
self.above(4): {"date": self.above(4), "license_consumed": 11, "hosts_added": 6, "hosts_deleted": 1},
|
||||
self.above(5): {"date": self.above(5), "license_consumed": 10, "hosts_added": 0, "hosts_deleted": 1},
|
||||
}
|
||||
# no change in months 6+
|
||||
idx = 6
|
||||
month = self.above(idx)
|
||||
while month <= beginning_of_the_month():
|
||||
self.expected_summaries[self.above(idx)] = {"date": self.above(idx), "license_consumed": 10, "hosts_added": 0, "hosts_deleted": 0}
|
||||
month += relativedelta(months=1)
|
||||
idx += 1
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
def add_old_summaries(self):
|
||||
super().add_old_summaries()
|
||||
|
||||
def assert_add_old_summaries(self):
|
||||
super().assert_add_old_summaries()
|
||||
|
||||
@staticmethod
|
||||
def change_metrics():
|
||||
"""
|
||||
- Hosts 12, 19, 21 were automated again (undeleted)
|
||||
- Host 16 was soft deleted
|
||||
- Host 17 was undeleted and soft deleted again
|
||||
"""
|
||||
HostMetric.objects.filter(hostname='host_12').update(deleted=False)
|
||||
HostMetric.objects.filter(hostname='host_16').update(last_deleted=timezone.now(), deleted=True)
|
||||
HostMetric.objects.filter(hostname='host_17').update(last_deleted=beginning_of_the_month("dt"), deleted=True)
|
||||
HostMetric.objects.filter(hostname='host_19').update(deleted=False)
|
||||
HostMetric.objects.filter(hostname='host_21').update(deleted=False)
|
||||
|
||||
def assert_change_metrics(self):
|
||||
"""
|
||||
Summaries since month 2 were changed
|
||||
Current month has 2 deletions (host_16, host_17)
|
||||
"""
|
||||
self.expected_summaries[self.above(2)] |= {'license_consumed': 8, 'hosts_deleted': 1}
|
||||
self.expected_summaries[self.above(3)] |= {'license_consumed': 8, 'hosts_deleted': 0}
|
||||
self.expected_summaries[self.above(4)] |= {'license_consumed': 14, 'hosts_deleted': 0}
|
||||
|
||||
# month 5 had hosts_deleted 1 => license_consumed == 14 - 1
|
||||
for idx in range(5, self.threshold):
|
||||
self.expected_summaries[self.above(idx)] |= {'license_consumed': 13}
|
||||
self.expected_summaries[beginning_of_the_month()] |= {'license_consumed': 11, 'hosts_deleted': 2}
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
def delete_metrics(self):
|
||||
"""Hard cleanup can't delete metrics newer than threshold. No change"""
|
||||
pass
|
||||
|
||||
def assert_delete_metrics(self):
|
||||
"""No change"""
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
@staticmethod
|
||||
def add_metrics():
|
||||
"""Adds new metrics"""
|
||||
mk_host_metric("host_26", first_automation=beginning_of_the_month("dt"))
|
||||
mk_host_metric("host_27", first_automation=timezone.now())
|
||||
|
||||
def assert_add_metrics(self):
|
||||
"""
|
||||
Two metrics were deleted in current month by change_metrics()
|
||||
Two metrics are added now
|
||||
=> license_consumed is equal to the previous month (13 - 2 + 2)
|
||||
"""
|
||||
self.expected_summaries[beginning_of_the_month()] |= {'license_consumed': 13, 'hosts_added': 2}
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
|
||||
class MetricsTesterCombinedData(MetricsTester):
|
||||
def __init__(self, threshold):
|
||||
super().__init__(threshold)
|
||||
self.old_data = MetricsTesterOldData(threshold, ignore_asserts=True)
|
||||
self.actual_data = MetricsTesterActualData(threshold, ignore_asserts=True)
|
||||
|
||||
def assert_host_metric_summaries(self):
|
||||
self._combine_expected_summaries()
|
||||
super().assert_host_metric_summaries()
|
||||
|
||||
def create_metrics(self):
|
||||
self.old_data.create_metrics()
|
||||
self.actual_data.create_metrics()
|
||||
|
||||
def assert_create_metrics(self):
|
||||
self.old_data.assert_create_metrics()
|
||||
self.actual_data.assert_create_metrics()
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
def add_old_summaries(self):
|
||||
super().add_old_summaries()
|
||||
|
||||
def assert_add_old_summaries(self):
|
||||
self.old_data.assert_add_old_summaries()
|
||||
self.actual_data.assert_add_old_summaries()
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
def change_metrics(self):
|
||||
self.old_data.change_metrics()
|
||||
self.actual_data.change_metrics()
|
||||
|
||||
def assert_change_metrics(self):
|
||||
self.old_data.assert_change_metrics()
|
||||
self.actual_data.assert_change_metrics()
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
def delete_metrics(self):
|
||||
self.old_data.delete_metrics()
|
||||
self.actual_data.delete_metrics()
|
||||
|
||||
def assert_delete_metrics(self):
|
||||
self.old_data.assert_delete_metrics()
|
||||
self.actual_data.assert_delete_metrics()
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
def add_metrics(self):
|
||||
self.old_data.add_metrics()
|
||||
self.actual_data.add_metrics()
|
||||
|
||||
def assert_add_metrics(self):
|
||||
self.old_data.assert_add_metrics()
|
||||
self.actual_data.assert_add_metrics()
|
||||
|
||||
self.assert_host_metric_summaries()
|
||||
|
||||
def _combine_expected_summaries(self):
|
||||
"""
|
||||
Expected summaries are sum of expected values for tests with old and actual data
|
||||
Except data older than hard delete threshold (these summaries are untouched by task => the same in all tests)
|
||||
"""
|
||||
for date, summary in self.old_data.expected_summaries.items():
|
||||
if date <= months_ago(self.threshold):
|
||||
license_consumed = summary['license_consumed']
|
||||
hosts_added = summary['hosts_added']
|
||||
hosts_deleted = summary['hosts_deleted']
|
||||
else:
|
||||
license_consumed = summary['license_consumed'] + self.actual_data.expected_summaries[date]['license_consumed']
|
||||
hosts_added = summary['hosts_added'] + self.actual_data.expected_summaries[date]['hosts_added']
|
||||
hosts_deleted = summary['hosts_deleted'] + self.actual_data.expected_summaries[date]['hosts_deleted']
|
||||
self.expected_summaries[date] = {'date': date, 'license_consumed': license_consumed, 'hosts_added': hosts_added, 'hosts_deleted': hosts_deleted}
|
||||
|
||||
|
||||
def months_ago(num, fmt="date"):
|
||||
if num is None:
|
||||
return None
|
||||
return beginning_of_the_month(fmt) - relativedelta(months=num)
|
||||
|
||||
|
||||
def beginning_of_the_month(fmt="date"):
|
||||
date = datetime.date.today().replace(day=1)
|
||||
if fmt == "dt":
|
||||
return timezone.make_aware(datetime.datetime.combine(date, datetime.datetime.min.time()))
|
||||
else:
|
||||
return date
|
||||
@@ -331,15 +331,13 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job
|
||||
p.save(skip_update=True)
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
dm = DependencyManager()
|
||||
with mock.patch.object(DependencyManager, "create_project_update", wraps=dm.create_project_update) as mock_pu:
|
||||
dm.schedule()
|
||||
mock_pu.assert_called_once_with(j)
|
||||
pu = [x for x in p.project_updates.all()]
|
||||
assert len(pu) == 1
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, instance)
|
||||
pu[0].status = "successful"
|
||||
pu[0].save()
|
||||
dm.schedule()
|
||||
pu = [x for x in p.project_updates.all()]
|
||||
assert len(pu) == 1
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, instance)
|
||||
pu[0].status = "successful"
|
||||
pu[0].save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
|
||||
@@ -359,15 +357,14 @@ def test_single_job_dependencies_inventory_update_launch(controlplane_instance_g
|
||||
i.inventory_sources.add(ii)
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
dm = DependencyManager()
|
||||
with mock.patch.object(DependencyManager, "create_inventory_update", wraps=dm.create_inventory_update) as mock_iu:
|
||||
dm.schedule()
|
||||
mock_iu.assert_called_once_with(j, ii)
|
||||
iu = [x for x in ii.inventory_updates.all()]
|
||||
assert len(iu) == 1
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, instance)
|
||||
iu[0].status = "successful"
|
||||
iu[0].save()
|
||||
dm.schedule()
|
||||
assert ii.inventory_updates.count() == 1
|
||||
iu = [x for x in ii.inventory_updates.all()]
|
||||
assert len(iu) == 1
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, instance)
|
||||
iu[0].status = "successful"
|
||||
iu[0].save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
|
||||
@@ -382,11 +379,11 @@ def test_inventory_update_launches_project_update(controlplane_instance_group, s
|
||||
iu = ii.create_inventory_update()
|
||||
iu.status = "pending"
|
||||
iu.save()
|
||||
assert project.project_updates.count() == 0
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
dm = DependencyManager()
|
||||
with mock.patch.object(DependencyManager, "create_project_update", wraps=dm.create_project_update) as mock_pu:
|
||||
dm.schedule()
|
||||
mock_pu.assert_called_with(iu, project_id=project.id)
|
||||
dm.schedule()
|
||||
assert project.project_updates.count() == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -407,9 +404,8 @@ def test_job_dependency_with_already_updated(controlplane_instance_group, job_te
|
||||
j.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
dm = DependencyManager()
|
||||
with mock.patch.object(DependencyManager, "create_inventory_update", wraps=dm.create_inventory_update) as mock_iu:
|
||||
dm.schedule()
|
||||
mock_iu.assert_not_called()
|
||||
dm.schedule()
|
||||
assert ii.inventory_updates.count() == 0
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
|
||||
@@ -442,7 +438,9 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
|
||||
TaskManager().schedule()
|
||||
pu = p.project_updates.first()
|
||||
iu = ii.inventory_updates.first()
|
||||
TaskManager.start_task.assert_has_calls([mock.call(iu, controlplane_instance_group, instance), mock.call(pu, controlplane_instance_group, instance)])
|
||||
TaskManager.start_task.assert_has_calls(
|
||||
[mock.call(iu, controlplane_instance_group, instance), mock.call(pu, controlplane_instance_group, instance)], any_order=True
|
||||
)
|
||||
pu.status = "successful"
|
||||
pu.finished = pu.created + timedelta(seconds=1)
|
||||
pu.save()
|
||||
@@ -451,7 +449,9 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
|
||||
iu.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_has_calls([mock.call(j1, controlplane_instance_group, instance), mock.call(j2, controlplane_instance_group, instance)])
|
||||
TaskManager.start_task.assert_has_calls(
|
||||
[mock.call(j1, controlplane_instance_group, instance), mock.call(j2, controlplane_instance_group, instance)], any_order=True
|
||||
)
|
||||
pu = [x for x in p.project_updates.all()]
|
||||
iu = [x for x in ii.inventory_updates.all()]
|
||||
assert len(pu) == 1
|
||||
|
||||
@@ -189,11 +189,12 @@
|
||||
connection: local
|
||||
name: Install content with ansible-galaxy command if necessary
|
||||
vars:
|
||||
galaxy_task_env: # configure in settings
|
||||
additional_collections_env:
|
||||
# These environment variables are used for installing collections, in addition to galaxy_task_env
|
||||
# setting the collections paths silences warnings
|
||||
galaxy_task_env: # configured in settings
|
||||
# additional_galaxy_env contains environment variables that are used for installing roles and collections and will take precedence over items in galaxy_task_env
|
||||
additional_galaxy_env:
|
||||
# These paths control where ansible-galaxy installs collections and roles on the filesystem
|
||||
ANSIBLE_COLLECTIONS_PATHS: "{{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_collections"
|
||||
ANSIBLE_ROLES_PATH: "{{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_roles"
|
||||
# Put the local tmp directory in same volume as collection destination
|
||||
# otherwise, files cannot be moved across volumes and will cause an error
|
||||
ANSIBLE_LOCAL_TEMP: "{{ projects_root }}/.__awx_cache/{{ local_path }}/stage/tmp"
|
||||
@@ -212,40 +213,53 @@
|
||||
- name: End play due to disabled content sync
|
||||
ansible.builtin.meta: end_play
|
||||
|
||||
- name: Fetch galaxy roles from requirements.(yml/yaml)
|
||||
ansible.builtin.command: >
|
||||
ansible-galaxy role install -r {{ item }}
|
||||
--roles-path {{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_roles
|
||||
{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
|
||||
args:
|
||||
chdir: "{{ project_path | quote }}"
|
||||
register: galaxy_result
|
||||
with_fileglob:
|
||||
- "{{ project_path | quote }}/roles/requirements.yaml"
|
||||
- "{{ project_path | quote }}/roles/requirements.yml"
|
||||
changed_when: "'was installed successfully' in galaxy_result.stdout"
|
||||
environment: "{{ galaxy_task_env }}"
|
||||
when: roles_enabled | bool
|
||||
tags:
|
||||
- install_roles
|
||||
- block:
|
||||
- name: Fetch galaxy roles from roles/requirements.(yml/yaml)
|
||||
ansible.builtin.command:
|
||||
cmd: "ansible-galaxy role install -r {{ item }} {{ verbosity }}"
|
||||
register: galaxy_result
|
||||
with_fileglob:
|
||||
- "{{ project_path | quote }}/roles/requirements.yaml"
|
||||
- "{{ project_path | quote }}/roles/requirements.yml"
|
||||
changed_when: "'was installed successfully' in galaxy_result.stdout"
|
||||
when: roles_enabled | bool
|
||||
tags:
|
||||
- install_roles
|
||||
|
||||
- name: Fetch galaxy collections from collections/requirements.(yml/yaml)
|
||||
ansible.builtin.command: >
|
||||
ansible-galaxy collection install -r {{ item }}
|
||||
--collections-path {{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_collections
|
||||
{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
|
||||
args:
|
||||
chdir: "{{ project_path | quote }}"
|
||||
register: galaxy_collection_result
|
||||
with_fileglob:
|
||||
- "{{ project_path | quote }}/collections/requirements.yaml"
|
||||
- "{{ project_path | quote }}/collections/requirements.yml"
|
||||
- "{{ project_path | quote }}/requirements.yaml"
|
||||
- "{{ project_path | quote }}/requirements.yml"
|
||||
changed_when: "'Installing ' in galaxy_collection_result.stdout"
|
||||
environment: "{{ additional_collections_env | combine(galaxy_task_env) }}"
|
||||
when:
|
||||
- "ansible_version.full is version_compare('2.9', '>=')"
|
||||
- collections_enabled | bool
|
||||
tags:
|
||||
- install_collections
|
||||
- name: Fetch galaxy collections from collections/requirements.(yml/yaml)
|
||||
ansible.builtin.command:
|
||||
cmd: "ansible-galaxy collection install -r {{ item }} {{ verbosity }}"
|
||||
register: galaxy_collection_result
|
||||
with_fileglob:
|
||||
- "{{ project_path | quote }}/collections/requirements.yaml"
|
||||
- "{{ project_path | quote }}/collections/requirements.yml"
|
||||
changed_when: "'Nothing to do.' not in galaxy_collection_result.stdout"
|
||||
when:
|
||||
- "ansible_version.full is version_compare('2.9', '>=')"
|
||||
- collections_enabled | bool
|
||||
tags:
|
||||
- install_collections
|
||||
|
||||
- name: Fetch galaxy roles and collections from requirements.(yml/yaml)
|
||||
ansible.builtin.command:
|
||||
cmd: "ansible-galaxy install -r {{ item }} {{ verbosity }}"
|
||||
register: galaxy_combined_result
|
||||
with_fileglob:
|
||||
- "{{ project_path | quote }}/requirements.yaml"
|
||||
- "{{ project_path | quote }}/requirements.yml"
|
||||
changed_when: "'Nothing to do.' not in galaxy_combined_result.stdout"
|
||||
when:
|
||||
- "ansible_version.full is version_compare('2.10', '>=')"
|
||||
- collections_enabled | bool
|
||||
- roles_enabled | bool
|
||||
tags:
|
||||
- install_collections
|
||||
- install_roles
|
||||
module_defaults:
|
||||
ansible.builtin.command:
|
||||
chdir: "{{ project_path | quote }}"
|
||||
|
||||
# We combine our additional_galaxy_env into galaxy_task_env so that our values are preferred over anything a user would set
|
||||
environment: "{{ galaxy_task_env | combine(additional_galaxy_env) }}"
|
||||
vars:
|
||||
verbosity: "{{ (ansible_verbosity) | ternary('-'+'v'*ansible_verbosity, '') }}"
|
||||
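# e.g. ansible_verbosity == 3 expands verbosity to '-vvv'; with verbosity 0 it stays empty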
|
||||
@@ -158,6 +158,11 @@ REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST']
|
||||
# REMOTE_HOST_HEADERS will be trusted unconditionally')
|
||||
PROXY_IP_ALLOWED_LIST = []
|
||||
|
||||
# If we are behind a reverse proxy/load balancer, use this setting to
# configure the scheme://addresses from which Tower should trust CSRF requests
# If this setting is an empty list (the default), we will only trust ourselves
|
||||
CSRF_TRUSTED_ORIGINS = []
|
||||
|
||||
CUSTOM_VENV_PATHS = []
|
||||
|
||||
# Warning: this is a placeholder for a database setting
|
||||
@@ -322,7 +327,6 @@ INSTALLED_APPS = [
|
||||
'rest_framework',
|
||||
'django_extensions',
|
||||
'polymorphic',
|
||||
'taggit',
|
||||
'social_django',
|
||||
'django_guid',
|
||||
'corsheaders',
|
||||
@@ -466,12 +470,13 @@ CELERYBEAT_SCHEDULE = {
|
||||
'receptor_reaper': {'task': 'awx.main.tasks.system.awx_receptor_workunit_reaper', 'schedule': timedelta(seconds=60)},
|
||||
'send_subsystem_metrics': {'task': 'awx.main.analytics.analytics_tasks.send_subsystem_metrics', 'schedule': timedelta(seconds=20)},
|
||||
'cleanup_images': {'task': 'awx.main.tasks.system.cleanup_images_and_files', 'schedule': timedelta(hours=3)},
|
||||
'cleanup_host_metrics': {'task': 'awx.main.tasks.system.cleanup_host_metrics', 'schedule': timedelta(days=1)},
|
||||
'cleanup_host_metrics': {'task': 'awx.main.tasks.system.cleanup_host_metrics', 'schedule': timedelta(hours=3, minutes=30)},
|
||||
'host_metric_summary_monthly': {'task': 'awx.main.tasks.host_metrics.host_metric_summary_monthly', 'schedule': timedelta(hours=4)},
|
||||
}
|
||||
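# Note: the beat entries above only wake the tasks; cleanup_host_metrics and host_metric_summary_monthly
# additionally rate-limit themselves via CLEANUP_HOST_METRICS_INTERVAL and HOST_METRIC_SUMMARY_TASK_INTERVAL.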
|
||||
# Django Caching Configuration
|
||||
DJANGO_REDIS_IGNORE_EXCEPTIONS = True
|
||||
CACHES = {'default': {'BACKEND': 'django_redis.cache.RedisCache', 'LOCATION': 'unix:/var/run/redis/redis.sock?db=1'}}
|
||||
CACHES = {'default': {'BACKEND': 'awx.main.cache.AWXRedisCache', 'LOCATION': 'unix:/var/run/redis/redis.sock?db=1'}}
|
||||
|
||||
# Social Auth configuration.
|
||||
SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy'
|
||||
@@ -959,6 +964,9 @@ AWX_RUNNER_KEEPALIVE_SECONDS = 0
|
||||
# Delete completed work units in receptor
|
||||
RECEPTOR_RELEASE_WORK = True
|
||||
|
||||
# K8S only. Use receptor_log_level on AWX spec to set this properly
|
||||
RECEPTOR_LOG_LEVEL = 'info'
|
||||
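# (referenced by the Receptor config starter, which emits {'log-level': settings.RECEPTOR_LOG_LEVEL})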
|
||||
MIDDLEWARE = [
|
||||
'django_guid.middleware.guid_middleware',
|
||||
'awx.main.middleware.SettingsCacheMiddleware',
|
||||
@@ -1046,4 +1054,12 @@ CLEANUP_HOST_METRICS_LAST_TS = None
|
||||
# Host metrics cleanup - minimal interval between two cleanups in days
|
||||
CLEANUP_HOST_METRICS_INTERVAL = 30 # days
|
||||
# Host metrics cleanup - soft-delete HostMetric records with last_automation < [threshold] (in months)
|
||||
CLEANUP_HOST_METRICS_THRESHOLD = 12 # months
|
||||
CLEANUP_HOST_METRICS_SOFT_THRESHOLD = 12 # months
|
||||
# Host metrics cleanup
|
||||
# - delete HostMetric record with deleted=True and last_deleted < [threshold]
|
||||
# - also threshold for computing HostMetricSummaryMonthly (command/scheduled task)
|
||||
CLEANUP_HOST_METRICS_HARD_THRESHOLD = 36 # months
|
||||
|
||||
# Host metric summary monthly task - last time of run
|
||||
HOST_METRIC_SUMMARY_TASK_LAST_TS = None
|
||||
HOST_METRIC_SUMMARY_TASK_INTERVAL = 7 # days
|
||||
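# Taken together (defaults above): HostMetric rows not automated for 12 months are soft-deleted,
# rows soft-deleted more than 36 months ago are hard-deleted, and the monthly summary covering the
# trailing 36 months is rebuilt at most once every 7 days.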
|
||||
@@ -19,7 +19,7 @@
|
||||
<input type="text" name="username" maxlength="100"
|
||||
autocapitalize="off"
|
||||
autocorrect="off" class="form-control textinput textInput"
|
||||
id="id_username" required autofocus
|
||||
id="id_username" autocomplete="off" required autofocus
|
||||
{% if form.username.value %}value="{{ form.username.value }}"{% endif %}>
|
||||
{% if form.username.errors %}
|
||||
<p class="text-error">{{ form.username.errors|striptags }}</p>
|
||||
@@ -31,7 +31,8 @@
|
||||
<div class="form-group">
|
||||
<label for="id_password">Password:</label>
|
||||
<input type="password" name="password" maxlength="100" autocapitalize="off"
|
||||
autocorrect="off" class="form-control textinput textInput" id="id_password" required>
|
||||
autocorrect="off" class="form-control textinput textInput" id="id_password"
|
||||
autocomplete="off" required>
|
||||
{% if form.password.errors %}
|
||||
<p class="text-error">{{ form.password.errors|striptags }}</p>
|
||||
{% endif %}
|
||||
|
||||
@@ -91,7 +91,7 @@ function AdHocCredentialStep({ credentialTypeId }) {
|
||||
{meta.touched && meta.error && (
|
||||
<CredentialErrorAlert variant="danger" isInline title={meta.error} />
|
||||
)}
|
||||
<Form>
|
||||
<Form autoComplete="off">
|
||||
<FormGroup
|
||||
fieldId="credential"
|
||||
label={t`Machine Credential`}
|
||||
|
||||
@@ -50,7 +50,7 @@ function AdHocDetailsStep({ moduleOptions }) {
|
||||
: true;
|
||||
|
||||
return (
|
||||
<Form>
|
||||
<Form autoComplete="off">
|
||||
<FormColumnLayout>
|
||||
<FormFullWidthLayout>
|
||||
<FormGroup
|
||||
|
||||
@@ -84,7 +84,7 @@ function AdHocExecutionEnvironmentStep({ organizationId }) {
|
||||
}
|
||||
|
||||
return (
|
||||
<Form>
|
||||
<Form autoComplete="off">
|
||||
<FormGroup
|
||||
fieldId="execution_enviroment"
|
||||
label={t`Execution Environment`}
|
||||
|
||||
@@ -50,7 +50,7 @@ const userSortColumns = [
|
||||
const teamSearchColumns = [
|
||||
{
|
||||
name: t`Name`,
|
||||
key: 'name',
|
||||
key: 'name__icontains',
|
||||
isDefault: true,
|
||||
},
|
||||
{
|
||||
|
||||
@@ -94,7 +94,7 @@ export default function FrequencyDetails({
|
||||
value={getRunEveryLabel()}
|
||||
dataCy={`${prefix}-run-every`}
|
||||
/>
|
||||
{type === 'week' ? (
|
||||
{type === 'week' && options.daysOfWeek ? (
|
||||
<Detail
|
||||
label={t`On days`}
|
||||
value={options.daysOfWeek
|
||||
|
||||
@@ -24,10 +24,10 @@ function DateTimePicker({ dateFieldName, timeFieldName, label }) {
|
||||
validate: combine([required(null), validateTime()]),
|
||||
});
|
||||
|
||||
const onDateChange = (inputDate, newDate) => {
|
||||
const onDateChange = (_, dateString, date) => {
|
||||
dateHelpers.setTouched();
|
||||
if (isValidDate(newDate) && inputDate === yyyyMMddFormat(newDate)) {
|
||||
dateHelpers.setValue(inputDate);
|
||||
if (isValidDate(date) && dateString === yyyyMMddFormat(date)) {
|
||||
dateHelpers.setValue(dateString);
|
||||
}
|
||||
};
|
||||
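  // Note: the updated handler assumes DatePicker's onChange now passes (event, dateString, date);
  // the event argument is intentionally ignored here.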
|
||||
@@ -62,7 +62,7 @@ function DateTimePicker({ dateFieldName, timeFieldName, label }) {
|
||||
}
|
||||
time={timeField.value}
|
||||
{...timeField}
|
||||
onChange={(time) => timeHelpers.setValue(time)}
|
||||
onChange={(_, time) => timeHelpers.setValue(time)}
|
||||
/>
|
||||
</DateTimeGroup>
|
||||
</FormGroup>
|
||||
|
||||
@@ -43,10 +43,11 @@ describe('<DateTimePicker/>', () => {
|
||||
|
||||
await act(async () => {
|
||||
wrapper.find('DatePicker').prop('onChange')(
|
||||
null,
|
||||
'2021-05-29',
|
||||
new Date('Sat May 29 2021 00:00:00 GMT-0400 (Eastern Daylight Time)')
|
||||
);
|
||||
wrapper.find('TimePicker').prop('onChange')('7:15 PM');
|
||||
wrapper.find('TimePicker').prop('onChange')(null, '7:15 PM');
|
||||
});
|
||||
wrapper.update();
|
||||
expect(wrapper.find('DatePicker').prop('value')).toBe('2021-05-29');
|
||||
|
||||
@@ -885,6 +885,7 @@ describe('<ScheduleForm />', () => {
|
||||
).toBe(true);
|
||||
await act(async () => {
|
||||
wrapper.find('DatePicker[aria-label="End date"]').prop('onChange')(
|
||||
null,
|
||||
'2020-03-14',
|
||||
new Date('2020-03-14')
|
||||
);
|
||||
@@ -905,6 +906,7 @@ describe('<ScheduleForm />', () => {
|
||||
const laterTime = DateTime.now().plus({ hours: 1 }).toFormat('h:mm a');
|
||||
await act(async () => {
|
||||
wrapper.find('DatePicker[aria-label="End date"]').prop('onChange')(
|
||||
null,
|
||||
today,
|
||||
new Date(today)
|
||||
);
|
||||
@@ -919,6 +921,7 @@ describe('<ScheduleForm />', () => {
|
||||
);
|
||||
await act(async () => {
|
||||
wrapper.find('TimePicker[aria-label="End time"]').prop('onChange')(
|
||||
null,
|
||||
laterTime
|
||||
);
|
||||
});
|
||||
|
||||
@@ -67,7 +67,7 @@ function MetadataStep() {
|
||||
return (
|
||||
<>
|
||||
{fields.length > 0 && (
|
||||
<Form>
|
||||
<Form autoComplete="off">
|
||||
<FormFullWidthLayout>
|
||||
{fields.map((field) => {
|
||||
if (field.type === 'string') {
|
||||
|
||||
@@ -99,7 +99,7 @@ function ExternalTestModal({
|
||||
</Button>,
|
||||
]}
|
||||
>
|
||||
<Form>
|
||||
<Form autoComplete="off">
|
||||
<FormFullWidthLayout>
|
||||
{credentialType.inputs.metadata.map((field) => {
|
||||
const isRequired = credentialType.inputs?.required.includes(
|
||||
|
||||
@@ -122,7 +122,7 @@ function ConstructedInventoryHint() {
|
||||
<br />
|
||||
<Panel>
|
||||
<CardBody>
|
||||
<Form>
|
||||
<Form autoComplete="off">
|
||||
<b>{t`Constructed inventory examples`}</b>
|
||||
<LimitToIntersectionExample />
|
||||
<FilterOnNestedGroupExample />
|
||||
|
||||
@@ -178,6 +178,7 @@ function NotificationTemplatesList() {
|
||||
<HeaderCell sortKey="name">{t`Name`}</HeaderCell>
|
||||
<HeaderCell>{t`Status`}</HeaderCell>
|
||||
<HeaderCell sortKey="notification_type">{t`Type`}</HeaderCell>
|
||||
<HeaderCell sortKey="organization">{t`Organization`}</HeaderCell>
|
||||
<HeaderCell>{t`Actions`}</HeaderCell>
|
||||
</HeaderRow>
|
||||
}
|
||||
|
||||
@@ -20,6 +20,10 @@ const mockTemplates = {
|
||||
url: '/notification_templates/1',
|
||||
type: 'slack',
|
||||
summary_fields: {
|
||||
organization: {
|
||||
id: 1,
|
||||
name: 'Foo',
|
||||
},
|
||||
recent_notifications: [
|
||||
{
|
||||
status: 'success',
|
||||
@@ -36,6 +40,10 @@ const mockTemplates = {
|
||||
id: 2,
|
||||
url: '/notification_templates/2',
|
||||
summary_fields: {
|
||||
organization: {
|
||||
id: 2,
|
||||
name: 'Bar',
|
||||
},
|
||||
recent_notifications: [],
|
||||
user_capabilities: {
|
||||
delete: true,
|
||||
@@ -48,6 +56,10 @@ const mockTemplates = {
|
||||
id: 3,
|
||||
url: '/notification_templates/3',
|
||||
summary_fields: {
|
||||
organization: {
|
||||
id: 3,
|
||||
name: 'Test',
|
||||
},
|
||||
recent_notifications: [
|
||||
{
|
||||
status: 'failed',
|
||||
|
||||
@@ -121,6 +121,13 @@ function NotificationTemplateListItem({
|
||||
{NOTIFICATION_TYPES[template.notification_type] ||
|
||||
template.notification_type}
|
||||
</Td>
|
||||
<Td dataLabel={t`Organization`}>
|
||||
<Link
|
||||
to={`/organizations/${template.summary_fields.organization.id}/details`}
|
||||
>
|
||||
<b>{template.summary_fields.organization.name}</b>
|
||||
</Link>
|
||||
</Td>
|
||||
<ActionsTd dataLabel={t`Actions`}>
|
||||
<ActionItem visible tooltip={t`Test notification`}>
|
||||
<Button
|
||||
|
||||
@@ -12,6 +12,10 @@ const template = {
|
||||
notification_type: 'slack',
|
||||
name: 'Test Notification',
|
||||
summary_fields: {
|
||||
organization: {
|
||||
id: 1,
|
||||
name: 'Foo',
|
||||
},
|
||||
user_capabilities: {
|
||||
edit: true,
|
||||
copy: true,
|
||||
@@ -39,7 +43,7 @@ describe('<NotificationTemplateListItem />', () => {
|
||||
);
|
||||
|
||||
const cells = wrapper.find('Td');
|
||||
expect(cells).toHaveLength(5);
|
||||
expect(cells).toHaveLength(6);
|
||||
expect(cells.at(1).text()).toEqual('Test Notification');
|
||||
expect(cells.at(2).text()).toEqual('Success');
|
||||
expect(cells.at(3).text()).toEqual('Slack');
|
||||
@@ -133,6 +137,10 @@ describe('<NotificationTemplateListItem />', () => {
|
||||
template={{
|
||||
...template,
|
||||
summary_fields: {
|
||||
organization: {
|
||||
id: 3,
|
||||
name: 'Test',
|
||||
},
|
||||
user_capabilities: {
|
||||
copy: false,
|
||||
edit: false,
|
||||
|
||||
@@ -59,6 +59,7 @@ function MiscSystemDetail() {
|
||||
'TOWER_URL_BASE',
|
||||
'DEFAULT_EXECUTION_ENVIRONMENT',
|
||||
'PROXY_IP_ALLOWED_LIST',
|
||||
'CSRF_TRUSTED_ORIGINS',
|
||||
'AUTOMATION_ANALYTICS_LAST_GATHER',
|
||||
'AUTOMATION_ANALYTICS_LAST_ENTRIES',
|
||||
'UI_NEXT'
|
||||
|
||||
@@ -29,6 +29,7 @@ describe('<MiscSystemDetail />', () => {
|
||||
TOWER_URL_BASE: 'https://towerhost',
|
||||
REMOTE_HOST_HEADERS: [],
|
||||
PROXY_IP_ALLOWED_LIST: [],
|
||||
CSRF_TRUSTED_ORIGINS: [],
|
||||
LICENSE: null,
|
||||
REDHAT_USERNAME: 'name1',
|
||||
REDHAT_PASSWORD: '$encrypted$',
|
||||
|
||||
@@ -53,6 +53,7 @@ function MiscSystemEdit() {
|
||||
'TOWER_URL_BASE',
|
||||
'DEFAULT_EXECUTION_ENVIRONMENT',
|
||||
'PROXY_IP_ALLOWED_LIST',
|
||||
'CSRF_TRUSTED_ORIGINS',
|
||||
'UI_NEXT'
|
||||
);
|
||||
|
||||
@@ -95,6 +96,7 @@ function MiscSystemEdit() {
|
||||
await submitForm({
|
||||
...form,
|
||||
PROXY_IP_ALLOWED_LIST: formatJson(form.PROXY_IP_ALLOWED_LIST),
|
||||
CSRF_TRUSTED_ORIGINS: formatJson(form.CSRF_TRUSTED_ORIGINS),
|
||||
REMOTE_HOST_HEADERS: formatJson(form.REMOTE_HOST_HEADERS),
|
||||
DEFAULT_EXECUTION_ENVIRONMENT:
|
||||
form.DEFAULT_EXECUTION_ENVIRONMENT?.id || null,
|
||||
@@ -239,6 +241,11 @@ function MiscSystemEdit() {
|
||||
config={system.PROXY_IP_ALLOWED_LIST}
|
||||
isRequired
|
||||
/>
|
||||
<ObjectField
|
||||
name="CSRF_TRUSTED_ORIGINS"
|
||||
config={system.CSRF_TRUSTED_ORIGINS}
|
||||
isRequired
|
||||
/>
|
||||
{submitError && <FormSubmitError error={submitError} />}
|
||||
{revertError && <FormSubmitError error={revertError} />}
|
||||
</FormColumnLayout>
|
||||
|
||||
@@ -39,6 +39,7 @@ const systemData = {
|
||||
REMOTE_HOST_HEADERS: ['REMOTE_ADDR', 'REMOTE_HOST'],
|
||||
TOWER_URL_BASE: 'https://localhost:3000',
|
||||
PROXY_IP_ALLOWED_LIST: [],
|
||||
CSRF_TRUSTED_ORIGINS: [],
|
||||
UI_NEXT: false,
|
||||
};
|
||||
|
||||
|
||||
@@ -89,7 +89,8 @@ function SubscriptionDetail() {
|
||||
/>
|
||||
)}
|
||||
{typeof automatedInstancesCount !== 'undefined' &&
|
||||
automatedInstancesCount !== null && (
|
||||
automatedInstancesCount !== null &&
|
||||
systemConfig?.SUBSCRIPTION_USAGE_MODEL !== '' && (
|
||||
<Detail
|
||||
dataCy="subscription-hosts-automated"
|
||||
label={t`Hosts automated`}
|
||||
@@ -105,11 +106,13 @@ function SubscriptionDetail() {
|
||||
}
|
||||
/>
|
||||
)}
|
||||
<Detail
|
||||
dataCy="subscription-hosts-imported"
|
||||
label={t`Hosts imported`}
|
||||
value={license_info.current_instances}
|
||||
/>
|
||||
{systemConfig?.SUBSCRIPTION_USAGE_MODEL !== '' && (
|
||||
<Detail
|
||||
dataCy="subscription-hosts-imported"
|
||||
label={t`Hosts imported`}
|
||||
value={license_info.current_instances}
|
||||
/>
|
||||
)}
|
||||
{systemConfig?.SUBSCRIPTION_USAGE_MODEL ===
|
||||
'unique_managed_hosts' && (
|
||||
<Detail
|
||||
@@ -134,20 +137,23 @@ function SubscriptionDetail() {
|
||||
value={license_info.reactivated_instances}
|
||||
/>
|
||||
)}
|
||||
{license_info.instance_count < 9999999 && (
|
||||
<Detail
|
||||
dataCy="subscription-hosts-available"
|
||||
label={t`Hosts available`}
|
||||
value={license_info.available_instances}
|
||||
/>
|
||||
)}
|
||||
{license_info.instance_count >= 9999999 && (
|
||||
<Detail
|
||||
dataCy="subscription-unlimited-hosts-available"
|
||||
label={t`Hosts available`}
|
||||
value={t`Unlimited`}
|
||||
/>
|
||||
)}
|
||||
|
||||
{systemConfig?.SUBSCRIPTION_USAGE_MODEL !== '' &&
|
||||
license_info.instance_count < 9999999 && (
|
||||
<Detail
|
||||
dataCy="subscription-hosts-available"
|
||||
label={t`Hosts available`}
|
||||
value={license_info.available_instances}
|
||||
/>
|
||||
)}
|
||||
{systemConfig?.SUBSCRIPTION_USAGE_MODEL !== '' &&
|
||||
license_info.instance_count >= 9999999 && (
|
||||
<Detail
|
||||
dataCy="subscription-unlimited-hosts-available"
|
||||
label={t`Hosts available`}
|
||||
value={t`Unlimited`}
|
||||
/>
|
||||
)}
|
||||
<Detail
|
||||
dataCy="subscription-type"
|
||||
label={t`Subscription type`}
|
||||
|
||||
@@ -78,6 +78,20 @@
|
||||
"read_only": false
|
||||
}
|
||||
},
|
||||
"CSRF_TRUSTED_ORIGINS": {
|
||||
"type": "list",
|
||||
"required": true,
|
||||
"label": "CSRF Origins List",
|
||||
"help_text": "If the service is behind a reverse proxy/load balancer, use this setting to configure the schema://addresses from which the service should trust Origin header values. ",
|
||||
"category": "System",
|
||||
"category_slug": "system",
|
||||
"default": [],
|
||||
"child": {
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"read_only": false
|
||||
}
|
||||
},
|
||||
"REDHAT_USERNAME": {
|
||||
"type": "string",
|
||||
"required": false,
|
||||
@@ -4487,6 +4501,17 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"CSRF_TRUSTED_ORIGINS": {
|
||||
"type": "list",
|
||||
"label": "CSRF Origins List",
|
||||
"help_text": "If the service is behind a reverse proxy/load balancer, use this setting to configure the schema://addresses from which the service should trust Origin header values. ",
|
||||
"category": "System",
|
||||
"category_slug": "system",
|
||||
"defined_in_file": false,
|
||||
"child": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"LICENSE": {
|
||||
"type": "nested object",
|
||||
"label": "License",
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
"REMOTE_HOST"
|
||||
],
|
||||
"PROXY_IP_ALLOWED_LIST": [],
|
||||
"CSRF_TRUSTED_ORIGINS": [],
|
||||
"LICENSE": {},
|
||||
"REDHAT_USERNAME": "",
|
||||
"REDHAT_PASSWORD": "",
|
||||
|
||||
@@ -164,7 +164,7 @@ function NodeTypeStep({ isIdentifierRequired }) {
|
||||
onUpdateNodeResource={nodeResourceHelpers.setValue}
|
||||
/>
|
||||
)}
|
||||
<Form css="margin-top: 20px;">
|
||||
<Form autoComplete="off" css="margin-top: 20px;">
|
||||
<FormColumnLayout>
|
||||
<FormFullWidthLayout>
|
||||
{nodeTypeField.value === 'workflow_approval_template' && (
|
||||
|
||||
@@ -1,8 +1,17 @@
export default function getDocsBaseUrl(config) {
let version = 'latest';
const licenseType = config?.license_info?.license_type;

if (licenseType && licenseType !== 'open') {
version = config?.version ? config.version.split('-')[0] : 'latest';
if (config?.version) {
if (parseFloat(config?.version.split('-')[0]) >= 4.3) {
version = parseFloat(config?.version.split('-')[0]);
} else {
version = config?.version.split('-')[0];
}
}
} else {
version = 'latest';
}
return `https://docs.ansible.com/automation-controller/${version}`;
}

@@ -68,6 +68,7 @@ Notable releases of the `awx.awx` collection:
- 7.0.0 is intended to be identical to the content prior to the migration, aside from changes necessary to function as a collection.
- 11.0.0 has no non-deprecated modules that depend on the deprecated `tower-cli` [PyPI](https://pypi.org/project/ansible-tower-cli/).
- 19.2.1 large renaming purged "tower" names (like options and module names), adding redirects for old names
- X.X.X added support of named URLs to all modules. Anywhere that previously accepted name or id can also support named URLs
- 0.0.1-devel is the version you should see if installing from source, which is intended for development and expected to be unstable.

The following notes are changes that may require changes to playbooks:

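As a rough illustration of the named-URL support noted above (a sketch only; the template and organization names are placeholders, not taken from this diff), a module parameter that previously accepted a name or an ID can now also take the `++`-joined named-URL form:

```yaml
# Illustrative only: "Demo Job Template" and "Default" are assumed example names.
- name: Launch a job template referenced by its named URL
  awx.awx.job_launch:
    # "<job template name>++<organization name>" is the named-URL form for job templates
    job_template: "Demo Job Template++Default"
```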
@@ -10,7 +10,7 @@ from ansible.module_utils.six import raise_from, string_types
from ansible.module_utils.six.moves import StringIO
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.six.moves.http_cookiejar import CookieJar
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, quote
from ansible.module_utils.six.moves.configparser import ConfigParser, NoOptionError
from socket import getaddrinfo, IPPROTO_TCP
import time
@@ -56,6 +56,8 @@ class ControllerModule(AnsibleModule):
),
controller_config_file=dict(type='path', aliases=['tower_config_file'], required=False, default=None),
)
# Associations of these types are ordered and have special consideration in the modified associations function
ordered_associations = ['instance_groups', 'galaxy_credentials']
short_params = {
'host': 'controller_host',
'username': 'controller_username',
@@ -381,29 +383,51 @@ class ControllerAPIModule(ControllerModule):

def get_one(self, endpoint, name_or_id=None, allow_none=True, check_exists=False, **kwargs):
new_kwargs = kwargs.copy()
if name_or_id:
name_field = self.get_name_field_from_endpoint(endpoint)
new_data = kwargs.get('data', {}).copy()
if name_field in new_data:
self.fail_json(msg="You can't specify the field {0} in your search data if using the name_or_id field".format(name_field))
response = None

try:
new_data['or__id'] = int(name_or_id)
new_data['or__{0}'.format(name_field)] = name_or_id
except ValueError:
# If we get a value error, then we didn't have an integer so we can just pass and fall down to the fail
new_data[name_field] = name_or_id
new_kwargs['data'] = new_data
# A named URL is pretty unique so if we have a ++ in the name then lets start by looking for that
# This also needs to go first because if there was data passed in kwargs and we do the next lookup first there may be results
if name_or_id is not None and '++' in name_or_id:
# Maybe someone gave us a named URL so lets see if we get anything from that.
url_quoted_name = quote(name_or_id, safe="+")
named_endpoint = '{0}/{1}/'.format(endpoint, url_quoted_name)
named_response = self.get_endpoint(named_endpoint)

response = self.get_endpoint(endpoint, **new_kwargs)
if response['status_code'] != 200:
fail_msg = "Got a {0} response when trying to get one from {1}".format(response['status_code'], endpoint)
if 'detail' in response.get('json', {}):
fail_msg += ', detail: {0}'.format(response['json']['detail'])
self.fail_json(msg=fail_msg)
if named_response['status_code'] == 200 and 'json' in named_response:
# We found a named item but we expect to deal with a list view so mock that up
response = {
'json': {
'count': 1,
'results': [named_response['json']],
}
}

if 'count' not in response['json'] or 'results' not in response['json']:
self.fail_json(msg="The endpoint did not provide count and results")
# Since we didn't have a named URL, lets try and find it with a general search
if response is None:
if name_or_id:
name_field = self.get_name_field_from_endpoint(endpoint)
new_data = kwargs.get('data', {}).copy()
if name_field in new_data:
self.fail_json(msg="You can't specify the field {0} in your search data if using the name_or_id field".format(name_field))

try:
new_data['or__id'] = int(name_or_id)
new_data['or__{0}'.format(name_field)] = name_or_id
except ValueError:
# If we get a value error, then we didn't have an integer so we can just pass and fall down to the fail
new_data[name_field] = name_or_id
new_kwargs['data'] = new_data

response = self.get_endpoint(endpoint, **new_kwargs)

if response['status_code'] != 200:
fail_msg = "Got a {0} response when trying to get one from {1}".format(response['status_code'], endpoint)
if 'detail' in response.get('json', {}):
fail_msg += ', detail: {0}'.format(response['json']['detail'])
self.fail_json(msg=fail_msg)

if 'count' not in response['json'] or 'results' not in response['json']:
self.fail_json(msg="The endpoint did not provide count and results")

if response['json']['count'] == 0:
if allow_none:
@@ -421,7 +445,6 @@ class ControllerAPIModule(ControllerModule):
self.fail_wanted_one(response, endpoint, new_kwargs.get('data'))

if check_exists:
name_field = self.get_name_field_from_endpoint(endpoint)
self.json_output['id'] = response['json']['results'][0]['id']
self.exit_json(**self.json_output)

@@ -531,13 +554,7 @@ class ControllerAPIModule(ControllerModule):
controller_version = response.info().getheader('X-API-Product-Version', None)

parsed_collection_version = Version(self._COLLECTION_VERSION).version
if not controller_version:
self.warn(
"You are using the {0} version of this collection but connecting to a controller that did not return a version".format(
self._COLLECTION_VERSION
)
)
else:
if controller_version:
parsed_controller_version = Version(controller_version).version
if controller_type == 'AWX':
collection_compare_ver = parsed_collection_version[0]
@@ -680,17 +697,26 @@ class ControllerAPIModule(ControllerModule):
response = self.get_all_endpoint(association_endpoint)
existing_associated_ids = [association['id'] for association in response['json']['results']]

# Disassociate anything that is in existing_associated_ids but not in new_association_list
ids_to_remove = list(set(existing_associated_ids) - set(new_association_list))
for an_id in ids_to_remove:
# Some associations can be ordered (like galaxy credentials)
if association_endpoint.strip('/').split('/')[-1] in self.ordered_associations:
if existing_associated_ids == new_association_list:
return # If the current associations EXACTLY match the desired associations then we can return
removal_list = existing_associated_ids # because of ordering, we have to remove everything
addition_list = new_association_list # re-add everything back in-order
else:
if set(existing_associated_ids) == set(new_association_list):
return
removal_list = set(existing_associated_ids) - set(new_association_list)
addition_list = set(new_association_list) - set(existing_associated_ids)

for an_id in removal_list:
response = self.post_endpoint(association_endpoint, **{'data': {'id': int(an_id), 'disassociate': True}})
if response['status_code'] == 204:
self.json_output['changed'] = True
else:
self.fail_json(msg="Failed to disassociate item {0}".format(response['json'].get('detail', response['json'])))

# Associate anything that is in new_association_list but not in `association`
for an_id in list(set(new_association_list) - set(existing_associated_ids)):
for an_id in addition_list:
response = self.post_endpoint(association_endpoint, **{'data': {'id': int(an_id)}})
if response['status_code'] == 204:
self.json_output['changed'] = True

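A minimal sketch of what the ordered-association handling above implies for playbooks (the organization and credential names below are assumed examples): for ordered association types such as `galaxy_credentials` on `awx.awx.organization`, the list order supplied to the module is preserved, and re-ordering the same credentials is reported as a change.

```yaml
- name: Associate Galaxy credentials in a specific, significant order
  awx.awx.organization:
    name: Default
    galaxy_credentials:
      - Ansible Galaxy           # searched first
      - Private Automation Hub   # searched second; swapping these two entries counts as a change
```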
@@ -29,12 +29,12 @@ options:
choices: [ 'run', 'check' ]
execution_environment:
description:
- Execution Environment to use for the ad hoc command.
- Execution Environment name, ID, or named URL to use for the ad hoc command.
required: False
type: str
inventory:
description:
- Inventory to use for the ad hoc command.
- Inventory name, ID, or named URL to use for the ad hoc command.
required: True
type: str
limit:
@@ -43,7 +43,7 @@ options:
type: str
credential:
description:
- Credential to use for ad hoc command.
- Credential name, ID, or named URL to use for ad hoc command.
required: True
type: str
module_name:

@@ -48,7 +48,7 @@ options:
required: False
organization:
description:
- Name of organization for application.
- Name, ID, or named URL of organization for application.
type: str
required: True
redirect_uris:

@@ -48,7 +48,7 @@ options:
type: str
inventory:
description:
- Inventory name or ID the hosts should be made a member of.
- Inventory name, ID, or named URL the hosts should be made a member of.
required: True
type: str
extends_documentation_fragment: awx.awx.auth

@@ -128,7 +128,7 @@ options:
type: str
inventory:
description:
- Inventory name or ID to use for the jobs ran within the bulk job, only used if prompt for inventory is set.
- Inventory name, ID, or named URL to use for the jobs ran within the bulk job, only used if prompt for inventory is set.
type: str
scm_branch:
description:

@@ -45,7 +45,7 @@ options:
type: str
organization:
description:
- Organization that should own the credential.
- Organization name, ID, or named URL that should own the credential.
type: str
credential_type:
description:
@@ -92,11 +92,11 @@ options:
default: true
user:
description:
- User that should own this credential.
- User name, ID, or named URL that should own this credential.
type: str
team:
description:
- Team that should own this credential.
- Team name, ID, or named URL that should own this credential.
type: str
state:
description:

@@ -38,12 +38,12 @@ options:
type: dict
target_credential:
description:
- The credential which will have its input defined by this source
- The credential name, ID, or named URL which will have its input defined by this source
required: true
type: str
source_credential:
description:
- The credential which is the source of the credential lookup
- The credential name, ID, or named URL which is the source of the credential lookup
type: str
state:
description:

@@ -41,11 +41,11 @@ options:
type: str
organization:
description:
- The organization the execution environment belongs to.
- The organization name, ID, or named URL that the execution environment belongs to.
type: str
credential:
description:
- Name of the credential to use for the execution environment.
- Name, ID, or named URL of the credential to use for the execution environment.
type: str
state:
description:

@@ -28,72 +28,72 @@ options:
default: 'False'
organizations:
description:
- organization names to export
- organization names, IDs, or named URLs to export
type: list
elements: str
users:
description:
- user names to export
- user names, IDs, or named URLs to export
type: list
elements: str
teams:
description:
- team names to export
- team names, IDs, or named URLs to export
type: list
elements: str
credential_types:
description:
- credential type names to export
- credential type names, IDs, or named URLs to export
type: list
elements: str
credentials:
description:
- credential names to export
- credential names, IDs, or named URLs to export
type: list
elements: str
execution_environments:
description:
- execution environment names to export
- execution environment names, IDs, or named URLs to export
type: list
elements: str
notification_templates:
description:
- notification template names to export
- notification template names, IDs, or named URLs to export
type: list
elements: str
inventory_sources:
description:
- inventory soruces to export
- inventory source name, ID, or named URLs to export
type: list
elements: str
inventory:
description:
- inventory names to export
- inventory names, IDs, or named URLs to export
type: list
elements: str
projects:
description:
- project names to export
- project names, IDs, or named URLs to export
type: list
elements: str
job_templates:
description:
- job template names to export
- job template names, IDs, or named URLs to export
type: list
elements: str
workflow_job_templates:
description:
- workflow names to export
- workflow names, IDs, or named URLs to export
type: list
elements: str
applications:
description:
- OAuth2 application names to export
- OAuth2 application names, IDs, or named URLs to export
type: list
elements: str
schedules:
description:
- schedule names to export
- schedule names, IDs, or named URLs to export
type: list
elements: str
requirements:
@@ -154,7 +154,7 @@ def main():

# The exporter code currently works like the following:
# Empty string == all assets of that type
# Non-Empty string = just a list of assets of that type (by name or ID)
# Non-Empty string = just a list of assets of that type (by name, ID, or named URL)
# Asset type not present or None = skip asset type (unless everything is None, then export all)
# Here we are going to setup a dict of values to export
export_args = {}

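To make the exporter semantics in the comment above concrete, here is a hedged sketch (the asset names are placeholders): listing values for an asset type exports just those assets, while leaving a type out of the task skips it entirely.

```yaml
- name: Export only the named assets; unlisted asset types are skipped
  awx.awx.export:
    organizations:
      - Default
    job_templates:
      - "Demo Job Template++Default"   # names, IDs, or named URLs are accepted
```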
@@ -32,7 +32,7 @@ options:
type: str
inventory:
description:
- Inventory the group should be made a member of.
- Inventory name, ID, or named URL that the group should be made a member of.
required: True
type: str
variables:
@@ -41,12 +41,12 @@ options:
type: dict
hosts:
description:
- List of hosts that should be put in this group.
- List of host names, IDs, or named URLs that should be put in this group.
type: list
elements: str
children:
description:
- List of groups that should be nested inside in this group.
- List of groups names, IDs, or named URLs that should be nested inside in this group.
type: list
elements: str
aliases:

@@ -36,7 +36,7 @@ options:
type: str
inventory:
description:
- Inventory the host should be made a member of.
- Inventory name, ID, or named URL the host should be made a member of.
required: True
type: str
enabled:

@@ -33,7 +33,7 @@ options:
type: str
credential:
description:
- Credential to authenticate with Kubernetes or OpenShift. Must be of type "OpenShift or Kubernetes API Bearer Token".
- Credential name, ID, or named URL to authenticate with Kubernetes or OpenShift. Must be of type "OpenShift or Kubernetes API Bearer Token".
required: False
type: str
is_container_group:
@@ -74,7 +74,7 @@ options:
type: str
instances:
description:
- The instances associated with this instance_group
- The instance names, IDs, or named URLs associated with this instance_group
required: False
type: list
elements: str

@@ -44,7 +44,7 @@ options:
type: str
organization:
description:
- Organization the inventory belongs to.
- Organization name, ID, or named URL the inventory belongs to.
required: True
type: str
variables:
@@ -62,12 +62,12 @@ options:
type: str
instance_groups:
description:
- list of Instance Groups for this Organization to run on.
- list of Instance Group names, IDs, or named URLs for this Organization to run on.
type: list
elements: str
input_inventories:
description:
- List of Inventories to use as input for Constructed Inventory.
- List of Inventory names, IDs, or named URLs to use as input for Constructed Inventory.
type: list
elements: str
prevent_instance_group_fallback:

@@ -36,7 +36,7 @@ options:
type: str
inventory:
description:
- Inventory the group should be made a member of.
- Inventory name, ID, or named URL the group should be made a member of.
required: True
type: str
source:
@@ -70,11 +70,11 @@ options:
type: str
credential:
description:
- Credential to use for the source.
- Credential name, ID, or named URL to use for the source.
type: str
execution_environment:
description:
- Execution Environment to use for the source.
- Execution Environment name, ID, or named URL to use for the source.
type: str
custom_virtualenv:
description:
@@ -107,7 +107,7 @@ options:
type: int
source_project:
description:
- Project to use as source with scm option
- Project name, ID, or named URL to use as source with scm option
type: str
scm_branch:
description:

@@ -35,7 +35,7 @@ options:
type: str
organization:
description:
- Name of the inventory source's inventory's organization.
- Name, ID, or named URL of the inventory source's inventory's organization.
type: str
wait:
description:

@@ -34,17 +34,17 @@ options:
type: str
inventory:
description:
- Inventory to use for the job, only used if prompt for inventory is set.
- Inventory name, ID, or named URL to use for the job, only used if prompt for inventory is set.
type: str
organization:
description:
- Organization the job template exists in.
- Organization name, ID, or named URL the job template exists in.
- Used to help lookup the object, cannot be modified using this module.
- If not provided, will lookup by name only, which does not work with duplicates.
type: str
credentials:
description:
- Credential to use for job, only used if prompt for credential is set.
- Credential names, IDs, or named URLs to use for job, only used if prompt for credential is set.
type: list
aliases: ['credential']
elements: str
@@ -88,7 +88,7 @@ options:
type: dict
execution_environment:
description:
- Execution environment to use for the job, only used if prompt for execution environment is set.
- Execution environment name, ID, or named URL to use for the job, only used if prompt for execution environment is set.
type: str
forks:
description:
@@ -96,7 +96,7 @@ options:
type: int
instance_groups:
description:
- Instance groups to use for the job, only used if prompt for instance groups is set.
- Instance group names, IDs, or named URLs to use for the job, only used if prompt for instance groups is set.
type: list
elements: str
job_slice_count:

|
||||
type: str
|
||||
inventory:
|
||||
description:
|
||||
- Name of the inventory to use for the job template.
|
||||
- Name, ID, or named URL of the inventory to use for the job template.
|
||||
type: str
|
||||
organization:
|
||||
description:
|
||||
- Organization the job template exists in.
|
||||
- Organization name, ID, or named URL the job template exists in.
|
||||
- Used to help lookup the object, cannot be modified using this module.
|
||||
- The Organization is inferred from the associated project
|
||||
- If not provided, will lookup by name only, which does not work with duplicates.
|
||||
@@ -61,7 +61,7 @@ options:
|
||||
type: str
|
||||
project:
|
||||
description:
|
||||
- Name of the project to use for the job template.
|
||||
- Name, ID, or named URL of the project to use for the job template.
|
||||
type: str
|
||||
playbook:
|
||||
description:
|
||||
@@ -69,22 +69,22 @@ options:
|
||||
type: str
|
||||
credential:
|
||||
description:
|
||||
- Name of the credential to use for the job template.
|
||||
- Name, ID, or named URL of the credential to use for the job template.
|
||||
- Deprecated, use 'credentials'.
|
||||
type: str
|
||||
credentials:
|
||||
description:
|
||||
- List of credentials to use for the job template.
|
||||
- List of credential names, IDs, or named URLs to use for the job template.
|
||||
type: list
|
||||
elements: str
|
||||
vault_credential:
|
||||
description:
|
||||
- Name of the vault credential to use for the job template.
|
||||
- Name, ID, or named URL of the vault credential to use for the job template.
|
||||
- Deprecated, use 'credentials'.
|
||||
type: str
|
||||
execution_environment:
|
||||
description:
|
||||
- Execution Environment to use for the job template.
|
||||
- Execution Environment name, ID, or named URL to use for the job template.
|
||||
type: str
|
||||
custom_virtualenv:
|
||||
description:
|
||||
@@ -94,7 +94,7 @@ options:
|
||||
type: str
|
||||
instance_groups:
|
||||
description:
|
||||
- list of Instance Groups for this Organization to run on.
|
||||
- list of Instance Group names, IDs, or named URLs for this Organization to run on.
|
||||
type: list
|
||||
elements: str
|
||||
forks:
|
||||
@@ -108,7 +108,7 @@ options:
|
||||
verbosity:
|
||||
description:
|
||||
- Control the output level Ansible produces as the playbook runs. 0 - Normal, 1 - Verbose, 2 - More Verbose, 3 - Debug, 4 - Connection Debug.
|
||||
choices: [0, 1, 2, 3, 4]
|
||||
choices: [0, 1, 2, 3, 4, 5]
|
||||
type: int
|
||||
extra_vars:
|
||||
description:
|
||||
@@ -404,7 +404,7 @@ def main():
|
||||
instance_groups=dict(type="list", elements='str'),
|
||||
forks=dict(type='int'),
|
||||
limit=dict(),
|
||||
verbosity=dict(type='int', choices=[0, 1, 2, 3, 4]),
|
||||
verbosity=dict(type='int', choices=[0, 1, 2, 3, 4, 5]),
|
||||
extra_vars=dict(type='dict'),
|
||||
job_tags=dict(),
|
||||
force_handlers=dict(type='bool', aliases=['force_handlers_enabled']),
|
||||
|
||||
@@ -34,7 +34,7 @@ options:
|
||||
type: str
|
||||
organization:
|
||||
description:
|
||||
- Organization this label belongs to.
|
||||
- Organization name, ID, or named URL this label belongs to.
|
||||
required: True
|
||||
type: str
|
||||
state:
|
||||
|
||||
@@ -44,7 +44,7 @@ options:
|
||||
type: str
|
||||
organization:
|
||||
description:
|
||||
- The organization the notification belongs to.
|
||||
- The organization name, ID, or named URL the notification belongs to.
|
||||
type: str
|
||||
notification_type:
|
||||
description:
|
||||
|
||||
@@ -36,7 +36,7 @@ options:
|
||||
type: str
|
||||
default_environment:
|
||||
description:
|
||||
- Default Execution Environment to use for jobs owned by the Organization.
|
||||
- Default Execution Environment name, ID, or named URL to use for jobs owned by the Organization.
|
||||
type: str
|
||||
custom_virtualenv:
|
||||
description:
|
||||
@@ -56,7 +56,7 @@ options:
|
||||
type: str
|
||||
instance_groups:
|
||||
description:
|
||||
- list of Instance Groups for this Organization to run on.
|
||||
- list of Instance Group names, IDs, or named URLs for this Organization to run on.
|
||||
type: list
|
||||
elements: str
|
||||
notification_templates_started:
|
||||
@@ -81,7 +81,7 @@ options:
|
||||
elements: str
|
||||
galaxy_credentials:
|
||||
description:
|
||||
- list of Ansible Galaxy credentials to associate to the organization
|
||||
- list of Ansible Galaxy credential names, IDs, or named URLs to associate to the organization
|
||||
type: list
|
||||
elements: str
|
||||
extends_documentation_fragment: awx.awx.auth
|
||||
|
||||
@@ -65,7 +65,7 @@ options:
|
||||
type: str
|
||||
credential:
|
||||
description:
|
||||
- Name of the credential to use with this SCM resource.
|
||||
- Name, ID, or named URL of the credential to use with this SCM resource.
|
||||
type: str
|
||||
aliases:
|
||||
- scm_credential
|
||||
@@ -106,7 +106,7 @@ options:
|
||||
- job_timeout
|
||||
default_environment:
|
||||
description:
|
||||
- Default Execution Environment to use for jobs relating to the project.
|
||||
- Default Execution Environment name, ID, or named URL to use for jobs relating to the project.
|
||||
type: str
|
||||
custom_virtualenv:
|
||||
description:
|
||||
@@ -116,7 +116,7 @@ options:
|
||||
type: str
|
||||
organization:
|
||||
description:
|
||||
- Name of organization for project.
|
||||
- Name, ID, or named URL of organization for the project.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
@@ -162,7 +162,7 @@ options:
|
||||
type: float
|
||||
signature_validation_credential:
|
||||
description:
|
||||
- Name of the credential to use for signature validation.
|
||||
- Name, ID, or named URL of the credential to use for signature validation.
|
||||
- If signature validation credential is provided, signature validation will be enabled.
|
||||
type: str
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ options:
|
||||
- project
|
||||
organization:
|
||||
description:
|
||||
- Organization the project exists in.
|
||||
- Organization name, ID, or named URL the project exists in.
|
||||
- Used to help lookup the object, cannot be modified using this module.
|
||||
- If not provided, will lookup by name only, which does not work with duplicates.
|
||||
type: str
|
||||
|
||||
@@ -23,22 +23,22 @@ description:
|
||||
options:
|
||||
user:
|
||||
description:
|
||||
- User that receives the permissions specified by the role.
|
||||
- User name, ID, or named URL that receives the permissions specified by the role.
|
||||
- Deprecated, use 'users'.
|
||||
type: str
|
||||
users:
|
||||
description:
|
||||
- Users that receive the permissions specified by the role.
|
||||
- User names, IDs, or named URLs that receive the permissions specified by the role.
|
||||
type: list
|
||||
elements: str
|
||||
team:
|
||||
description:
|
||||
- Team that receives the permissions specified by the role.
|
||||
- Team name, ID, or named URL that receives the permissions specified by the role.
|
||||
- Deprecated, use 'teams'.
|
||||
type: str
|
||||
teams:
|
||||
description:
|
||||
- Teams that receive the permissions specified by the role.
|
||||
- Team names, IDs, or named URLs that receive the permissions specified by the role.
|
||||
type: list
|
||||
elements: str
|
||||
role:
|
||||
@@ -50,87 +50,87 @@ options:
|
||||
type: str
|
||||
target_team:
|
||||
description:
|
||||
- Team that the role acts on.
|
||||
- Team name, ID, or named URL that the role acts on.
|
||||
- For example, make someone a member or an admin of a team.
|
||||
- Members of a team implicitly receive the permissions that the team has.
|
||||
- Deprecated, use 'target_teams'.
|
||||
type: str
|
||||
target_teams:
|
||||
description:
|
||||
- Team that the role acts on.
|
||||
- Team names, IDs, or named URLs that the role acts on.
|
||||
- For example, make someone a member or an admin of a team.
|
||||
- Members of a team implicitly receive the permissions that the team has.
|
||||
type: list
|
||||
elements: str
|
||||
inventory:
|
||||
description:
|
||||
- Inventory the role acts on.
|
||||
- Inventory name, ID, or named URL the role acts on.
|
||||
- Deprecated, use 'inventories'.
|
||||
type: str
|
||||
inventories:
|
||||
description:
|
||||
- Inventory the role acts on.
|
||||
- Inventory names, IDs, or named URLs the role acts on.
|
||||
type: list
|
||||
elements: str
|
||||
job_template:
|
||||
description:
|
||||
- The job template the role acts on.
|
||||
- The job template name, ID, or named URL the role acts on.
|
||||
- Deprecated, use 'job_templates'.
|
||||
type: str
|
||||
job_templates:
|
||||
description:
|
||||
- The job template the role acts on.
|
||||
- The job template names, IDs, or named URLs the role acts on.
|
||||
type: list
|
||||
elements: str
|
||||
workflow:
|
||||
description:
|
||||
- The workflow job template the role acts on.
|
||||
- The workflow job template name, ID, or named URL the role acts on.
|
||||
- Deprecated, use 'workflows'.
|
||||
type: str
|
||||
workflows:
|
||||
description:
|
||||
- The workflow job template the role acts on.
|
||||
- The workflow job template names, IDs, or named URLs the role acts on.
|
||||
type: list
|
||||
elements: str
|
||||
credential:
|
||||
description:
|
||||
- Credential the role acts on.
|
||||
- Credential name, ID, or named URL the role acts on.
|
||||
- Deprecated, use 'credentials'.
|
||||
type: str
|
||||
credentials:
|
||||
description:
|
||||
- Credential the role acts on.
|
||||
- Credential names, IDs, or named URLs the role acts on.
|
||||
type: list
|
||||
elements: str
|
||||
organization:
|
||||
description:
|
||||
- Organization the role acts on.
|
||||
- Organization name, ID, or named URL the role acts on.
|
||||
- Deprecated, use 'organizations'.
|
||||
type: str
|
||||
organizations:
|
||||
description:
|
||||
- Organization the role acts on.
|
||||
- Organization names, IDs, or named URLs the role acts on.
|
||||
type: list
|
||||
elements: str
|
||||
lookup_organization:
|
||||
description:
|
||||
- Organization the inventories, job templates, projects, or workflows the items exists in.
|
||||
- Organization name, ID, or named URL the inventories, job templates, projects, or workflows the items exists in.
|
||||
- Used to help lookup the object, for organization roles see organization.
|
||||
- If not provided, will lookup by name only, which does not work with duplicates.
|
||||
type: str
|
||||
project:
|
||||
description:
|
||||
- Project the role acts on.
|
||||
- Project name, ID, or named URL the role acts on.
|
||||
- Deprecated, use 'projects'.
|
||||
type: str
|
||||
projects:
|
||||
description:
|
||||
- Project the role acts on.
|
||||
- Project names, IDs, or named URLs the role acts on.
|
||||
type: list
|
||||
elements: str
|
||||
instance_groups:
|
||||
description:
|
||||
- Instance Group the role acts on.
|
||||
- Instance Group names, IDs, or named URLs the role acts on.
|
||||
type: list
|
||||
elements: str
|
||||
state:
|
||||
@@ -266,7 +266,7 @@ def main():
|
||||
resource_data = {}
|
||||
for key, value in resources.items():
|
||||
for resource in value:
|
||||
# Attempt to look up project based on the provided name or ID and lookup data
|
||||
# Attempt to look up project based on the provided name, ID, or named URL and lookup data
|
||||
lookup_key = key
|
||||
if key == 'organizations' or key == 'users':
|
||||
lookup_data_populated = {}
|
||||
|
||||
@@ -44,7 +44,7 @@ options:
type: str
execution_environment:
description:
- Execution Environment applied as a prompt, assuming jot template prompts for execution environment
- Execution Environment name, ID, or named URL applied as a prompt, assuming job template prompts for execution environment
type: str
extra_data:
description:
@@ -57,12 +57,12 @@ options:
type: int
instance_groups:
description:
- List of Instance Groups applied as a prompt, assuming job template prompts for instance groups
- List of Instance Group names, IDs, or named URLs applied as a prompt, assuming job template prompts for instance groups
type: list
elements: str
inventory:
description:
- Inventory applied as a prompt, assuming job template prompts for inventory
- Inventory name, ID, or named URL applied as a prompt, assuming job template prompts for inventory
required: False
type: str
job_slice_count:
@@ -76,7 +76,7 @@ options:
elements: str
credentials:
description:
- List of credentials applied as a prompt, assuming job template prompts for credentials
- List of credential names, IDs, or named URLs applied as a prompt, assuming job template prompts for credentials
type: list
elements: str
scm_branch:
@@ -130,12 +130,12 @@ options:
- 5
unified_job_template:
description:
- Name of unified job template to schedule. Used to look up an already existing schedule.
- Name, ID, or named URL of unified job template to schedule. Used to look up an already existing schedule.
required: False
type: str
organization:
description:
- The organization the unified job template exists in.
- The organization name, ID, or named URL the unified job template exists in.
- Used for looking up the unified job template, not a direct model field.
type: str
enabled:

@@ -36,7 +36,7 @@ options:
type: str
organization:
description:
- Organization the team should be made a member of.
- Organization name, ID, or named URL the team should be made a member of.
required: True
type: str
state:

@@ -37,7 +37,7 @@ options:
type: str
application:
description:
- The application tied to this token.
- The application name, ID, or named URL tied to this token.
required: False
type: str
scope:

@@ -44,7 +44,7 @@ options:
type: str
organization:
description:
- The user will be created as a member of that organization (needed for organization admins to create new organization users).
- The user will be created as a member of that organization name, ID, or named URL (needed for organization admins to create new organization users).
type: str
is_superuser:
description:

@@ -58,7 +58,7 @@ options:
- ask_tags
organization:
description:
- Organization the workflow job template exists in.
- Organization name, ID, or named URL the workflow job template exists in.
- Used to help lookup the object, cannot be modified using this module.
- If not provided, will lookup by name only, which does not work with duplicates.
type: str
@@ -72,7 +72,7 @@ options:
type: bool
inventory:
description:
- Inventory applied as a prompt, assuming job template prompts for inventory
- Name, ID, or named URL of inventory applied as a prompt, assuming job template prompts for inventory
type: str
limit:
description:

@@ -31,7 +31,7 @@ options:
type: dict
inventory:
description:
- Inventory applied as a prompt, if job template prompts for inventory
- Name, ID, or named URL of the Inventory applied as a prompt, if job template prompts for inventory
type: str
scm_branch:
description:
@@ -73,7 +73,7 @@ options:
- '5'
workflow_job_template:
description:
- The workflow job template the node exists in.
- The workflow job template name, ID, or named URL the node exists in.
- Used for looking up the node, cannot be modified after creation.
required: True
type: str
@@ -81,7 +81,7 @@ options:
- workflow
organization:
description:
- The organization of the workflow job template the node exists in.
- The organization name, ID, or named URL of the workflow job template the node exists in.
- Used for looking up the workflow, not a direct model field.
type: str
unified_job_template:
@@ -93,7 +93,7 @@ options:
type: str
lookup_organization:
description:
- Organization the inventories, job template, project, inventory source the unified_job_template exists in.
- Organization name, ID, or named URL the inventories, job template, project, inventory source the unified_job_template exists in.
- If not provided, will lookup by name only, which does not work with duplicates.
type: str
approval_node:
@@ -145,14 +145,14 @@ options:
elements: str
credentials:
description:
- Credentials to be applied to job as launch-time prompts.
- List of credential names.
- Credential names, IDs, or named URLs to be applied to job as launch-time prompts.
- List of credential names, IDs, or named URLs.
- Uniqueness is not handled rigorously.
type: list
elements: str
execution_environment:
description:
- Execution Environment applied as a prompt, assuming jot template prompts for execution environment
- Execution Environment name, ID, or named URL applied as a prompt, assuming job template prompts for execution environment
type: str
forks:
description:
@@ -160,7 +160,7 @@ options:
type: int
instance_groups:
description:
- List of Instance Groups applied as a prompt, assuming job template prompts for instance groups
- List of Instance Group names, IDs, or named URLs applied as a prompt, assuming job template prompts for instance groups
type: list
elements: str
job_slice_count:

@@ -27,13 +27,13 @@ options:
- workflow_template
organization:
description:
- Organization the workflow job template exists in.
- Organization name, ID, or named URL the workflow job template exists in.
- Used to help lookup the object, cannot be modified using this module.
- If not provided, will lookup by name only, which does not work with duplicates.
type: str
inventory:
description:
- Inventory to use for the job ran with this workflow, only used if prompt for inventory is set.
- Inventory name, ID, or named URL to use for the job ran with this workflow, only used if prompt for inventory is set.
type: str
limit:
description:

@@ -2,9 +2,11 @@ from __future__ import absolute_import, division, print_function

__metaclass__ = type

import random

import pytest

from awx.main.models import ActivityStream, JobTemplate, Job, NotificationTemplate
from awx.main.models import ActivityStream, JobTemplate, Job, NotificationTemplate, Label


@pytest.mark.django_db
@@ -243,6 +245,42 @@ def test_job_template_with_survey_encrypted_default(run_module, admin_user, proj
)


@pytest.mark.django_db
def test_associate_changed_status(run_module, admin_user, organization, project):
# create JT and labels
jt = JobTemplate.objects.create(name='foo', project=project, playbook='helloworld.yml')
labels = [Label.objects.create(name=f'foo{i}', organization=organization) for i in range(10)]

# sanity: no-op without labels involved
result = run_module('job_template', dict(name=jt.name, playbook='helloworld.yml'), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result['changed'] is False

# first time adding labels, this should make the label list equal to what was specified
result = run_module('job_template', dict(name=jt.name, playbook='helloworld.yml', labels=[l.name for l in labels]), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result['changed']
assert set(l.id for l in jt.labels.all()) == set(l.id for l in labels)

# shuffling the labels should not result in any change
random.shuffle(labels)
result = run_module('job_template', dict(name=jt.name, playbook='helloworld.yml', labels=[l.name for l in labels]), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result['changed'] is False

# not specifying labels should not change labels
result = run_module('job_template', dict(name=jt.name, playbook='helloworld.yml'), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result['changed'] is False

# should be able to remove only some labels
fewer_labels = labels[:7]
result = run_module('job_template', dict(name=jt.name, playbook='helloworld.yml', labels=[l.name for l in fewer_labels]), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result['changed']
assert set(l.id for l in jt.labels.all()) == set(l.id for l in fewer_labels)


@pytest.mark.django_db
def test_associate_only_on_success(run_module, admin_user, organization, project):
jt = JobTemplate.objects.create(

@@ -76,21 +76,6 @@ def test_version_warning(collection_import, silence_warning):
)


def test_no_version_warning(collection_import, silence_warning):
ControllerAPIModule = collection_import('plugins.module_utils.controller_api').ControllerAPIModule
cli_data = {'ANSIBLE_MODULE_ARGS': {}}
testargs = ['module_file2.py', json.dumps(cli_data)]
with mock.patch.object(sys, 'argv', testargs):
with mock.patch('ansible.module_utils.urls.Request.open', new=mock_no_ping_response):
my_module = ControllerAPIModule(argument_spec=dict())
my_module._COLLECTION_VERSION = "2.0.0"
my_module._COLLECTION_TYPE = "awx"
my_module.get_endpoint('ping')
silence_warning.assert_called_once_with(
'You are using the {0} version of this collection but connecting to a controller that did not return a version'.format(my_module._COLLECTION_VERSION)
)


def test_version_warning_strictness_awx(collection_import, silence_warning):
ControllerAPIModule = collection_import('plugins.module_utils.controller_api').ControllerAPIModule
cli_data = {'ANSIBLE_MODULE_ARGS': {}}

@@ -3,8 +3,9 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type

import pytest
import random

from awx.main.models import Organization
from awx.main.models import Organization, Credential, CredentialType


@pytest.mark.django_db
@@ -30,3 +31,63 @@ def test_create_organization(run_module, admin_user):
assert result == {"name": "foo", "changed": True, "id": org.id, "invocation": {"module_args": module_args}}

assert org.description == 'barfoo'


@pytest.mark.django_db
def test_galaxy_credential_order(run_module, admin_user):
org = Organization.objects.create(name='foo')
cred_type = CredentialType.defaults['galaxy_api_token']()
cred_type.save()

cred_ids = []
for number in range(1, 10):
new_cred = Credential.objects.create(name=f"Galaxy Credential {number}", credential_type=cred_type, organization=org, inputs={'url': 'www.redhat.com'})
cred_ids.append(new_cred.id)

random.shuffle(cred_ids)

module_args = {
'name': 'foo',
'state': 'present',
'controller_host': None,
'controller_username': None,
'controller_password': None,
'validate_certs': None,
'controller_oauthtoken': None,
'controller_config_file': None,
'galaxy_credentials': cred_ids,
}

result = run_module('organization', module_args, admin_user)
print(result)
assert result['changed'] is True

cred_order_in_org = []
for a_cred in org.galaxy_credentials.all():
cred_order_in_org.append(a_cred.id)

assert cred_order_in_org == cred_ids

# Shuffle them up and try again to make sure a new order is honored
random.shuffle(cred_ids)

module_args = {
'name': 'foo',
'state': 'present',
'controller_host': None,
'controller_username': None,
'controller_password': None,
'validate_certs': None,
'controller_oauthtoken': None,
'controller_config_file': None,
'galaxy_credentials': cred_ids,
}

result = run_module('organization', module_args, admin_user)
assert result['changed'] is True

cred_order_in_org = []
for a_cred in org.galaxy_credentials.all():
cred_order_in_org.append(a_cred.id)

assert cred_order_in_org == cred_ids

@@ -0,0 +1,8 @@
---
- name: Generate a random string for test
set_fact:
test_id: "{{ lookup('password', '/dev/null chars=ascii_letters length=16') }}"
when: test_id is not defined

- include_tasks:
file: test_named_reference.yml
Some files were not shown because too many files have changed in this diff.