Compare commits

..

13 Commits

Author | SHA1 | Message | Date
Cesar Francisco San Nicolas Martinez | c437a37be7 | Merge pull request #13171 from infamousjoeg/cyberark-ccp-branding-webserviceid (Cyberark ccp branding webserviceid) | 2022-11-08 18:42:27 +01:00
Cesar Francisco San Nicolas Martinez | b59cee97d8 | Merge branch 'test_cyberark' into cyberark-ccp-branding-webserviceid | 2022-11-08 18:42:10 +01:00
Cesar Francisco San Nicolas Martinez | 9ca554ce75 | Merge pull request #13172 from infamousjoeg/fix-12846-conjur-versioning (Add proper declaration of secret version if present) | 2022-11-08 18:41:20 +01:00
Cesar Francisco San Nicolas Martinez | 81e20c727d | Merge pull request #13170 from infamousjoeg/conjur-cloud-and-branding (Conjur cloud and branding) | 2022-11-08 18:40:46 +01:00
Joe Garcia | f3482f4038 | Removed final reference to CyberArk Conjur Secret Lookup | 2022-10-28 12:42:02 -04:00
Joe Garcia | 878035c13b | Fixed webservice_id check to string | 2022-10-26 12:45:59 -04:00
Joe Garcia | 2cc971a43f | default to AIMWebService if no val provided | 2022-10-26 12:41:15 -04:00
Joe Garcia | 9d77c54612 | Remove references to AIM everywhere | 2022-10-26 12:32:12 -04:00
Joe Garcia | 546fabbb97 | Update references across the board | 2022-10-26 12:28:50 -04:00
Joe Garcia | ef651a3a21 | Add Web Service ID & update branding | 2022-10-26 11:54:09 -04:00
Joe Garcia | 68862d5085 | rm base64 import to pass lint | 2022-10-26 11:14:14 -04:00
Joe Garcia | 66c7d5e9be | Fixes #13119 #13120 Cloud support & update brand | 2022-10-26 10:25:19 -04:00
Joe Garcia | 4a7335676d | Add proper declaration of secret version if present | 2022-10-17 14:43:16 -05:00
463 changed files with 218806 additions and 16206 deletions

View File

@@ -2,7 +2,6 @@
name: CI
env:
BRANCH: ${{ github.base_ref || 'devel' }}
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
pull_request:
jobs:

View File

@@ -1,7 +1,5 @@
---
name: Build/Push Development Images
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
push:
branches:

View File

@@ -1,8 +1,5 @@
---
name: E2E Tests
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
pull_request_target:
types: [labeled]

View File

@@ -1,7 +1,5 @@
---
name: Feature branch deletion cleanup
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
delete:
branches:

View File

@@ -1,9 +1,5 @@
---
name: Promote Release
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
release:
types: [published]

View File

@@ -1,9 +1,5 @@
---
name: Stage Release
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
workflow_dispatch:
inputs:

View File

@@ -1,9 +1,5 @@
---
name: Upload API Schema
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
push:
branches:

View File

@@ -12,7 +12,7 @@ recursive-include awx/plugins *.ps1
recursive-include requirements *.txt
recursive-include requirements *.yml
recursive-include config *
recursive-include licenses *
recursive-include docs/licenses *
recursive-exclude awx devonly.py*
recursive-exclude awx/api/tests *
recursive-exclude awx/main/tests *

View File

@@ -34,7 +34,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
# These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==58.2.0 setuptools_scm[toml]==6.4.2 wheel==0.36.2
NAME ?= awx
@@ -118,7 +118,7 @@ virtualenv_awx:
fi; \
fi
## Install third-party requirements needed for AWX's environment.
## Install third-party requirements needed for AWX's environment.
# this does not use system site packages intentionally
requirements_awx: virtualenv_awx
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
@@ -452,7 +452,7 @@ awx/projects:
COMPOSE_UP_OPTS ?=
COMPOSE_OPTS ?=
CONTROL_PLANE_NODE_COUNT ?= 1
EXECUTION_NODE_COUNT ?= 0
EXECUTION_NODE_COUNT ?= 2
MINIKUBE_CONTAINER_GROUP ?= false
MINIKUBE_SETUP ?= false # if false, run minikube separately
EXTRA_SOURCES_ANSIBLE_OPTS ?=
@@ -593,6 +593,7 @@ pot: $(UI_BUILD_FLAG_FILE)
po: $(UI_BUILD_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
LANG = "en_us"
## generate API django .pot .po
messages:
@if [ "$(VENV_BASE)" ]; then \

View File

@@ -113,7 +113,7 @@ from awx.main.utils import (
)
from awx.main.utils.filters import SmartFilter
from awx.main.utils.named_url_graph import reset_counters
from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.validators import vars_validate_or_raise
@@ -5040,10 +5040,12 @@ class InstanceHealthCheckSerializer(BaseSerializer):
class InstanceGroupSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
capacity = serializers.SerializerMethodField()
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that ' 'are targeted for this instance group'), read_only=True
)
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance group'), read_only=True)
instances = serializers.SerializerMethodField()
is_container_group = serializers.BooleanField(
@@ -5069,22 +5071,6 @@ class InstanceGroupSerializer(BaseSerializer):
label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."),
)
max_concurrent_jobs = serializers.IntegerField(
default=0,
min_value=0,
required=False,
initial=0,
label=_('Max Concurrent Jobs'),
help_text=_("Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced."),
)
max_forks = serializers.IntegerField(
default=0,
min_value=0,
required=False,
initial=0,
label=_('Max Forks'),
help_text=_("Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced."),
)
policy_instance_list = serializers.ListField(
child=serializers.CharField(),
required=False,
@@ -5106,8 +5092,6 @@ class InstanceGroupSerializer(BaseSerializer):
"consumed_capacity",
"percent_capacity_remaining",
"jobs_running",
"max_concurrent_jobs",
"max_forks",
"jobs_total",
"instances",
"is_container_group",
@@ -5189,39 +5173,28 @@ class InstanceGroupSerializer(BaseSerializer):
# Store capacity values (globally computed) in the context
if 'task_manager_igs' not in self.context:
instance_groups_queryset = None
jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
if self.parent: # Is ListView:
instance_groups_queryset = self.parent.instance
tm_models = TaskManagerModels.init_with_consumed_capacity(
instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'],
instance_groups_queryset=instance_groups_queryset,
)
instances = TaskManagerInstances(jobs_qs)
instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances, instance_groups_queryset=instance_groups_queryset)
self.context['task_manager_igs'] = tm_models.instance_groups
self.context['task_manager_igs'] = instance_groups
return self.context['task_manager_igs']
def get_consumed_capacity(self, obj):
ig_mgr = self.get_ig_mgr()
return ig_mgr.get_consumed_capacity(obj.name)
def get_capacity(self, obj):
ig_mgr = self.get_ig_mgr()
return ig_mgr.get_capacity(obj.name)
def get_percent_capacity_remaining(self, obj):
capacity = self.get_capacity(obj)
if not capacity:
if not obj.capacity:
return 0.0
consumed_capacity = self.get_consumed_capacity(obj)
return float("{0:.2f}".format(((float(capacity) - float(consumed_capacity)) / (float(capacity))) * 100))
ig_mgr = self.get_ig_mgr()
return float("{0:.2f}".format((float(ig_mgr.get_remaining_capacity(obj.name)) / (float(obj.capacity))) * 100))
def get_instances(self, obj):
ig_mgr = self.get_ig_mgr()
return len(ig_mgr.get_instances(obj.name))
def get_jobs_running(self, obj):
ig_mgr = self.get_ig_mgr()
return ig_mgr.get_jobs_running(obj.name)
return obj.instances.count()
class ActivityStreamSerializer(BaseSerializer):
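
Both branches of get_percent_capacity_remaining in this hunk reduce to the same formula; a minimal stand-alone sketch (the numbers are illustrative, not taken from the diff):

def percent_capacity_remaining(capacity, consumed_capacity):
    # Guard against division by zero for groups with no capacity.
    if not capacity:
        return 0.0
    remaining = float(capacity) - float(consumed_capacity)
    return float("{0:.2f}".format(remaining / float(capacity) * 100))

# Example: a group with capacity 57 and 20 units consumed has ~64.91% remaining.
print(percent_capacity_remaining(57, 20))  # 64.91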

View File

@@ -5,7 +5,6 @@
import dateutil
import functools
import html
import itertools
import logging
import re
import requests
@@ -21,10 +20,9 @@ from urllib3.exceptions import ConnectTimeoutError
# Django
from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.db.models import Q, Sum, Count
from django.db.models import Q, Sum
from django.db import IntegrityError, ProgrammingError, transaction, connection
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.db.models.functions import Trunc
from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe
from django.utils.timezone import now
@@ -49,6 +47,9 @@ from rest_framework import status
from rest_framework_yaml.parsers import YAMLParser
from rest_framework_yaml.renderers import YAMLRenderer
# QSStats
import qsstats
# ANSIConv
import ansiconv
@@ -282,50 +283,30 @@ class DashboardJobsGraphView(APIView):
success_query = success_query.filter(instance_of=models.ProjectUpdate)
failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
end = now()
interval = 'day'
success_qss = qsstats.QuerySetStats(success_query, 'finished')
failed_qss = qsstats.QuerySetStats(failed_query, 'finished')
start_date = now()
if period == 'month':
start = end - dateutil.relativedelta.relativedelta(months=1)
end_date = start_date - dateutil.relativedelta.relativedelta(months=1)
interval = 'days'
elif period == 'two_weeks':
start = end - dateutil.relativedelta.relativedelta(weeks=2)
end_date = start_date - dateutil.relativedelta.relativedelta(weeks=2)
interval = 'days'
elif period == 'week':
start = end - dateutil.relativedelta.relativedelta(weeks=1)
end_date = start_date - dateutil.relativedelta.relativedelta(weeks=1)
interval = 'days'
elif period == 'day':
start = end - dateutil.relativedelta.relativedelta(days=1)
interval = 'hour'
end_date = start_date - dateutil.relativedelta.relativedelta(days=1)
interval = 'hours'
else:
return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
dashboard_data = {"jobs": {"successful": [], "failed": []}}
succ_list = dashboard_data['jobs']['successful']
fail_list = dashboard_data['jobs']['failed']
qs_s = (
success_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_s = {item['d']: item['agg'] for item in qs_s}
qs_f = (
failed_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_f = {item['d']: item['agg'] for item in qs_f}
start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
for d in itertools.count():
date = start_date + dateutil.relativedelta.relativedelta(days=d)
if date > end:
break
succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
for element in success_qss.time_series(end_date, start_date, interval=interval):
dashboard_data['jobs']['successful'].append([time.mktime(element[0].timetuple()), element[1]])
for element in failed_qss.time_series(end_date, start_date, interval=interval):
dashboard_data['jobs']['failed'].append([time.mktime(element[0].timetuple()), element[1]])
return Response(dashboard_data)
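
One side of this hunk builds the per-day series in plain Python from a dict of truncated timestamps to counts; a self-contained sketch of that fill loop, with hypothetical sample data standing in for the annotated queryset:

import itertools
import time
from datetime import datetime, timezone

from dateutil.relativedelta import relativedelta

# Hypothetical per-day counts keyed by truncated datetime, standing in for the
# values('d').annotate(agg=Count(...)) queryset results.
data_s = {datetime(2022, 10, 27, tzinfo=timezone.utc): 3}

end = datetime(2022, 10, 29, tzinfo=timezone.utc)
start_date = (end - relativedelta(weeks=1)).replace(hour=0, minute=0, second=0, microsecond=0)

succ_list = []
for d in itertools.count():
    date = start_date + relativedelta(days=d)
    if date > end:
        break
    # Missing buckets default to 0 so the series has one point per day.
    succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])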

View File

@@ -6237,5 +6237,4 @@ msgstr "%s se está actualizando."
#: awx/ui/urls.py:24
msgid "This page will refresh when complete."
msgstr "Esta página se actualizará cuando se complete."
msgstr "Esta página se actualizará cuando se complete."

View File

@@ -721,7 +721,7 @@ msgstr "DTSTART valide obligatoire dans rrule. La valeur doit commencer par : DT
#: awx/api/serializers.py:4657
msgid ""
"DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ."
msgstr "DTSTART ne peut correspondre à une date-heure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ."
msgstr "DTSTART ne peut correspondre à une DateHeure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ."
#: awx/api/serializers.py:4659
msgid "Multiple DTSTART is not supported."
@@ -6239,5 +6239,4 @@ msgstr "%s est en cours de mise à niveau."
#: awx/ui/urls.py:24
msgid "This page will refresh when complete."
msgstr "Cette page sera rafraîchie une fois terminée."
msgstr "Cette page sera rafraîchie une fois terminée."

View File

@@ -6237,5 +6237,4 @@ msgstr "Er wordt momenteel een upgrade van%s geïnstalleerd."
#: awx/ui/urls.py:24
msgid "This page will refresh when complete."
msgstr "Deze pagina wordt vernieuwd als hij klaar is."
msgstr "Deze pagina wordt vernieuwd als hij klaar is."

12 file diffs suppressed because they are too large to display.

View File

@@ -1,6 +1,7 @@
import datetime
import asyncio
import logging
import aioredis
import redis
import re
@@ -81,7 +82,7 @@ class BroadcastWebsocketStatsManager:
async def run_loop(self):
try:
redis_conn = await redis.asyncio.create_redis_pool(settings.BROKER_URL)
redis_conn = await aioredis.create_redis_pool(settings.BROKER_URL)
while True:
stats_data_str = ''.join(stat.serialize() for stat in self._stats.values())
await redis_conn.set(self._redis_key, stats_data_str)
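
The two sides of this hunk differ only in which async Redis client opens the connection; a minimal sketch of the same set() call using redis-py's bundled asyncio client, assuming redis-py >= 4.2 (the URL and key are illustrative):

import redis.asyncio as aredis  # redis-py >= 4.2 ships the asyncio client

async def publish_stats(broker_url: str, key: str, payload: str) -> None:
    # from_url takes the place of the old aioredis create_redis_pool helper.
    conn = aredis.from_url(broker_url)
    try:
        await conn.set(key, payload)
    finally:
        await conn.close()

# asyncio.run(publish_stats("redis://localhost:6379/0", "stats", "{}")) would
# write the key, given a reachable broker.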

View File

@@ -16,7 +16,7 @@ from awx.conf.license import get_license
from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
from awx.main import models
from awx.main.analytics import register
from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.scheduler.task_manager_models import TaskManagerInstances
"""
This module is used to define metrics collected by awx.main.analytics.gather()
@@ -237,8 +237,9 @@ def projects_by_scm_type(since, **kwargs):
def instance_info(since, include_hostnames=False, **kwargs):
info = {}
# Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
tm_models = TaskManagerModels.init_with_consumed_capacity(instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
for tm_instance in tm_models.instances.instances_by_hostname.values():
active_tasks = models.UnifiedJob.objects.filter(status__in=['running', 'waiting']).only('task_impact', 'controller_node', 'execution_node')
tm_instances = TaskManagerInstances(active_tasks, instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
for tm_instance in tm_instances.instances_by_hostname.values():
instance = tm_instance.obj
instance_info = {
'uuid': instance.uuid,
@@ -250,7 +251,6 @@ def instance_info(since, include_hostnames=False, **kwargs):
'enabled': instance.enabled,
'consumed_capacity': tm_instance.consumed_capacity,
'remaining_capacity': instance.capacity - tm_instance.consumed_capacity,
'node_type': instance.node_type,
}
if include_hostnames is True:
instance_info['hostname'] = instance.hostname

View File

@@ -57,7 +57,6 @@ def metrics():
[
'hostname',
'instance_uuid',
'node_type',
],
registry=REGISTRY,
)
@@ -85,7 +84,6 @@ def metrics():
[
'hostname',
'instance_uuid',
'node_type',
],
registry=REGISTRY,
)
@@ -113,7 +111,6 @@ def metrics():
[
'hostname',
'instance_uuid',
'node_type',
],
registry=REGISTRY,
)
@@ -123,7 +120,6 @@ def metrics():
[
'hostname',
'instance_uuid',
'node_type',
],
registry=REGISTRY,
)
@@ -184,13 +180,12 @@ def metrics():
instance_data = instance_info(None, include_hostnames=True)
for uuid, info in instance_data.items():
hostname = info['hostname']
node_type = info['node_type']
INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['capacity'])
INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['capacity'])
INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['cpu'])
INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['memory'])
INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['consumed_capacity'])
INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['remaining_capacity'])
INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).info(
INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['consumed_capacity'])
INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['remaining_capacity'])
INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid).info(
{
'enabled': str(instance_data[uuid]['enabled']),
'managed_by_policy': str(instance_data[uuid]['managed_by_policy']),
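
The point of this hunk is that a prometheus_client metric's declared label list and every labels() call must stay in sync, which is why node_type is added or dropped in both places together. A small self-contained sketch (metric name and label values are illustrative):

from prometheus_client import CollectorRegistry, Gauge, generate_latest

registry = CollectorRegistry()

# The label names declared here must match each labels() call exactly.
INSTANCE_CAPACITY = Gauge(
    'awx_instance_capacity',
    'Capacity of each node in the cluster',
    ['hostname', 'instance_uuid'],
    registry=registry,
)

INSTANCE_CAPACITY.labels(hostname='node1.example.org', instance_uuid='0000-0000').set(57)
print(generate_latest(registry).decode())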

View File

@@ -5,9 +5,7 @@ import logging
from django.conf import settings
from django.apps import apps
from awx.main.consumers import emit_channel_notification
from awx.main.utils import is_testing
root_key = 'awx_metrics'
logger = logging.getLogger('awx.main.analytics')
@@ -165,7 +163,7 @@ class Metrics:
Instance = apps.get_model('main', 'Instance')
if instance_name:
self.instance_name = instance_name
elif is_testing():
elif settings.IS_TESTING():
self.instance_name = "awx_testing"
else:
self.instance_name = Instance.objects.my_hostname()

View File

@@ -9,10 +9,16 @@ aim_inputs = {
'fields': [
{
'id': 'url',
'label': _('CyberArk AIM URL'),
'label': _('CyberArk CCP URL'),
'type': 'string',
'format': 'url',
},
{
'id': 'webservice_id',
'label': _('Web Service ID'),
'type': 'string',
'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
},
{
'id': 'app_id',
'label': _('Application ID'),
@@ -64,10 +70,13 @@ def aim_backend(**kwargs):
client_cert = kwargs.get('client_cert', None)
client_key = kwargs.get('client_key', None)
verify = kwargs['verify']
webservice_id = kwargs['webservice_id']
app_id = kwargs['app_id']
object_query = kwargs['object_query']
object_query_format = kwargs['object_query_format']
reason = kwargs.get('reason', None)
if webservice_id == '':
webservice_id = 'AIMWebService'
query_params = {
'AppId': app_id,
@@ -78,7 +87,7 @@ def aim_backend(**kwargs):
query_params['reason'] = reason
request_qs = '?' + urlencode(query_params, quote_via=quote)
request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts']))
request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
with CertFiles(client_cert, client_key) as cert:
res = requests.get(
@@ -92,4 +101,4 @@ def aim_backend(**kwargs):
return res.json()['Content']
aim_plugin = CredentialPlugin('CyberArk AIM Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
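
The request URL in this plugin is assembled from the base URL, the (possibly defaulted) web service ID, and the encoded query string; a hedged sketch of that assembly, with illustrative parameter names and values:

from urllib.parse import quote, urlencode, urljoin

def build_ccp_url(url, app_id, object_query, webservice_id='', reason=None):
    # Fall back to the default web service name when the field is left blank.
    if webservice_id == '':
        webservice_id = 'AIMWebService'
    query_params = {'AppId': app_id, 'Query': object_query}
    if reason:
        query_params['reason'] = reason
    request_qs = '?' + urlencode(query_params, quote_via=quote)
    request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
    return request_url + request_qs

# e.g. https://ccp.example.com/AIMWebService/api/Accounts?AppId=MyApp&Query=...
print(build_ccp_url('https://ccp.example.com', 'MyApp', 'Safe=Test;Object=demo'))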

View File

@@ -466,7 +466,7 @@ class AutoscalePool(WorkerPool):
task_name = 'unknown'
if isinstance(body, dict):
task_name = body.get('task')
logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
logger.warn(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
return super(AutoscalePool, self).write(preferred_queue, body)
except Exception:
for conn in connections.all():

View File

@@ -1,13 +1,14 @@
import inspect
import logging
import sys
import json
import time
from uuid import uuid4
from django.conf import settings
from django_guid import get_guid
from . import pg_bus_conn
from awx.main.utils import is_testing
logger = logging.getLogger('awx.main.dispatch')
@@ -92,7 +93,7 @@ class task:
obj.update(**kw)
if callable(queue):
queue = queue()
if not is_testing():
if not settings.IS_TESTING(sys.argv):
with pg_bus_conn() as conn:
conn.notify(queue, json.dumps(obj))
return (obj, queue)

View File

@@ -38,14 +38,7 @@ class Command(BaseCommand):
(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
RegisterQueue(
settings.DEFAULT_EXECUTION_QUEUE_NAME,
100,
0,
[],
is_container_group=True,
pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE,
max_forks=settings.DEFAULT_EXECUTION_QUEUE_MAX_FORKS,
max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE
).register()
else:
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)

View File

@@ -17,9 +17,7 @@ class InstanceNotFound(Exception):
class RegisterQueue:
def __init__(
self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None, max_forks=None, max_concurrent_jobs=None
):
def __init__(self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None):
self.instance_not_found_err = None
self.queuename = queuename
self.instance_percent = instance_percent
@@ -27,8 +25,6 @@ class RegisterQueue:
self.hostname_list = hostname_list
self.is_container_group = is_container_group
self.pod_spec_override = pod_spec_override
self.max_forks = max_forks
self.max_concurrent_jobs = max_concurrent_jobs
def get_create_update_instance_group(self):
created = False
@@ -49,14 +45,6 @@ class RegisterQueue:
ig.pod_spec_override = self.pod_spec_override
changed = True
if self.max_forks and (ig.max_forks != self.max_forks):
ig.max_forks = self.max_forks
changed = True
if self.max_concurrent_jobs and (ig.max_concurrent_jobs != self.max_concurrent_jobs):
ig.max_concurrent_jobs = self.max_concurrent_jobs
changed = True
if changed:
ig.save()

View File

@@ -1,14 +1,24 @@
# Generated by Django 3.2.13 on 2022-06-21 21:29
from django.db import migrations
import logging
logger = logging.getLogger("awx")
def forwards(apps, schema_editor):
InventorySource = apps.get_model('main', 'InventorySource')
InventorySource.objects.filter(update_on_project_update=True).update(update_on_launch=True)
Project = apps.get_model('main', 'Project')
Project.objects.filter(scm_inventory_sources__update_on_project_update=True).update(scm_update_on_launch=True)
sources = InventorySource.objects.filter(update_on_project_update=True)
for src in sources:
if src.update_on_launch == False:
src.update_on_launch = True
src.save(update_fields=['update_on_launch'])
logger.info(f"Setting update_on_launch to True for {src}")
proj = src.source_project
if proj and proj.scm_update_on_launch is False:
proj.scm_update_on_launch = True
proj.save(update_fields=['scm_update_on_launch'])
logger.warning(f"Setting scm_update_on_launch to True for {proj}")
class Migration(migrations.Migration):

View File

@@ -1,23 +0,0 @@
# Generated by Django 3.2.13 on 2022-10-24 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0172_prevent_instance_fallback'),
]
operations = [
migrations.AddField(
model_name='instancegroup',
name='max_concurrent_jobs',
field=models.IntegerField(default=0, help_text='Maximum number of concurrent jobs to run on this group. Zero means no limit.'),
),
migrations.AddField(
model_name='instancegroup',
name='max_forks',
field=models.IntegerField(default=0, help_text='Max forks to execute on this group. Zero means no limit.'),
),
]

View File

@@ -233,12 +233,11 @@ class Instance(HasPolicyEditsMixin, BaseModel):
if not isinstance(vargs.get('grace_period'), int):
vargs['grace_period'] = 60 # grace period of 60 minutes, need to set because CLI default will not take effect
if 'exclude_strings' not in vargs and vargs.get('file_pattern'):
active_job_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
if self.node_type == 'execution':
active_job_qs = active_job_qs.filter(execution_node=self.hostname)
else:
active_job_qs = active_job_qs.filter(controller_node=self.hostname)
active_pks = list(active_job_qs.values_list('pk', flat=True))
active_pks = list(
UnifiedJob.objects.filter(
(models.Q(execution_node=self.hostname) | models.Q(controller_node=self.hostname)) & models.Q(status__in=('running', 'waiting'))
).values_list('pk', flat=True)
)
if active_pks:
vargs['exclude_strings'] = [JOB_FOLDER_PREFIX % job_id for job_id in active_pks]
if 'remove_images' in vargs or 'image_prune' in vargs:
@@ -379,8 +378,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
default='',
)
)
max_concurrent_jobs = models.IntegerField(default=0, help_text=_("Maximum number of concurrent jobs to run on this group. Zero means no limit."))
max_forks = models.IntegerField(default=0, help_text=_("Max forks to execute on this group. Zero means no limit."))
policy_instance_percentage = models.IntegerField(default=0, help_text=_("Percentage of Instances to automatically assign to this group"))
policy_instance_minimum = models.IntegerField(default=0, help_text=_("Static minimum number of Instances to automatically assign to this group"))
policy_instance_list = JSONBlob(
@@ -394,8 +391,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
@property
def capacity(self):
if self.is_container_group:
return self.max_forks
return sum(inst.capacity for inst in self.instances.all())
@property

View File

@@ -567,6 +567,17 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
# Use .job_host_summaries.all() to get jobs affecting this host.
# Use .job_events.all() to get events affecting this host.
'''
We don't use timestamp, but we may in the future.
'''
def update_ansible_facts(self, module, facts, timestamp=None):
if module == "ansible":
self.ansible_facts.update(facts)
else:
self.ansible_facts[module] = facts
self.save()
def get_effective_host_name(self):
"""
Return the name of the host that will be used in actual ansible

View File

@@ -44,7 +44,7 @@ from awx.main.models.notifications import (
NotificationTemplate,
JobNotificationMixin,
)
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
from awx.main.models.mixins import (
ResourceMixin,
@@ -857,11 +857,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
return host_queryset.iterator()
return host_queryset
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
def start_job_fact_cache(self, destination, log_data, timeout=None):
def start_job_fact_cache(self, destination, modification_times, timeout=None):
self.log_lifecycle("start_job_fact_cache")
log_data['job_id'] = self.id
log_data['written_ct'] = 0
os.makedirs(destination, mode=0o700)
if timeout is None:
@@ -872,8 +869,6 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
else:
hosts = self._get_inventory_hosts()
last_filepath_written = None
for host in hosts:
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
@@ -883,38 +878,23 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
with codecs.open(filepath, 'w', encoding='utf-8') as f:
os.chmod(f.name, 0o600)
json.dump(host.ansible_facts, f)
log_data['written_ct'] += 1
last_filepath_written = filepath
except IOError:
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
# make note of the time we wrote the last file so we can check if any file changed later
if last_filepath_written:
return os.path.getmtime(last_filepath_written)
return None
# make note of the time we wrote the file so we can check if it changed later
modification_times[filepath] = os.path.getmtime(filepath)
@log_excess_runtime(
logger,
debug_cutoff=0.01,
msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
add_log_data=True,
)
def finish_job_fact_cache(self, destination, facts_write_time, log_data):
def finish_job_fact_cache(self, destination, modification_times):
self.log_lifecycle("finish_job_fact_cache")
log_data['job_id'] = self.id
log_data['updated_ct'] = 0
log_data['unmodified_ct'] = 0
log_data['cleared_ct'] = 0
hosts_to_update = []
for host in self._get_inventory_hosts():
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
if os.path.exists(filepath):
# If the file changed since we wrote the last facts file, pre-playbook run...
# If the file changed since we wrote it pre-playbook run...
modified = os.path.getmtime(filepath)
if (not facts_write_time) or modified > facts_write_time:
if modified > modification_times.get(filepath, 0):
with codecs.open(filepath, 'r', encoding='utf-8') as f:
try:
ansible_facts = json.load(f)
@@ -922,7 +902,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
continue
host.ansible_facts = ansible_facts
host.ansible_facts_modified = now()
hosts_to_update.append(host)
host.save(update_fields=['ansible_facts', 'ansible_facts_modified'])
system_tracking_logger.info(
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
extra=dict(
@@ -933,21 +913,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
job_id=self.id,
),
)
log_data['updated_ct'] += 1
else:
log_data['unmodified_ct'] += 1
else:
# if the file goes missing, ansible removed it (likely via clear_facts)
host.ansible_facts = {}
host.ansible_facts_modified = now()
hosts_to_update.append(host)
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
log_data['cleared_ct'] += 1
if len(hosts_to_update) > 100:
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
hosts_to_update = []
if hosts_to_update:
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
host.save()
class LaunchTimeConfigBase(BaseModel):
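
One side of this hunk records a modification time per written fact file so changed files can be detected after the playbook run; a minimal sketch of that bookkeeping, with hypothetical helper names:

import json
import os

def write_fact_files(destination, facts_by_host, modification_times):
    os.makedirs(destination, mode=0o700, exist_ok=True)
    for hostname, facts in facts_by_host.items():
        filepath = os.path.join(destination, hostname)
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(facts, f)
        # Remember when each file was written so changes can be detected later.
        modification_times[filepath] = os.path.getmtime(filepath)

def changed_fact_files(destination, modification_times):
    changed = []
    for filepath, written_at in modification_times.items():
        if os.path.exists(filepath) and os.path.getmtime(filepath) > written_at:
            changed.append(filepath)
    return changed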

View File

@@ -1351,12 +1351,12 @@ class UnifiedJob(
if required in defined_fields and not credential.has_input(required):
missing_credential_inputs.append(required)
if missing_credential_inputs:
self.job_explanation = '{} cannot start because Credential {} does not provide one or more required fields ({}).'.format(
self._meta.verbose_name.title(), credential.name, ', '.join(sorted(missing_credential_inputs))
)
self.save(update_fields=['job_explanation'])
return (False, None)
if missing_credential_inputs:
self.job_explanation = '{} cannot start because Credential {} does not provide one or more required fields ({}).'.format(
self._meta.verbose_name.title(), credential.name, ', '.join(sorted(missing_credential_inputs))
)
self.save(update_fields=['job_explanation'])
return (False, None)
needed = self.get_passwords_needed_to_start()
try:

View File

@@ -27,8 +27,8 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
websocket_urlpatterns = [
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
re_path(r'websocket/$', consumers.EventConsumer),
re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer),
]
application = AWXProtocolTypeRouter(

View File

@@ -39,11 +39,12 @@ from awx.main.utils import (
ScheduleTaskManager,
ScheduleWorkflowManager,
)
from awx.main.utils.common import task_manager_bulk_reschedule, is_testing
from awx.main.utils.common import task_manager_bulk_reschedule
from awx.main.signals import disable_activity_stream
from awx.main.constants import ACTIVE_STATES
from awx.main.scheduler.dependency_graph import DependencyGraph
from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.scheduler.task_manager_models import TaskManagerInstances
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
import awx.main.analytics.subsystem_metrics as s_metrics
from awx.main.utils import decrypt_field
@@ -70,12 +71,7 @@ class TaskBase:
# is called later.
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
self.start_time = time.time()
# We want to avoid calling settings in loops, so cache these settings at init time
self.start_task_limit = settings.START_TASK_LIMIT
self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT
self.control_task_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
for m in self.subsystem_metrics.METRICS:
if m.startswith(self.prefix):
self.subsystem_metrics.set(m, 0)
@@ -83,7 +79,7 @@ class TaskBase:
def timed_out(self):
"""Return True/False if we have met or exceeded the timeout for the task manager."""
elapsed = time.time() - self.start_time
if elapsed >= self.task_manager_timeout:
if elapsed >= settings.TASK_MANAGER_TIMEOUT:
logger.warning(f"{self.prefix} manager has run for {elapsed} which is greater than TASK_MANAGER_TIMEOUT of {settings.TASK_MANAGER_TIMEOUT}.")
return True
return False
@@ -101,7 +97,7 @@ class TaskBase:
self.all_tasks = [t for t in qs]
def record_aggregate_metrics(self, *args):
if not is_testing():
if not settings.IS_TESTING():
# increment task_manager_schedule_calls regardless if the other
# metrics are recorded
s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
@@ -475,8 +471,9 @@ class TaskManager(TaskBase):
Init AFTER we know this instance of the task manager will run because the lock is acquired.
"""
self.dependency_graph = DependencyGraph()
self.tm_models = TaskManagerModels()
self.controlplane_ig = self.tm_models.instance_groups.controlplane_ig
self.instances = TaskManagerInstances(self.all_tasks)
self.instance_groups = TaskManagerInstanceGroups(instances_by_hostname=self.instances)
self.controlplane_ig = self.instance_groups.controlplane_ig
def job_blocked_by(self, task):
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
@@ -508,15 +505,7 @@ class TaskManager(TaskBase):
@timeit
def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
# Just like for process_running_tasks, add the job to the dependency graph and
# ask the TaskManagerInstanceGroups object to update consumed capacity on all
# implicated instances and container groups.
self.dependency_graph.add_job(task)
if instance_group is not None:
task.instance_group = instance_group
# We need the instance group assigned to correctly account for container group max_concurrent_jobs and max_forks
self.tm_models.consume_capacity(task)
self.subsystem_metrics.inc(f"{self.prefix}_tasks_started", 1)
self.start_task_limit -= 1
if self.start_task_limit == 0:
@@ -524,6 +513,12 @@ class TaskManager(TaskBase):
ScheduleTaskManager().schedule()
from awx.main.tasks.system import handle_work_error, handle_work_success
# update capacity for control node and execution node
if task.controller_node:
self.instances[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
if task.execution_node:
self.instances[task.execution_node].consume_capacity(task.task_impact)
dependent_tasks = dependent_tasks or []
task_actual = {
@@ -551,6 +546,7 @@ class TaskManager(TaskBase):
ScheduleWorkflowManager().schedule()
# at this point we already have control/execution nodes selected for the following cases
else:
task.instance_group = instance_group
execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
logger.debug(
f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
@@ -584,7 +580,6 @@ class TaskManager(TaskBase):
if type(task) is WorkflowJob:
ScheduleWorkflowManager().schedule()
self.dependency_graph.add_job(task)
self.tm_models.consume_capacity(task)
@timeit
def process_pending_tasks(self, pending_tasks):
@@ -616,11 +611,11 @@ class TaskManager(TaskBase):
# Determine if there is control capacity for the task
if task.capacity_type == 'control':
control_impact = task.task_impact + self.control_task_impact
control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
else:
control_impact = self.control_task_impact
control_instance = self.tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(
task, instance_group_name=self.controlplane_ig.name, impact=control_impact, capacity_type='control'
control_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
control_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
task, instance_group_name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, impact=control_impact, capacity_type='control'
)
if not control_instance:
self.task_needs_capacity(task, tasks_to_update_job_explanation)
@@ -631,19 +626,15 @@ class TaskManager(TaskBase):
# All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
if task.capacity_type == 'control':
if not self.tm_models.instance_groups[self.controlplane_ig.name].has_remaining_capacity(control_impact=True):
continue
task.execution_node = control_instance.hostname
execution_instance = self.tm_models.instances[control_instance.hostname].obj
execution_instance = self.instances[control_instance.hostname].obj
task.log_lifecycle("controller_node_chosen")
task.log_lifecycle("execution_node_chosen")
self.start_task(task, self.controlplane_ig, task.get_jobs_fail_chain(), execution_instance)
found_acceptable_queue = True
continue
for instance_group in self.tm_models.instance_groups.get_instance_groups_from_task_cache(task):
if not self.tm_models.instance_groups[instance_group.name].has_remaining_capacity(task):
continue
for instance_group in self.instance_groups.get_instance_groups_from_task_cache(task):
if instance_group.is_container_group:
self.start_task(task, instance_group, task.get_jobs_fail_chain(), None)
found_acceptable_queue = True
@@ -651,9 +642,9 @@ class TaskManager(TaskBase):
# at this point we know the instance group is NOT a container group
# because if it was, it would have started the task and broke out of the loop.
execution_instance = self.tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(
execution_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
task, instance_group_name=instance_group.name, add_hybrid_control_cost=True
) or self.tm_models.instance_groups.find_largest_idle_instance(instance_group_name=instance_group.name, capacity_type=task.capacity_type)
) or self.instance_groups.find_largest_idle_instance(instance_group_name=instance_group.name, capacity_type=task.capacity_type)
if execution_instance:
task.execution_node = execution_instance.hostname
@@ -669,7 +660,7 @@ class TaskManager(TaskBase):
task.log_format, instance_group.name, execution_instance.hostname, execution_instance.remaining_capacity
)
)
execution_instance = self.tm_models.instances[execution_instance.hostname].obj
execution_instance = self.instances[execution_instance.hostname].obj
self.start_task(task, instance_group, task.get_jobs_fail_chain(), execution_instance)
found_acceptable_queue = True
break
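
Both versions of the task manager pick the instance that would retain the most capacity after taking the task; a plain-dict sketch of that selection rule (field names are simplified stand-ins):

def fit_task_to_most_remaining_capacity_instance(instances, impact, capacity_type='execution'):
    """Pick the instance that would have the most capacity left after taking the task."""
    best, best_remaining = None, -1
    for instance in instances:
        if instance['node_type'] not in (capacity_type, 'hybrid'):
            continue
        would_be_remaining = instance['remaining_capacity'] - impact
        if would_be_remaining >= 0 and would_be_remaining > best_remaining:
            best, best_remaining = instance, would_be_remaining
    return best

nodes = [
    {'hostname': 'exec1', 'node_type': 'execution', 'remaining_capacity': 10},
    {'hostname': 'exec2', 'node_type': 'execution', 'remaining_capacity': 40},
]
print(fit_task_to_most_remaining_capacity_instance(nodes, impact=7)['hostname'])  # exec2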

View File

@@ -15,18 +15,15 @@ logger = logging.getLogger('awx.main.scheduler')
class TaskManagerInstance:
"""A class representing minimal data the task manager needs to represent an Instance."""
def __init__(self, obj, **kwargs):
def __init__(self, obj):
self.obj = obj
self.node_type = obj.node_type
self.consumed_capacity = 0
self.capacity = obj.capacity
self.hostname = obj.hostname
self.jobs_running = 0
def consume_capacity(self, impact, job_impact=False):
def consume_capacity(self, impact):
self.consumed_capacity += impact
if job_impact:
self.jobs_running += 1
@property
def remaining_capacity(self):
@@ -36,106 +33,9 @@ class TaskManagerInstance:
return remaining
class TaskManagerInstanceGroup:
"""A class representing minimal data the task manager needs to represent an InstanceGroup."""
def __init__(self, obj, task_manager_instances=None, **kwargs):
self.name = obj.name
self.is_container_group = obj.is_container_group
self.container_group_jobs = 0
self.container_group_consumed_forks = 0
_instances = obj.instances.all()
# We want the list of TaskManagerInstance objects because these are shared across the TaskManagerInstanceGroup objects.
# This way when we consume capacity on an instance that is in multiple groups, we tabulate across all the groups correctly.
self.instances = [task_manager_instances[instance.hostname] for instance in _instances if instance.hostname in task_manager_instances]
self.instance_hostnames = tuple([instance.hostname for instance in _instances if instance.hostname in task_manager_instances])
self.max_concurrent_jobs = obj.max_concurrent_jobs
self.max_forks = obj.max_forks
self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
def consume_capacity(self, task):
"""We only consume capacity on an instance group level if it is a container group. Otherwise we consume capacity on an instance level."""
if self.is_container_group:
self.container_group_jobs += 1
self.container_group_consumed_forks += task.task_impact
else:
raise RuntimeError("We only track capacity for container groups at the instance group level. Otherwise, consume capacity on instances.")
def get_remaining_instance_capacity(self):
return sum(inst.remaining_capacity for inst in self.instances)
def get_instance_capacity(self):
return sum(inst.capacity for inst in self.instances)
def get_consumed_instance_capacity(self):
return sum(inst.consumed_capacity for inst in self.instances)
def get_instance_jobs_running(self):
return sum(inst.jobs_running for inst in self.instances)
def get_jobs_running(self):
if self.is_container_group:
return self.container_group_jobs
return sum(inst.jobs_running for inst in self.instances)
def get_capacity(self):
"""This reports any type of capacity, including that of container group jobs.
Container groups don't really have capacity, but if they have max_forks set,
we can interperet that as how much capacity the user has defined them to have.
"""
if self.is_container_group:
return self.max_forks
return self.get_instance_capacity()
def get_consumed_capacity(self):
if self.is_container_group:
return self.container_group_consumed_forks
return self.get_consumed_instance_capacity()
def get_remaining_capacity(self):
return self.get_capacity() - self.get_consumed_capacity()
def has_remaining_capacity(self, task=None, control_impact=False):
"""Pass either a task or control_impact=True to determine if the IG has capacity to run the control task or job task."""
task_impact = self.control_task_impact if control_impact else task.task_impact
job_impact = 0 if control_impact else 1
task_string = f"task {task.log_format} with impact of {task_impact}" if task else f"control task with impact of {task_impact}"
# We only want to loop over instances if self.max_concurrent_jobs is set
if self.max_concurrent_jobs == 0:
# Override the calculated remaining capacity, because when max_concurrent_jobs == 0 we don't enforce any max
remaining_jobs = 0
else:
remaining_jobs = self.max_concurrent_jobs - self.get_jobs_running() - job_impact
# We only want to loop over instances if self.max_forks is set
if self.max_forks == 0:
# Override the calculated remaining capacity, because when max_forks == 0 we don't enforce any max
remaining_forks = 0
else:
remaining_forks = self.max_forks - self.get_consumed_capacity() - task_impact
if remaining_jobs < 0 or remaining_forks < 0:
# A value less than zero means the task will not fit on the group
if remaining_jobs < 0:
logger.debug(f"{task_string} cannot fit on instance group {self.name} with {remaining_jobs} remaining jobs")
if remaining_forks < 0:
logger.debug(f"{task_string} cannot fit on instance group {self.name} with {remaining_forks} remaining forks")
return False
# Returning true means there is enough remaining capacity on the group to run the task (or no instance group level limits are being set)
logger.debug(f"{task_string} can fit on instance group {self.name} with {remaining_forks} remaining forks and {remaining_jobs}")
return True
class TaskManagerInstances:
def __init__(self, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled'), **kwargs):
def __init__(self, active_tasks, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled')):
self.instances_by_hostname = dict()
self.instance_groups_container_group_jobs = dict()
self.instance_groups_container_group_consumed_forks = dict()
self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
if instances is None:
instances = (
Instance.objects.filter(hostname__isnull=False, node_state=Instance.States.READY, enabled=True)
@@ -143,15 +43,18 @@ class TaskManagerInstances:
.only('node_type', 'node_state', 'capacity', 'hostname', 'enabled')
)
for instance in instances:
self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance, **kwargs)
self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance)
def consume_capacity(self, task):
control_instance = self.instances_by_hostname.get(task.controller_node, '')
execution_instance = self.instances_by_hostname.get(task.execution_node, '')
if execution_instance and execution_instance.node_type in ('hybrid', 'execution'):
self.instances_by_hostname[task.execution_node].consume_capacity(task.task_impact, job_impact=True)
if control_instance and control_instance.node_type in ('hybrid', 'control'):
self.instances_by_hostname[task.controller_node].consume_capacity(self.control_task_impact)
# initialize remaining capacity based on currently waiting and running tasks
for task in active_tasks:
if task.status not in ['waiting', 'running']:
continue
control_instance = self.instances_by_hostname.get(task.controller_node, '')
execution_instance = self.instances_by_hostname.get(task.execution_node, '')
if execution_instance and execution_instance.node_type in ('hybrid', 'execution'):
self.instances_by_hostname[task.execution_node].consume_capacity(task.task_impact)
if control_instance and control_instance.node_type in ('hybrid', 'control'):
self.instances_by_hostname[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
def __getitem__(self, hostname):
return self.instances_by_hostname.get(hostname)
@@ -161,57 +64,42 @@ class TaskManagerInstances:
class TaskManagerInstanceGroups:
"""A class representing minimal data the task manager needs to represent all the InstanceGroups."""
"""A class representing minimal data the task manager needs to represent an InstanceGroup."""
def __init__(self, task_manager_instances=None, instance_groups=None, instance_groups_queryset=None, **kwargs):
def __init__(self, instances_by_hostname=None, instance_groups=None, instance_groups_queryset=None):
self.instance_groups = dict()
self.task_manager_instances = task_manager_instances if task_manager_instances is not None else TaskManagerInstances()
self.controlplane_ig = None
self.pk_ig_map = dict()
self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
self.controlplane_ig_name = kwargs.get('controlplane_ig_name', settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)
if instance_groups is not None: # for testing
self.instance_groups = {ig.name: TaskManagerInstanceGroup(ig, self.task_manager_instances, **kwargs) for ig in instance_groups}
self.pk_ig_map = {ig.pk: ig for ig in instance_groups}
self.instance_groups = instance_groups
else:
if instance_groups_queryset is None:
instance_groups_queryset = InstanceGroup.objects.prefetch_related('instances').only(
'name', 'instances', 'max_concurrent_jobs', 'max_forks', 'is_container_group'
)
instance_groups_queryset = InstanceGroup.objects.prefetch_related('instances').only('name', 'instances')
for instance_group in instance_groups_queryset:
if instance_group.name == self.controlplane_ig_name:
if instance_group.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
self.controlplane_ig = instance_group
self.instance_groups[instance_group.name] = TaskManagerInstanceGroup(instance_group, self.task_manager_instances, **kwargs)
self.instance_groups[instance_group.name] = dict(
instances=[
instances_by_hostname[instance.hostname] for instance in instance_group.instances.all() if instance.hostname in instances_by_hostname
],
)
self.pk_ig_map[instance_group.pk] = instance_group
def __getitem__(self, ig_name):
return self.instance_groups.get(ig_name)
def __contains__(self, ig_name):
return ig_name in self.instance_groups
def get_remaining_capacity(self, group_name):
return self.instance_groups[group_name].get_remaining_instance_capacity()
instances = self.instance_groups[group_name]['instances']
return sum(inst.remaining_capacity for inst in instances)
def get_consumed_capacity(self, group_name):
return self.instance_groups[group_name].get_consumed_capacity()
def get_jobs_running(self, group_name):
return self.instance_groups[group_name].get_jobs_running()
def get_capacity(self, group_name):
return self.instance_groups[group_name].get_capacity()
def get_instances(self, group_name):
return self.instance_groups[group_name].instances
instances = self.instance_groups[group_name]['instances']
return sum(inst.consumed_capacity for inst in instances)
def fit_task_to_most_remaining_capacity_instance(self, task, instance_group_name, impact=None, capacity_type=None, add_hybrid_control_cost=False):
impact = impact if impact else task.task_impact
capacity_type = capacity_type if capacity_type else task.capacity_type
instance_most_capacity = None
most_remaining_capacity = -1
instances = self.instance_groups[instance_group_name].instances
instances = self.instance_groups[instance_group_name]['instances']
for i in instances:
if i.node_type not in (capacity_type, 'hybrid'):
@@ -219,7 +107,7 @@ class TaskManagerInstanceGroups:
would_be_remaining = i.remaining_capacity - impact
# hybrid nodes _always_ control their own tasks
if add_hybrid_control_cost and i.node_type == 'hybrid':
would_be_remaining -= self.control_task_impact
would_be_remaining -= settings.AWX_CONTROL_NODE_TASK_IMPACT
if would_be_remaining >= 0 and (instance_most_capacity is None or would_be_remaining > most_remaining_capacity):
instance_most_capacity = i
most_remaining_capacity = would_be_remaining
@@ -227,13 +115,10 @@ class TaskManagerInstanceGroups:
def find_largest_idle_instance(self, instance_group_name, capacity_type='execution'):
largest_instance = None
instances = self.instance_groups[instance_group_name].instances
instances = self.instance_groups[instance_group_name]['instances']
for i in instances:
if i.node_type not in (capacity_type, 'hybrid'):
continue
if i.capacity <= 0:
# We don't want to select an idle instance with 0 capacity
continue
if (hasattr(i, 'jobs_running') and i.jobs_running == 0) or i.remaining_capacity == i.capacity:
if largest_instance is None:
largest_instance = i
@@ -254,56 +139,3 @@ class TaskManagerInstanceGroups:
logger.warn(f"No instance groups in cache exist, defaulting to global instance groups for task {task}")
return task.global_instance_groups
return igs
class TaskManagerModels:
def __init__(self, **kwargs):
# We want to avoid calls to settings over and over in loops, so cache this information here
kwargs['control_task_impact'] = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
kwargs['controlplane_ig_name'] = kwargs.get('controlplane_ig_name', settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)
self.instances = TaskManagerInstances(**kwargs)
self.instance_groups = TaskManagerInstanceGroups(task_manager_instances=self.instances, **kwargs)
@classmethod
def init_with_consumed_capacity(cls, **kwargs):
tmm = cls(**kwargs)
tasks = kwargs.get('tasks', None)
if tasks is None:
instance_group_queryset = kwargs.get('instance_groups_queryset', None)
# No tasks were provided, so we will fetch them from the database
task_status_filter_list = kwargs.get('task_status_filter_list', ['running', 'waiting'])
task_fields = kwargs.get('task_fields', ('task_impact', 'controller_node', 'execution_node', 'instance_group'))
from awx.main.models import UnifiedJob
if instance_group_queryset is not None:
logger.debug("******************INSTANCE GROUP QUERYSET PASSED -- FILTERING TASKS ****************************")
# Sometimes things like the serializer pass a queryset that looks at not all instance groups. in this case,
# we also need to filter the tasks we look at
tasks = UnifiedJob.objects.filter(status__in=task_status_filter_list, instance_group__in=[ig.id for ig in instance_group_queryset]).only(
*task_fields
)
else:
# No instance group query set, look at all tasks in whole system
tasks = UnifiedJob.objects.filter(status__in=task_status_filter_list).only(*task_fields)
for task in tasks:
tmm.consume_capacity(task)
return tmm
def consume_capacity(self, task):
# Consume capacity on instances, which bubbles up to instance groups they are a member of
self.instances.consume_capacity(task)
# For container group jobs, additionally we must account for capacity consumed since
# The container groups have no instances to look at to track how many jobs/forks are consumed
if task.instance_group_id:
if not task.instance_group_id in self.instance_groups.pk_ig_map.keys():
logger.warn(
f"Task {task.log_format} assigned {task.instance_group_id} but this instance group not present in map of instance groups{self.instance_groups.pk_ig_map.keys()}"
)
else:
ig = self.instance_groups.pk_ig_map[task.instance_group_id]
if ig.is_container_group:
self.instance_groups[ig.name].consume_capacity(task)
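
Underneath both versions of these classes, capacity accounting charges the execution node for the task's impact and the controller node for a fixed control overhead; a stripped-down sketch with simplified stand-in classes (not the real models):

class SimpleInstance:
    """Stand-in for a node whose consumed capacity is tracked in memory."""

    def __init__(self, hostname, capacity, node_type='hybrid'):
        self.hostname = hostname
        self.capacity = capacity
        self.node_type = node_type
        self.consumed_capacity = 0

    def consume_capacity(self, impact):
        self.consumed_capacity += impact

    @property
    def remaining_capacity(self):
        return max(self.capacity - self.consumed_capacity, 0)

def consume_for_task(instances_by_hostname, task, control_task_impact):
    # Charge the execution node for the job itself and the controller node
    # for the fixed per-job control overhead, as both versions above do.
    execution = instances_by_hostname.get(task.get('execution_node'))
    controller = instances_by_hostname.get(task.get('controller_node'))
    if execution and execution.node_type in ('hybrid', 'execution'):
        execution.consume_capacity(task['task_impact'])
    if controller and controller.node_type in ('hybrid', 'control'):
        controller.consume_capacity(control_task_impact)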

View File

@@ -426,7 +426,7 @@ class BaseTask(object):
"""
instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir):
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
"""
Hook for any steps to run after job/task is marked as complete.
"""
@@ -469,6 +469,7 @@ class BaseTask(object):
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
fact_modification_times = {}
self.runner_callback.event_ct = 0
'''
@@ -497,6 +498,14 @@ class BaseTask(object):
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value
private_data_files, ssh_key_data = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
@@ -637,7 +646,7 @@ class BaseTask(object):
self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed')
try:
self.final_run_hook(self.instance, status, private_data_dir)
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
@@ -1057,19 +1066,12 @@ class RunJob(SourceControlMixin, BaseTask):
# ran inside of the event saving code
update_smart_memberships_for_inventory(job.inventory)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if job.use_fact_cache:
self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))
def build_project_dir(self, job, private_data_dir):
self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
def post_run_hook(self, job, status):
super(RunJob, self).post_run_hook(job, status)
job.refresh_from_db(fields=['job_env'])
private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
if not private_data_dir:
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
@@ -1077,11 +1079,9 @@ class RunJob(SourceControlMixin, BaseTask):
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
self.facts_write_time,
fact_modification_times,
)
def final_run_hook(self, job, status, private_data_dir):
super(RunJob, self).final_run_hook(job, status, private_data_dir)
try:
inventory = job.inventory
except Inventory.DoesNotExist:

View File

@@ -61,15 +61,10 @@ def read_receptor_config():
return yaml.safe_load(f)
def work_signing_enabled(config_data):
for section in config_data:
if 'work-verification' in section:
return True
return False
def get_receptor_sockfile():
data = read_receptor_config()
def get_receptor_sockfile(config_data):
for section in config_data:
for section in data:
for entry_name, entry_data in section.items():
if entry_name == 'control-service':
if 'filename' in entry_data:
@@ -80,11 +75,12 @@ def get_receptor_sockfile(config_data):
raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} does not have control-service entry needed to get sockfile')
def get_tls_client(config_data, use_stream_tls=None):
def get_tls_client(use_stream_tls=None):
if not use_stream_tls:
return None
for section in config_data:
data = read_receptor_config()
for section in data:
for entry_name, entry_data in section.items():
if entry_name == 'tls-client':
if 'name' in entry_data:
@@ -92,12 +88,10 @@ def get_tls_client(config_data, use_stream_tls=None):
return None
def get_receptor_ctl(config_data=None):
if config_data is None:
config_data = read_receptor_config()
receptor_sockfile = get_receptor_sockfile(config_data)
def get_receptor_ctl():
receptor_sockfile = get_receptor_sockfile()
try:
return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True))
return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(True))
except RuntimeError:
return ReceptorControl(receptor_sockfile)
@@ -165,18 +159,15 @@ def run_until_complete(node, timing_data=None, **kwargs):
"""
Runs an ansible-runner work_type on remote node, waits until it completes, then returns stdout.
"""
config_data = read_receptor_config()
receptor_ctl = get_receptor_ctl(config_data)
receptor_ctl = get_receptor_ctl()
use_stream_tls = getattr(get_conn_type(node, receptor_ctl), 'name', None) == "STREAMTLS"
kwargs.setdefault('tlsclient', get_tls_client(config_data, use_stream_tls))
kwargs.setdefault('tlsclient', get_tls_client(use_stream_tls))
kwargs.setdefault('ttl', '20s')
kwargs.setdefault('payload', '')
if work_signing_enabled(config_data):
kwargs['signwork'] = True
transmit_start = time.time()
result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, **kwargs)
result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=True, **kwargs)
unit_id = result['unitid']
run_start = time.time()
@@ -311,8 +302,7 @@ class AWXReceptorJob:
def run(self):
# We establish a connection to the Receptor socket
self.config_data = read_receptor_config()
receptor_ctl = get_receptor_ctl(self.config_data)
receptor_ctl = get_receptor_ctl()
res = None
try:
@@ -337,7 +327,7 @@ class AWXReceptorJob:
if self.work_type == 'ansible-runner':
work_submit_kw['node'] = self.task.instance.execution_node
use_stream_tls = get_conn_type(work_submit_kw['node'], receptor_ctl).name == "STREAMTLS"
work_submit_kw['tlsclient'] = get_tls_client(self.config_data, use_stream_tls)
work_submit_kw['tlsclient'] = get_tls_client(use_stream_tls)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
transmitter_future = executor.submit(self.transmit, sockin)
@@ -487,9 +477,7 @@ class AWXReceptorJob:
@property
def sign_work(self):
if self.work_type in ('ansible-runner', 'local'):
return work_signing_enabled(self.config_data)
return False
return True if self.work_type in ('ansible-runner', 'local') else False
@property
def work_type(self):
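
For readers unfamiliar with receptor.conf, the functions touched in this file all walk the same structure: the config is a YAML list of single-key mappings, scanned for a control-service entry (to find the control socket) and a work-verification entry (to decide whether submitted work should be signed). A rough sketch of that scan, with a hypothetical config path and assuming PyYAML is available:

import yaml

RECEPTOR_CONF = '/etc/receptor/receptor.conf'  # hypothetical path for this sketch

def load_config(path=RECEPTOR_CONF):
    with open(path) as f:
        return yaml.safe_load(f)  # a list of single-key dicts

def find_control_sockfile(config_data):
    # Return the control-service socket path, if one is configured.
    for section in config_data:
        for entry_name, entry_data in section.items():
            if entry_name == 'control-service' and 'filename' in entry_data:
                return entry_data['filename']
    raise RuntimeError('no control-service entry with a filename was found')

def signing_enabled(config_data):
    # Work signing is on when any section declares work-verification.
    return any('work-verification' in section for section in config_data)

config = [
    {'control-service': {'service': 'control', 'filename': '/tmp/receptor.sock'}},
    {'work-verification': {'publickey': '/etc/receptor/work_public.pem'}},
]
print(find_control_sockfile(config))  # /tmp/receptor.sock
print(signing_enabled(config))        # True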

View File

@@ -4,7 +4,7 @@ from awx.main.models import (
Instance,
InstanceGroup,
)
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
class TestInstanceGroupInstanceMapping(TransactionTestCase):
@@ -23,10 +23,11 @@ class TestInstanceGroupInstanceMapping(TransactionTestCase):
def test_mapping(self):
self.sample_cluster()
with self.assertNumQueries(3):
instance_groups = TaskManagerInstanceGroups()
instances = TaskManagerInstances([]) # empty task list
instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances)
ig_instance_map = instance_groups.instance_groups
assert set(i.hostname for i in ig_instance_map['ig_small'].instances) == set(['i1'])
assert set(i.hostname for i in ig_instance_map['ig_large'].instances) == set(['i2', 'i3'])
assert set(i.hostname for i in ig_instance_map['default'].instances) == set(['i2'])
assert set(i.hostname for i in ig_instance_map['ig_small']['instances']) == set(['i1'])
assert set(i.hostname for i in ig_instance_map['ig_large']['instances']) == set(['i2', 'i3'])
assert set(i.hostname for i in ig_instance_map['default']['instances']) == set(['i2'])

View File

@@ -10,10 +10,6 @@ from awx.main.utils import (
create_temporary_fifo,
)
from awx.main.scheduler import TaskManager
from . import create_job
@pytest.fixture
def containerized_job(default_instance_group, kube_credential, job_template_factory):
@@ -38,50 +34,6 @@ def test_containerized_job(containerized_job):
assert containerized_job.instance_group.credential.kubernetes
@pytest.mark.django_db
def test_max_concurrent_jobs_blocks_start_of_new_jobs(controlplane_instance_group, containerized_job, mocker):
"""Construct a scenario where only 1 job will fit within the max_concurrent_jobs of the container group.
Since max_concurrent_jobs is set to 1, even though two jobs are pending and would be
launched into the container group, only one will be started.
"""
containerized_job.unified_job_template.allow_simultaneous = True
containerized_job.unified_job_template.save()
default_instance_group = containerized_job.instance_group
default_instance_group.max_concurrent_jobs = 1
default_instance_group.save()
task_impact = 1
# Create a second job that should not be scheduled at first, blocked by the other
create_job(containerized_job.unified_job_template)
tm = TaskManager()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_called_once()
@pytest.mark.django_db
def test_max_forks_blocks_start_of_new_jobs(controlplane_instance_group, containerized_job, mocker):
"""Construct a scenario where only 1 job will fit within the max_forks of the container group.
In this case, we set the container_group max_forks to 10, and make the task_impact of a job 6.
Therefore, only 1 job will fit within the max of 10.
"""
containerized_job.unified_job_template.allow_simultaneous = True
containerized_job.unified_job_template.save()
default_instance_group = containerized_job.instance_group
default_instance_group.max_forks = 10
# Create a second job that should not be scheduled
create_job(containerized_job.unified_job_template)
tm = TaskManager()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = 6
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
tm.schedule()
tm.start_task.assert_called_once()
@pytest.mark.django_db
def test_kubectl_ssl_verification(containerized_job, default_job_execution_environment):
containerized_job.execution_environment = default_job_execution_environment

View File

@@ -248,76 +248,6 @@ def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocke
mock_job.assert_called_once_with(j2, controlplane_instance_group, [], instance)
@pytest.mark.django_db
def test_max_concurrent_jobs_ig_capacity_blocking(hybrid_instance, job_template_factory, mocker):
"""When max_concurrent_jobs of an instance group is more restrictive than capacity of instances, enforce max_concurrent_jobs."""
instance = hybrid_instance
controlplane_instance_group = instance.rampart_groups.first()
# We will expect only 1 job to be started
controlplane_instance_group.max_concurrent_jobs = 1
controlplane_instance_group.save()
num_jobs = 3
jobs = []
for i in range(num_jobs):
jobs.append(
create_job(job_template_factory(f'jt{i}', organization=f'org{i}', project=f'proj{i}', inventory=f'inv{i}', credential=f'cred{i}').job_template)
)
tm = TaskManager()
task_impact = 1
# Sanity check that multiple jobs would run if not for the max_concurrent_jobs setting.
assert task_impact * num_jobs < controlplane_instance_group.capacity
tm = TaskManager()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_called_once()
jobs[0].status = 'running'
jobs[0].controller_node = instance.hostname
jobs[0].execution_node = instance.hostname
jobs[0].instance_group = controlplane_instance_group
jobs[0].save()
# while that job is running, we should not start another job
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_not_called()
# now job is done, we should start one of the two other jobs
jobs[0].status = 'successful'
jobs[0].save()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_called_once()
@pytest.mark.django_db
def test_max_forks_ig_capacity_blocking(hybrid_instance, job_template_factory, mocker):
"""When max_forks of an instance group is less than the capacity of instances, enforce max_forks."""
instance = hybrid_instance
controlplane_instance_group = instance.rampart_groups.first()
controlplane_instance_group.max_forks = 15
controlplane_instance_group.save()
task_impact = 10
num_jobs = 2
# Sanity check that 2 jobs would run if not for the max_forks setting.
assert controlplane_instance_group.max_forks < controlplane_instance_group.capacity
assert task_impact * num_jobs > controlplane_instance_group.max_forks
assert task_impact * num_jobs < controlplane_instance_group.capacity
for i in range(num_jobs):
create_job(job_template_factory(f'jt{i}', organization=f'org{i}', project=f'proj{i}', inventory=f'inv{i}', credential=f'cred{i}').job_template)
tm = TaskManager()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_called_once()
@pytest.mark.django_db
def test_single_job_dependencies_project_launch(controlplane_instance_group, job_template_factory, mocker):
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
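
Both removed tests assert the same gating rule: before starting another task in a group, compare the group's running-job count and consumed forks against max_concurrent_jobs and max_forks, where zero means unlimited. A compact sketch of that check with illustrative names, not the TaskManager API:

def group_has_room(task_impact, jobs_running, consumed_forks, max_concurrent_jobs, max_forks):
    # Zero means "no limit" for both settings.
    if max_concurrent_jobs and jobs_running + 1 > max_concurrent_jobs:
        return False
    if max_forks and consumed_forks + task_impact > max_forks:
        return False
    return True

# max_forks=15 with 10 forks already consumed cannot take a task_impact of 10
print(group_has_room(10, jobs_running=1, consumed_forks=10, max_concurrent_jobs=0, max_forks=15))  # False
# max_concurrent_jobs=1 with nothing running can take one more job
print(group_has_room(10, jobs_running=0, consumed_forks=0, max_concurrent_jobs=1, max_forks=0))    # True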

View File

@@ -1,7 +1,7 @@
import pytest
from unittest import mock
from awx.main.models import AdHocCommand, InventoryUpdate, JobTemplate, Job
from awx.main.models import AdHocCommand, InventoryUpdate, JobTemplate
from awx.main.models.activity_stream import ActivityStream
from awx.main.models.ha import Instance, InstanceGroup
from awx.main.tasks.system import apply_cluster_membership_policies
@@ -15,24 +15,6 @@ def test_default_tower_instance_group(default_instance_group, job_factory):
assert default_instance_group in job_factory().preferred_instance_groups
@pytest.mark.django_db
@pytest.mark.parametrize('node_type', ('execution', 'control'))
@pytest.mark.parametrize('active', (True, False))
def test_get_cleanup_task_kwargs_active_jobs(node_type, active):
instance = Instance.objects.create(hostname='foobar', node_type=node_type)
job_kwargs = dict()
job_kwargs['controller_node' if node_type == 'control' else 'execution_node'] = instance.hostname
job_kwargs['status'] = 'running' if active else 'successful'
job = Job.objects.create(**job_kwargs)
kwargs = instance.get_cleanup_task_kwargs()
if active:
assert kwargs['exclude_strings'] == [f'awx_{job.pk}_']
else:
assert 'exclude_strings' not in kwargs
@pytest.mark.django_db
class TestPolicyTaskScheduling:
"""Tests make assertions about when the policy task gets scheduled"""

View File

@@ -121,8 +121,8 @@ def test_python_and_js_licenses():
return errors
base_dir = settings.BASE_DIR
api_licenses = index_licenses('%s/../licenses' % base_dir)
ui_licenses = index_licenses('%s/../licenses/ui' % base_dir)
api_licenses = index_licenses('%s/../docs/licenses' % base_dir)
ui_licenses = index_licenses('%s/../docs/licenses/ui' % base_dir)
api_requirements = read_api_requirements('%s/../requirements' % base_dir)
ui_requirements = read_ui_requirements('%s/ui' % base_dir)

View File

@@ -1,7 +1,10 @@
import pytest
from unittest import mock
from unittest.mock import Mock
from decimal import Decimal
from awx.main.models import Instance
from awx.main.models import InstanceGroup, Instance
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
@pytest.mark.parametrize('capacity_adjustment', [0.0, 0.25, 0.5, 0.75, 1, 1.5, 3])
@@ -14,6 +17,83 @@ def test_capacity_adjustment_no_save(capacity_adjustment):
assert inst.capacity == (float(inst.capacity_adjustment) * abs(inst.mem_capacity - inst.cpu_capacity) + min(inst.mem_capacity, inst.cpu_capacity))
def T(impact):
j = mock.Mock(spec_set=['task_impact', 'capacity_type'])
j.task_impact = impact
j.capacity_type = 'execution'
return j
def Is(param):
"""
param:
[remaining_capacity1, remaining_capacity2, remaining_capacity3, ...]
[(jobs_running1, capacity1), (jobs_running2, capacity2), (jobs_running3, capacity3), ...]
"""
instances = []
if isinstance(param[0], tuple):
for (jobs_running, capacity) in param:
inst = Mock()
inst.capacity = capacity
inst.jobs_running = jobs_running
inst.node_type = 'execution'
instances.append(inst)
else:
for i in param:
inst = Mock()
inst.remaining_capacity = i
inst.node_type = 'execution'
instances.append(inst)
return instances
class TestInstanceGroup(object):
@pytest.mark.parametrize(
'task,instances,instance_fit_index,reason',
[
(T(100), Is([100]), 0, "Only one, pick it"),
(T(100), Is([100, 100]), 0, "Two equally good fits, pick the first"),
(T(100), Is([50, 100]), 1, "First instance not as good as second instance"),
(T(100), Is([50, 0, 20, 100, 100, 100, 30, 20]), 3, "Pick Instance [3] as it is the first that the task fits in."),
(T(100), Is([50, 0, 20, 99, 11, 1, 5, 99]), None, "The task don't a fit, you must a quit!"),
],
)
def test_fit_task_to_most_remaining_capacity_instance(self, task, instances, instance_fit_index, reason):
InstanceGroup(id=10)
tm_igs = TaskManagerInstanceGroups(instance_groups={'controlplane': {'instances': instances}})
instance_picked = tm_igs.fit_task_to_most_remaining_capacity_instance(task, 'controlplane')
if instance_fit_index is None:
assert instance_picked is None, reason
else:
assert instance_picked == instances[instance_fit_index], reason
@pytest.mark.parametrize(
'instances,instance_fit_index,reason',
[
(Is([(0, 100)]), 0, "One idle instance, pick it"),
(Is([(1, 100)]), None, "One un-idle instance, pick nothing"),
(Is([(0, 100), (0, 200), (1, 500), (0, 700)]), 3, "Pick the largest idle instance"),
(Is([(0, 100), (0, 200), (1, 10000), (0, 700), (0, 699)]), 3, "Pick the largest idle instance"),
(Is([(0, 0)]), None, "One idle but down instance, don't pick it"),
],
)
def test_find_largest_idle_instance(self, instances, instance_fit_index, reason):
def filter_offline_instances(*args):
return filter(lambda i: i.capacity > 0, instances)
InstanceGroup(id=10)
instances_online_only = filter_offline_instances(instances)
tm_igs = TaskManagerInstanceGroups(instance_groups={'controlplane': {'instances': instances_online_only}})
if instance_fit_index is None:
assert tm_igs.find_largest_idle_instance('controlplane') is None, reason
else:
assert tm_igs.find_largest_idle_instance('controlplane') == instances[instance_fit_index], reason
def test_cleanup_params_defaults():
inst = Instance(hostname='foobar')
assert inst.get_cleanup_task_kwargs(exclude_strings=['awx_423_']) == {'exclude_strings': ['awx_423_'], 'file_pattern': '/tmp/awx_*_*', 'grace_period': 60}
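
The two selection policies exercised in the tests above can be stated in a few lines: fit the task onto the instance with the most remaining capacity, or, as a fallback, pick the largest online instance that is running nothing. A sketch under assumed attribute names (remaining_capacity, jobs_running, capacity), not the scheduler's actual code:

from collections import namedtuple

def fit_task_to_most_remaining_capacity_instance(task_impact, instances):
    # Among instances with room for the task, prefer the one with the most headroom; first wins ties.
    best = None
    for inst in instances:
        if inst.remaining_capacity >= task_impact:
            if best is None or inst.remaining_capacity > best.remaining_capacity:
                best = inst
    return best  # None means the task fits nowhere

def find_largest_idle_instance(instances):
    # Pick the biggest online instance (capacity > 0) with no running jobs.
    idle = [i for i in instances if i.jobs_running == 0 and i.capacity > 0]
    return max(idle, key=lambda i: i.capacity, default=None)

Node = namedtuple('Node', 'hostname capacity remaining_capacity jobs_running')
nodes = [Node('n1', 100, 50, 1), Node('n2', 200, 200, 0)]
print(fit_task_to_most_remaining_capacity_instance(100, nodes).hostname)  # n2
print(find_largest_idle_instance(nodes).hostname)                         # n2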

View File

@@ -36,14 +36,15 @@ def job(mocker, hosts, inventory):
def test_start_job_fact_cache(hosts, job, inventory, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
for host in hosts:
filepath = os.path.join(fact_cache, host.name)
assert os.path.exists(filepath)
with open(filepath, 'r') as f:
assert f.read() == json.dumps(host.ansible_facts)
assert os.path.getmtime(filepath) <= last_modified
assert filepath in modified_times
def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
@@ -57,16 +58,18 @@ def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
)
fact_cache = os.path.join(tmpdir, 'facts')
job.start_job_fact_cache(fact_cache, timeout=0)
job.start_job_fact_cache(fact_cache, {}, 0)
# a file called "foo" should _not_ be written outside the facts dir
assert os.listdir(os.path.join(fact_cache, '..')) == ['facts']
def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
for h in hosts:
h.save = mocker.Mock()
ansible_facts_new = {"foo": "bar"}
filepath = os.path.join(fact_cache, hosts[1].name)
@@ -80,20 +83,23 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
new_modification_time = time.time() + 3600
os.utime(filepath, (new_modification_time, new_modification_time))
job.finish_job_fact_cache(fact_cache, last_modified)
job.finish_job_fact_cache(fact_cache, modified_times)
for host in (hosts[0], hosts[2], hosts[3]):
host.save.assert_not_called()
assert host.ansible_facts == {"a": 1, "b": 2}
assert host.ansible_facts_modified is None
assert hosts[1].ansible_facts == ansible_facts_new
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
hosts[1].save.assert_called_once_with(update_fields=['ansible_facts', 'ansible_facts_modified'])
def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
for h in hosts:
h.save = mocker.Mock()
for h in hosts:
filepath = os.path.join(fact_cache, h.name)
@@ -103,22 +109,26 @@ def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpd
new_modification_time = time.time() + 3600
os.utime(filepath, (new_modification_time, new_modification_time))
job.finish_job_fact_cache(fact_cache, last_modified)
job.finish_job_fact_cache(fact_cache, modified_times)
bulk_update.assert_not_called()
for h in hosts:
h.save.assert_not_called()
def test_finish_job_fact_cache_clear(job, hosts, inventory, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
for h in hosts:
h.save = mocker.Mock()
os.remove(os.path.join(fact_cache, hosts[1].name))
job.finish_job_fact_cache(fact_cache, last_modified)
job.finish_job_fact_cache(fact_cache, modified_times)
for host in (hosts[0], hosts[2], hosts[3]):
host.save.assert_not_called()
assert host.ansible_facts == {"a": 1, "b": 2}
assert host.ansible_facts_modified is None
assert hosts[1].ansible_facts == {}
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
hosts[1].save.assert_called_once_with()
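
All of the fact-cache tests above rely on one mechanism: before the run, each host's facts are written into a cache directory and the files' modification times are recorded; after the run, only files whose mtime changed (or that disappeared) cause the corresponding host to be updated. A small round-trip sketch with stand-in data instead of AWX models:

import json
import os
import tempfile
import time

def start_fact_cache(facts_by_host, destination, modified_times):
    os.makedirs(destination, exist_ok=True)
    for host, facts in facts_by_host.items():
        path = os.path.join(destination, host)
        with open(path, 'w') as f:
            json.dump(facts, f)
        modified_times[path] = os.path.getmtime(path)

def finish_fact_cache(facts_by_host, destination, modified_times):
    for host in list(facts_by_host):
        path = os.path.join(destination, host)
        if not os.path.exists(path):
            facts_by_host[host] = {}  # file removed during the run: clear cached facts
        elif os.path.getmtime(path) > modified_times.get(path, 0):
            with open(path) as f:
                facts_by_host[host] = json.load(f)  # file touched by the run: keep new facts

facts = {'host1': {'a': 1}, 'host2': {'b': 2}}
cache = tempfile.mkdtemp()
times = {}
start_fact_cache(facts, cache, times)
path = os.path.join(cache, 'host1')
with open(path, 'w') as f:
    json.dump({'a': 99}, f)               # simulate ansible rewriting facts during the run
future = time.time() + 10
os.utime(path, (future, future))          # force a newer mtime, as the tests above do
finish_fact_cache(facts, cache, times)
print(facts['host1'])  # {'a': 99}
print(facts['host2'])  # {'b': 2}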

View File

@@ -1,6 +1,6 @@
import pytest
from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
class FakeMeta(object):
@@ -16,64 +16,38 @@ class FakeObject(object):
class Job(FakeObject):
def __init__(self, **kwargs):
self.task_impact = kwargs.get('task_impact', 43)
self.is_container_group_task = kwargs.get('is_container_group_task', False)
self.controller_node = kwargs.get('controller_node', '')
self.execution_node = kwargs.get('execution_node', '')
self.instance_group = kwargs.get('instance_group', None)
self.instance_group_id = self.instance_group.id if self.instance_group else None
self.capacity_type = kwargs.get('capacity_type', 'execution')
task_impact = 43
is_container_group_task = False
controller_node = ''
execution_node = ''
def log_format(self):
return 'job 382 (fake)'
class Instances(FakeObject):
def add(self, *args):
for instance in args:
self.obj.instance_list.append(instance)
def all(self):
return self.obj.instance_list
class InstanceGroup(FakeObject):
def __init__(self, **kwargs):
super(InstanceGroup, self).__init__(**kwargs)
self.instance_list = []
self.pk = self.id = kwargs.get('id', 1)
@property
def instances(self):
mgr = Instances(obj=self)
return mgr
@property
def is_container_group(self):
return False
@property
def max_concurrent_jobs(self):
return 0
@property
def max_forks(self):
return 0
class Instance(FakeObject):
def __init__(self, **kwargs):
self.node_type = kwargs.get('node_type', 'hybrid')
self.capacity = kwargs.get('capacity', 0)
self.hostname = kwargs.get('hostname', 'fakehostname')
self.consumed_capacity = 0
self.jobs_running = 0
@pytest.fixture
def sample_cluster():
def stand_up_cluster():
class Instances(FakeObject):
def add(self, *args):
for instance in args:
self.obj.instance_list.append(instance)
def all(self):
return self.obj.instance_list
class InstanceGroup(FakeObject):
def __init__(self, **kwargs):
super(InstanceGroup, self).__init__(**kwargs)
self.instance_list = []
@property
def instances(self):
mgr = Instances(obj=self)
return mgr
class Instance(FakeObject):
pass
ig_small = InstanceGroup(name='ig_small')
ig_large = InstanceGroup(name='ig_large')
@@ -92,12 +66,14 @@ def sample_cluster():
@pytest.fixture
def create_ig_manager():
def _rf(ig_list, tasks):
tm_models = TaskManagerModels.init_with_consumed_capacity(
tasks=tasks,
instances=set(inst for ig in ig_list for inst in ig.instance_list),
instance_groups=ig_list,
)
return tm_models.instance_groups
instances = TaskManagerInstances(tasks, instances=set(inst for ig in ig_list for inst in ig.instance_list))
seed_igs = {}
for ig in ig_list:
seed_igs[ig.name] = {'instances': [instances[inst.hostname] for inst in ig.instance_list]}
instance_groups = TaskManagerInstanceGroups(instance_groups=seed_igs)
return instance_groups
return _rf
@@ -150,75 +126,3 @@ def test_RBAC_reduced_filter(sample_cluster, create_ig_manager):
# Cross-links between groups are not visible to the current user,
# so a naive accounting of capacities is returned instead
assert instance_groups_mgr.get_consumed_capacity('default') == 43
def Is(param):
"""
param:
[remaining_capacity1, remaining_capacity2, remaining_capacity3, ...]
[(jobs_running1, capacity1), (jobs_running2, capacity2), (jobs_running3, capacity3), ...]
"""
instances = []
if isinstance(param[0], tuple):
for index, (jobs_running, capacity) in enumerate(param):
inst = Instance(capacity=capacity, node_type='execution', hostname=f'fakehost-{index}')
inst.jobs_running = jobs_running
instances.append(inst)
else:
for index, capacity in enumerate(param):
inst = Instance(capacity=capacity, node_type='execution', hostname=f'fakehost-{index}')
inst.node_type = 'execution'
instances.append(inst)
return instances
class TestSelectBestInstanceForTask(object):
@pytest.mark.parametrize(
'task,instances,instance_fit_index,reason',
[
(Job(task_impact=100), Is([100]), 0, "Only one, pick it"),
(Job(task_impact=100), Is([100, 100]), 0, "Two equally good fits, pick the first"),
(Job(task_impact=100), Is([50, 100]), 1, "First instance not as good as second instance"),
(Job(task_impact=100), Is([50, 0, 20, 100, 100, 100, 30, 20]), 3, "Pick Instance [3] as it is the first that the task fits in."),
(Job(task_impact=100), Is([50, 0, 20, 99, 11, 1, 5, 99]), None, "The task don't a fit, you must a quit!"),
],
)
def test_fit_task_to_most_remaining_capacity_instance(self, task, instances, instance_fit_index, reason):
ig = InstanceGroup(id=10, name='controlplane')
tasks = []
for instance in instances:
ig.instances.add(instance)
for _ in range(instance.jobs_running):
tasks.append(Job(execution_node=instance.hostname, controller_node=instance.hostname, instance_group=ig))
tm_models = TaskManagerModels.init_with_consumed_capacity(tasks=tasks, instances=instances, instance_groups=[ig])
instance_picked = tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(task, 'controlplane')
if instance_fit_index is None:
assert instance_picked is None, reason
else:
assert instance_picked.hostname == instances[instance_fit_index].hostname, reason
@pytest.mark.parametrize(
'instances,instance_fit_index,reason',
[
(Is([(0, 100)]), 0, "One idle instance, pick it"),
(Is([(1, 100)]), None, "One un-idle instance, pick nothing"),
(Is([(0, 100), (0, 200), (1, 500), (0, 700)]), 3, "Pick the largest idle instance"),
(Is([(0, 100), (0, 200), (1, 10000), (0, 700), (0, 699)]), 3, "Pick the largest idle instance"),
(Is([(0, 0)]), None, "One idle but down instance, don't pick it"),
],
)
def test_find_largest_idle_instance(self, instances, instance_fit_index, reason):
ig = InstanceGroup(id=10, name='controlplane')
tasks = []
for instance in instances:
ig.instances.add(instance)
for _ in range(instance.jobs_running):
tasks.append(Job(execution_node=instance.hostname, controller_node=instance.hostname, instance_group=ig))
tm_models = TaskManagerModels.init_with_consumed_capacity(tasks=tasks, instances=instances, instance_groups=[ig])
if instance_fit_index is None:
assert tm_models.instance_groups.find_largest_idle_instance('controlplane') is None, reason
else:
assert tm_models.instance_groups.find_largest_idle_instance('controlplane').hostname == instances[instance_fit_index].hostname, reason

View File

@@ -11,12 +11,11 @@ import os
import subprocess
import re
import stat
import sys
import urllib.parse
import threading
import contextlib
import tempfile
import functools
from functools import reduce, wraps
# Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
@@ -74,7 +73,6 @@ __all__ = [
'NullablePromptPseudoField',
'model_instance_diff',
'parse_yaml_or_json',
'is_testing',
'RequireDebugTrueOrTest',
'has_model_field_prefetched',
'set_environ',
@@ -90,7 +88,6 @@ __all__ = [
'deepmerge',
'get_event_partition_epoch',
'cleanup_new_process',
'log_excess_runtime',
]
@@ -147,19 +144,6 @@ def underscore_to_camelcase(s):
return ''.join(x.capitalize() or '_' for x in s.split('_'))
@functools.cache
def is_testing(argv=None):
'''Return True if running django or py.test unit tests.'''
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
return True
elif len(argv) >= 2 and argv[1] == 'test':
return True
return False
class RequireDebugTrueOrTest(logging.Filter):
"""
Logging filter to output when in DEBUG mode or running tests.
@@ -168,7 +152,7 @@ class RequireDebugTrueOrTest(logging.Filter):
def filter(self, record):
from django.conf import settings
return settings.DEBUG or is_testing()
return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError):
@@ -190,7 +174,7 @@ def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
cache = cache or get_memoize_cache()
def memoize_decorator(f):
@functools.wraps(f)
@wraps(f)
def _memoizer(*args, **kwargs):
if track_function:
cache_dict_key = slugify('%r %r' % (args, kwargs))
@@ -1008,7 +992,7 @@ def getattrd(obj, name, default=NoDefaultProvided):
"""
try:
return functools.reduce(getattr, name.split("."), obj)
return reduce(getattr, name.split("."), obj)
except AttributeError:
if default != NoDefaultProvided:
return default
@@ -1204,7 +1188,7 @@ def cleanup_new_process(func):
Cleanup django connection, cache connection, before executing new thread or processes entry point, func.
"""
@functools.wraps(func)
@wraps(func)
def wrapper_cleanup_new_process(*args, **kwargs):
from awx.conf.settings import SettingsWrapper # noqa
@@ -1216,30 +1200,15 @@ def cleanup_new_process(func):
return wrapper_cleanup_new_process
def log_excess_runtime(func_logger, cutoff=5.0, debug_cutoff=5.0, msg=None, add_log_data=False):
def log_excess_runtime(func_logger, cutoff=5.0):
def log_excess_runtime_decorator(func):
@functools.wraps(func)
@wraps(func)
def _new_func(*args, **kwargs):
start_time = time.time()
log_data = {'name': repr(func.__name__)}
if add_log_data:
return_value = func(*args, log_data=log_data, **kwargs)
else:
return_value = func(*args, **kwargs)
log_data['delta'] = time.time() - start_time
if isinstance(return_value, dict):
log_data.update(return_value)
if msg is None:
record_msg = 'Running {name} took {delta:.2f}s'
else:
record_msg = msg
if log_data['delta'] > cutoff:
func_logger.info(record_msg.format(**log_data))
elif log_data['delta'] > debug_cutoff:
func_logger.debug(record_msg.format(**log_data))
return_value = func(*args, **kwargs)
delta = time.time() - start_time
if delta > cutoff:
logger.info(f'Running {func.__name__!r} took {delta:.2f}s')
return return_value
return _new_func
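
Either version of the decorator above follows the same pattern: wrap the function, time it, and only emit a log record when the elapsed time crosses a cutoff. A minimal standalone variant with a usage example (illustrative, not the awx.main.utils signature):

import functools
import logging
import time

logger = logging.getLogger(__name__)

def log_excess_runtime(func_logger, cutoff=5.0):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            delta = time.time() - start
            if delta > cutoff:
                func_logger.info('Running %r took %.2fs', func.__name__, delta)
            return result
        return wrapper
    return decorator

@log_excess_runtime(logger, cutoff=0.1)
def slow_step():
    time.sleep(0.2)  # long enough to trip the 0.1s cutoff

logging.basicConfig(level=logging.INFO)
slow_step()  # logs: Running 'slow_step' took 0.20s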

View File

@@ -10,6 +10,28 @@ import socket
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def is_testing(argv=None):
'''Return True if running django or py.test unit tests.'''
import sys
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
return True
elif len(argv) >= 2 and argv[1] == 'test':
return True
return False
def IS_TESTING(argv=None):
return is_testing(argv)
if "pytest" in sys.modules:
from unittest import mock
@@ -18,13 +40,9 @@ if "pytest" in sys.modules:
else:
import ldap
DEBUG = True
SQL_DEBUG = DEBUG
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# FIXME: it would be nice to cycle back around and allow this to be
# BigAutoField going forward, but we'd have to be explicit about our
# existing models.
@@ -304,13 +322,11 @@ INSTALLED_APPS = [
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
# daphne has to be installed before django.contrib.staticfiles for the app to start up
# According to the channels 4.0 docs, you install daphne instead of channels now
'daphne',
'django.contrib.staticfiles',
'oauth2_provider',
'rest_framework',
'django_extensions',
'channels',
'polymorphic',
'taggit',
'social_django',
@@ -985,13 +1001,6 @@ DJANGO_GUID = {'GUID_HEADER_NAME': 'X-API-Request-Id'}
DEFAULT_EXECUTION_QUEUE_NAME = 'default'
# pod spec used when the default execution queue is a container group, e.g. when deploying on k8s/ocp with the operator
DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE = ''
# Max number of concurrently consumed forks for the default execution queue
# Zero means no limit
DEFAULT_EXECUTION_QUEUE_MAX_FORKS = 0
# Max number of concurrently running jobs for the default execution queue
# Zero means no limit
DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS = 0
# Name of the default controlplane queue
DEFAULT_CONTROL_PLANE_QUEUE_NAME = 'controlplane'

View File

@@ -1,49 +0,0 @@
import React from 'react';
import { arrayOf, bool, number, shape, string } from 'prop-types';
import { Label, LabelGroup } from '@patternfly/react-core';
import { Link } from 'react-router-dom';
function InstanceGroupLabels({ labels, isLinkable }) {
const buildLinkURL = (isContainerGroup) =>
isContainerGroup
? '/instance_groups/container_group/'
: '/instance_groups/';
return (
<LabelGroup numLabels={5}>
{labels.map(({ id, name, is_container_group }) =>
isLinkable ? (
<Label
color="blue"
key={id}
render={({ className, content, componentRef }) => (
<Link
className={className}
innerRef={componentRef}
to={`${buildLinkURL(is_container_group)}${id}/details`}
>
{content}
</Link>
)}
>
{name}
</Label>
) : (
<Label color="blue" key={id}>
{name}
</Label>
)
)}
</LabelGroup>
);
}
InstanceGroupLabels.propTypes = {
labels: arrayOf(shape({ id: number.isRequired, name: string.isRequired }))
.isRequired,
isLinkable: bool,
};
InstanceGroupLabels.defaultProps = { isLinkable: false };
export default InstanceGroupLabels;

View File

@@ -1 +0,0 @@
export { default } from './InstanceGroupLabels';

View File

@@ -6,7 +6,6 @@ import { Link } from 'react-router-dom';
import styled from 'styled-components';
import { Chip, Divider, Title } from '@patternfly/react-core';
import { toTitleCase } from 'util/strings';
import InstanceGroupLabels from 'components/InstanceGroupLabels';
import CredentialChip from '../CredentialChip';
import ChipGroup from '../ChipGroup';
import { DetailList, Detail, UserDateDetail } from '../DetailList';
@@ -228,7 +227,21 @@ function PromptDetail({
label={t`Instance Groups`}
rows={4}
value={
<InstanceGroupLabels labels={overrides.instance_groups} />
<ChipGroup
numChips={5}
totalChips={overrides.instance_groups.length}
ouiaId="prompt-instance-groups-chips"
>
{overrides.instance_groups.map((instance_group) => (
<Chip
key={instance_group.id}
ouiaId={`instance-group-${instance_group.id}-chip`}
isReadOnly
>
{instance_group.name}
</Chip>
))}
</ChipGroup>
}
/>
)}

View File

@@ -10,7 +10,6 @@ import useRequest, { useDismissableError } from 'hooks/useRequest';
import { JobTemplatesAPI, SchedulesAPI, WorkflowJobTemplatesAPI } from 'api';
import { parseVariableField, jsonToYaml } from 'util/yaml';
import { useConfig } from 'contexts/Config';
import InstanceGroupLabels from 'components/InstanceGroupLabels';
import parseRuleObj from '../shared/parseRuleObj';
import FrequencyDetails from './FrequencyDetails';
import AlertModal from '../../AlertModal';
@@ -28,6 +27,11 @@ import { VariablesDetail } from '../../CodeEditor';
import { VERBOSITY } from '../../VerbositySelectField';
import getHelpText from '../../../screens/Template/shared/JobTemplate.helptext';
const buildLinkURL = (instance) =>
instance.is_container_group
? '/instance_groups/container_group/'
: '/instance_groups/';
const PromptDivider = styled(Divider)`
margin-top: var(--pf-global--spacer--lg);
margin-bottom: var(--pf-global--spacer--lg);
@@ -494,7 +498,26 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
fullWidth
label={t`Instance Groups`}
value={
<InstanceGroupLabels labels={instanceGroups} isLinkable />
<ChipGroup
numChips={5}
totalChips={instanceGroups.length}
ouiaId="instance-group-chips"
>
{instanceGroups.map((ig) => (
<Link
to={`${buildLinkURL(ig)}${ig.id}/details`}
key={ig.id}
>
<Chip
key={ig.id}
ouiaId={`instance-group-${ig.id}-chip`}
isReadOnly
>
{ig.name}
</Chip>
</Link>
))}
</ChipGroup>
}
isEmpty={instanceGroups.length === 0}
/>

View File

@@ -24,10 +24,12 @@ function WorkflowOutputNavigation({ relatedJobs, parentRef }) {
const { id } = useParams();
const relevantResults = relatedJobs.filter(
({ job: jobId, summary_fields }) =>
jobId &&
`${jobId}` !== id &&
summary_fields.job.type !== 'workflow_approval'
({
job: jobId,
summary_fields: {
unified_job_template: { unified_job_type },
},
}) => jobId && `${jobId}` !== id && unified_job_type !== 'workflow_approval'
);
const [isOpen, setIsOpen] = useState(false);
@@ -99,14 +101,16 @@ function WorkflowOutputNavigation({ relatedJobs, parentRef }) {
{sortedJobs?.map((node) => (
<SelectOption
key={node.id}
to={`/jobs/${JOB_URL_SEGMENT_MAP[node.summary_fields.job.type]}/${
node.summary_fields.job?.id
}/output`}
to={`/jobs/${
JOB_URL_SEGMENT_MAP[
node.summary_fields.unified_job_template.unified_job_type
]
}/${node.summary_fields.job?.id}/output`}
component={Link}
value={node.summary_fields.job.name}
value={node.summary_fields.unified_job_template.name}
>
{stringIsUUID(node.identifier)
? node.summary_fields.job.name
? node.summary_fields.unified_job_template.name
: node.identifier}
</SelectOption>
))}

View File

@@ -1,85 +0,0 @@
import React from 'react';
import { within, render, screen, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import WorkflowOutputNavigation from './WorkflowOutputNavigation';
import { createMemoryHistory } from 'history';
import { I18nProvider } from '@lingui/react';
import { i18n } from '@lingui/core';
import { en } from 'make-plural/plurals';
import english from '../../../src/locales/en/messages';
import { Router } from 'react-router-dom';
jest.mock('react-router-dom', () => ({
...jest.requireActual('react-router-dom'),
useParams: () => ({
id: 1,
}),
}));
const jobs = [
{
id: 1,
summary_fields: {
job: {
name: 'Ansible',
type: 'project_update',
id: 1,
status: 'successful',
},
},
job: 4,
},
{
id: 2,
summary_fields: {
job: {
name: 'Durham',
type: 'job',
id: 2,
status: 'successful',
},
},
job: 3,
},
{
id: 3,
summary_fields: {
job: {
name: 'Red hat',
type: 'job',
id: 3,
status: 'successful',
},
},
job: 2,
},
];
describe('<WorkflowOutputNavigation/>', () => {
test('Should open the dropdown and list related workflow jobs', async () => {
i18n.loadLocaleData({ en: { plurals: en } });
i18n.load({ en: english });
i18n.activate('en');
const user = userEvent.setup();
const ref = jest
.spyOn(React, 'useRef')
.mockReturnValueOnce({ current: 'div' });
const history = createMemoryHistory({
initialEntries: ['jobs/playbook/2/output'],
});
render(
<I18nProvider i18n={i18n}>
<Router history={history}>
<WorkflowOutputNavigation relatedJobs={jobs} parentRef={ref} />
</Router>
</I18nProvider>
);
const button = screen.getByRole('button');
await user.click(button);
await waitFor(() => screen.getByText('Workflow Nodes'));
await waitFor(() => screen.getByText('Red hat'));
await waitFor(() => screen.getByText('Durham'));
await waitFor(() => screen.getByText('Ansible'));
});
});

18 file diffs suppressed because they are too large.

View File

@@ -465,7 +465,7 @@
},
"created": "2020-05-18T21:53:35.370730Z",
"modified": "2020-05-18T21:54:05.436400Z",
"name": "CyberArk AIM Central Credential Provider Lookup",
"name": "CyberArk Central Credential Provider Lookup",
"description": "",
"kind": "external",
"namespace": "aim",

View File

@@ -29,10 +29,6 @@ function ContainerGroupAdd() {
try {
const { data: response } = await InstanceGroupsAPI.create({
name: values.name,
max_forks: values.max_forks ? values.max_forks : 0,
max_concurrent_jobs: values.max_concurrent_jobs
? values.max_concurrent_jobs
: 0,
credential: values?.credential?.id,
pod_spec_override: values.override
? getPodSpecValue(values.pod_spec_override)

View File

@@ -33,8 +33,6 @@ const initialPodSpec = {
const instanceGroupCreateData = {
name: 'Fuz',
credential: { id: 71, name: 'CG' },
max_concurrent_jobs: 0,
max_forks: 0,
pod_spec_override:
'apiVersion: v1\nkind: Pod\nmetadata:\n namespace: default\nspec:\n containers:\n - image: ansible/ansible-runner\n tty: true\n stdin: true\n imagePullPolicy: Always\n args:\n - sleep\n - infinity\n - test',
};

View File

@@ -9,12 +9,7 @@ import AlertModal from 'components/AlertModal';
import ErrorDetail from 'components/ErrorDetail';
import { CardBody, CardActionsRow } from 'components/Card';
import DeleteButton from 'components/DeleteButton';
import {
Detail,
DetailList,
UserDateDetail,
DetailBadge,
} from 'components/DetailList';
import { Detail, DetailList, UserDateDetail } from 'components/DetailList';
import useRequest, { useDismissableError } from 'hooks/useRequest';
import { jsonToYaml, isJsonString } from 'util/yaml';
import { InstanceGroupsAPI } from 'api';
@@ -52,20 +47,6 @@ function ContainerGroupDetails({ instanceGroup }) {
value={t`Container group`}
dataCy="container-group-type"
/>
<DetailBadge
label={t`Max concurrent jobs`}
dataCy="instance-group-max-concurrent-jobs"
helpText={t`Maximum number of jobs to run concurrently on this group.
Zero means no limit will be enforced.`}
content={instanceGroup.max_concurrent_jobs}
/>
<DetailBadge
label={t`Max forks`}
dataCy="instance-group-max-forks"
helpText={t`Maximum number of forks to allow across all jobs running concurrently on this group.
Zero means no limit will be enforced.`}
content={instanceGroup.max_forks}
/>
{instanceGroup.summary_fields.credential && (
<Detail
label={t`Credential`}

View File

@@ -23,8 +23,6 @@ const instanceGroup = {
created: '2020-09-03T18:26:47.113934Z',
modified: '2020-09-03T19:34:23.244694Z',
capacity: 0,
max_concurrent_jobs: 0,
max_forks: 0,
committed_capacity: 0,
consumed_capacity: 0,
percent_capacity_remaining: 0.0,

View File

@@ -39,10 +39,6 @@ function ContainerGroupEdit({ instanceGroup }) {
name: values.name,
credential: values.credential ? values.credential.id : null,
pod_spec_override: values.override ? values.pod_spec_override : null,
max_forks: values.max_forks ? values.max_forks : 0,
max_concurrent_jobs: values.max_concurrent_jobs
? values.max_concurrent_jobs
: 0,
is_container_group: true,
});
history.push(detailsIUrl);

View File

@@ -34,8 +34,6 @@ const instanceGroup = {
policy_instance_percentage: 0,
policy_instance_minimum: 0,
policy_instance_list: [],
max_concurrent_jobs: 0,
max_forks: 0,
pod_spec_override: '',
summary_fields: {
credential: {
@@ -146,8 +144,6 @@ describe('<ContainerGroupEdit/>', () => {
...updatedInstanceGroup,
credential: 12,
pod_spec_override: null,
max_concurrent_jobs: 0,
max_forks: 0,
is_container_group: true,
});
expect(history.location.pathname).toEqual(

View File

@@ -42,8 +42,6 @@ const instanceGroup = {
credential: null,
policy_instance_percentage: 100,
policy_instance_minimum: 0,
max_concurrent_jobs: 0,
max_forks: 0,
policy_instance_list: ['receptor-1', 'receptor-2'],
pod_spec_override: '',
summary_fields: {

View File

@@ -73,20 +73,6 @@ function InstanceGroupDetails({ instanceGroup }) {
dataCy="instance-group-policy-instance-percentage"
content={`${instanceGroup.policy_instance_percentage} %`}
/>
<DetailBadge
label={t`Max concurrent jobs`}
dataCy="instance-group-max-concurrent-jobs"
helpText={t`Maximum number of jobs to run concurrently on this group.
Zero means no limit will be enforced.`}
content={instanceGroup.max_concurrent_jobs}
/>
<DetailBadge
label={t`Max forks`}
dataCy="instance-group-max-forks"
helpText={t`Maximum number of forks to allow across all jobs running concurrently on this group.
Zero means no limit will be enforced.`}
content={instanceGroup.max_forks}
/>
{instanceGroup.capacity ? (
<DetailBadge
label={t`Used capacity`}

View File

@@ -19,8 +19,6 @@ const instanceGroups = [
policy_instance_minimum: 10,
policy_instance_percentage: 50,
percent_capacity_remaining: 60,
max_concurrent_jobs: 0,
max_forks: 0,
is_container_group: false,
created: '2020-07-21T18:41:02.818081Z',
modified: '2020-07-24T20:32:03.121079Z',
@@ -40,8 +38,6 @@ const instanceGroups = [
policy_instance_minimum: 0,
policy_instance_percentage: 0,
percent_capacity_remaining: 0,
max_concurrent_jobs: 0,
max_forks: 0,
is_container_group: true,
created: '2020-07-21T18:41:02.818081Z',
modified: '2020-07-24T20:32:03.121079Z',

View File

@@ -11,7 +11,7 @@ import FormField, {
CheckboxField,
} from 'components/FormField';
import FormActionGroup from 'components/FormActionGroup';
import { required, minMaxValue } from 'util/validators';
import { required } from 'util/validators';
import {
FormColumnLayout,
FormFullWidthLayout,
@@ -57,26 +57,6 @@ function ContainerGroupFormFields({ instanceGroup }) {
tooltip={t`Credential to authenticate with Kubernetes or OpenShift. Must be of type "Kubernetes/OpenShift API Bearer Token". If left blank, the underlying Pod's service account will be used.`}
autoPopulate={!instanceGroup?.id}
/>
<FormField
id="instance-group-max-concurrent-jobs"
label={t`Max concurrent jobs`}
name="max_concurrent_jobs"
type="number"
min="0"
validate={minMaxValue(0, 2147483647)}
tooltip={t`Maximum number of jobs to run concurrently on this group.
Zero means no limit will be enforced.`}
/>
<FormField
id="instance-group-max-forks"
label={t`Max forks`}
name="max_forks"
type="number"
min="0"
validate={minMaxValue(0, 2147483647)}
tooltip={t`Maximum number of forks to allow across all jobs running concurrently on this group.
Zero means no limit will be enforced.`}
/>
<FormGroup fieldId="container-groups-option-checkbox" label={t`Options`}>
<FormCheckboxLayout>
@@ -117,8 +97,6 @@ function ContainerGroupForm({
const initialValues = {
name: instanceGroup?.name || '',
max_concurrent_jobs: instanceGroup.max_concurrent_jobs || 0,
max_forks: instanceGroup.max_forks || 0,
credential: instanceGroup?.summary_fields?.credential,
pod_spec_override: isCheckboxChecked
? instanceGroup?.pod_spec_override

View File

@@ -42,26 +42,6 @@ function InstanceGroupFormFields() {
assigned to this group when new instances come online.`}
validate={minMaxValue(0, 100)}
/>
<FormField
id="instance-group-max-concurrent-jobs"
label={t`Max concurrent jobs`}
name="max_concurrent_jobs"
type="number"
min="0"
validate={minMaxValue(0, 2147483647)}
tooltip={t`Maximum number of jobs to run concurrently on this group.
Zero means no limit will be enforced.`}
/>
<FormField
id="instance-group-max-forks"
label={t`Max forks`}
name="max_forks"
type="number"
min="0"
validate={minMaxValue(0, 2147483647)}
tooltip={t`Maximum number of forks to allow across all jobs running concurrently on this group.
Zero means no limit will be enforced.`}
/>
</>
);
}
@@ -77,8 +57,6 @@ function InstanceGroupForm({
name: instanceGroup.name || '',
policy_instance_minimum: instanceGroup.policy_instance_minimum || 0,
policy_instance_percentage: instanceGroup.policy_instance_percentage || 0,
max_concurrent_jobs: instanceGroup.max_concurrent_jobs || 0,
max_forks: instanceGroup.max_forks || 0,
};
return (
<Formik

View File

@@ -1,6 +1,6 @@
import React, { useCallback, useEffect, useState } from 'react';
import { useHistory, useParams } from 'react-router-dom';
import { Link, useHistory, useParams } from 'react-router-dom';
import { t, Plural } from '@lingui/macro';
import {
Button,
@@ -11,6 +11,7 @@ import {
CodeBlockCode,
Tooltip,
Slider,
Label,
} from '@patternfly/react-core';
import { DownloadIcon, OutlinedClockIcon } from '@patternfly/react-icons';
import styled from 'styled-components';
@@ -33,7 +34,6 @@ import useRequest, {
useDismissableError,
} from 'hooks/useRequest';
import HealthCheckAlert from 'components/HealthCheckAlert';
import InstanceGroupLabels from 'components/InstanceGroupLabels';
import RemoveInstanceButton from '../Shared/RemoveInstanceButton';
const Unavailable = styled.span`
@@ -156,6 +156,11 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
</>
);
const buildLinkURL = (inst) =>
inst.is_container_group
? '/instance_groups/container_group/'
: '/instance_groups/';
const { error, dismissError } = useDismissableError(
updateInstanceError || healthCheckError
);
@@ -220,9 +225,25 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
label={t`Instance Groups`}
dataCy="instance-groups"
helpText={t`The Instance Groups to which this instance belongs.`}
value={
<InstanceGroupLabels labels={instanceGroups} isLinkable />
}
value={instanceGroups.map((ig) => (
<React.Fragment key={ig.id}>
<Label
color="blue"
isTruncated
render={({ className, content, componentRef }) => (
<Link
to={`${buildLinkURL(ig)}${ig.id}/details`}
className={className}
innerRef={componentRef}
>
{content}
</Link>
)}
>
{ig.name}
</Label>{' '}
</React.Fragment>
))}
isEmpty={instanceGroups.length === 0}
/>
)}

View File

@@ -23,7 +23,6 @@ import { InventoriesAPI } from 'api';
import useRequest, { useDismissableError } from 'hooks/useRequest';
import { Inventory } from 'types';
import { relatedResourceDeleteRequests } from 'util/getRelatedResourceDeleteDetails';
import InstanceGroupLabels from 'components/InstanceGroupLabels';
import getHelpText from '../shared/Inventory.helptext';
function InventoryDetail({ inventory }) {
@@ -106,7 +105,23 @@ function InventoryDetail({ inventory }) {
<Detail
fullWidth
label={t`Instance Groups`}
value={<InstanceGroupLabels labels={instanceGroups} isLinkable />}
value={
<ChipGroup
numChips={5}
totalChips={instanceGroups?.length}
ouiaId="instance-group-chips"
>
{instanceGroups?.map((ig) => (
<Chip
key={ig.id}
isReadOnly
ouiaId={`instance-group-${ig.id}-chip`}
>
{ig.name}
</Chip>
))}
</ChipGroup>
}
isEmpty={instanceGroups.length === 0}
/>
)}

View File

@@ -131,8 +131,9 @@ describe('<InventoryDetail />', () => {
expect(InventoriesAPI.readInstanceGroups).toHaveBeenCalledWith(
mockInventory.id
);
const label = wrapper.find('Label').at(0);
expect(label.prop('children')).toEqual('Foo');
const chip = wrapper.find('Chip').at(0);
expect(chip.prop('isReadOnly')).toEqual(true);
expect(chip.prop('children')).toEqual('Foo');
});
test('should not load instance groups', async () => {

View File

@@ -2,7 +2,7 @@ import React, { useCallback, useEffect } from 'react';
import { Link, useHistory } from 'react-router-dom';
import { t } from '@lingui/macro';
import { Button, Label } from '@patternfly/react-core';
import { Button, Chip, Label } from '@patternfly/react-core';
import { Inventory } from 'types';
import { InventoriesAPI, UnifiedJobsAPI } from 'api';
@@ -10,6 +10,7 @@ import useRequest, { useDismissableError } from 'hooks/useRequest';
import AlertModal from 'components/AlertModal';
import { CardBody, CardActionsRow } from 'components/Card';
import ChipGroup from 'components/ChipGroup';
import { VariablesDetail } from 'components/CodeEditor';
import ContentError from 'components/ContentError';
import ContentLoading from 'components/ContentLoading';
@@ -17,7 +18,6 @@ import DeleteButton from 'components/DeleteButton';
import { DetailList, Detail, UserDateDetail } from 'components/DetailList';
import ErrorDetail from 'components/ErrorDetail';
import Sparkline from 'components/Sparkline';
import InstanceGroupLabels from 'components/InstanceGroupLabels';
function SmartInventoryDetail({ inventory }) {
const history = useHistory();
@@ -120,7 +120,23 @@ function SmartInventoryDetail({ inventory }) {
<Detail
fullWidth
label={t`Instance groups`}
value={<InstanceGroupLabels labels={instanceGroups} />}
value={
<ChipGroup
numChips={5}
totalChips={instanceGroups.length}
ouiaId="instance-group-chips"
>
{instanceGroups.map((ig) => (
<Chip
key={ig.id}
isReadOnly
ouiaId={`instance-group-${ig.id}-chip`}
>
{ig.name}
</Chip>
))}
</ChipGroup>
}
isEmpty={instanceGroups.length === 0}
/>
<VariablesDetail

View File

@@ -70,6 +70,7 @@ const getStdOutValue = (hostEvent) => {
function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
const [hostStatus, setHostStatus] = useState(null);
const [activeTabKey, setActiveTabKey] = useState(0);
useEffect(() => {
setHostStatus(processEventStatus(hostEvent));
}, [setHostStatus, hostEvent]);
@@ -107,11 +108,11 @@ function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
style={{ alignItems: 'center', marginTop: '20px' }}
gutter="sm"
>
<Detail label={t`Host`} value={hostEvent.event_data?.host} />
{hostEvent.summary_fields?.host?.description ? (
<Detail label={t`Host`} value={hostEvent.host_name} />
{hostEvent.summary_fields.host?.description ? (
<Detail
label={t`Description`}
value={hostEvent.summary_fields?.host?.description}
value={hostEvent.summary_fields.host.description}
/>
) : null}
{hostStatus ? (
@@ -124,9 +125,12 @@ function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
<Detail label={t`Task`} value={hostEvent.task} />
<Detail
label={t`Module`}
value={hostEvent.event_data?.task_action || t`No result found`}
value={hostEvent.event_data.task_action || t`No result found`}
/>
<Detail
label={t`Command`}
value={hostEvent?.event_data?.res?.cmd}
/>
<Detail label={t`Command`} value={hostEvent.event_data?.res?.cmd} />
</DetailList>
</Tab>
<Tab

View File

@@ -52,47 +52,6 @@ const hostEvent = {
},
};
const partialHostEvent = {
changed: true,
event: 'runner_on_ok',
event_data: {
host: 'foo',
play: 'all',
playbook: 'run_command.yml',
res: {
ansible_loop_var: 'item',
changed: true,
item: '1',
msg: 'This is a debug message: 1',
stdout:
' total used free shared buff/cache available\nMem: 7973 3005 960 30 4007 4582\nSwap: 1023 0 1023',
stderr: 'problems',
cmd: ['free', '-m'],
stderr_lines: [],
stdout_lines: [
' total used free shared buff/cache available',
'Mem: 7973 3005 960 30 4007 4582',
'Swap: 1023 0 1023',
],
},
task: 'command',
task_action: 'command',
},
event_display: 'Host OK',
event_level: 3,
failed: false,
host: 1,
id: 123,
job: 4,
play: 'all',
playbook: 'run_command.yml',
stdout: `stdout: "changed: [localhost] => {"changed": true, "cmd": ["free", "-m"], "delta": "0:00:01.479609", "end": "2019-09-10 14:21:45.469533", "rc": 0, "start": "2019-09-10 14:21:43.989924", "stderr": "", "stderr_lines": [], "stdout": " total used free shared buff/cache available\nMem: 7973 3005 960 30 4007 4582\nSwap: 1023 0 1023", "stdout_lines": [" total used free shared buff/cache available", "Mem: 7973 3005 960 30 4007 4582", "Swap: 1023 0 1023"]}"
`,
task: 'command',
type: 'job_event',
url: '/api/v2/job_events/123/',
};
/*
Some libraries return a list of strings in stdout
Example: https://github.com/ansible-collections/cisco.ios/blob/main/plugins/modules/ios_command.py#L124-L128
@@ -175,13 +134,6 @@ describe('HostEventModal', () => {
expect(wrapper).toHaveLength(1);
});
test('renders successfully with partial data', () => {
const wrapper = shallow(
<HostEventModal hostEvent={partialHostEvent} onClose={() => {}} />
);
expect(wrapper).toHaveLength(1);
});
test('should render all tabs', () => {
const wrapper = shallow(
<HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen />

View File

@@ -4,7 +4,6 @@ import { getJobModel } from 'util/jobs';
export default function useWsJob(initialJob) {
const [job, setJob] = useState(initialJob);
const [pendingMessages, setPendingMessages] = useState([]);
const lastMessage = useWebsocket({
jobs: ['status_changed'],
control: ['limit_reached_1'],
@@ -14,48 +13,30 @@ export default function useWsJob(initialJob) {
setJob(initialJob);
}, [initialJob]);
const processMessage = (message) => {
if (message.unified_job_id !== job.id) {
return;
}
if (
['successful', 'failed', 'error', 'cancelled'].includes(message.status)
) {
fetchJob();
}
setJob(updateJob(job, message));
};
async function fetchJob() {
const { data } = await getJobModel(job.type).readDetail(job.id);
setJob(data);
}
useEffect(
() => {
if (!lastMessage) {
async function fetchJob() {
const { data } = await getJobModel(job.type).readDetail(job.id);
setJob(data);
}
if (!job || lastMessage?.unified_job_id !== job.id) {
return;
}
if (job) {
processMessage(lastMessage);
} else if (lastMessage.unified_job_id) {
setPendingMessages(pendingMessages.concat(lastMessage));
if (
['successful', 'failed', 'error', 'cancelled'].includes(
lastMessage.status
)
) {
fetchJob();
} else {
setJob(updateJob(job, lastMessage));
}
},
[lastMessage] // eslint-disable-line react-hooks/exhaustive-deps
);
useEffect(() => {
if (!job || !pendingMessages.length) {
return;
}
pendingMessages.forEach((message) => {
processMessage(message);
});
setPendingMessages([]);
}, [job, pendingMessages]); // eslint-disable-line react-hooks/exhaustive-deps
return job;
}

View File

@@ -2,7 +2,7 @@ import React, { useEffect, useState, useCallback } from 'react';
import { Link, useHistory, useRouteMatch } from 'react-router-dom';
import { t } from '@lingui/macro';
import { Button } from '@patternfly/react-core';
import { Button, Chip } from '@patternfly/react-core';
import { OrganizationsAPI } from 'api';
import { DetailList, Detail, UserDateDetail } from 'components/DetailList';
import { CardBody, CardActionsRow } from 'components/Card';
@@ -16,7 +16,6 @@ import ErrorDetail from 'components/ErrorDetail';
import useRequest, { useDismissableError } from 'hooks/useRequest';
import { useConfig } from 'contexts/Config';
import ExecutionEnvironmentDetail from 'components/ExecutionEnvironmentDetail';
import InstanceGroupLabels from 'components/InstanceGroupLabels';
import { relatedResourceDeleteRequests } from 'util/getRelatedResourceDeleteDetails';
function OrganizationDetail({ organization }) {
@@ -80,6 +79,11 @@ function OrganizationDetail({ organization }) {
return <ContentError error={contentError} />;
}
const buildLinkURL = (instance) =>
instance.is_container_group
? '/instance_groups/container_group/'
: '/instance_groups/';
return (
<CardBody>
<DetailList>
@@ -122,7 +126,25 @@ function OrganizationDetail({ organization }) {
fullWidth
label={t`Instance Groups`}
helpText={t`The Instance Groups for this Organization to run on.`}
value={<InstanceGroupLabels labels={instanceGroups} isLinkable />}
value={
<ChipGroup
numChips={5}
totalChips={instanceGroups.length}
ouiaId="instance-group-chips"
>
{instanceGroups.map((ig) => (
<Link to={`${buildLinkURL(ig)}${ig.id}/details`} key={ig.id}>
<Chip
key={ig.id}
isReadOnly
ouiaId={`instance-group-${ig.id}-chip`}
>
{ig.name}
</Chip>
</Link>
))}
</ChipGroup>
}
isEmpty={instanceGroups.length === 0}
/>
)}

Some files were not shown because too many files have changed in this diff.