Merge branch 'devel' into collection-existential-state-for-credential-module
.gitignore (vendored)
@@ -161,3 +161,6 @@ use_dev_supervisor.txt
/_build/
/_build_kube_dev/
/Dockerfile.kube-dev

awx/ui_next/src
awx/ui_next/build
MANIFEST.in

@@ -6,6 +6,7 @@ recursive-include awx/templates *.html
recursive-include awx/api/templates *.md *.html *.yml
recursive-include awx/ui/build *.html
recursive-include awx/ui/build *
recursive-include awx/ui_next/build *
recursive-include awx/playbooks *.yml
recursive-include awx/lib/site-packages *
recursive-include awx/plugins *.ps1
Makefile
@@ -1,3 +1,5 @@
-include awx/ui_next/Makefile

PYTHON ?= python3.9
DOCKER_COMPOSE ?= docker-compose
OFFICIAL ?= no

@@ -84,7 +86,7 @@ clean-schema:

clean-languages:
    rm -f $(I18N_FLAG_FILE)
    find ./awx/locale/ -type f -regex ".*\.mo$" -delete
    find ./awx/locale/ -type f -regex '.*\.mo$$' -delete

## Remove temporary build files, compiled Python files.
clean: clean-ui clean-api clean-awxkit clean-dist
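The clean-languages change doubles the dollar sign because, in a Makefile recipe, `$` is Make's own expansion sigil: only `$$` survives as a literal `$` for the shell, so the previous single-`$` form lost its end-of-string anchor before `find -regex` ever saw it. As an illustrative sanity check of the pattern the shell ultimately receives (this snippet is an editor-added sketch, not part of the diff):

# Quick check of the pattern that `find -regex` receives once Make's
# `$$` escaping has collapsed to a literal `$`.
import re

pattern = re.compile(r".*\.mo$")

assert pattern.match("awx/locale/fr/LC_MESSAGES/django.mo")
assert not pattern.match("awx/locale/fr/LC_MESSAGES/django.po")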
@@ -215,12 +217,6 @@ daphne:
    fi; \
    daphne -b 127.0.0.1 -p 8051 awx.asgi:channel_layer

wsbroadcast:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    $(PYTHON) manage.py run_wsbroadcast

## Run to start the background task dispatcher for development.
dispatcher:
    @if [ "$(VENV_BASE)" ]; then \
@@ -228,7 +224,6 @@ dispatcher:
    fi; \
    $(PYTHON) manage.py run_dispatcher

## Run to start the zeromq callback receiver
receiver:
    @if [ "$(VENV_BASE)" ]; then \
@@ -245,6 +240,34 @@ jupyter:
    fi; \
    $(MANAGEMENT_COMMAND) shell_plus --notebook

## Start the rsyslog configurer process in background in development environment.
run-rsyslog-configurer:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    $(PYTHON) manage.py run_rsyslog_configurer

## Start cache_clear process in background in development environment.
run-cache-clear:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    $(PYTHON) manage.py run_cache_clear

## Start the wsrelay process in background in development environment.
run-wsrelay:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    $(PYTHON) manage.py run_wsrelay

## Start the heartbeat process in background in development environment.
run-heartbeet:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    $(PYTHON) manage.py run_heartbeet

reports:
    mkdir -p $@

@@ -418,7 +441,7 @@ ui-devel: awx/ui/node_modules
            cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css; \
            cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js; \
            cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media; \
        fi
    fi

ui-devel-instrumented: awx/ui/node_modules
    $(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
@@ -445,11 +468,12 @@ ui-test-general:
    $(NPM_BIN) run --prefix awx/ui pretest
    $(NPM_BIN) run --prefix awx/ui/ test-general --runInBand

# NOTE: The make target ui-next is imported from awx/ui_next/Makefile
HEADLESS ?= no
ifeq ($(HEADLESS), yes)
dist/$(SDIST_TAR_FILE):
else
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE)
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE) ui-next
endif
    $(PYTHON) -m build -s
    ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz
@@ -497,8 +521,6 @@ docker-compose-sources: .git/hooks/pre-commit
        -e enable_prometheus=$(PROMETHEUS) \
        -e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)

docker-compose: awx/projects docker-compose-sources
    $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans

@@ -539,7 +561,7 @@ docker-compose-build:

docker-clean:
    -$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
    -$(foreach image_id,$(shell docker images --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
    -$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)

docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
    docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
@@ -654,3 +676,7 @@ help/generate:
    } \
    { lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
    @printf "\n"

## Display help for ui-next targets
help/ui-next:
    @make -s help MAKEFILE_LIST="awx/ui_next/Makefile"
awx/api/serializers.py

@@ -56,6 +56,8 @@ from awx.main.models import (
    ExecutionEnvironment,
    Group,
    Host,
    HostMetric,
    HostMetricSummaryMonthly,
    Instance,
    InstanceGroup,
    InstanceLink,
@@ -156,6 +158,7 @@ SUMMARIZABLE_FK_FIELDS = {
        'kind',
    ),
    'host': DEFAULT_SUMMARY_FIELDS,
    'constructed_host': DEFAULT_SUMMARY_FIELDS,
    'group': DEFAULT_SUMMARY_FIELDS,
    'default_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
    'execution_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
@@ -189,6 +192,11 @@ SUMMARIZABLE_FK_FIELDS = {
}


# These fields can be edited on a constructed inventory's generated source (possibly by using the constructed
# inventory's special API endpoint, but also by using the inventory sources endpoint).
CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS = ('source_vars', 'update_cache_timeout', 'limit', 'verbosity')


def reverse_gfk(content_object, request):
    """
    Computes a reverse for a GenericForeignKey field.
@@ -1670,13 +1678,8 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
        res.update(
            dict(
                hosts=self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
                groups=self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
                root_groups=self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
                variable_data=self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
                script=self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
                tree=self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
                inventory_sources=self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
                update_inventory_sources=self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
                activity_stream=self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
                job_templates=self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
                ad_hoc_commands=self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
@@ -1687,8 +1690,18 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
                labels=self.reverse('api:inventory_label_list', kwargs={'pk': obj.pk}),
            )
        )
        if obj.kind in ('', 'constructed'):
            # links not relevant for the "old" smart inventory
            res['groups'] = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk})
            res['root_groups'] = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk})
            res['update_inventory_sources'] = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk})
            res['inventory_sources'] = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk})
            res['tree'] = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk})
        if obj.organization:
            res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
        if obj.kind == 'constructed':
            res['input_inventories'] = self.reverse('api:inventory_input_inventories', kwargs={'pk': obj.pk})
            res['constructed_url'] = self.reverse('api:constructed_inventory_detail', kwargs={'pk': obj.pk})
        return res

    def to_representation(self, obj):
@@ -1730,6 +1743,91 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
        return super(InventorySerializer, self).validate(attrs)

class ConstructedFieldMixin(serializers.Field):
    def get_attribute(self, instance):
        if not hasattr(instance, '_constructed_inv_src'):
            instance._constructed_inv_src = instance.inventory_sources.first()
        inv_src = instance._constructed_inv_src
        return super().get_attribute(inv_src)  # yoink


class ConstructedCharField(ConstructedFieldMixin, serializers.CharField):
    pass


class ConstructedIntegerField(ConstructedFieldMixin, serializers.IntegerField):
    pass


class ConstructedInventorySerializer(InventorySerializer):
    source_vars = ConstructedCharField(
        required=False,
        default=None,
        allow_blank=True,
        help_text=_('The source_vars for the related auto-created inventory source, special to constructed inventory.'),
    )
    update_cache_timeout = ConstructedIntegerField(
        required=False,
        allow_null=True,
        min_value=0,
        default=None,
        help_text=_('The cache timeout for the related auto-created inventory source, special to constructed inventory'),
    )
    limit = ConstructedCharField(
        required=False,
        default=None,
        allow_blank=True,
        help_text=_('The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory.'),
    )
    verbosity = ConstructedIntegerField(
        required=False,
        allow_null=True,
        min_value=0,
        max_value=2,
        default=None,
        help_text=_('The verbosity level for the related auto-created inventory source, special to constructed inventory'),
    )

    class Meta:
        model = Inventory
        fields = ('*', '-host_filter') + CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
        read_only_fields = ('*', 'kind')

    def pop_inv_src_data(self, data):
        inv_src_data = {}
        for field in CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS:
            if field in data:
                # values always need to be removed, as they are not valid for Inventory model
                value = data.pop(field)
                # null is not valid for any of those fields, taken as not-provided
                if value is not None:
                    inv_src_data[field] = value
        return inv_src_data

    def apply_inv_src_data(self, inventory, inv_src_data):
        if inv_src_data:
            update_fields = []
            inv_src = inventory.inventory_sources.first()
            for field, value in inv_src_data.items():
                setattr(inv_src, field, value)
                update_fields.append(field)
            if update_fields:
                inv_src.save(update_fields=update_fields)

    def create(self, validated_data):
        validated_data['kind'] = 'constructed'
        inv_src_data = self.pop_inv_src_data(validated_data)
        inventory = super().create(validated_data)
        self.apply_inv_src_data(inventory, inv_src_data)
        return inventory

    def update(self, obj, validated_data):
        inv_src_data = self.pop_inv_src_data(validated_data)
        obj = super().update(obj, validated_data)
        self.apply_inv_src_data(obj, inv_src_data)
        return obj


class InventoryScriptSerializer(InventorySerializer):
    class Meta:
        fields = ()
@@ -1783,6 +1881,9 @@ class HostSerializer(BaseSerializerWithVariables):
                ansible_facts=self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
            )
        )
        if obj.inventory.kind == 'constructed':
            res['original_host'] = self.reverse('api:host_detail', kwargs={'pk': obj.instance_id})
            res['ansible_facts'] = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id})
        if obj.inventory:
            res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
        if obj.last_job:
@@ -1804,6 +1905,10 @@ class HostSerializer(BaseSerializerWithVariables):
        group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
        group_cnt = obj.groups.count()
        d.setdefault('groups', {'count': group_cnt, 'results': group_list})
        if obj.inventory.kind == 'constructed':
            summaries_qs = obj.constructed_host_summaries
        else:
            summaries_qs = obj.job_host_summaries
        d.setdefault(
            'recent_jobs',
            [
@@ -1814,7 +1919,7 @@ class HostSerializer(BaseSerializerWithVariables):
                    'status': j.job.status,
                    'finished': j.job.finished,
                }
                for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
                for j in summaries_qs.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
            ],
        )
        return d
@@ -1839,8 +1944,8 @@ class HostSerializer(BaseSerializerWithVariables):
        return value

    def validate_inventory(self, value):
        if value.kind == 'smart':
            raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
        if value.kind in ('constructed', 'smart'):
            raise serializers.ValidationError({"detail": _("Cannot create Host for Smart or Constructed Inventories")})
        return value

    def validate_variables(self, value):
@@ -1938,8 +2043,8 @@ class GroupSerializer(BaseSerializerWithVariables):
        return value

    def validate_inventory(self, value):
        if value.kind == 'smart':
            raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
        if value.kind in ('constructed', 'smart'):
            raise serializers.ValidationError({"detail": _("Cannot create Group for Smart or Constructed Inventories")})
        return value

    def to_representation(self, obj):
@@ -2138,6 +2243,7 @@ class InventorySourceOptionsSerializer(BaseSerializer):
            'custom_virtualenv',
            'timeout',
            'verbosity',
            'limit',
        )
        read_only_fields = ('*', 'custom_virtualenv')

@@ -2244,8 +2350,8 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
        return value

    def validate_inventory(self, value):
        if value and value.kind == 'smart':
            raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
        if value and value.kind in ('constructed', 'smart'):
            raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart or Constructed Inventories")})
        return value

    # TODO: remove when old 'credential' fields are removed
@@ -2289,9 +2395,16 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
        def get_field_from_model_or_attrs(fd):
            return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)

        if get_field_from_model_or_attrs('source') == 'scm':
        if self.instance and self.instance.source == 'constructed':
            allowed_fields = CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
            for field in attrs:
                if attrs[field] != getattr(self.instance, field) and field not in allowed_fields:
                    raise serializers.ValidationError({"error": _("Cannot change field '{}' on a constructed inventory source.").format(field)})
        elif get_field_from_model_or_attrs('source') == 'scm':
            if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
                raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
        elif get_field_from_model_or_attrs('source') == 'constructed':
            raise serializers.ValidationError({"error": _('constructed not a valid source for inventory')})
        else:
            redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'scm_branch']))
            if redundant_scm_fields:
@@ -4033,6 +4146,7 @@ class JobHostSummarySerializer(BaseSerializer):
            '-description',
            'job',
            'host',
            'constructed_host',
            'host_name',
            'changed',
            'dark',
@@ -5386,6 +5500,32 @@ class InstanceHealthCheckSerializer(BaseSerializer):
        fields = read_only_fields


class HostMetricSerializer(BaseSerializer):
    show_capabilities = ['delete']

    class Meta:
        model = HostMetric
        fields = (
            "id",
            "hostname",
            "url",
            "first_automation",
            "last_automation",
            "last_deleted",
            "automated_counter",
            "deleted_counter",
            "deleted",
            "used_in_inventories",
        )


class HostMetricSummaryMonthlySerializer(BaseSerializer):
    class Meta:
        model = HostMetricSummaryMonthly
        read_only_fields = ("id", "date", "license_consumed", "license_capacity", "hosts_added", "hosts_deleted", "indirectly_managed_hosts")
        fields = read_only_fields


class InstanceGroupSerializer(BaseSerializer):
    show_capabilities = ['edit', 'delete']
    capacity = serializers.SerializerMethodField()
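The heart of the new serializer is ConstructedFieldMixin.get_attribute, which redirects a field to read its value from the inventory's auto-created inventory source instead of the inventory itself, memoizing the lookup on the instance so the four proxy fields share a single query. A framework-free sketch of that delegation idea follows; the class and attribute names here are illustrative stand-ins, not AWX's API:

# Minimal sketch of the delegation behind ConstructedFieldMixin: a field
# reads from a related object and memoizes that object on the instance so
# several fields share one lookup.
class DelegatedField:
    def __init__(self, name):
        self.name = name

    def get_attribute(self, instance):
        # Cache the related object on first access (mirrors _constructed_inv_src).
        if not hasattr(instance, "_cached_source"):
            instance._cached_source = instance.fetch_source()
        return getattr(instance._cached_source, self.name)


class FakeSource:
    def __init__(self, limit, verbosity):
        self.limit = limit
        self.verbosity = verbosity


class FakeInventory:
    def __init__(self, source):
        self._source = source
        self.lookups = 0

    def fetch_source(self):
        self.lookups += 1
        return self._source


inv = FakeInventory(FakeSource(limit="new_hosts", verbosity=1))
fields = [DelegatedField("limit"), DelegatedField("verbosity")]
values = {f.name: f.get_attribute(inv) for f in fields}
assert values == {"limit": "new_hosts", "verbosity": 1}
assert inv.lookups == 1  # both fields shared one related-object fetch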
awx/api/templates/api/host_metric_detail.md (new file)
@@ -0,0 +1,18 @@
{% ifmeth GET %}
# Retrieve {{ model_verbose_name|title|anora }}:

Make GET request to this resource to retrieve a single {{ model_verbose_name }}
record containing the following fields:

{% include "api/_result_fields_common.md" %}
{% endifmeth %}

{% ifmeth DELETE %}
# Delete {{ model_verbose_name|title|anora }}:

Make a DELETE request to this resource to soft-delete this {{ model_verbose_name }}.

A soft deletion will mark the `deleted` field as true and exclude the host
metric from license calculations.
This may be undone later if the same hostname is automated again afterwards.
{% endifmeth %}
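The template above documents soft deletion: DELETE flips the `deleted` flag instead of removing the row, so the hostname's history survives and the record can be revived if the same host is automated again. A rough sketch of that reversible-delete idea follows; it is a hypothetical stand-in, not AWX's actual HostMetric model:

# Hypothetical stand-in showing the reversible soft-delete described above.
from dataclasses import dataclass


@dataclass
class HostMetricRecord:
    hostname: str
    deleted: bool = False
    deleted_counter: int = 0

    def soft_delete(self):
        # Mark as deleted instead of removing the row, so history survives
        # and the metric drops out of license calculations.
        if not self.deleted:
            self.deleted = True
            self.deleted_counter += 1

    def automate(self):
        # A later automation of the same hostname revives the record.
        self.deleted = False


m = HostMetricRecord("db01.example.org")
m.soft_delete()
assert m.deleted and m.deleted_counter == 1
m.automate()
assert not m.deleted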
@@ -2,6 +2,7 @@ receptor_user: awx
receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_mintls13: false
receptor_work_commands:
  ansible-runner:
    command: ansible-runner
awx/api/urls/analytics.py (new file)
@@ -0,0 +1,31 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

import awx.api.views.analytics as analytics


urls = [
    re_path(r'^$', analytics.AnalyticsRootView.as_view(), name='analytics_root_view'),
    re_path(r'^authorized/$', analytics.AnalyticsAuthorizedView.as_view(), name='analytics_authorized'),
    re_path(r'^reports/$', analytics.AnalyticsReportsList.as_view(), name='analytics_reports_list'),
    re_path(r'^report/(?P<slug>[\w-]+)/$', analytics.AnalyticsReportDetail.as_view(), name='analytics_report_detail'),
    re_path(r'^report_options/$', analytics.AnalyticsReportOptionsList.as_view(), name='analytics_report_options_list'),
    re_path(r'^adoption_rate/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate'),
    re_path(r'^adoption_rate_options/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate_options'),
    re_path(r'^event_explorer/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer'),
    re_path(r'^event_explorer_options/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer_options'),
    re_path(r'^host_explorer/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer'),
    re_path(r'^host_explorer_options/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer_options'),
    re_path(r'^job_explorer/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer'),
    re_path(r'^job_explorer_options/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer_options'),
    re_path(r'^probe_templates/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_explorer'),
    re_path(r'^probe_templates_options/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_options'),
    re_path(r'^probe_template_for_hosts/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_explorer'),
    re_path(r'^probe_template_for_hosts_options/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_options'),
    re_path(r'^roi_templates/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_explorer'),
    re_path(r'^roi_templates_options/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_options'),
]

__all__ = ['urls']
awx/api/urls/host_metric.py (new file)
@@ -0,0 +1,10 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

from awx.api.views import HostMetricList, HostMetricDetail

urls = [re_path(r'^$', HostMetricList.as_view(), name='host_metric_list'), re_path(r'^(?P<pk>[0-9]+)/$', HostMetricDetail.as_view(), name='host_metric_detail')]

__all__ = ['urls']
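Both routes rely on Django's re_path regex captures: the named group in the detail pattern is what feeds the view its pk keyword argument. The capture itself is ordinary Python regex behavior, as this illustrative check (outside Django) shows:

# The named group in r'^(?P<pk>[0-9]+)/$' is plain Python regex syntax;
# Django passes the captured 'pk' to the view as a keyword argument.
import re

detail = re.compile(r"^(?P<pk>[0-9]+)/$")
match = detail.match("42/")
assert match and match.group("pk") == "42"
assert detail.match("forty-two/") is None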
awx/api/urls/inventory.py

@@ -6,7 +6,10 @@ from django.urls import re_path
from awx.api.views.inventory import (
    InventoryList,
    InventoryDetail,
    ConstructedInventoryDetail,
    ConstructedInventoryList,
    InventoryActivityStreamList,
    InventoryInputInventoriesList,
    InventoryJobTemplateList,
    InventoryAccessList,
    InventoryObjectRolesList,
@@ -37,6 +40,7 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/script/$', InventoryScriptView.as_view(), name='inventory_script_view'),
    re_path(r'^(?P<pk>[0-9]+)/tree/$', InventoryTreeView.as_view(), name='inventory_tree_view'),
    re_path(r'^(?P<pk>[0-9]+)/inventory_sources/$', InventoryInventorySourcesList.as_view(), name='inventory_inventory_sources_list'),
    re_path(r'^(?P<pk>[0-9]+)/input_inventories/$', InventoryInputInventoriesList.as_view(), name='inventory_input_inventories'),
    re_path(r'^(?P<pk>[0-9]+)/update_inventory_sources/$', InventoryInventorySourcesUpdate.as_view(), name='inventory_inventory_sources_update'),
    re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', InventoryActivityStreamList.as_view(), name='inventory_activity_stream_list'),
    re_path(r'^(?P<pk>[0-9]+)/job_templates/$', InventoryJobTemplateList.as_view(), name='inventory_job_template_list'),
@@ -48,4 +52,10 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/copy/$', InventoryCopy.as_view(), name='inventory_copy'),
]

__all__ = ['urls']
# Constructed inventory special views
constructed_inventory_urls = [
    re_path(r'^$', ConstructedInventoryList.as_view(), name='constructed_inventory_list'),
    re_path(r'^(?P<pk>[0-9]+)/$', ConstructedInventoryDetail.as_view(), name='constructed_inventory_detail'),
]

__all__ = ['urls', 'constructed_inventory_urls']
awx/api/urls/urls.py

@@ -30,6 +30,7 @@ from awx.api.views import (
    OAuth2TokenList,
    ApplicationOAuth2TokenList,
    OAuth2ApplicationDetail,
    # HostMetricSummaryMonthlyList, # It will be enabled in future version of the AWX
)

from awx.api.views.bulk import (
@@ -41,15 +42,17 @@ from awx.api.views.bulk import (
from awx.api.views.mesh_visualizer import MeshVisualizer

from awx.api.views.metrics import MetricsView
from awx.api.views.analytics import AWX_ANALYTICS_API_PREFIX

from .organization import urls as organization_urls
from .user import urls as user_urls
from .project import urls as project_urls
from .project_update import urls as project_update_urls
from .inventory import urls as inventory_urls
from .inventory import urls as inventory_urls, constructed_inventory_urls
from .execution_environments import urls as execution_environment_urls
from .team import urls as team_urls
from .host import urls as host_urls
from .host_metric import urls as host_metric_urls
from .group import urls as group_urls
from .inventory_source import urls as inventory_source_urls
from .inventory_update import urls as inventory_update_urls
@@ -80,7 +83,7 @@ from .oauth2 import urls as oauth2_urls
from .oauth2_root import urls as oauth2_root_urls
from .workflow_approval_template import urls as workflow_approval_template_urls
from .workflow_approval import urls as workflow_approval_urls

from .analytics import urls as analytics_urls

v2_urls = [
    re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
@@ -117,7 +120,11 @@ v2_urls = [
    re_path(r'^project_updates/', include(project_update_urls)),
    re_path(r'^teams/', include(team_urls)),
    re_path(r'^inventories/', include(inventory_urls)),
    re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
    re_path(r'^hosts/', include(host_urls)),
    re_path(r'^host_metrics/', include(host_metric_urls)),
    # It will be enabled in future version of the AWX
    # re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
    re_path(r'^groups/', include(group_urls)),
    re_path(r'^inventory_sources/', include(inventory_source_urls)),
    re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -141,6 +148,7 @@ v2_urls = [
    re_path(r'^unified_job_templates/$', UnifiedJobTemplateList.as_view(), name='unified_job_template_list'),
    re_path(r'^unified_jobs/$', UnifiedJobList.as_view(), name='unified_job_list'),
    re_path(r'^activity_stream/', include(activity_stream_urls)),
    re_path(rf'^{AWX_ANALYTICS_API_PREFIX}/', include(analytics_urls)),
    re_path(r'^workflow_approval_templates/', include(workflow_approval_template_urls)),
    re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
    re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
awx/api/views/__init__.py

@@ -17,7 +17,6 @@ from collections import OrderedDict

from urllib3.exceptions import ConnectTimeoutError

# Django
from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist
@@ -30,7 +29,7 @@ from django.utils.safestring import mark_safe
from django.utils.timezone import now
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import gettext_lazy as _

@@ -1548,6 +1547,41 @@ class HostRelatedSearchMixin(object):
        return ret


class HostMetricList(ListAPIView):
    name = _("Host Metrics List")
    model = models.HostMetric
    serializer_class = serializers.HostMetricSerializer
    permission_classes = (IsSystemAdminOrAuditor,)
    search_fields = ('hostname', 'deleted')

    def get_queryset(self):
        return self.model.objects.all()


class HostMetricDetail(RetrieveDestroyAPIView):
    name = _("Host Metric Detail")
    model = models.HostMetric
    serializer_class = serializers.HostMetricSerializer
    permission_classes = (IsSystemAdminOrAuditor,)

    def delete(self, request, *args, **kwargs):
        self.get_object().soft_delete()

        return Response(status=status.HTTP_204_NO_CONTENT)


# It will be enabled in future version of the AWX
# class HostMetricSummaryMonthlyList(ListAPIView):
#     name = _("Host Metrics Summary Monthly")
#     model = models.HostMetricSummaryMonthly
#     serializer_class = serializers.HostMetricSummaryMonthlySerializer
#     permission_classes = (IsSystemAdminOrAuditor,)
#     search_fields = ('date',)
#
#     def get_queryset(self):
#         return self.model.objects.all()


class HostList(HostRelatedSearchMixin, ListCreateAPIView):
    always_allow_superuser = False
    model = models.Host
@@ -1576,6 +1610,8 @@ class HostDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
    def delete(self, request, *args, **kwargs):
        if self.get_object().inventory.pending_deletion:
            return Response({"error": _("The inventory for this host is already being deleted.")}, status=status.HTTP_400_BAD_REQUEST)
        if self.get_object().inventory.kind == 'constructed':
            return Response({"error": _("Delete constructed inventory hosts from input inventory.")}, status=status.HTTP_400_BAD_REQUEST)
        return super(HostDetail, self).delete(request, *args, **kwargs)


@@ -1583,6 +1619,14 @@ class HostAnsibleFactsDetail(RetrieveAPIView):
    model = models.Host
    serializer_class = serializers.AnsibleFactsSerializer

    def get(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.inventory.kind == 'constructed':
            # If this is a constructed inventory host, it is not the source of truth about facts
            # redirect to the original input inventory host instead
            return HttpResponseRedirect(reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id}, request=self.request))
        return super().get(request, *args, **kwargs)


class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
    model = models.Host
awx/api/views/analytics.py (new file)
@@ -0,0 +1,297 @@
import requests
import logging
import urllib.parse as urlparse

from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.utils import translation

from awx.api.generics import APIView, Response
from awx.api.permissions import IsSystemAdminOrAuditor
from awx.api.versioning import reverse
from awx.main.utils import get_awx_version
from rest_framework.permissions import AllowAny
from rest_framework import status

from collections import OrderedDict

AUTOMATION_ANALYTICS_API_URL_PATH = "/api/tower-analytics/v1"
AWX_ANALYTICS_API_PREFIX = 'analytics'

ERROR_UPLOAD_NOT_ENABLED = "analytics-upload-not-enabled"
ERROR_MISSING_URL = "missing-url"
ERROR_MISSING_USER = "missing-user"
ERROR_MISSING_PASSWORD = "missing-password"
ERROR_NO_DATA_OR_ENTITLEMENT = "no-data-or-entitlement"
ERROR_NOT_FOUND = "not-found"
ERROR_UNAUTHORIZED = "unauthorized"
ERROR_UNKNOWN = "unknown"
ERROR_UNSUPPORTED_METHOD = "unsupported-method"

logger = logging.getLogger('awx.api.views.analytics')


class MissingSettings(Exception):
    """Settings are not correct Exception"""

    pass


class GetNotAllowedMixin(object):
    def get(self, request, format=None):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)


class AnalyticsRootView(APIView):
    permission_classes = (AllowAny,)
    name = _('Automation Analytics')
    swagger_topic = 'Automation Analytics'

    def get(self, request, format=None):
        data = OrderedDict()
        data['authorized'] = reverse('api:analytics_authorized')
        data['reports'] = reverse('api:analytics_reports_list')
        data['report_options'] = reverse('api:analytics_report_options_list')
        data['adoption_rate'] = reverse('api:analytics_adoption_rate')
        data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options')
        data['event_explorer'] = reverse('api:analytics_event_explorer')
        data['event_explorer_options'] = reverse('api:analytics_event_explorer_options')
        data['host_explorer'] = reverse('api:analytics_host_explorer')
        data['host_explorer_options'] = reverse('api:analytics_host_explorer_options')
        data['job_explorer'] = reverse('api:analytics_job_explorer')
        data['job_explorer_options'] = reverse('api:analytics_job_explorer_options')
        data['probe_templates'] = reverse('api:analytics_probe_templates_explorer')
        data['probe_templates_options'] = reverse('api:analytics_probe_templates_options')
        data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer')
        data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options')
        data['roi_templates'] = reverse('api:analytics_roi_templates_explorer')
        data['roi_templates_options'] = reverse('api:analytics_roi_templates_options')
        return Response(data)


class AnalyticsGenericView(APIView):
    """
    Example:
    headers = {
        'Content-Type': 'application/json',
    }

    params = {
        'limit': '20',
        'offset': '0',
        'sort_by': 'name:asc',
    }

    json_data = {
        'limit': '20',
        'offset': '0',
        'sort_options': 'name',
        'sort_order': 'asc',
        'tags': [],
        'slug': [],
        'name': [],
        'description': '',
    }

    response = requests.post(f'{AUTOMATION_ANALYTICS_API_URL}/reports/', params=params,
                             headers=headers, json=json_data)

    return Response(response.json(), status=response.status_code)
    """

    permission_classes = (IsSystemAdminOrAuditor,)

    @staticmethod
    def _request_headers(request):
        headers = {}
        for header in ['Content-Type', 'Content-Length', 'Accept-Encoding', 'User-Agent', 'Accept']:
            if request.headers.get(header, None):
                headers[header] = request.headers.get(header)
        headers['X-Rh-Analytics-Source'] = 'controller'
        headers['X-Rh-Analytics-Source-Version'] = get_awx_version()
        headers['Accept-Language'] = translation.get_language()

        return headers

    @staticmethod
    def _get_analytics_path(request_path):
        parts = request_path.split(f'{AWX_ANALYTICS_API_PREFIX}/')
        path_specific = parts[-1]
        return f"{AUTOMATION_ANALYTICS_API_URL_PATH}/{path_specific}"

    def _get_analytics_url(self, request_path):
        analytics_path = self._get_analytics_path(request_path)
        url = getattr(settings, 'AUTOMATION_ANALYTICS_URL', None)
        if not url:
            raise MissingSettings(ERROR_MISSING_URL)
        url_parts = urlparse.urlsplit(url)
        analytics_url = urlparse.urlunsplit([url_parts.scheme, url_parts.netloc, analytics_path, url_parts.query, url_parts.fragment])
        return analytics_url

    @staticmethod
    def _get_setting(setting_name, default, error_message):
        setting = getattr(settings, setting_name, default)
        if not setting:
            raise MissingSettings(error_message)
        return setting

    @staticmethod
    def _error_response(keyword, message=None, remote=True, remote_status_code=None, status_code=status.HTTP_403_FORBIDDEN):
        text = {"error": {"remote": remote, "remote_status": remote_status_code, "keyword": keyword}}
        if message:
            text["error"]["message"] = message
        return Response(text, status=status_code)

    def _error_response_404(self, response):
        try:
            json_response = response.json()
            # Subscription/entitlement problem or missing tenant data in AA db => HTTP 403
            message = json_response.get('error', None)
            if message:
                return self._error_response(ERROR_NO_DATA_OR_ENTITLEMENT, message, remote=True, remote_status_code=response.status_code)

            # Standard 404 problem => HTTP 404
            message = json_response.get('detail', None) or response.text
        except requests.exceptions.JSONDecodeError:
            # Unexpected text => still HTTP 404
            message = response.text

        return self._error_response(ERROR_NOT_FOUND, message, remote=True, remote_status_code=status.HTTP_404_NOT_FOUND, status_code=status.HTTP_404_NOT_FOUND)

    @staticmethod
    def _update_response_links(json_response):
        if not json_response.get('links', None):
            return

        for key, value in json_response['links'].items():
            if value:
                json_response['links'][key] = value.replace(AUTOMATION_ANALYTICS_API_URL_PATH, f"/api/v2/{AWX_ANALYTICS_API_PREFIX}")

    def _forward_response(self, response):
        try:
            content_type = response.headers.get('content-type', '')
            if content_type.find('application/json') != -1:
                json_response = response.json()
                self._update_response_links(json_response)

                return Response(json_response, status=response.status_code)
        except Exception as e:
            logger.error(f"Analytics API: Response error: {e}")

        return Response(response.content, status=response.status_code)

    def _send_to_analytics(self, request, method):
        try:
            headers = self._request_headers(request)

            self._get_setting('INSIGHTS_TRACKING_STATE', False, ERROR_UPLOAD_NOT_ENABLED)
            url = self._get_analytics_url(request.path)
            rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
            rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)

            if method not in ["GET", "POST", "OPTIONS"]:
                return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
            else:
                response = requests.request(
                    method,
                    url,
                    auth=(rh_user, rh_password),
                    verify=settings.INSIGHTS_CERT_PATH,
                    params=request.query_params,
                    headers=headers,
                    json=request.data,
                    timeout=(31, 31),
                )
                #
                # Missing or wrong user/pass
                #
                if response.status_code == status.HTTP_401_UNAUTHORIZED:
                    text = (response.text or '').rstrip("\n")
                    return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
                #
                # Not found, No entitlement or No data in Analytics
                #
                elif response.status_code == status.HTTP_404_NOT_FOUND:
                    return self._error_response_404(response)
                #
                # Success or not a 401/404 errors are just forwarded
                #
                else:
                    return self._forward_response(response)

        except MissingSettings as e:
            logger.warning(f"Analytics API: Setting missing: {e.args[0]}")
            return self._error_response(e.args[0], remote=False)
        except requests.exceptions.RequestException as e:
            logger.error(f"Analytics API: Request error: {e}")
            return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
        except Exception as e:
            logger.error(f"Analytics API: Error: {e}")
            return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)


class AnalyticsGenericListView(AnalyticsGenericView):
    def get(self, request, format=None):
        return self._send_to_analytics(request, method="GET")

    def post(self, request, format=None):
        return self._send_to_analytics(request, method="POST")

    def options(self, request, format=None):
        return self._send_to_analytics(request, method="OPTIONS")


class AnalyticsGenericDetailView(AnalyticsGenericView):
    def get(self, request, slug, format=None):
        return self._send_to_analytics(request, method="GET")

    def post(self, request, slug, format=None):
        return self._send_to_analytics(request, method="POST")

    def options(self, request, slug, format=None):
        return self._send_to_analytics(request, method="OPTIONS")


class AnalyticsAuthorizedView(AnalyticsGenericListView):
    name = _("Authorized")


class AnalyticsReportsList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Reports")
    swagger_topic = "Automation Analytics"


class AnalyticsReportDetail(AnalyticsGenericDetailView):
    name = _("Report")


class AnalyticsReportOptionsList(AnalyticsGenericListView):
    name = _("Report Options")


class AnalyticsAdoptionRateList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Adoption Rate")


class AnalyticsEventExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Event Explorer")


class AnalyticsHostExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Host Explorer")


class AnalyticsJobExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Job Explorer")


class AnalyticsProbeTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Probe Templates")


class AnalyticsProbeTemplateForHostsList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Probe Template For Hosts")


class AnalyticsRoiTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("ROI Templates")
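The proxy's URL handling is fully visible above: _get_analytics_path keeps everything after the local 'analytics/' prefix and grafts it onto AUTOMATION_ANALYTICS_API_URL_PATH, and _get_analytics_url then rebuilds a full URL against the configured host. A standalone re-creation of those two steps follows; the AUTOMATION_ANALYTICS_URL value used here is a made-up example:

# Standalone re-creation of the path/URL mapping done by
# AnalyticsGenericView._get_analytics_path / _get_analytics_url above.
import urllib.parse as urlparse

AUTOMATION_ANALYTICS_API_URL_PATH = "/api/tower-analytics/v1"
AWX_ANALYTICS_API_PREFIX = "analytics"


def get_analytics_url(request_path, base_url):
    # Keep what follows the local 'analytics/' prefix...
    path_specific = request_path.split(f"{AWX_ANALYTICS_API_PREFIX}/")[-1]
    analytics_path = f"{AUTOMATION_ANALYTICS_API_URL_PATH}/{path_specific}"
    # ...and graft it onto the configured remote host.
    parts = urlparse.urlsplit(base_url)
    return urlparse.urlunsplit([parts.scheme, parts.netloc, analytics_path, parts.query, parts.fragment])


url = get_analytics_url("/api/v2/analytics/reports/", "https://example.redhat.com")
assert url == "https://example.redhat.com/api/tower-analytics/v1/reports/"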
awx/api/views/inventory.py

@@ -14,6 +14,7 @@ from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import status
from rest_framework import serializers

# AWX
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
@@ -31,6 +32,7 @@ from awx.api.views.labels import LabelSubListCreateAttachDetachView

from awx.api.serializers import (
    InventorySerializer,
    ConstructedInventorySerializer,
    ActivityStreamSerializer,
    RoleSerializer,
    InstanceGroupSerializer,
@@ -79,7 +81,9 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie

        # Do not allow changes to an Inventory kind.
        if kind is not None and obj.kind != kind:
            return Response(dict(error=_('You cannot turn a regular inventory into a "smart" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED)
            return Response(
                dict(error=_('You cannot turn a regular inventory into a "smart" or "constructed" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED
            )
        return super(InventoryDetail, self).update(request, *args, **kwargs)

    def destroy(self, request, *args, **kwargs):
@@ -94,6 +98,29 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
            return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST)


class ConstructedInventoryDetail(InventoryDetail):
    serializer_class = ConstructedInventorySerializer


class ConstructedInventoryList(InventoryList):
    serializer_class = ConstructedInventorySerializer

    def get_queryset(self):
        r = super().get_queryset()
        return r.filter(kind='constructed')


class InventoryInputInventoriesList(SubListAttachDetachAPIView):
    model = Inventory
    serializer_class = InventorySerializer
    parent_model = Inventory
    relationship = 'input_inventories'

    def is_valid_relation(self, parent, sub, created=False):
        if sub.kind == 'constructed':
            raise serializers.ValidationError({'error': 'You cannot add a constructed inventory to another constructed inventory.'})


class InventoryActivityStreamList(SubListAPIView):
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
awx/api/views/root.py

@@ -98,10 +98,14 @@ class ApiVersionRootView(APIView):
        data['tokens'] = reverse('api:o_auth2_token_list', request=request)
        data['metrics'] = reverse('api:metrics_view', request=request)
        data['inventory'] = reverse('api:inventory_list', request=request)
        data['constructed_inventory'] = reverse('api:constructed_inventory_list', request=request)
        data['inventory_sources'] = reverse('api:inventory_source_list', request=request)
        data['inventory_updates'] = reverse('api:inventory_update_list', request=request)
        data['groups'] = reverse('api:group_list', request=request)
        data['hosts'] = reverse('api:host_list', request=request)
        data['host_metrics'] = reverse('api:host_metric_list', request=request)
        # It will be enabled in future version of the AWX
        # data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
        data['job_templates'] = reverse('api:job_template_list', request=request)
        data['jobs'] = reverse('api:job_list', request=request)
        data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
@@ -122,6 +126,7 @@ class ApiVersionRootView(APIView):
        data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
        data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
        data['bulk'] = reverse('api:bulk', request=request)
        data['analytics'] = reverse('api:analytics_root_view', request=request)
        return Response(data)


@@ -272,6 +277,9 @@ class ApiV2ConfigView(APIView):

        pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off'

        # Guarding against settings.UI_NEXT being set to a non-boolean value
        ui_next_state = settings.UI_NEXT if settings.UI_NEXT in (True, False) else False

        data = dict(
            time_zone=settings.TIME_ZONE,
            license_info=license_data,
@@ -280,6 +288,7 @@ class ApiV2ConfigView(APIView):
            analytics_status=pendo_state,
            analytics_collectors=all_collectors(),
            become_methods=PRIVILEGE_ESCALATION_METHODS,
            ui_next=ui_next_state,
        )

        # If LDAP is enabled, user_ldap_fields will return a list of field
awx/conf/settings.py

@@ -5,11 +5,13 @@ import threading
import time
import os

from concurrent.futures import ThreadPoolExecutor

# Django
from django.conf import LazySettings
from django.conf import settings, UserSettingsHolder
from django.core.cache import cache as django_cache
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
from django.db import transaction, connection
from django.db.utils import Error as DBError, ProgrammingError
from django.utils.functional import cached_property
@@ -157,7 +159,7 @@ class EncryptedCacheProxy(object):
        obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty)
        if obj_id is empty:
            logger.info('Efficiency notice: Corresponding id not stored in cache %s', Setting.get_cache_id_key(key))
            obj_id = getattr(self._get_setting_from_db(key), 'pk', None)
            obj_id = getattr(_get_setting_from_db(self.registry, key), 'pk', None)
        elif obj_id == SETTING_CACHE_NONE:
            obj_id = None
        return method(TransientSetting(pk=obj_id, value=value), 'value')
@@ -166,11 +168,6 @@ class EncryptedCacheProxy(object):
        # a no-op; it just returns the provided value
        return value

    def _get_setting_from_db(self, key):
        field = self.registry.get_setting_field(key)
        if not field.read_only:
            return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()

    def __getattr__(self, name):
        return getattr(self.cache, name)

@@ -186,6 +183,22 @@ def get_settings_to_cache(registry):
    return dict([(key, SETTING_CACHE_NOTSET) for key in get_writeable_settings(registry)])


# Will first attempt to get the setting from the database in synchronous mode.
# If call from async context, it will attempt to get the setting from the database in a thread.
def _get_setting_from_db(registry, key):
    def get_settings_from_db_sync(registry, key):
        field = registry.get_setting_field(key)
        if not field.read_only or key == 'INSTALL_UUID':
            return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()

    try:
        return get_settings_from_db_sync(registry, key)
    except SynchronousOnlyOperation:
        with ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(get_settings_from_db_sync, registry, key)
            return future.result()


def get_cache_value(value):
    """Returns the proper special cache setting for a value
    based on instance type.
@@ -345,7 +358,7 @@ class SettingsWrapper(UserSettingsHolder):
        setting_id = None
        # this value is read-only, however we *do* want to fetch its value from the database
        if not field.read_only or name == 'INSTALL_UUID':
            setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
            setting = _get_setting_from_db(self.registry, name)
            if setting:
                if getattr(field, 'encrypted', False):
                    value = decrypt_field(setting, 'value')
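The new module-level _get_setting_from_db first tries the ORM query directly and, if Django raises SynchronousOnlyOperation (meaning the caller is inside an async event loop), re-runs the same query on a worker thread where blocking I/O is allowed. A framework-free sketch of that fallback shape follows; the exception class defined here is a stand-in for django.core.exceptions.SynchronousOnlyOperation:

# Framework-free sketch of the "try sync, fall back to a thread" shape
# used by _get_setting_from_db above.
from concurrent.futures import ThreadPoolExecutor


class SynchronousOnlyOperation(Exception):
    pass


def fetch_setting_sync(key, *, simulate_async=False):
    if simulate_async:
        # Django raises this when blocking ORM access happens inside
        # an async event loop.
        raise SynchronousOnlyOperation(key)
    return f"value-of-{key}"


def fetch_setting(key, *, simulate_async=False):
    try:
        return fetch_setting_sync(key, simulate_async=simulate_async)
    except SynchronousOnlyOperation:
        # Off-load the blocking call to a worker thread and wait for it.
        with ThreadPoolExecutor(max_workers=1) as executor:
            return executor.submit(fetch_setting_sync, key).result()


assert fetch_setting("INSTALL_UUID") == "value-of-INSTALL_UUID"
assert fetch_setting("INSTALL_UUID", simulate_async=True) == "value-of-INSTALL_UUID"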
@@ -94,9 +94,7 @@ def test_setting_singleton_retrieve_readonly(api_request, dummy_setting):

@pytest.mark.django_db
def test_setting_singleton_update(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
    ):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
        api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 3})
        response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
        assert response.data['FOO_BAR'] == 3
@@ -112,7 +110,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
    # sure that the _Forbidden validator doesn't get used for the
    # fields. See also https://github.com/ansible/awx/issues/4099.
    with dummy_setting('FOO_BAR', field_class=sso_fields.SAMLOrgAttrField, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
        'awx.conf.views.clear_setting_cache'
    ):
        api_request(
            'patch',
@@ -126,7 +124,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
@pytest.mark.django_db
def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=4, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
        'awx.conf.views.clear_setting_cache'
    ):
        api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 5})
        response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
@@ -136,7 +134,7 @@ def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy
@pytest.mark.django_db
def test_setting_singleton_update_dont_change_encrypted_mark(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.CharField, encrypted=True, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
        'awx.conf.views.clear_setting_cache'
    ):
        api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 'password'})
        assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$')
@@ -155,16 +153,14 @@ def test_setting_singleton_update_runs_custom_validate(api_request, dummy_settin

    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), dummy_validate(
        'foobar', func_raising_exception
    ), mock.patch('awx.conf.views.handle_setting_changes'):
    ), mock.patch('awx.conf.views.clear_setting_cache'):
        response = api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 23})
        assert response.status_code == 400


@pytest.mark.django_db
def test_setting_singleton_delete(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
    ):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
        api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
        response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
        assert not response.data['FOO_BAR']
@@ -173,7 +169,7 @@ def test_setting_singleton_delete(api_request, dummy_setting):
@pytest.mark.django_db
def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=23, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
        'awx.conf.views.clear_setting_cache'
    ):
        api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
        response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
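Every test here retargets its mock.patch from awx.conf.views.handle_setting_changes to awx.conf.views.clear_setting_cache, because a patch must name the attribute in the namespace where it is looked up, and the view module now imports the renamed task. A self-contained illustration of that "patch where it's used" rule follows; the module built here is synthetic, standing in for the views module:

# Why the tests retarget mock.patch: you patch the name in the namespace
# where it is *looked up*. "fake_views" is a synthetic stand-in module.
import sys
import types
from unittest import mock

views = types.ModuleType("fake_views")
views.clear_setting_cache = lambda keys: f"cleared {keys}"
views.update_settings = lambda keys: views.clear_setting_cache(keys)
sys.modules["fake_views"] = views

with mock.patch("fake_views.clear_setting_cache") as fake_clear:
    views.update_settings(["FOO_BAR"])

fake_clear.assert_called_once_with(["FOO_BAR"])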
@ -26,10 +26,11 @@ from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdat
|
||||
from awx.api.permissions import IsSystemAdminOrAuditor
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.utils import camelcase_to_underscore
|
||||
from awx.main.tasks.system import handle_setting_changes
|
||||
from awx.main.tasks.system import clear_setting_cache
|
||||
from awx.conf.models import Setting
|
||||
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
|
||||
from awx.conf import settings_registry
|
||||
from awx.main.utils.external_logging import reconfigure_rsyslog
|
||||
|
||||
|
||||
SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name'))
|
||||
@ -118,7 +119,10 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
setting.save(update_fields=['value'])
|
||||
settings_change_list.append(key)
|
||||
if settings_change_list:
|
||||
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
|
||||
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
|
||||
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
|
||||
# call notify to rsyslog. no data is need so payload is empty
|
||||
reconfigure_rsyslog.delay()
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
instance = self.get_object()
|
||||
@ -133,7 +137,10 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
setting.delete()
|
||||
settings_change_list.append(setting.key)
|
||||
if settings_change_list:
|
||||
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
|
||||
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
|
||||
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
|
||||
# call notify to rsyslog. no data is need so payload is empty
|
||||
reconfigure_rsyslog.delay()
|
||||
|
||||
# When TOWER_URL_BASE is deleted from the API, reset it to the hostname
|
||||
# used to make the request as a default.
|
||||
|
||||
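Note on the pattern above: dispatching `clear_setting_cache.delay(...)` inside `connection.on_commit(...)` defers the pg_notify-backed task until the database transaction commits, so other nodes never clear their caches against a value that could still be rolled back. A minimal sketch of the same pattern (`save_setting` is a hypothetical persistence helper, not part of this change):

    from django.db import connection, transaction

    def update_setting(key, value):
        # run the write and the notification hand-off atomically
        with transaction.atomic():
            save_setting(key, value)  # hypothetical persistence helper
            # the queued callable fires only after COMMIT succeeds
            connection.on_commit(lambda: clear_setting_cache.delay([key]))
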
@ -4,11 +4,11 @@ import logging
# AWX
from awx.main.analytics.subsystem_metrics import Metrics
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename

logger = logging.getLogger('awx.main.scheduler')


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def send_subsystem_metrics():
Metrics().send_metrics()

@ -65,7 +65,7 @@ class FixedSlidingWindow:
return sum(self.buckets.values()) or 0


class BroadcastWebsocketStatsManager:
class RelayWebsocketStatsManager:
def __init__(self, event_loop, local_hostname):
self._local_hostname = local_hostname

@ -74,7 +74,7 @@ class BroadcastWebsocketStatsManager:
self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME

def new_remote_host_stats(self, remote_hostname):
self._stats[remote_hostname] = BroadcastWebsocketStats(self._local_hostname, remote_hostname)
self._stats[remote_hostname] = RelayWebsocketStats(self._local_hostname, remote_hostname)
return self._stats[remote_hostname]

def delete_remote_host_stats(self, remote_hostname):
@ -107,7 +107,7 @@ class BroadcastWebsocketStatsManager:
return parser.text_string_to_metric_families(stats_str.decode('UTF-8'))


class BroadcastWebsocketStats:
class RelayWebsocketStats:
def __init__(self, local_hostname, remote_hostname):
self._local_hostname = local_hostname
self._remote_hostname = remote_hostname

@ -6,7 +6,7 @@ import platform
import distro

from django.db import connection
from django.db.models import Count
from django.db.models import Count, Min
from django.conf import settings
from django.contrib.sessions.models import Session
from django.utils.timezone import now, timedelta
@ -35,7 +35,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
"""


def trivial_slicing(key, since, until, last_gather):
def trivial_slicing(key, since, until, last_gather, **kwargs):
if since is not None:
return [(since, until)]

@ -48,7 +48,7 @@ def trivial_slicing(key, since, until, last_gather):
return [(last_entry, until)]


def four_hour_slicing(key, since, until, last_gather):
def four_hour_slicing(key, since, until, last_gather, **kwargs):
if since is not None:
last_entry = since
else:
@ -69,6 +69,54 @@ def four_hour_slicing(key, since, until, last_gather):
start = end


def host_metric_slicing(key, since, until, last_gather, **kwargs):
"""
Slicing doesn't start 4 weeks back; instead, the whole table is sent monthly or on the first sync
"""
from awx.main.models.inventory import HostMetric

if since is not None:
return [(since, until)]

from awx.conf.models import Setting

# Check if full sync should be done
full_sync_enabled = kwargs.get('full_sync_enabled', False)
last_entry = None
if not full_sync_enabled:
#
# If not, try incremental sync first
#
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
last_entry = last_entries.get(key)
if not last_entry:
#
# If not done before, switch to full sync
#
full_sync_enabled = True

if full_sync_enabled:
#
# Find the lowest date for full sync
#
min_dates = HostMetric.objects.aggregate(min_last_automation=Min('last_automation'), min_last_deleted=Min('last_deleted'))
if min_dates['min_last_automation'] and min_dates['min_last_deleted']:
last_entry = min(min_dates['min_last_automation'], min_dates['min_last_deleted'])
elif min_dates['min_last_automation'] or min_dates['min_last_deleted']:
last_entry = min_dates['min_last_automation'] or min_dates['min_last_deleted']

if not last_entry:
# empty table
return []

start, end = last_entry, None
while start < until:
end = min(start + timedelta(days=30), until)
yield (start, end)
start = end


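Because `host_metric_slicing` is a generator, `gather()` can walk month-sized windows lazily instead of materializing them. A quick standalone illustration of the windowing loop at the end of the function, under assumed dates (this driver is not part of the change):

    from datetime import datetime, timedelta, timezone

    def thirty_day_windows(start, until):
        # mirrors the while-loop at the end of host_metric_slicing
        while start < until:
            end = min(start + timedelta(days=30), until)
            yield (start, end)
            start = end

    start = datetime(2023, 1, 1, tzinfo=timezone.utc)
    until = datetime(2023, 3, 15, tzinfo=timezone.utc)
    for window in thirty_day_windows(start, until):
        print(window)  # three windows: Jan 1-31, Jan 31-Mar 2, Mar 2-15
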
def _identify_lower(key, since, until, last_gather):
from awx.conf.models import Setting

@ -83,7 +131,7 @@ def _identify_lower(key, since, until, last_gather):
return lower, last_entries


@register('config', '1.4', description=_('General platform configuration.'))
@register('config', '1.5', description=_('General platform configuration.'))
def config(since, **kwargs):
license_info = get_license()
install_type = 'traditional'
@ -119,6 +167,7 @@ def config(since, **kwargs):
'compliant': license_info.get('compliant'),
'date_warning': license_info.get('date_warning'),
'date_expired': license_info.get('date_expired'),
'subscription_usage_model': getattr(settings, 'SUBSCRIPTION_USAGE_MODEL', ''),  # 1.5+
'free_instances': license_info.get('free_instances', 0),
'total_licensed_instances': license_info.get('instance_count', 0),
'license_expiry': license_info.get('time_remaining', 0),
@ -536,3 +585,25 @@ def workflow_job_template_node_table(since, full_path, **kwargs):
) always_nodes ON main_workflowjobtemplatenode.id = always_nodes.from_workflowjobtemplatenode_id
ORDER BY main_workflowjobtemplatenode.id ASC) TO STDOUT WITH CSV HEADER'''
return _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path)


@register(
'host_metric_table', '1.0', format='csv', description=_('Host Metric data, incremental/full sync'), expensive=host_metric_slicing, full_sync_interval=30
)
def host_metric_table(since, full_path, until, **kwargs):
host_metric_query = '''COPY (SELECT main_hostmetric.id,
main_hostmetric.hostname,
main_hostmetric.first_automation,
main_hostmetric.last_automation,
main_hostmetric.last_deleted,
main_hostmetric.deleted,
main_hostmetric.automated_counter,
main_hostmetric.deleted_counter,
main_hostmetric.used_in_inventories
FROM main_hostmetric
WHERE (main_hostmetric.last_automation > '{}' AND main_hostmetric.last_automation <= '{}') OR
(main_hostmetric.last_deleted > '{}' AND main_hostmetric.last_deleted <= '{}')
ORDER BY main_hostmetric.id ASC) TO STDOUT WITH CSV HEADER'''.format(
since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
)
return _copy_table(table='host_metric', query=host_metric_query, path=full_path)

@ -52,7 +52,7 @@ def all_collectors():
}


def register(key, version, description=None, format='json', expensive=None):
def register(key, version, description=None, format='json', expensive=None, full_sync_interval=None):
"""
A decorator used to register a function as a metric collector.

@ -71,6 +71,7 @@ def register(key, version, description=None, format='json', expensive=None):
f.__awx_analytics_description__ = description
f.__awx_analytics_type__ = format
f.__awx_expensive__ = expensive
f.__awx_full_sync_interval__ = full_sync_interval
return f

return decorate
@ -259,10 +260,19 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
# These slicer functions may return a generator. The `since` parameter is
# allowed to be None, and will fall back to LAST_ENTRIES[key] or to
# LAST_GATHER (truncated appropriately to match the 4-week limit).
#
# Or it can force a full table sync if an interval is given
kwargs = dict()
full_sync_enabled = False
if func.__awx_full_sync_interval__:
last_full_sync = last_entries.get(f"{key}_full")
full_sync_enabled = not last_full_sync or last_full_sync < now() - timedelta(days=func.__awx_full_sync_interval__)

kwargs['full_sync_enabled'] = full_sync_enabled
if func.__awx_expensive__:
slices = func.__awx_expensive__(key, since, until, last_gather)
slices = func.__awx_expensive__(key, since, until, last_gather, **kwargs)
else:
slices = collectors.trivial_slicing(key, since, until, last_gather)
slices = collectors.trivial_slicing(key, since, until, last_gather, **kwargs)

for start, end in slices:
files = func(start, full_path=gather_dir, until=end)
@ -301,6 +311,12 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
succeeded = False
logger.exception("Could not generate metric {}".format(filename))

# update full sync timestamp if successfully shipped
if full_sync_enabled and collection_type != 'dry-run' and succeeded:
with disable_activity_stream():
last_entries[f"{key}_full"] = now()
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)

if collection_type != 'dry-run':
if succeeded:
for fpath in tarfiles:
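The full-sync gate in `gather()` reduces to a date comparison: a collector registered with `full_sync_interval=30` falls back to a full table sync whenever the recorded `{key}_full` timestamp is missing or older than 30 days. A standalone restatement of that check, with assumed values (this helper is illustrative, not AWX API):

    from datetime import datetime, timedelta, timezone

    def needs_full_sync(last_full_sync, interval_days):
        # same logic as the gate in gather(): no record, or record too old
        return not last_full_sync or last_full_sync < datetime.now(timezone.utc) - timedelta(days=interval_days)

    print(needs_full_sync(None, 30))                                             # True: never synced
    print(needs_full_sync(datetime.now(timezone.utc) - timedelta(days=45), 30))  # True: stale
    print(needs_full_sync(datetime.now(timezone.utc) - timedelta(days=10), 30))  # False: recent enough
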
@ -359,9 +375,7 @@ def ship(path):
s.headers = get_awx_http_client_headers()
s.headers.pop('Content-Type')
with set_environ(**settings.AWX_TASK_ENV):
response = s.post(
url, files=files, verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31)
)
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
# Accept 2XX status_codes
if response.status_code >= 300:
logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))

@ -9,7 +9,7 @@ from django.apps import apps
from awx.main.consumers import emit_channel_notification
from awx.main.utils import is_testing

root_key = 'awx_metrics'
root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
logger = logging.getLogger('awx.main.analytics')


@ -264,13 +264,6 @@ class Metrics:
data[field] = self.METRICS[field].decode(self.conn)
return data

def store_metrics(self, data_json):
# called when receiving metrics from other instances
data = json.loads(data_json)
if self.instance_name != data['instance']:
logger.debug(f"{self.instance_name} received subsystem metrics from {data['instance']}")
self.conn.set(root_key + "_instance_" + data['instance'], data['metrics'])

def should_pipe_execute(self):
if self.metrics_have_changed is False:
return False
@ -309,9 +302,9 @@ class Metrics:
'instance': self.instance_name,
'metrics': self.serialize_local_metrics(),
}
# store a local copy as well
self.store_metrics(json.dumps(payload))

emit_channel_notification("metrics", payload)

self.previous_send_metrics.set(current_time)
self.previous_send_metrics.store_value(self.conn)
finally:

@ -10,7 +10,7 @@ from rest_framework import serializers
# AWX
from awx.conf import fields, register, register_validate
from awx.main.models import ExecutionEnvironment

from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS

logger = logging.getLogger('awx.main.conf')

@ -795,6 +795,42 @@ register(
category_slug='bulk',
)

register(
'UI_NEXT',
field_class=fields.BooleanField,
default=False,
label=_('Enable Preview of New User Interface'),
help_text=_('Enable preview of new user interface.'),
category=_('System'),
category_slug='system',
)

register(
'SUBSCRIPTION_USAGE_MODEL',
field_class=fields.ChoiceField,
choices=[
('', _('Default model for AWX - no subscription. Deletion of host_metrics will not be considered for purposes of managed host counting')),
(
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS,
_('Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes'),
),
],
default='',
allow_blank=True,
label=_('Defines subscription usage model and shows Host Metrics'),
category=_('System'),
category_slug='system',
)

register(
'CLEANUP_HOST_METRICS_LAST_TS',
field_class=fields.DateTimeField,
label=_('Last cleanup date for HostMetrics'),
allow_null=True,
category=_('System'),
category_slug='system',
)


def logging_validate(serializer, attrs):
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):

@ -38,6 +38,8 @@ STANDARD_INVENTORY_UPDATE_ENV = {
'ANSIBLE_INVENTORY_EXPORT': 'True',
# Redirecting output to stderr allows JSON parsing to still work with -vvv
'ANSIBLE_VERBOSE_TO_STDERR': 'True',
# if ansible-inventory --limit is used for an inventory import, unmatched should be a failure
'ANSIBLE_HOST_PATTERN_MISMATCH': 'error',
}
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL
@ -63,7 +65,7 @@ ENV_BLOCKLIST = frozenset(
'INVENTORY_HOSTVARS',
'AWX_HOST',
'PROJECT_REVISION',
'SUPERVISOR_WEB_CONFIG_PATH',
'SUPERVISOR_CONFIG_PATH',
)
)

@ -106,3 +108,9 @@ JOB_VARIABLE_PREFIXES = [
ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE = (
'\u001b[31m \u001b[1m This can be caused if the version of ansible-runner in your execution environment is out of date.\u001b[0m'
)

# Values for setting SUBSCRIPTION_USAGE_MODEL
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS = 'unique_managed_hosts'

# Shared prefetch to use for creating a queryset for the purpose of writing or saving facts
HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id')

@ -3,6 +3,7 @@ import logging
import time
import hmac
import asyncio
import redis

from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
@ -80,7 +81,7 @@ class WebsocketSecretAuthHelper:
WebsocketSecretAuthHelper.verify_secret(secret)


class BroadcastConsumer(AsyncJsonWebsocketConsumer):
class RelayConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
try:
WebsocketSecretAuthHelper.is_authorized(self.scope)
@ -100,6 +101,21 @@ class BroadcastConsumer(AsyncJsonWebsocketConsumer):
async def internal_message(self, event):
await self.send(event['text'])

async def receive_json(self, data):
(group, message) = unwrap_broadcast_msg(data)
if group == "metrics":
message = json.loads(message['text'])
conn = redis.Redis.from_url(settings.BROKER_URL)
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
else:
await self.channel_layer.group_send(group, message)

async def consumer_subscribe(self, event):
await self.send_json(event)

async def consumer_unsubscribe(self, event):
await self.send_json(event)


class EventConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
@ -128,6 +144,11 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
self.channel_name,
)

await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.unsubscribe", "groups": list(current_groups), "origin_channel": self.channel_name},
)

@database_sync_to_async
def user_can_see_object_id(self, user_access, oid):
# At this point user is a channels.auth.UserLazyObject object
@ -176,9 +197,20 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
self.channel_name,
)

if len(old_groups):
await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.unsubscribe", "groups": list(old_groups), "origin_channel": self.channel_name},
)

new_groups_exclusive = new_groups - current_groups
for group_name in new_groups_exclusive:
await self.channel_layer.group_add(group_name, self.channel_name)

await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.subscribe", "groups": list(new_groups), "origin_channel": self.channel_name},
)
self.scope['session']['groups'] = new_groups
await self.send_json({"groups_current": list(new_groups), "groups_left": list(old_groups), "groups_joined": list(new_groups_exclusive)})

@ -200,9 +232,11 @@ def _dump_payload(payload):
return None


def emit_channel_notification(group, payload):
from awx.main.wsbroadcast import wrap_broadcast_msg  # noqa
def unwrap_broadcast_msg(payload: dict):
return (payload['group'], payload['message'])


def emit_channel_notification(group, payload):
payload_dumped = _dump_payload(payload)
if payload_dumped is None:
return
@ -212,16 +246,6 @@ def emit_channel_notification(group, payload):
run_sync(
channel_layer.group_send(
group,
{"type": "internal.message", "text": payload_dumped},
)
)

run_sync(
channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{
"type": "internal.message",
"text": wrap_broadcast_msg(group, payload_dumped),
},
{"type": "internal.message", "text": payload_dumped, "needs_relay": True},
)
)

@ -63,7 +63,7 @@ class RecordedQueryLog(object):
if not os.path.isdir(self.dest):
os.makedirs(self.dest)
progname = ' '.join(sys.argv)
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsrelay'):
if match in progname:
progname = match
break

@ -1,3 +1,4 @@
import os
import psycopg2
import select

@ -6,7 +7,6 @@ from contextlib import contextmanager
from django.conf import settings
from django.db import connection as pg_connection


NOT_READY = ([], [], [])


@ -14,6 +14,29 @@ def get_local_queuename():
return settings.CLUSTER_HOST_ID


def get_task_queuename():
if os.getenv('AWX_COMPONENT') != 'web':
return settings.CLUSTER_HOST_ID

from awx.main.models.ha import Instance

random_task_instance = (
Instance.objects.filter(
node_type__in=(Instance.Types.CONTROL, Instance.Types.HYBRID),
node_state=Instance.States.READY,
enabled=True,
)
.only('hostname')
.order_by('?')
.first()
)

if random_task_instance is None:
raise ValueError('No task instances are READY and Enabled.')

return random_task_instance.hostname


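`get_task_queuename` exists so that a web pod (where `AWX_COMPONENT=web`) never queues background work onto itself: it picks a random READY control or hybrid instance instead, while every other component keeps using its own hostname. Task authors only swap the queue callable; a short sketch of adopting it, following the same decorator pattern used in the scheduler hunk above (the task body is illustrative):

    from awx.main.dispatch import get_task_queuename
    from awx.main.dispatch.publish import task

    @task(queue=get_task_queuename)
    def nightly_cleanup():
        # runs on whichever control/hybrid node the queue callable resolved to
        ...
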
class PubSub(object):
def __init__(self, conn):
self.conn = conn

@ -6,7 +6,7 @@ from django.conf import settings
from django.db import connection
import redis

from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename

from . import pg_bus_conn

@ -21,7 +21,7 @@ class Control(object):
if service not in self.services:
raise RuntimeError('{} must be in {}'.format(service, self.services))
self.service = service
self.queuename = host or get_local_queuename()
self.queuename = host or get_task_queuename()

def status(self, *args, **kwargs):
r = redis.Redis.from_url(settings.BROKER_URL)

@ -26,8 +26,8 @@ class TaskWorker(BaseWorker):
`awx.main.dispatch.publish`.
"""

@classmethod
def resolve_callable(cls, task):
@staticmethod
def resolve_callable(task):
"""
Transform a dotted notation task into an imported, callable function, e.g.,

@ -46,7 +46,8 @@ class TaskWorker(BaseWorker):

return _call

def run_callable(self, body):
@staticmethod
def run_callable(body):
"""
Given some AMQP message, import the correct Python code and run it.
"""

@ -954,6 +954,16 @@ class OrderedManyToManyDescriptor(ManyToManyDescriptor):
def get_queryset(self):
return super(OrderedManyRelatedManager, self).get_queryset().order_by('%s__position' % self.through._meta.model_name)

def add(self, *objects):
if len(objects) > 1:
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
return super().add(*objects)

def remove(self, *objects):
if len(objects) > 1:
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
return super().remove(*objects)

return OrderedManyRelatedManager

return add_custom_queryset_to_many_related_manager(
@ -971,13 +981,12 @@ class OrderedManyToManyField(models.ManyToManyField):
by a special `position` column on the M2M table
"""

def _update_m2m_position(self, sender, **kwargs):
if kwargs.get('action') in ('post_add', 'post_remove'):
order_with_respect_to = None
for field in sender._meta.local_fields:
if isinstance(field, models.ForeignKey) and isinstance(kwargs['instance'], field.related_model):
order_with_respect_to = field.name
for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: kwargs['instance'].pk})):
def _update_m2m_position(self, sender, instance, action, **kwargs):
if action in ('post_add', 'post_remove'):
descriptor = getattr(instance, self.name)
order_with_respect_to = descriptor.source_field_name

for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: instance.pk})):
if ig.position != i:
ig.position = i
ig.save()

22
awx/main/management/commands/cleanup_host_metrics.py
Normal file
@ -0,0 +1,22 @@
from awx.main.models import HostMetric
from django.core.management.base import BaseCommand
from django.conf import settings


class Command(BaseCommand):
"""
Run soft-deleting of HostMetrics
"""

help = 'Run soft-deleting of HostMetrics'

def add_arguments(self, parser):
parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')

def handle(self, *args, **options):
months_ago = options.get('months-ago') or None

if not months_ago:
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)

HostMetric.cleanup_task(months_ago)
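Assuming the standard `awx-manage` entry point, the new command can also be exercised from Python: Django's `call_command` accepts the same argv-style flags, and `--months-ago` overrides the `CLEANUP_HOST_METRICS_THRESHOLD` fallback of 12. A hedged usage sketch:

    from django.core.management import call_command

    # soft-delete HostMetric rows not automated in the last 18 months
    call_command('cleanup_host_metrics', '--months-ago', '18')

    # with no flag, the threshold falls back to settings.CLEANUP_HOST_METRICS_THRESHOLD (default 12)
    call_command('cleanup_host_metrics')
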
@ -1,5 +1,6 @@
from django.core.management.base import BaseCommand, CommandError
from awx.main.tasks.system import clear_setting_cache
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError


class Command(BaseCommand):
@ -31,5 +32,7 @@ class Command(BaseCommand):
else:
raise CommandError('Please pass --enable flag to allow local auth or --disable flag to disable local auth')

clear_setting_cache.delay(['DISABLE_LOCAL_AUTH'])

def handle(self, **options):
self._enable_disable_auth(options.get('enable'), options.get('disable'))

@ -1,53 +1,230 @@
from django.core.management.base import BaseCommand
import datetime
from django.core.serializers.json import DjangoJSONEncoder
from awx.main.models.inventory import HostMetric
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
from awx.main.analytics.collectors import config
import json
import sys
import tempfile
import tarfile
import csv

CSV_PREFERRED_ROW_COUNT = 500000
BATCHED_FETCH_COUNT = 10000


class Command(BaseCommand):
help = 'This is for offline licensing usage'

def host_metric_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
list_of_queryset = list(
result.values(
'id',
'hostname',
'first_automation',
'last_automation',
'last_deleted',
'automated_counter',
'deleted_counter',
'deleted',
'used_in_inventories',
).order_by('first_automation')[offset : offset + limit]
)

return list_of_queryset

def host_metric_summary_monthly_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
list_of_queryset = list(
result.values(
'id',
'date',
'license_consumed',
'license_capacity',
'hosts_added',
'hosts_deleted',
'indirectly_managed_hosts',
).order_by(
'date'
)[offset : offset + limit]
)

return list_of_queryset

def paginated_db_retrieval(self, type, filter_kwargs, rows_per_file):
offset = 0
list_of_queryset = []
while True:
if type == 'host_metric':
result = HostMetric.objects.filter(**filter_kwargs)
list_of_queryset = self.host_metric_queryset(result, offset, rows_per_file)
elif type == 'host_metric_summary_monthly':
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
list_of_queryset = self.host_metric_summary_monthly_queryset(result, offset, rows_per_file)

if not list_of_queryset:
break
else:
yield list_of_queryset

offset += len(list_of_queryset)

def controlled_db_retrieval(self, type, filter_kwargs, offset=0, fetch_count=BATCHED_FETCH_COUNT):
if type == 'host_metric':
result = HostMetric.objects.filter(**filter_kwargs)
return self.host_metric_queryset(result, offset, fetch_count)
elif type == 'host_metric_summary_monthly':
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
return self.host_metric_summary_monthly_queryset(result, offset, fetch_count)

def write_to_csv(self, csv_file, list_of_queryset, always_header, first_write=False, mode='a'):
with open(csv_file, mode, newline='') as output_file:
try:
keys = list_of_queryset[0].keys() if list_of_queryset else []
dict_writer = csv.DictWriter(output_file, keys)
if always_header or first_write:
dict_writer.writeheader()
dict_writer.writerows(list_of_queryset)

except Exception as e:
print(e)

def csv_for_tar(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
for index, list_of_queryset in enumerate(self.paginated_db_retrieval(type, filter_kwargs, rows_per_file)):
csv_file = f'{temp_dir}/{type}{index+1}.csv'
arcname_file = f'{type}{index+1}.csv'

first_write = index == 0

self.write_to_csv(csv_file, list_of_queryset, always_header, first_write, 'w')
yield csv_file, arcname_file

def csv_for_tar_batched_fetch(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
csv_iteration = 1

offset = 0
rows_written_per_csv = 0
to_fetch = BATCHED_FETCH_COUNT

while True:
list_of_queryset = self.controlled_db_retrieval(type, filter_kwargs, offset, to_fetch)

if not list_of_queryset:
break

csv_file = f'{temp_dir}/{type}{csv_iteration}.csv'
arcname_file = f'{type}{csv_iteration}.csv'
self.write_to_csv(csv_file, list_of_queryset, always_header)

offset += to_fetch
rows_written_per_csv += to_fetch
always_header = False

remaining_rows_per_csv = rows_per_file - rows_written_per_csv

if not remaining_rows_per_csv:
yield csv_file, arcname_file

rows_written_per_csv = 0
always_header = True
to_fetch = BATCHED_FETCH_COUNT
csv_iteration += 1
elif remaining_rows_per_csv < BATCHED_FETCH_COUNT:
to_fetch = remaining_rows_per_csv

if rows_written_per_csv:
yield csv_file, arcname_file

def config_for_tar(self, options, temp_dir):
config_json = json.dumps(config(options.get('since')))
config_file = f'{temp_dir}/config.json'
arcname_file = 'config.json'
with open(config_file, 'w') as f:
f.write(config_json)
return config_file, arcname_file

def output_json(self, options, filter_kwargs):
with tempfile.TemporaryDirectory() as temp_dir:
for csv_detail in self.csv_for_tar(temp_dir, options.get('json', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, True):
csv_file = csv_detail[0]

with open(csv_file) as f:
reader = csv.DictReader(f)
rows = list(reader)
json_result = json.dumps(rows, cls=DjangoJSONEncoder)
print(json_result)

def output_csv(self, options, filter_kwargs):
with tempfile.TemporaryDirectory() as temp_dir:
for csv_detail in self.csv_for_tar(temp_dir, options.get('csv', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, False):
csv_file = csv_detail[0]
with open(csv_file) as f:
sys.stdout.write(f.read())

def output_tarball(self, options, filter_kwargs):
always_header = True
rows_per_file = options['rows_per_file'] or CSV_PREFERRED_ROW_COUNT

tar = tarfile.open("./host_metrics.tar.gz", "w:gz")

if rows_per_file <= BATCHED_FETCH_COUNT:
csv_function = self.csv_for_tar
else:
csv_function = self.csv_for_tar_batched_fetch

with tempfile.TemporaryDirectory() as temp_dir:
for csv_detail in csv_function(temp_dir, 'host_metric', filter_kwargs, rows_per_file, always_header):
tar.add(csv_detail[0], arcname=csv_detail[1])

for csv_detail in csv_function(temp_dir, 'host_metric_summary_monthly', filter_kwargs, rows_per_file, always_header):
tar.add(csv_detail[0], arcname=csv_detail[1])

config_file, arcname_file = self.config_for_tar(options, temp_dir)
tar.add(config_file, arcname=arcname_file)

tar.close()

def add_arguments(self, parser):
parser.add_argument('--since', type=datetime.datetime.fromisoformat, help='Start Date in ISO format YYYY-MM-DD')
parser.add_argument('--until', type=datetime.datetime.fromisoformat, help='End Date in ISO format YYYY-MM-DD')
parser.add_argument('--json', action='store_true', help='Select output as JSON')
parser.add_argument('--json', type=str, const='host_metric', nargs='?', help='Select output as JSON for host_metric or host_metric_summary_monthly')
parser.add_argument('--csv', type=str, const='host_metric', nargs='?', help='Select output as CSV for host_metric or host_metric_summary_monthly')
parser.add_argument('--tarball', action='store_true', help=f'Package CSV files into a tar with up to {CSV_PREFERRED_ROW_COUNT} rows')
parser.add_argument('--rows_per_file', type=int, help=f'Split rows in chunks of {CSV_PREFERRED_ROW_COUNT}')

def handle(self, *args, **options):
since = options.get('since')
until = options.get('until')

if since is None and until is None:
print("No Arguments received")
return None

if since is not None and since.tzinfo is None:
since = since.replace(tzinfo=datetime.timezone.utc)

if until is not None and until.tzinfo is None:
until = until.replace(tzinfo=datetime.timezone.utc)

filter_kwargs = {}
if since is not None:
filter_kwargs['last_automation__gte'] = since
if until is not None:
filter_kwargs['last_automation__lte'] = until

result = HostMetric.objects.filter(**filter_kwargs)
filter_kwargs_host_metrics_summary = {}
if since is not None:
filter_kwargs_host_metrics_summary['date__gte'] = since

if options['rows_per_file'] and options.get('rows_per_file') > CSV_PREFERRED_ROW_COUNT:
print(f"rows_per_file exceeds the allowable limit of {CSV_PREFERRED_ROW_COUNT}.")
return

# if --json flag is set, output the result in json format
if options['json']:
list_of_queryset = list(result.values('hostname', 'first_automation', 'last_automation'))
json_result = json.dumps(list_of_queryset, cls=DjangoJSONEncoder)
print(json_result)
self.output_json(options, filter_kwargs)
elif options['csv']:
self.output_csv(options, filter_kwargs)
elif options['tarball']:
self.output_tarball(options, filter_kwargs)

# --json flag is not set, output in plain text
else:
print(f"Total Number of hosts automated: {len(result)}")
for item in result:
print(f"Printing up to {BATCHED_FETCH_COUNT} automated hosts:")
result = HostMetric.objects.filter(**filter_kwargs)
list_of_queryset = self.host_metric_queryset(result, 0, BATCHED_FETCH_COUNT)
for item in list_of_queryset:
print(
"Hostname : {hostname} | first_automation : {first_automation} | last_automation : {last_automation}".format(
hostname=item.hostname, first_automation=item.first_automation, last_automation=item.last_automation
hostname=item['hostname'], first_automation=item['first_automation'], last_automation=item['last_automation']
)
)
return

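The reworked exporter routes `--json` and `--csv` through the same batched CSV pipeline, so output no longer depends on table size. A hedged usage sketch: the command's filename is not shown in these hunks, so the name `host_metric` below is an assumption, and the dates are examples only:

    from django.core.management import call_command

    # stream host_metric rows since March 2023 as CSV to stdout
    call_command('host_metric', '--since', '2023-03-01', '--csv')

    # same window, but the monthly summary table as JSON
    call_command('host_metric', '--since', '2023-03-01', '--json', 'host_metric_summary_monthly')

    # write ./host_metrics.tar.gz with at most 100000 rows per CSV member
    call_command('host_metric', '--since', '2023-03-01', '--tarball', '--rows_per_file', '100000')
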
@ -458,12 +458,19 @@ class Command(BaseCommand):
# TODO: We disable variable overwrite here in case user-defined inventory variables get
# mangled. But we still need to figure out a better way of processing multiple inventory
# update variables mixing with each other.
all_obj = self.inventory
db_variables = all_obj.variables_dict
db_variables.update(self.all_group.variables)
if db_variables != all_obj.variables_dict:
all_obj.variables = json.dumps(db_variables)
all_obj.save(update_fields=['variables'])
# issue for this: https://github.com/ansible/awx/issues/11623

if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
# NOTE: we had to add an exception case to not merge variables
# to make constructed inventory coherent
db_variables = self.all_group.variables
else:
db_variables = self.inventory.variables_dict
db_variables.update(self.all_group.variables)

if db_variables != self.inventory.variables_dict:
self.inventory.variables = json.dumps(db_variables)
self.inventory.save(update_fields=['variables'])
logger.debug('Inventory variables updated from "all" group')
else:
logger.debug('Inventory variables unmodified')
@ -522,16 +529,32 @@ class Command(BaseCommand):
def _update_db_host_from_mem_host(self, db_host, mem_host):
# Update host variables.
db_variables = db_host.variables_dict
if self.overwrite_vars:
db_variables = mem_host.variables
else:
db_variables.update(mem_host.variables)
mem_variables = mem_host.variables
update_fields = []

# Update host instance_id.
instance_id = self._get_instance_id(mem_variables)
if instance_id != db_host.instance_id:
old_instance_id = db_host.instance_id
db_host.instance_id = instance_id
update_fields.append('instance_id')

if self.inventory.kind == 'constructed':
# remove towervars so the constructed hosts do not have extra variables
for prefix in ('host', 'tower'):
for var in ('remote_{}_enabled', 'remote_{}_id'):
mem_variables.pop(var.format(prefix), None)

if self.overwrite_vars:
db_variables = mem_variables
else:
db_variables.update(mem_variables)

if db_variables != db_host.variables_dict:
db_host.variables = json.dumps(db_variables)
update_fields.append('variables')
# Update host enabled flag.
enabled = self._get_enabled(mem_host.variables)
enabled = self._get_enabled(mem_variables)
if enabled is not None and db_host.enabled != enabled:
db_host.enabled = enabled
update_fields.append('enabled')
@ -540,12 +563,6 @@ class Command(BaseCommand):
old_name = db_host.name
db_host.name = mem_host.name
update_fields.append('name')
# Update host instance_id.
instance_id = self._get_instance_id(mem_host.variables)
if instance_id != db_host.instance_id:
old_instance_id = db_host.instance_id
db_host.instance_id = instance_id
update_fields.append('instance_id')
# Update host and display message(s) on what changed.
if update_fields:
db_host.save(update_fields=update_fields)
@ -654,13 +671,19 @@ class Command(BaseCommand):
mem_host = self.all_group.all_hosts[mem_host_name]
import_vars = mem_host.variables
host_desc = import_vars.pop('_awx_description', 'imported')
host_attrs = dict(variables=json.dumps(import_vars), description=host_desc)
host_attrs = dict(description=host_desc)
enabled = self._get_enabled(mem_host.variables)
if enabled is not None:
host_attrs['enabled'] = enabled
if self.instance_id_var:
instance_id = self._get_instance_id(mem_host.variables)
host_attrs['instance_id'] = instance_id
if self.inventory.kind == 'constructed':
# remove towervars so the constructed hosts do not have extra variables
for prefix in ('host', 'tower'):
for var in ('remote_{}_enabled', 'remote_{}_id'):
import_vars.pop(var.format(prefix), None)
host_attrs['variables'] = json.dumps(import_vars)
try:
sanitize_jinja(mem_host_name)
except ValueError as e:

@ -44,16 +44,18 @@ class Command(BaseCommand):

for x in ig.instances.all():
color = '\033[92m'
end_color = '\033[0m'
if x.capacity == 0 and x.node_type != 'hop':
color = '\033[91m'
if not x.enabled:
color = '\033[90m[DISABLED] '
if no_color:
color = ''
end_color = ''

capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}{end_color}')

print()

32
awx/main/management/commands/run_cache_clear.py
Normal file
@ -0,0 +1,32 @@
import logging
import json

from django.core.management.base import BaseCommand
from awx.main.dispatch import pg_bus_conn
from awx.main.dispatch.worker.task import TaskWorker

logger = logging.getLogger('awx.main.cache_clear')


class Command(BaseCommand):
"""
Cache Clear
Runs as a management command and starts a daemon that listens for a pg_notify message to clear the cache.
"""

help = 'Launch the cache clear daemon'

def handle(self, *arg, **options):
try:
with pg_bus_conn(new_connection=True) as conn:
conn.listen("tower_settings_change")
for e in conn.events(yield_timeouts=True):
if e is not None:
body = json.loads(e.payload)
logger.info(f"Cache clear request received. Clearing now, payload: {e.payload}")
TaskWorker.run_callable(body)

except Exception:
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
logger.exception('Encountered unhandled error in cache clear main loop')
raise
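The daemon leaves message construction to the dispatcher: whatever arrives on the `tower_settings_change` channel is decoded as JSON and handed to `TaskWorker.run_callable`, so the payload must be a dispatcher task body. A hedged sketch of what a producer might send; the exact body schema here is an assumption (in AWX it is normally built by the `@task` publisher, not by hand):

    import json
    from awx.main.dispatch import pg_bus_conn

    # illustrative only: a body naming the callable the worker should import and run
    body = {'task': 'awx.main.tasks.system.clear_setting_cache', 'args': [['FOO_BAR']], 'kwargs': {}}

    with pg_bus_conn(new_connection=True) as conn:
        conn.notify('tower_settings_change', json.dumps(body))
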
@ -8,7 +8,7 @@ from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection

from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
@ -76,7 +76,7 @@ class Command(BaseCommand):
consumer = None

try:
queues = ['tower_broadcast_all', get_local_queuename()]
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
consumer.run()
except KeyboardInterrupt:

67
awx/main/management/commands/run_heartbeet.py
Normal file
@ -0,0 +1,67 @@
import json
import logging
import os
import time

from django.core.management.base import BaseCommand
from django.conf import settings

from awx.main.dispatch import pg_bus_conn

logger = logging.getLogger('awx.main.commands.run_heartbeet')


class Command(BaseCommand):
help = 'Launch the web server beacon (heartbeet)'

def print_banner(self):
heartbeet = r"""
********** **********
************* *************
*****************************
***********HEART***********
*************************
*******************
*************** _._
*********** /`._ `'. __
******* \ .\| \ _'` `)
*** (``_) \| ).'` /`- /
* `\ `;\_ `\\//`-'` /
\ `'.'.| / __/`
`'--v_|/`'`
__||-._
/'` `-`` `'\\
/ .'` )
\ BEET ' )
\. /
'. /'`
`) |
//
'(.
`\`.
``"""
print(heartbeet)

def construct_payload(self, action='online'):
payload = {
'hostname': settings.CLUSTER_HOST_ID,
'ip': os.environ.get('MY_POD_IP'),
'action': action,
}
return json.dumps(payload)

def do_hearbeat_loop(self):
with pg_bus_conn(new_connection=True) as conn:
while True:
logger.debug('Sending heartbeat')
conn.notify('web_heartbeet', self.construct_payload())
time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)

# TODO: Send a message with action=offline if we notice a SIGTERM or SIGINT
# (wsrelay can use this to remove the node quicker)
def handle(self, *arg, **options):
self.print_banner()

# Note: We don't really try any reconnect logic to pg_notify here,
# just let supervisor restart if we fail.
self.do_hearbeat_loop()
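On the receiving side, wsrelay can discover web pods by listening on the same channel. A minimal consumer sketch under the payload format defined by `construct_payload` above, reusing the `pg_bus_conn` APIs shown in these hunks:

    import json
    from awx.main.dispatch import pg_bus_conn

    with pg_bus_conn(new_connection=True) as conn:
        conn.listen('web_heartbeet')
        for event in conn.events(yield_timeouts=True):
            if event is None:
                continue  # timeout tick, nothing received
            beat = json.loads(event.payload)
            print(beat['hostname'], beat['ip'], beat['action'])  # e.g. awx-web-0 10.0.0.5 online
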
41
awx/main/management/commands/run_rsyslog_configurer.py
Normal file
@ -0,0 +1,41 @@
import logging
import json

from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.cache import cache
from awx.main.dispatch import pg_bus_conn
from awx.main.dispatch.worker.task import TaskWorker
from awx.main.utils.external_logging import reconfigure_rsyslog

logger = logging.getLogger('awx.main.rsyslog_configurer')


class Command(BaseCommand):
"""
Rsyslog Configurer
Runs as a management command and starts the rsyslog configurer daemon. The daemon listens
for pg_notify, then calls reconfigure_rsyslog.
"""

help = 'Launch the rsyslog_configurer daemon'

def handle(self, *arg, **options):
try:
with pg_bus_conn(new_connection=True) as conn:
conn.listen("rsyslog_configurer")
# reconfigure rsyslog on start up
reconfigure_rsyslog()
for e in conn.events(yield_timeouts=True):
if e is not None:
logger.info("Change in logging settings found. Restarting rsyslogd")
# clear the cache of relevant settings then restart
setting_keys = [k for k in dir(settings) if k.startswith('LOG_AGGREGATOR')]
cache.delete_many(setting_keys)
settings._awx_conf_memoizedcache.clear()
body = json.loads(e.payload)
TaskWorker.run_callable(body)
except Exception:
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
logger.exception('Encountered unhandled error in rsyslog_configurer main loop')
raise
@ -13,13 +13,13 @@ from django.db import connection
from django.db.migrations.executor import MigrationExecutor

from awx.main.analytics.broadcast_websocket import (
BroadcastWebsocketStatsManager,
RelayWebsocketStatsManager,
safe_name,
)
from awx.main.wsbroadcast import BroadcastWebsocketManager
from awx.main.wsrelay import WebSocketRelayManager


logger = logging.getLogger('awx.main.wsbroadcast')
logger = logging.getLogger('awx.main.wsrelay')


class Command(BaseCommand):
@ -99,7 +99,7 @@ class Command(BaseCommand):
executor = MigrationExecutor(connection)
migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
except Exception as exc:
logger.info(f'Error on startup of run_wsbroadcast (error: {exc}), retry in 10s...')
logger.warning(f'Error on startup of run_wsrelay (error: {exc}), retry in 10s...')
time.sleep(10)
return

@ -130,9 +130,9 @@ class Command(BaseCommand):

if options.get('status'):
try:
stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
stats_all = RelayWebsocketStatsManager.get_stats_sync()
except redis.exceptions.ConnectionError as e:
print(f"Unable to get Broadcast Websocket Status. Failed to connect to redis {e}")
print(f"Unable to get Relay Websocket Status. Failed to connect to redis {e}")
return

data = {}
@ -151,22 +151,19 @@ class Command(BaseCommand):
host_stats = Command.get_connection_status(hostnames, data)
lines = Command._format_lines(host_stats)

print(f'Broadcast websocket connection status from "{my_hostname}" to:')
print(f'Relay websocket connection status from "{my_hostname}" to:')
print('\n'.join(lines))

host_stats = Command.get_connection_stats(hostnames, data)
lines = Command._format_lines(host_stats)

print(f'\nBroadcast websocket connection stats from "{my_hostname}" to:')
print(f'\nRelay websocket connection stats from "{my_hostname}" to:')
print('\n'.join(lines))

return

try:
broadcast_websocket_mgr = BroadcastWebsocketManager()
task = broadcast_websocket_mgr.start()

loop = asyncio.get_event_loop()
loop.run_until_complete(task)
websocket_relay_manager = WebSocketRelayManager()
asyncio.run(websocket_relay_manager.run())
except KeyboardInterrupt:
logger.debug('Terminating Websocket Broadcaster')
logger.info('Terminating Websocket Relayer')
@ -79,6 +79,11 @@ class HostManager(models.Manager):
return qs


class HostMetricActiveManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(deleted=False)


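Assuming the model attaches this as a second manager, e.g. `active_objects = HostMetricActiveManager()` (the attachment is not shown in this hunk), soft-deleted rows can be excluded without repeating the filter at every call site:

    # hypothetical manager attachment, mirroring the class above
    active = HostMetric.active_objects.count()   # deleted=False only
    everything = HostMetric.objects.count()      # includes soft-deleted rows
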
def get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping):
|
||||
# Create IG mapping by union of all groups their instances are members of
|
||||
ig_ig_mapping = {}
|
||||
|
||||
43
awx/main/migrations/0180_add_hostmetric_fields.py
Normal file
@ -0,0 +1,43 @@
|
||||
# Generated by Django 3.2.16 on 2023-02-03 09:40
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0179_change_cyberark_plugin_names'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(model_name='hostmetric', name='hostname', field=models.CharField(max_length=512, primary_key=False, serialize=True, unique=True)),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='last_deleted',
|
||||
field=models.DateTimeField(db_index=True, null=True, help_text='When the host was last deleted'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='automated_counter',
|
||||
field=models.BigIntegerField(default=0, help_text='How many times was the host automated'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='deleted_counter',
|
||||
field=models.IntegerField(default=0, help_text='How many times was the host deleted'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='deleted',
|
||||
field=models.BooleanField(
|
||||
default=False, help_text='Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='used_in_inventories',
|
||||
field=models.IntegerField(null=True, help_text='How many inventories contain this host'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric', name='id', field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
),
|
||||
]
|
||||
33
awx/main/migrations/0181_hostmetricsummarymonthly.py
Normal file
@ -0,0 +1,33 @@
|
||||
# Generated by Django 3.2.16 on 2023-02-10 12:26
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0180_add_hostmetric_fields'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='HostMetricSummaryMonthly',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('date', models.DateField(unique=True)),
|
||||
('license_consumed', models.BigIntegerField(default=0, help_text='How many unique hosts are consumed from the license')),
|
||||
('license_capacity', models.BigIntegerField(default=0, help_text="'License capacity as max. number of unique hosts")),
|
||||
(
|
||||
'hosts_added',
|
||||
models.IntegerField(default=0, help_text='How many hosts were added in the associated month, consuming more license capacity'),
|
||||
),
|
||||
(
|
||||
'hosts_deleted',
|
||||
models.IntegerField(default=0, help_text='How many hosts were deleted in the associated month, freeing the license capacity'),
|
||||
),
|
||||
(
|
||||
'indirectly_managed_hosts',
|
||||
models.IntegerField(default=0, help_text='Manually entered number indirectly managed hosts for a certain month'),
|
||||
),
|
||||
],
|
||||
),
|
||||
]

138
awx/main/migrations/0182_constructed_inventory.py
Normal file
@ -0,0 +1,138 @@
# Generated by Django 3.2.16 on 2022-12-07 14:20

import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0181_hostmetricsummarymonthly'),
    ]

    operations = [
        migrations.CreateModel(
            name='InventoryConstructedInventoryMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                (
                    'constructed_inventory',
                    models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory', related_name='constructed_inventory_memberships'),
                ),
                ('input_inventory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory')),
            ],
        ),
        migrations.AddField(
            model_name='inventory',
            name='input_inventories',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True,
                through_fields=('constructed_inventory', 'input_inventory'),
                help_text='Only valid for constructed inventories, this links to the inventories that will be used.',
                related_name='destination_inventories',
                through='main.InventoryConstructedInventoryMembership',
                to='main.Inventory',
            ),
        ),
        migrations.AlterField(
            model_name='inventory',
            name='kind',
            field=models.CharField(
                blank=True,
                choices=[
                    ('', 'Hosts have a direct link to this inventory.'),
                    ('smart', 'Hosts for inventory generated using the host_filter property.'),
                    ('constructed', 'Parse list of source inventories with the constructed inventory plugin.'),
                ],
                default='',
                help_text='Kind of inventory being represented.',
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('constructed', 'Template additional groups and hostvars at runtime'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('constructed', 'Template additional groups and hostvars at runtime'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AddField(
            model_name='inventorysource',
            name='limit',
            field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
        ),
        migrations.AddField(
            model_name='inventoryupdate',
            name='limit',
            field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
        ),
        migrations.AlterField(
            model_name='inventorysource',
            name='host_filter',
            field=models.TextField(
                blank=True,
                default='',
                help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='host_filter',
            field=models.TextField(
                blank=True,
                default='',
                help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
            ),
        ),
        migrations.AddField(
            model_name='jobhostsummary',
            name='constructed_host',
            field=models.ForeignKey(
                default=None,
                editable=False,
                help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='constructed_host_summaries',
                to='main.host',
            ),
        ),
    ]
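
The ordering contract for input inventories comes from OrderedManyToManyField: the through model's
position column records each input inventory's rank. A minimal sketch of reading the inputs back in
order (illustrative only; `inv` is an assumed constructed Inventory instance, not code from this PR):

    from awx.main.models import InventoryConstructedInventoryMembership

    # The through rows carry the ordering; sort on `position` to preserve the user-defined order.
    memberships = (
        InventoryConstructedInventoryMembership.objects.filter(constructed_inventory=inv)
        .select_related('input_inventory')
        .order_by('position')
    )
    ordered_inputs = [m.input_inventory for m in memberships]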

@ -16,7 +16,9 @@ from awx.main.models.inventory import ( # noqa
    Group,
    Host,
    HostMetric,
    HostMetricSummaryMonthly,
    Inventory,
    InventoryConstructedInventoryMembership,
    InventorySource,
    InventoryUpdate,
    SmartInventoryMembership,

@ -7,6 +7,7 @@ from collections import defaultdict
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, DatabaseError
from django.db.models.functions import Cast
from django.utils.dateparse import parse_datetime
from django.utils.text import Truncator
from django.utils.timezone import utc, now
@ -536,25 +537,38 @@ class JobEvent(BasePlaybookEvent):
            return
        job = self.job

        from awx.main.models import Host, JobHostSummary, HostMetric  # circular import
        from awx.main.models import Host, JobHostSummary  # circular import

        if self.job.inventory.kind == 'constructed':
            all_hosts = Host.objects.filter(id__in=self.job.inventory.hosts.values_list(Cast('instance_id', output_field=models.IntegerField()))).only(
                'id', 'name'
            )
            constructed_host_map = self.host_map
            host_map = {host.name: host.id for host in all_hosts}
        else:
            all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
            constructed_host_map = {}
            host_map = self.host_map

        all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
        existing_host_ids = set(h.id for h in all_hosts)

        summaries = dict()
        updated_hosts_list = list()
        for host in hostnames:
            updated_hosts_list.append(host.lower())
            host_id = self.host_map.get(host, None)
            host_id = host_map.get(host)
            if host_id not in existing_host_ids:
                host_id = None
            constructed_host_id = constructed_host_map.get(host)
            host_stats = {}
            for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
                try:
                    host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
                except AttributeError:  # in case event_data[stat] isn't a dict.
                    pass
            summary = JobHostSummary(created=now(), modified=now(), job_id=job.id, host_id=host_id, host_name=host, **host_stats)
            summary = JobHostSummary(
                created=now(), modified=now(), job_id=job.id, host_id=host_id, constructed_host_id=constructed_host_id, host_name=host, **host_stats
            )
            summary.failed = bool(summary.dark or summary.failures)
            summaries[(host_id, host)] = summary

@ -575,12 +589,26 @@ class JobEvent(BasePlaybookEvent):

        Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)

        # bulk-create
        current_time = now()
        HostMetric.objects.bulk_create(
            [HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
        # Create/update Host Metrics
        self._update_host_metrics(updated_hosts_list)

    @staticmethod
    def _update_host_metrics(updated_hosts_list):
        from awx.main.models import HostMetric  # circular import

        # bulk-create
        current_time = now()
        HostMetric.objects.bulk_create(
            [HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
        )
        # bulk-update
        batch_start, batch_size = 0, 1000
        while batch_start <= len(updated_hosts_list):
            batched_host_list = updated_hosts_list[batch_start : (batch_start + batch_size)]
            HostMetric.objects.filter(hostname__in=batched_host_list).update(
                last_automation=current_time, automated_counter=models.F('automated_counter') + 1, deleted=False
            )
        HostMetric.objects.filter(hostname__in=updated_hosts_list).update(last_automation=current_time)
            batch_start += batch_size

    @property
    def job_verbosity(self):
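
The method above implements a create-then-update upsert: bulk_create with ignore_conflicts=True
inserts rows only for hostnames never seen before, and the follow-up filtered update() bumps
last_automation and automated_counter for every hostname in the batch. A standalone sketch of the
same pattern (illustrative only; the model and field names are placeholders standing in for
HostMetric, not code from this PR):

    from django.db import models
    from django.utils.timezone import now

    def upsert_metrics(metric_model, hostnames, batch_size=1000):
        current_time = now()
        # Insert missing rows; hostnames that already exist are silently skipped.
        metric_model.objects.bulk_create(
            [metric_model(hostname=h, last_automation=current_time) for h in hostnames],
            ignore_conflicts=True,
            batch_size=100,
        )
        # Update all rows, batched to keep the SQL IN clause bounded.
        for start in range(0, len(hostnames), batch_size):
            metric_model.objects.filter(hostname__in=hostnames[start : start + batch_size]).update(
                last_automation=current_time,
                automated_counter=models.F('automated_counter') + 1,
                deleted=False,
            )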

@ -9,6 +9,8 @@ import re
import copy
import os.path
from urllib.parse import urljoin

import dateutil.relativedelta
import yaml

# Django
@ -17,6 +19,7 @@ from django.db import models, connection
from django.utils.translation import gettext_lazy as _
from django.db import transaction
from django.core.exceptions import ValidationError
from django.urls import resolve
from django.utils.timezone import now
from django.db.models import Q

@ -32,7 +35,7 @@ from awx.main.fields import (
    SmartFilterField,
    OrderedManyToManyField,
)
from awx.main.managers import HostManager
from awx.main.managers import HostManager, HostMetricActiveManager
from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
@ -49,15 +52,25 @@ from awx.main.models.notifications import (
from awx.main.models.credential.injectors import _openstack_data
from awx.main.utils import _inventory_updates
from awx.main.utils.safe_yaml import sanitize_jinja
from awx.main.utils.execution_environments import to_container_path
from awx.main.utils.execution_environments import to_container_path, get_control_plane_execution_environment
from awx.main.utils.licensing import server_product_name


__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership']
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership', 'HostMetric', 'HostMetricSummaryMonthly']

logger = logging.getLogger('awx.main.models.inventory')


class InventoryConstructedInventoryMembership(models.Model):
    constructed_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE, related_name='constructed_inventory_memberships')
    input_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE)
    position = models.PositiveIntegerField(
        null=True,
        default=None,
        db_index=True,
    )


class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
    """
    an inventory source contains lists and hosts.
@ -67,6 +80,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
    KIND_CHOICES = [
        ('', _('Hosts have a direct link to this inventory.')),
        ('smart', _('Hosts for inventory generated using the host_filter property.')),
        ('constructed', _('Parse list of source inventories with the constructed inventory plugin.')),
    ]

    class Meta:
@ -139,6 +153,14 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        default=None,
        help_text=_('Filter that will be applied to the hosts of this inventory.'),
    )
    input_inventories = OrderedManyToManyField(
        'Inventory',
        blank=True,
        through_fields=('constructed_inventory', 'input_inventory'),
        related_name='destination_inventories',
        help_text=_('Only valid for constructed inventories, this links to the inventories that will be used.'),
        through='InventoryConstructedInventoryMembership',
    )
    instance_groups = OrderedManyToManyField(
        'InstanceGroup',
        blank=True,
@ -187,6 +209,14 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
    )

    def get_absolute_url(self, request=None):
        if request is not None:
            # circular import
            from awx.api.urls.inventory import constructed_inventory_urls

            route = resolve(request.path_info)
            if any(route.url_name == url.name for url in constructed_inventory_urls):
                return reverse('api:constructed_inventory_detail', kwargs={'pk': self.pk}, request=request)

        return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request)

    variables_dict = VarsDictProperty('variables')
@ -338,13 +368,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        for host in hosts:
            data['_meta']['hostvars'][host.name] = host.variables_dict
            if towervars:
                tower_dict = dict(
                    remote_tower_enabled=str(host.enabled).lower(),
                    remote_tower_id=host.id,
                    remote_host_enabled=str(host.enabled).lower(),
                    remote_host_id=host.id,
                )
                data['_meta']['hostvars'][host.name].update(tower_dict)
                for prefix in ('host', 'tower'):
                    tower_dict = {
                        f'remote_{prefix}_enabled': str(host.enabled).lower(),
                        f'remote_{prefix}_id': host.id,
                    }
                    data['_meta']['hostvars'][host.name].update(tower_dict)

        return data

@ -431,12 +460,24 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):

        connection.on_commit(on_commit)

    def _enforce_constructed_source(self):
        """
        Constructed inventory should always have exactly 1 inventory source, constructed type
        this enforces that requirement
        """
        if self.kind == 'constructed':
            if not self.inventory_sources.exists():
                self.inventory_sources.create(
                    source='constructed', name=f'Auto-created source for: {self.name}'[:512], overwrite=True, overwrite_vars=True, update_on_launch=True
                )

    def save(self, *args, **kwargs):
        self._update_host_smart_inventory_memeberships()
        super(Inventory, self).save(*args, **kwargs)
        if self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and connection.vendor != 'sqlite':
            # Minimal update of host_count for smart inventory host filter changes
            self.update_computed_fields()
        self._enforce_constructed_source()

    def delete(self, *args, **kwargs):
        self._update_host_smart_inventory_memeberships()
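
With _enforce_constructed_source() hooked into save(), merely creating a constructed inventory
materializes its single backing source. A minimal usage sketch (illustrative only; `org` is an
assumed existing Organization fixture, not code from this PR):

    from awx.main.models import Inventory

    inv = Inventory.objects.create(name='combined', kind='constructed', organization=org)
    src = inv.inventory_sources.first()        # auto-created by save()
    assert src.source == 'constructed'
    assert src.name.startswith('Auto-created source for:')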
@ -820,9 +861,64 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):


class HostMetric(models.Model):
    hostname = models.CharField(primary_key=True, max_length=512)
    hostname = models.CharField(unique=True, max_length=512)
    first_automation = models.DateTimeField(auto_now_add=True, null=False, db_index=True, help_text=_('When the host was first automated against'))
    last_automation = models.DateTimeField(db_index=True, help_text=_('When the host was last automated against'))
    last_deleted = models.DateTimeField(null=True, db_index=True, help_text=_('When the host was last deleted'))
    automated_counter = models.BigIntegerField(default=0, help_text=_('How many times was the host automated'))
    deleted_counter = models.IntegerField(default=0, help_text=_('How many times was the host deleted'))
    deleted = models.BooleanField(
        default=False, help_text=_('Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption')
    )
    used_in_inventories = models.IntegerField(null=True, help_text=_('How many inventories contain this host'))

    objects = models.Manager()
    active_objects = HostMetricActiveManager()

    def get_absolute_url(self, request=None):
        return reverse('api:host_metric_detail', kwargs={'pk': self.pk}, request=request)

    def soft_delete(self):
        if not self.deleted:
            self.deleted_counter = (self.deleted_counter or 0) + 1
            self.last_deleted = now()
            self.deleted = True
            self.save(update_fields=['deleted', 'deleted_counter', 'last_deleted'])

    def soft_restore(self):
        if self.deleted:
            self.deleted = False
            self.save(update_fields=['deleted'])

    @classmethod
    def cleanup_task(cls, months_ago):
        try:
            months_ago = int(months_ago)
            if months_ago <= 0:
                raise ValueError()

            last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)

            logger.info(f'Cleanup [HostMetric]: soft-deleting records last automated before {last_automation_before}')
            HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
                deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
            )
            settings.CLEANUP_HOST_METRICS_LAST_TS = now()
        except (TypeError, ValueError):
            logger.error(f"Cleanup [HostMetric]: months_ago({months_ago}) has to be a positive integer value")


class HostMetricSummaryMonthly(models.Model):
    """
    HostMetric summaries computed by scheduled task <TODO> monthly
    """

    date = models.DateField(unique=True)
    license_consumed = models.BigIntegerField(default=0, help_text=_("How many unique hosts are consumed from the license"))
    license_capacity = models.BigIntegerField(default=0, help_text=_("License capacity as max. number of unique hosts"))
    hosts_added = models.IntegerField(default=0, help_text=_("How many hosts were added in the associated month, consuming more license capacity"))
    hosts_deleted = models.IntegerField(default=0, help_text=_("How many hosts were deleted in the associated month, freeing the license capacity"))
    indirectly_managed_hosts = models.IntegerField(default=0, help_text=_("Manually entered number of indirectly managed hosts for a certain month"))


class InventorySourceOptions(BaseModel):
@ -834,6 +930,7 @@ class InventorySourceOptions(BaseModel):

    SOURCE_CHOICES = [
        ('file', _('File, Directory or Script')),
        ('constructed', _('Template additional groups and hostvars at runtime')),
        ('scm', _('Sourced from a Project')),
        ('ec2', _('Amazon EC2')),
        ('gce', _('Google Compute Engine')),
@ -913,7 +1010,7 @@ class InventorySourceOptions(BaseModel):
    host_filter = models.TextField(
        blank=True,
        default='',
        help_text=_('Regex where only matching hosts will be imported.'),
        help_text=_('This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.'),
    )
    overwrite = models.BooleanField(
        default=False,
@ -933,6 +1030,21 @@ class InventorySourceOptions(BaseModel):
        blank=True,
        default=1,
    )
    limit = models.TextField(
        blank=True,
        default='',
        help_text=_("Enter host, group or pattern match"),
    )

    def resolve_execution_environment(self):
        """
        Project updates, themselves, will use the control plane execution environment.
        Jobs using the project can use the default_environment, but the project updates
        are not flexible enough to allow customizing the image they use.
        """
        if self.inventory.kind == 'constructed':
            return get_control_plane_execution_environment()
        return super().resolve_execution_environment()

    @staticmethod
    def cloud_credential_validation(source, cred):
@ -1369,6 +1481,8 @@ class PluginFileInjector(object):
        env.update(injector_env)
        # Preserves current behavior for Ansible change in default planned for 2.10
        env['ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS'] = 'never'
        # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
        env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
        return env

    def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
@ -1552,5 +1666,18 @@ class insights(PluginFileInjector):
    use_fqcn = True


class constructed(PluginFileInjector):
    plugin_name = 'constructed'
    namespace = 'ansible'
    collection = 'builtin'

    def build_env(self, *args, **kwargs):
        env = super().build_env(*args, **kwargs)
        # Enable script inventory plugin so we pick up the script files from source inventories
        env['ANSIBLE_INVENTORY_ENABLED'] += ',script'
        env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'
        return env


for cls in PluginFileInjector.__subclasses__():
    InventorySourceOptions.injectors[cls.__name__] = cls
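
Because constructed.build_env appends to ANSIBLE_INVENTORY_ENABLED rather than assigning, the base
value set by PluginFileInjector.build_env ('auto') survives and the script plugin is added after
it. A sketch of the resulting environment for a constructed source (the values shown are what the
code above produces; the bare dict is illustrative):

    env = {}
    env['ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS'] = 'never'
    env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'          # set for all plugin injectors
    env['ANSIBLE_INVENTORY_ENABLED'] += ',script'      # constructed adds the script plugin
    env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'
    assert env['ANSIBLE_INVENTORY_ENABLED'] == 'auto,script'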

@ -2,12 +2,8 @@
# All Rights Reserved.

# Python
import codecs
import datetime
import logging
import os
import time
import json
from urllib.parse import urljoin


@ -15,11 +11,9 @@ from urllib.parse import urljoin
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.query import QuerySet
from django.db.models.functions import Cast

# from django.core.cache import cache
from django.utils.encoding import smart_str
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import FieldDoesNotExist

@ -28,6 +22,7 @@ from rest_framework.exceptions import ParseError

# AWX
from awx.api.versioning import reverse
from awx.main.constants import HOST_FACTS_FIELDS
from awx.main.models.base import (
    BaseModel,
    CreatedModifiedModel,
@ -44,7 +39,7 @@ from awx.main.models.notifications import (
    NotificationTemplate,
    JobNotificationMixin,
)
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
from awx.main.models.mixins import (
    ResourceMixin,
@ -60,8 +55,6 @@ from awx.main.constants import JOB_VARIABLE_PREFIXES


logger = logging.getLogger('awx.main.models.jobs')
analytics_logger = logging.getLogger('awx.analytics.job_events')
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')

__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'SystemJobTemplate', 'SystemJob']

@ -578,12 +571,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
        default=None,
        on_delete=models.SET_NULL,
    )
    hosts = models.ManyToManyField(
        'Host',
        related_name='jobs',
        editable=False,
        through='JobHostSummary',
    )
    hosts = models.ManyToManyField('Host', related_name='jobs', editable=False, through='JobHostSummary', through_fields=('job', 'host'))
    artifacts = JSONBlob(
        default=dict,
        blank=True,
@ -848,109 +836,26 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
    def get_notification_friendly_name(self):
        return "Job"

    def _get_inventory_hosts(self, only=('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id'), **filters):
        """Return value is an iterable for the relevant hosts for this job"""
        if not self.inventory:
            return []
        host_queryset = self.inventory.hosts.only(*only)
        if filters:
            host_queryset = host_queryset.filter(**filters)
        host_queryset = self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
        if isinstance(host_queryset, QuerySet):
            return host_queryset.iterator()
        return host_queryset
    def get_hosts_for_fact_cache(self):
        """
        Builds the queryset to use for writing or finalizing the fact cache
        these need to be the 'real' hosts associated with the job.
        For constructed inventories, that means the original (input inventory) hosts
        when slicing, that means only returning hosts in that slice
        """
        Host = JobHostSummary._meta.get_field('host').related_model
        if not self.inventory_id:
            return Host.objects.none()

    @log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
    def start_job_fact_cache(self, destination, log_data, timeout=None):
        self.log_lifecycle("start_job_fact_cache")
        log_data['job_id'] = self.id
        log_data['written_ct'] = 0
        os.makedirs(destination, mode=0o700)

        if timeout is None:
            timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
        if timeout > 0:
            # exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT seconds`
            timeout = now() - datetime.timedelta(seconds=timeout)
            hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
        if self.inventory.kind == 'constructed':
            id_field = Host._meta.get_field('id')
            host_qs = Host.objects.filter(id__in=self.inventory.hosts.exclude(instance_id='').values_list(Cast('instance_id', output_field=id_field)))
        else:
            hosts = self._get_inventory_hosts()
            host_qs = self.inventory.hosts

        last_filepath_written = None
        for host in hosts:
            filepath = os.sep.join(map(str, [destination, host.name]))
            if not os.path.realpath(filepath).startswith(destination):
                system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                continue
            try:
                with codecs.open(filepath, 'w', encoding='utf-8') as f:
                    os.chmod(f.name, 0o600)
                    json.dump(host.ansible_facts, f)
                log_data['written_ct'] += 1
                last_filepath_written = filepath
            except IOError:
                system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                continue
        # make note of the time we wrote the last file so we can check if any file changed later
        if last_filepath_written:
            return os.path.getmtime(last_filepath_written)
        return None

    @log_excess_runtime(
        logger,
        debug_cutoff=0.01,
        msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
        add_log_data=True,
    )
    def finish_job_fact_cache(self, destination, facts_write_time, log_data):
        self.log_lifecycle("finish_job_fact_cache")
        log_data['job_id'] = self.id
        log_data['updated_ct'] = 0
        log_data['unmodified_ct'] = 0
        log_data['cleared_ct'] = 0
        hosts_to_update = []
        for host in self._get_inventory_hosts():
            filepath = os.sep.join(map(str, [destination, host.name]))
            if not os.path.realpath(filepath).startswith(destination):
                system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                continue
            if os.path.exists(filepath):
                # If the file changed since we wrote the last facts file, pre-playbook run...
                modified = os.path.getmtime(filepath)
                if (not facts_write_time) or modified > facts_write_time:
                    with codecs.open(filepath, 'r', encoding='utf-8') as f:
                        try:
                            ansible_facts = json.load(f)
                        except ValueError:
                            continue
                        host.ansible_facts = ansible_facts
                        host.ansible_facts_modified = now()
                        hosts_to_update.append(host)
                        system_tracking_logger.info(
                            'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
                            extra=dict(
                                inventory_id=host.inventory.id,
                                host_name=host.name,
                                ansible_facts=host.ansible_facts,
                                ansible_facts_modified=host.ansible_facts_modified.isoformat(),
                                job_id=self.id,
                            ),
                        )
                        log_data['updated_ct'] += 1
                else:
                    log_data['unmodified_ct'] += 1
            else:
                # if the file goes missing, ansible removed it (likely via clear_facts)
                host.ansible_facts = {}
                host.ansible_facts_modified = now()
                hosts_to_update.append(host)
                system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
                log_data['cleared_ct'] += 1
            if len(hosts_to_update) > 100:
                self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
                hosts_to_update = []
        if hosts_to_update:
            self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
        host_qs = host_qs.only(*HOST_FACTS_FIELDS)
        host_qs = self.inventory.get_sliced_hosts(host_qs, self.job_slice_number, self.job_slice_count)
        return host_qs
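
get_hosts_for_fact_cache() resolves constructed hosts back to their originals: each host copied
into a constructed inventory stores the source host's primary key in instance_id (a CharField), so
Cast turns it back into an integer for the id__in lookup. A distilled sketch of that mapping
(illustrative only; `inv` is an assumed constructed Inventory instance):

    from django.db.models.functions import Cast
    from awx.main.models import Host

    id_field = Host._meta.get_field('id')
    # instance_id holds the original host's pk as text; Cast converts it for the join.
    original_ids = inv.hosts.exclude(instance_id='').values_list(Cast('instance_id', output_field=id_field))
    real_hosts = Host.objects.filter(id__in=original_ids)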


class LaunchTimeConfigBase(BaseModel):
@ -1172,6 +1077,15 @@ class JobHostSummary(CreatedModifiedModel):
        editable=False,
    )
    host = models.ForeignKey('Host', related_name='job_host_summaries', null=True, default=None, on_delete=models.SET_NULL, editable=False)
    constructed_host = models.ForeignKey(
        'Host',
        related_name='constructed_host_summaries',
        null=True,
        default=None,
        on_delete=models.SET_NULL,
        editable=False,
        help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
    )

    host_name = models.CharField(
        max_length=1024,

@ -32,7 +32,7 @@ from polymorphic.models import PolymorphicModel

# AWX
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.control import Control as ControlDispatcher
from awx.main.registrar import activity_stream_registrar
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin
@ -1567,7 +1567,7 @@ class UnifiedJob(
        return r

    def get_queue_name(self):
        return self.controller_node or self.execution_node or get_local_queuename()
        return self.controller_node or self.execution_node or get_task_queuename()

    @property
    def is_container_group_task(self):

@ -28,7 +28,7 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):

websocket_urlpatterns = [
    re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
    re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
    re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
]

application = AWXProtocolTypeRouter(

@ -8,7 +8,7 @@ from django.conf import settings
from awx import MODE
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename

logger = logging.getLogger('awx.main.scheduler')

@ -20,16 +20,16 @@ def run_manager(manager, prefix):
    manager().schedule()


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def task_manager():
    run_manager(TaskManager, "task")


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def dependency_manager():
    run_manager(DependencyManager, "dependency")


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def workflow_manager():
    run_manager(WorkflowManager, "workflow")
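
Every dispatcher entry point in this PR moves from get_local_queuename to get_task_queuename, so
work is published to the shared task queue instead of the submitting node's own queue. The pattern,
reduced to its shape (names come from awx.main.dispatch; the task body is a placeholder, not code
from this PR):

    from awx.main.dispatch.publish import task
    from awx.main.dispatch import get_task_queuename

    @task(queue=get_task_queuename)
    def example_periodic_task():
        # queue is a callable resolved at publish time, so any capable node may pick this up
        ...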

117
awx/main/tasks/facts.py
Normal file
@ -0,0 +1,117 @@
import codecs
import datetime
import os
import json
import logging

# Django
from django.conf import settings
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
from django.utils.timezone import now

# AWX
from awx.main.utils.common import log_excess_runtime
from awx.main.models.inventory import Host


logger = logging.getLogger('awx.main.tasks.facts')
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')


@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
    log_data['inventory_id'] = inventory_id
    log_data['written_ct'] = 0
    try:
        os.makedirs(destination, mode=0o700)
    except FileExistsError:
        pass

    if timeout is None:
        timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT

    if isinstance(hosts, QuerySet):
        hosts = hosts.iterator()

    last_filepath_written = None
    for host in hosts:
        if (not host.ansible_facts_modified) or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
            continue  # facts are expired - do not write them
        filepath = os.sep.join(map(str, [destination, host.name]))
        if not os.path.realpath(filepath).startswith(destination):
            system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
            continue
        try:
            with codecs.open(filepath, 'w', encoding='utf-8') as f:
                os.chmod(f.name, 0o600)
                json.dump(host.ansible_facts, f)
            log_data['written_ct'] += 1
            last_filepath_written = filepath
        except IOError:
            system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
            continue
    # make note of the time we wrote the last file so we can check if any file changed later
    if last_filepath_written:
        return os.path.getmtime(last_filepath_written)
    return None


@log_excess_runtime(
    logger,
    debug_cutoff=0.01,
    msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
    add_log_data=True,
)
def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
    log_data['inventory_id'] = inventory_id
    log_data['updated_ct'] = 0
    log_data['unmodified_ct'] = 0
    log_data['cleared_ct'] = 0

    if isinstance(hosts, QuerySet):
        hosts = hosts.iterator()

    hosts_to_update = []
    for host in hosts:
        filepath = os.sep.join(map(str, [destination, host.name]))
        if not os.path.realpath(filepath).startswith(destination):
            system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
            continue
        if os.path.exists(filepath):
            # If the file changed since we wrote the last facts file, pre-playbook run...
            modified = os.path.getmtime(filepath)
            if (not facts_write_time) or modified > facts_write_time:
                with codecs.open(filepath, 'r', encoding='utf-8') as f:
                    try:
                        ansible_facts = json.load(f)
                    except ValueError:
                        continue
                    host.ansible_facts = ansible_facts
                    host.ansible_facts_modified = now()
                    hosts_to_update.append(host)
                    system_tracking_logger.info(
                        'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
                        extra=dict(
                            inventory_id=host.inventory.id,
                            host_name=host.name,
                            ansible_facts=host.ansible_facts,
                            ansible_facts_modified=host.ansible_facts_modified.isoformat(),
                            job_id=job_id,
                        ),
                    )
                    log_data['updated_ct'] += 1
            else:
                log_data['unmodified_ct'] += 1
        else:
            # if the file goes missing, ansible removed it (likely via clear_facts)
            host.ansible_facts = {}
            host.ansible_facts_modified = now()
            hosts_to_update.append(host)
            system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
            log_data['cleared_ct'] += 1
        if len(hosts_to_update) > 100:
            Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
            hosts_to_update = []
    if hosts_to_update:
        Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
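
start_fact_cache() and finish_fact_cache() are now free functions working over any host iterable,
which is what lets RunJob and RunInventoryUpdate share them. A condensed usage sketch matching the
call sites later in this PR (illustrative only; `hosts` and `cache_dir` are placeholders, and
log_data is injected by the log_excess_runtime decorator rather than passed by the caller):

    from awx.main.tasks.facts import start_fact_cache, finish_fact_cache

    write_time = start_fact_cache(hosts, cache_dir, inventory_id=42)
    # ... ansible-playbook / ansible-inventory runs here, reading and rewriting the JSON files ...
    finish_fact_cache(hosts, cache_dir, facts_write_time=write_time, job_id=7, inventory_id=42)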
@ -29,7 +29,7 @@ from gitdb.exc import BadName as BadGitName

# AWX
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename
from awx.main.constants import (
    PRIVILEGE_ESCALATION_METHODS,
    STANDARD_INVENTORY_UPDATE_ENV,
@ -37,6 +37,7 @@ from awx.main.constants import (
    MAX_ISOLATED_PATH_COLON_DELIMITER,
    CONTAINER_VOLUMES_MOUNT_TYPES,
    ACTIVE_STATES,
    HOST_FACTS_FIELDS,
)
from awx.main.models import (
    Instance,
@ -63,6 +64,7 @@ from awx.main.tasks.callback import (
)
from awx.main.tasks.signals import with_signal_handling, signal_callback
from awx.main.tasks.receptor import AWXReceptorJob
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.execution_environments import CONTAINER_ROOT, to_container_path
@ -315,17 +317,22 @@ class BaseTask(object):

        return env

    def write_inventory_file(self, inventory, private_data_dir, file_name, script_params):
        script_data = inventory.get_script_data(**script_params)
        for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items():
            # maintain a list of host_name --> host_id
            # so we can associate emitted events to Host objects
            self.runner_callback.host_map[hostname] = hv.get('remote_tower_id', '')
        file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
        return self.write_private_data_file(private_data_dir, file_name, file_content, sub_dir='inventory', file_permissions=0o700)

    def build_inventory(self, instance, private_data_dir):
        script_params = dict(hostvars=True, towervars=True)
        if hasattr(instance, 'job_slice_number'):
            script_params['slice_number'] = instance.job_slice_number
            script_params['slice_count'] = instance.job_slice_count
        script_data = instance.inventory.get_script_data(**script_params)
        # maintain a list of host_name --> host_id
        # so we can associate emitted events to Host objects
        self.runner_callback.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
        file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
        return self.write_private_data_file(private_data_dir, 'hosts', file_content, sub_dir='inventory', file_permissions=0o700)

        return self.write_inventory_file(instance.inventory, private_data_dir, 'hosts', script_params)

    def build_args(self, instance, private_data_dir, passwords):
        raise NotImplementedError
@ -450,6 +457,9 @@ class BaseTask(object):
        instance.ansible_version = ansible_version_info
        instance.save(update_fields=['ansible_version'])

    def should_use_fact_cache(self):
        return False

    @with_path_cleanup
    @with_signal_handling
    def run(self, pk, **kwargs):
@ -548,7 +558,8 @@ class BaseTask(object):
            params['module'] = self.build_module_name(self.instance)
            params['module_args'] = self.build_module_args(self.instance)

        if getattr(self.instance, 'use_fact_cache', False):
        # TODO: refactor into a better BaseTask method
        if self.should_use_fact_cache():
            # Enable Ansible fact cache.
            params['fact_cache_type'] = 'jsonfile'
        else:
@ -795,7 +806,7 @@ class SourceControlMixin(BaseTask):
            self.release_lock(project)


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunJob(SourceControlMixin, BaseTask):
    """
    Run a job using ansible-playbook.
@ -1003,6 +1014,9 @@ class RunJob(SourceControlMixin, BaseTask):

        return args

    def should_use_fact_cache(self):
        return self.instance.use_fact_cache

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        return job.playbook

@ -1068,8 +1082,11 @@ class RunJob(SourceControlMixin, BaseTask):

        # Fetch "cached" fact data from prior runs and put on the disk
        # where ansible expects to find it
        if job.use_fact_cache:
            self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))
        if self.should_use_fact_cache():
            job.log_lifecycle("start_job_fact_cache")
            self.facts_write_time = start_fact_cache(
                job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
            )

    def build_project_dir(self, job, private_data_dir):
        self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
@ -1083,10 +1100,14 @@ class RunJob(SourceControlMixin, BaseTask):
            # actual `run()` call; this _usually_ means something failed in
            # the pre_run_hook method
            return
        if job.use_fact_cache:
            job.finish_job_fact_cache(
        if self.should_use_fact_cache():
            job.log_lifecycle("finish_job_fact_cache")
            finish_fact_cache(
                job.get_hosts_for_fact_cache(),
                os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
                self.facts_write_time,
                facts_write_time=self.facts_write_time,
                job_id=job.id,
                inventory_id=job.inventory_id,
            )

    def final_run_hook(self, job, status, private_data_dir):
@ -1100,7 +1121,7 @@ class RunJob(SourceControlMixin, BaseTask):
            update_inventory_computed_fields.delay(inventory.id)


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunProjectUpdate(BaseTask):
    model = ProjectUpdate
    event_model = ProjectUpdateEvent
@ -1422,7 +1443,7 @@ class RunProjectUpdate(BaseTask):
        return params


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunInventoryUpdate(SourceControlMixin, BaseTask):
    model = InventoryUpdate
    event_model = InventoryUpdateEvent
@ -1469,8 +1490,6 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

        if injector is not None:
            env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
            # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
            env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'

        if inventory_update.source == 'scm':
            for env_k in inventory_update.source_vars_dict:
@ -1523,6 +1542,22 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

        args = ['ansible-inventory', '--list', '--export']

        # special case for constructed inventories, we pass source inventories from database
        # these must come in order, and in order _before_ the constructed inventory itself
        if inventory_update.inventory.kind == 'constructed':
            inventory_update.log_lifecycle("start_job_fact_cache")
            for input_inventory in inventory_update.inventory.input_inventories.all():
                args.append('-i')
                script_params = dict(hostvars=True, towervars=True)
                source_inv_path = self.write_inventory_file(input_inventory, private_data_dir, f'hosts_{input_inventory.id}', script_params)
                args.append(to_container_path(source_inv_path, private_data_dir))
                # Include any facts from input inventories so they can be used in filters
                start_fact_cache(
                    input_inventory.hosts.only(*HOST_FACTS_FIELDS),
                    os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
                    inventory_id=input_inventory.id,
                )

        # Add arguments for the source inventory file/script/thing
        rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
        container_location = os.path.join(CONTAINER_ROOT, rel_path)
@ -1530,6 +1565,11 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

        args.append('-i')
        args.append(container_location)
        # Added this in order to allow older versions of ansible-inventory https://github.com/ansible/ansible/pull/79596
        # limit should be usable in ansible-inventory 2.15+
        if inventory_update.limit:
            args.append('--limit')
            args.append(inventory_update.limit)

        args.append('--output')
        args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
@ -1545,6 +1585,9 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

        return args

    def should_use_fact_cache(self):
        return bool(self.instance.source == 'constructed')

    def build_inventory(self, inventory_update, private_data_dir):
        return None  # what runner expects in order to not deal with inventory
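
For a constructed inventory, build_args() therefore emits one '-i' per input inventory, in
membership order, ahead of the constructed source itself. A sketch of the assembled argument list
(illustrative only; the file names, paths, limit, and update id are hypothetical examples, not
values produced by this PR):

    # Constructed inventory update 42 with input inventories 7 and 9:
    args = [
        'ansible-inventory', '--list', '--export',
        '-i', '/runner/inventory/hosts_7',      # first input inventory
        '-i', '/runner/inventory/hosts_9',      # second input inventory, in position order
        '-i', '/runner/inventory/hosts',        # the constructed source itself, last
        '--limit', 'mygroup',                   # only when inventory_update.limit is set
        '--output', '/runner/artifacts/42/output.json',
    ]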

@ -1663,7 +1706,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
            raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunAdHocCommand(BaseTask):
    """
    Run an ad hoc command using ansible.
@ -1816,7 +1859,7 @@ class RunAdHocCommand(BaseTask):
        return d


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunSystemJob(BaseTask):
    model = SystemJob
    event_model = SystemJobEvent

@ -28,7 +28,7 @@ from awx.main.utils.common import (
from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
from awx.main.models import Instance, InstanceLink, UnifiedJob
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task

# Receptorctl
@ -668,6 +668,7 @@ RECEPTOR_CONFIG_STARTER = (
            'rootcas': '/etc/receptor/tls/ca/receptor-ca.crt',
            'cert': '/etc/receptor/tls/receptor.crt',
            'key': '/etc/receptor/tls/receptor.key',
            'mintls13': False,
        }
    },
)
@ -712,7 +713,7 @@ def write_receptor_config():
        links.update(link_state=InstanceLink.States.ESTABLISHED)


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def remove_deprovisioned_node(hostname):
    InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
    InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)

@ -47,10 +47,11 @@ from awx.main.models import (
    Inventory,
    SmartInventoryMembership,
    Job,
    HostMetric,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch import get_task_queuename, reaper
from awx.main.utils.common import (
    get_type_for_model,
    ignore_inventory_computed_fields,
@ -59,7 +60,6 @@ from awx.main.utils.common import (
    ScheduleTaskManager,
)

from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
@ -115,9 +115,6 @@ def dispatch_startup():
    m = Metrics()
    m.reset_values()

    # Update Tower's rsyslog.conf file based on logging settings in the db
    reconfigure_rsyslog()


def inform_cluster_of_shutdown():
    try:
@ -132,7 +129,7 @@ def inform_cluster_of_shutdown():
        logger.exception('Encountered problem with normal shutdown signal.')


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def apply_cluster_membership_policies():
    from awx.main.signals import disable_activity_stream

@ -244,8 +241,10 @@ def apply_cluster_membership_policies():
    logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))


@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
@task(queue='tower_settings_change')
def clear_setting_cache(setting_keys):
    # log that cache is being cleared
    logger.info(f"clear_setting_cache of keys {setting_keys}")
    orig_len = len(setting_keys)
    for i in range(orig_len):
        for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
@ -254,9 +253,6 @@ def handle_setting_changes(setting_keys):
    logger.debug('cache delete_many(%r)', cache_keys)
    cache.delete_many(cache_keys)

    if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
        reconfigure_rsyslog()


@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
@ -286,7 +282,7 @@ def profile_sql(threshold=1, minutes=1):
        logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def send_notifications(notification_list, job_id=None):
    if not isinstance(notification_list, list):
        raise TypeError("notification_list should be of type list")
@ -317,7 +313,7 @@ def send_notifications(notification_list, job_id=None):
            logger.exception('Error saving notification {} result.'.format(notification.id))


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def gather_analytics():
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField
@ -330,7 +326,7 @@ def gather_analytics():
        analytics.gather()


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def purge_old_stdout_files():
    nowtime = time.time()
    for f in os.listdir(settings.JOBOUTPUT_ROOT):
@ -378,12 +374,26 @@ def handle_removed_image(remove_images=None):
    _cleanup_images_and_files(remove_images=remove_images, file_pattern='')


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def cleanup_images_and_files():
    _cleanup_images_and_files()


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def cleanup_host_metrics():
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField

    last_cleanup = Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first()
    last_time = DateTimeField().to_internal_value(last_cleanup.value) if last_cleanup and last_cleanup.value else None

    cleanup_interval_secs = getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
    if not last_time or ((now() - last_time).total_seconds() > cleanup_interval_secs):
        months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
        HostMetric.cleanup_task(months_ago)
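
The gate above is time-based: CLEANUP_HOST_METRICS_INTERVAL is expressed in days (hence the
multiplication by 86400) and compared against the CLEANUP_HOST_METRICS_LAST_TS bookmark that
cleanup_task() writes on success. A worked example of the arithmetic (values are illustrative):

    from datetime import datetime, timedelta, timezone

    interval_days = 30                               # CLEANUP_HOST_METRICS_INTERVAL default
    cleanup_interval_secs = interval_days * 86400    # 2,592,000 seconds
    last_time = datetime.now(timezone.utc) - timedelta(days=31)
    elapsed = (datetime.now(timezone.utc) - last_time).total_seconds()
    assert elapsed > cleanup_interval_secs           # 31 days elapsed, so cleanup runs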
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cluster_node_health_check(node):
|
||||
"""
|
||||
Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
|
||||
@ -402,7 +412,7 @@ def cluster_node_health_check(node):
|
||||
this_inst.local_health_check()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def execution_node_health_check(node):
|
||||
if node == '':
|
||||
logger.warning('Remote health check incorrectly called with blank string')
|
||||
@ -496,7 +506,7 @@ def inspect_execution_nodes(instance_list):
|
||||
execution_node_health_check.apply_async([hostname])
|
||||
|
||||
|
||||
@task(queue=get_local_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
|
||||
@task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
|
||||
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
logger.debug("Cluster node heartbeat task.")
|
||||
nowtime = now()
|
||||
@ -586,7 +596,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def awx_receptor_workunit_reaper():
|
||||
"""
|
||||
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
|
||||
@ -622,7 +632,7 @@ def awx_receptor_workunit_reaper():
|
||||
administrative_workunit_reaper(receptor_work_list)
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def awx_k8s_reaper():
|
||||
if not settings.RECEPTOR_RELEASE_WORK:
|
||||
return
|
||||
@ -642,7 +652,7 @@ def awx_k8s_reaper():
|
||||
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def awx_periodic_scheduler():
|
||||
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
|
||||
if acquired is False:
|
||||
@ -708,7 +718,7 @@ def schedule_manager_success_or_error(instance):
|
||||
ScheduleWorkflowManager().schedule()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def handle_work_success(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
@ -720,7 +730,7 @@ def handle_work_success(task_actual):
|
||||
schedule_manager_success_or_error(instance)
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def handle_work_error(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
@ -760,7 +770,7 @@ def handle_work_error(task_actual):
|
||||
schedule_manager_success_or_error(instance)
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def update_inventory_computed_fields(inventory_id):
|
||||
"""
|
||||
Signal handler and wrapper around inventory.update_computed_fields to
|
||||
@ -801,7 +811,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
|
||||
return False
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def update_host_smart_inventory_memberships():
|
||||
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
|
||||
changed_inventories = set([])
|
||||
@ -817,7 +827,7 @@ def update_host_smart_inventory_memberships():
|
||||
smart_inventory.update_computed_fields()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def delete_inventory(inventory_id, user_id, retries=5):
|
||||
# Delete inventory as user
|
||||
if user_id is None:
|
||||
@ -882,7 +892,7 @@ def _reconstruct_relationships(copy_mapping):
|
||||
new_obj.save()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
|
||||
sub_obj_list = cache.get(uuid)
|
||||
if sub_obj_list is None:
|
||||
|
||||
86
awx/main/tests/functional/api/test_analytics.py
Normal file
@@ -0,0 +1,86 @@
import pytest
import requests
from awx.api.views.analytics import AnalyticsGenericView, MissingSettings, AUTOMATION_ANALYTICS_API_URL_PATH
from django.test.utils import override_settings

from awx.main.utils import get_awx_version
from django.utils import translation


class TestAnalyticsGenericView:
    @pytest.mark.parametrize(
        "existing_headers,expected_headers",
        [
            ({}, {}),
            ({'Hey': 'There'}, {}),  # We don't forward just any headers
            ({'Content-Type': 'text/html', 'Content-Length': '12'}, {'Content-Type': 'text/html', 'Content-Length': '12'}),
            # Requests will auto-add the following headers (so we don't need to test them): 'Accept-Encoding', 'User-Agent', 'Accept'
        ],
    )
    def test__request_headers(self, existing_headers, expected_headers):
        expected_headers['X-Rh-Analytics-Source'] = 'controller'
        expected_headers['X-Rh-Analytics-Source-Version'] = get_awx_version()
        expected_headers['Accept-Language'] = translation.get_language()

        request = requests.session()
        request.headers.update(existing_headers)
        assert set(expected_headers.items()).issubset(set(AnalyticsGenericView._request_headers(request).items()))

    @pytest.mark.parametrize(
        "path,expected_path",
        [
            ('A/B', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/A/B'),
            ('B', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/B'),
            ('/a/b/c/analytics/reports/my_slug', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/reports/my_slug'),
            ('/a/b/c/analytics/', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/'),
            ('/a/b/c/analytics', f'{AUTOMATION_ANALYTICS_API_URL_PATH}//a/b/c/analytics'),  # Because there is no trailing / after 'analytics', we hit a weird condition
            ('/a/b/c/analytics/', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/'),
        ],
    )
    @pytest.mark.django_db
    def test__get_analytics_path(self, path, expected_path):
        assert AnalyticsGenericView._get_analytics_path(path) == expected_path

    @pytest.mark.django_db
    def test__get_analytics_url_no_url(self):
        with override_settings(AUTOMATION_ANALYTICS_URL=None):
            with pytest.raises(MissingSettings):
                agw = AnalyticsGenericView()
                agw._get_analytics_url('A')

    @pytest.mark.parametrize(
        "request_path,ending_url",
        [
            ('A', 'A'),
            ('A/B', 'A/B'),
            ('A/B/analytics/', ''),  # we split on 'analytics/' but there is nothing after it
            ('A/B/analytics/report', 'report'),
            ('A/B/analytics/report/slug', 'report/slug'),
        ],
    )
    @pytest.mark.django_db
    def test__get_analytics_url(self, request_path, ending_url):
        base_url = 'http://testing'
        with override_settings(AUTOMATION_ANALYTICS_URL=base_url):
            agw = AnalyticsGenericView()
            assert agw._get_analytics_url(request_path) == f'{base_url}{AUTOMATION_ANALYTICS_API_URL_PATH}/{ending_url}'

    @pytest.mark.parametrize(
        "setting_name,setting_value,raises",
        [
            ('INSIGHTS_TRACKING_STATE', None, True),
            ('INSIGHTS_TRACKING_STATE', False, True),
            ('INSIGHTS_TRACKING_STATE', True, False),
            ('INSIGHTS_TRACKING_STATE', 'Steve', False),
            ('INSIGHTS_TRACKING_STATE', 1, False),
            ('INSIGHTS_TRACKING_STATE', '', True),
        ],
    )
    @pytest.mark.django_db
    def test__get_setting(self, setting_name, setting_value, raises):
        with override_settings(**{setting_name: setting_value}):
            if raises:
                with pytest.raises(MissingSettings):
                    AnalyticsGenericView._get_setting(setting_name, False, None)
            else:
                assert AnalyticsGenericView._get_setting(setting_name, False, None) == setting_value
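[Editor's note] The test__get_analytics_path table above fully pins down the path rewriting, including the "weird condition" for a path ending in bare 'analytics'. A minimal sketch reproducing every expected value in the table, assuming the view splits on the literal 'analytics/' marker (the helper body here is an illustration, not the actual view code):

    AUTOMATION_ANALYTICS_API_URL_PATH = '/api/tower-analytics/v1'  # illustrative value only

    def _get_analytics_path(request_path: str) -> str:
        # Keep only what follows the last 'analytics/' segment; when the marker is
        # absent (no trailing slash after 'analytics'), the whole path is appended,
        # which yields the double-slash case called out in the parametrize table.
        parts = request_path.split('analytics/')
        return f"{AUTOMATION_ANALYTICS_API_URL_PATH}/{parts[-1]}"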
@@ -594,3 +594,108 @@ class TestControlledBySCM:
            rando,
            expect=403,
        )


@pytest.mark.django_db
class TestConstructedInventory:
    @pytest.fixture
    def constructed_inventory(self, organization):
        return Inventory.objects.create(name='constructed-test-inventory', kind='constructed', organization=organization)

    def test_get_constructed_inventory(self, constructed_inventory, admin_user, get):
        inv_src = constructed_inventory.inventory_sources.first()
        inv_src.update_cache_timeout = 53
        inv_src.save(update_fields=['update_cache_timeout'])
        r = get(url=reverse('api:constructed_inventory_detail', kwargs={'pk': constructed_inventory.pk}), user=admin_user, expect=200)
        assert r.data['update_cache_timeout'] == 53

    def test_patch_constructed_inventory(self, constructed_inventory, admin_user, patch):
        inv_src = constructed_inventory.inventory_sources.first()
        assert inv_src.update_cache_timeout == 0
        assert inv_src.limit == ''
        r = patch(
            url=reverse('api:constructed_inventory_detail', kwargs={'pk': constructed_inventory.pk}),
            data=dict(update_cache_timeout=54, limit='foobar'),
            user=admin_user,
            expect=200,
        )
        assert r.data['update_cache_timeout'] == 54
        inv_src = constructed_inventory.inventory_sources.first()
        assert inv_src.update_cache_timeout == 54
        assert inv_src.limit == 'foobar'

    def test_patch_constructed_inventory_generated_source_limits_editable_fields(self, constructed_inventory, admin_user, project, patch):
        inv_src = constructed_inventory.inventory_sources.first()
        r = patch(
            url=inv_src.get_absolute_url(),
            data={
                'source': 'scm',
                'source_project': project.pk,
                'source_path': '',
                'source_vars': 'plugin: a.b.c',
            },
            expect=400,
            user=admin_user,
        )
        assert str(r.data['error'][0]) == "Cannot change field 'source' on a constructed inventory source."

        # Make sure it didn't get updated before we got the error
        inv_src_after_err = constructed_inventory.inventory_sources.first()
        assert inv_src.id == inv_src_after_err.id
        assert inv_src.source == inv_src_after_err.source
        assert inv_src.source_project == inv_src_after_err.source_project
        assert inv_src.source_path == inv_src_after_err.source_path
        assert inv_src.source_vars == inv_src_after_err.source_vars

    def test_patch_constructed_inventory_generated_source_allows_source_vars_edit(self, constructed_inventory, admin_user, patch):
        inv_src = constructed_inventory.inventory_sources.first()
        patch(
            url=inv_src.get_absolute_url(),
            data={
                'source_vars': 'plugin: a.b.c',
            },
            expect=200,
            user=admin_user,
        )

        inv_src_after_patch = constructed_inventory.inventory_sources.first()

        # sanity checks
        assert inv_src.id == inv_src_after_patch.id
        assert inv_src.source == 'constructed'
        assert inv_src_after_patch.source == 'constructed'
        assert inv_src.source_vars == ''

        assert inv_src_after_patch.source_vars == 'plugin: a.b.c'

    def test_create_constructed_inventory(self, constructed_inventory, admin_user, post, organization):
        r = post(
            url=reverse('api:constructed_inventory_list'),
            data=dict(name='constructed-inventory-just-created', kind='constructed', organization=organization.id, update_cache_timeout=55, limit='foobar'),
            user=admin_user,
            expect=201,
        )
        pk = r.data['id']
        constructed_inventory = Inventory.objects.get(pk=pk)
        inv_src = constructed_inventory.inventory_sources.first()
        assert inv_src.update_cache_timeout == 55
        assert inv_src.limit == 'foobar'

    def test_get_absolute_url_for_constructed_inventory(self, constructed_inventory, admin_user, get):
        """
        If we are using the normal inventory API endpoint to look at a
        constructed inventory, then we should get a normal inventory API route
        back. If we are accessing it via the special constructed inventory
        endpoint, then we should get that back.
        """

        url_const = reverse('api:constructed_inventory_detail', kwargs={'pk': constructed_inventory.pk})
        url_inv = reverse('api:inventory_detail', kwargs={'pk': constructed_inventory.pk})

        const_r = get(url=url_const, user=admin_user, expect=200)
        inv_r = get(url=url_inv, user=admin_user, expect=200)
        assert const_r.data['url'] == url_const
        assert inv_r.data['url'] == url_inv
        assert inv_r.data['url'] != const_r.data['url']
        assert inv_r.data['related']['constructed_url'] == url_const
        assert const_r.data['related']['constructed_url'] == url_const
@@ -511,6 +511,14 @@ def group(inventory):
    return inventory.groups.create(name='single-group')


@pytest.fixture
def constructed_inventory(organization):
    """
    Creates a new constructed inventory (its inventory source is generated automatically).
    """
    return Inventory.objects.create(name='dummy1', kind='constructed', organization=organization)


@pytest.fixture
def inventory_source(inventory):
    # by making it ec2, the credential is not required
@@ -3,178 +3,209 @@ import pytest

from django.utils.timezone import now

from awx.main.models import Job, JobEvent, Inventory, Host, JobHostSummary
from django.db.models import Q

from awx.main.models import Job, JobEvent, Inventory, Host, JobHostSummary, HostMetric


@pytest.mark.django_db
@mock.patch('awx.main.models.events.emit_event_detail')
def test_parent_changed(emit):
    j = Job()
    j.save()
    JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
    assert JobEvent.objects.count() == 1
    for e in JobEvent.objects.all():
        assert e.changed is False
class TestEvents:
    def setup_method(self):
        self.hostnames = []
        self.host_map = dict()
        self.inventory = None
        self.job = None

    JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='runner_on_ok', event_data={'res': {'changed': ['localhost']}}).save()
    # the `playbook_on_stats` event is where we update the parent changed linkage
    JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
    events = JobEvent.objects.filter(event__in=['playbook_on_task_start', 'runner_on_ok'])
    assert events.count() == 2
    for e in events.all():
        assert e.changed is True
    @mock.patch('awx.main.models.events.emit_event_detail')
    def test_parent_changed(self, emit):
        j = Job()
        j.save()
        JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
        assert JobEvent.objects.count() == 1
        for e in JobEvent.objects.all():
            assert e.changed is False

        JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='runner_on_ok', event_data={'res': {'changed': ['localhost']}}).save()
        # the `playbook_on_stats` event is where we update the parent changed linkage
        JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
        events = JobEvent.objects.filter(event__in=['playbook_on_task_start', 'runner_on_ok'])
        assert events.count() == 2
        for e in events.all():
            assert e.changed is True

@pytest.mark.django_db
@pytest.mark.parametrize('event', JobEvent.FAILED_EVENTS)
@mock.patch('awx.main.models.events.emit_event_detail')
def test_parent_failed(emit, event):
    j = Job()
    j.save()
    JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
    assert JobEvent.objects.count() == 1
    for e in JobEvent.objects.all():
        assert e.failed is False
    @pytest.mark.parametrize('event', JobEvent.FAILED_EVENTS)
    @mock.patch('awx.main.models.events.emit_event_detail')
    def test_parent_failed(self, emit, event):
        j = Job()
        j.save()
        JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
        assert JobEvent.objects.count() == 1
        for e in JobEvent.objects.all():
            assert e.failed is False

    JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event=event).save()
        JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event=event).save()

    # the `playbook_on_stats` event is where we update the parent failed linkage
    JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
    events = JobEvent.objects.filter(event__in=['playbook_on_task_start', event])
    assert events.count() == 2
    for e in events.all():
        assert e.failed is True
        # the `playbook_on_stats` event is where we update the parent failed linkage
        JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
        events = JobEvent.objects.filter(event__in=['playbook_on_task_start', event])
        assert events.count() == 2
        for e in events.all():
            assert e.failed is True

    def test_host_summary_generation(self):
        self._generate_hosts(100)
        self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))

@pytest.mark.django_db
def test_host_summary_generation():
    hostnames = [f'Host {i}' for i in range(100)]
    inv = Inventory()
    inv.save()
    Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=inv.id) for h in hostnames])
    j = Job(inventory=inv)
    j.save()
    host_map = dict((host.name, host.id) for host in inv.hosts.all())
    JobEvent.create_from_data(
        job_id=j.pk,
        assert self.job.job_host_summaries.count() == len(self.hostnames)
        assert sorted([s.host_name for s in self.job.job_host_summaries.all()]) == sorted(self.hostnames)

        for s in self.job.job_host_summaries.all():
            assert self.host_map[s.host_name] == s.host_id
            assert s.ok == len(s.host_name)
            assert s.changed == 0
            assert s.dark == 0
            assert s.failures == 0
            assert s.ignored == 0
            assert s.processed == 0
            assert s.rescued == 0
            assert s.skipped == 0

        for host in Host.objects.all():
            assert host.last_job_id == self.job.id
            assert host.last_job_host_summary.host == host

    def test_host_summary_generation_with_deleted_hosts(self):
        self._generate_hosts(10)

        # delete half of the hosts during the playbook run
        for h in self.inventory.hosts.all()[:5]:
            h.delete()

        self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))

        ids = sorted([s.host_id or -1 for s in self.job.job_host_summaries.order_by('id').all()])
        names = sorted([s.host_name for s in self.job.job_host_summaries.all()])
        assert ids == [-1, -1, -1, -1, -1, 6, 7, 8, 9, 10]
        assert names == ['Host 0', 'Host 1', 'Host 2', 'Host 3', 'Host 4', 'Host 5', 'Host 6', 'Host 7', 'Host 8', 'Host 9']

    def test_host_summary_generation_with_limit(self):
        # Make an inventory with 10 hosts, run a playbook with a --limit
        # pointed at *one* host, and verify that *only* that host has an
        # associated JobHostSummary and an updated value for .last_job.
        self._generate_hosts(10)

        # by making the playbook_on_stats *only* include Host 1, we're emulating
        # the behavior of a `--limit=Host 1`
        matching_host = Host.objects.get(name='Host 1')
        self._create_job_event(ok={matching_host.name: len(matching_host.name)})  # effectively, limit=Host 1

        # since the playbook_on_stats only references one host,
        # there should *only* be one JobHostSummary record (and it should
        # be related to the appropriate Host)
        assert JobHostSummary.objects.count() == 1
        for h in Host.objects.all():
            if h.name == 'Host 1':
                assert h.last_job_id == self.job.id
                assert h.last_job_host_summary_id == JobHostSummary.objects.first().id
            else:
                # all other hosts in the inventory should remain untouched
                assert h.last_job_id is None
                assert h.last_job_host_summary_id is None

    def test_host_metrics_insert(self):
        self._generate_hosts(10)

        self._create_job_event(
            ok=dict((hostname, len(hostname)) for hostname in self.hostnames[0:3]),
            failures=dict((hostname, len(hostname)) for hostname in self.hostnames[3:6]),
            processed=dict((hostname, len(hostname)) for hostname in self.hostnames[6:9]),
            skipped=dict((hostname, len(hostname)) for hostname in [self.hostnames[9]]),
        )

        metrics = HostMetric.objects.all()
        assert len(metrics) == 10
        for hm in metrics:
            assert hm.automated_counter == 1
            assert hm.last_automation is not None
            assert hm.deleted is False

    def test_host_metrics_update(self):
        self._generate_hosts(12)

        self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))

        # Soft delete 6 host metrics
        for hm in HostMetric.objects.filter(id__in=[1, 3, 5, 7, 9, 11]):
            hm.soft_delete()

        assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
        assert len(HostMetric.objects.filter(Q(deleted=True) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6

        # hostnames in 'ignored' and 'rescued' stats are ignored
        self.job = Job(inventory=self.inventory)
        self.job.save()
        self._create_job_event(
            ignored=dict((hostname, len(hostname)) for hostname in self.hostnames[0:6]),
            rescued=dict((hostname, len(hostname)) for hostname in self.hostnames[6:11]),
        )

        assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
        assert len(HostMetric.objects.filter(Q(deleted=True) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6

        # hostnames in 'changed', 'dark', 'failures', 'ok', 'processed', 'skipped' are processed
        self.job = Job(inventory=self.inventory)
        self.job.save()
        self._create_job_event(
            changed=dict((hostname, len(hostname)) for hostname in self.hostnames[0:2]),
            dark=dict((hostname, len(hostname)) for hostname in self.hostnames[2:4]),
            failures=dict((hostname, len(hostname)) for hostname in self.hostnames[4:6]),
            ok=dict((hostname, len(hostname)) for hostname in self.hostnames[6:8]),
            processed=dict((hostname, len(hostname)) for hostname in self.hostnames[8:10]),
            skipped=dict((hostname, len(hostname)) for hostname in self.hostnames[10:12]),
        )
        assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
        assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6

    def _generate_hosts(self, cnt, id_from=0):
        self.hostnames = [f'Host {i}' for i in range(id_from, id_from + cnt)]
        self.inventory = Inventory()
        self.inventory.save()
        Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=self.inventory.id) for h in self.hostnames])
        self.job = Job(inventory=self.inventory)
        self.job.save()

        # host map is a data structure that tracks a mapping of host name --> ID
        # for the inventory, _regardless_ of whether or not there's a limit
        # applied to the actual playbook run
        self.host_map = dict((host.name, host.id) for host in self.inventory.hosts.all())

    def _create_job_event(
        self,
        parent_uuid='abc123',
        event='playbook_on_stats',
        event_data={
            'ok': dict((hostname, len(hostname)) for hostname in hostnames),
            'changed': {},
            'dark': {},
            'failures': {},
            'ignored': {},
            'processed': {},
            'rescued': {},
            'skipped': {},
        },
        host_map=host_map,
    ).save()

    assert j.job_host_summaries.count() == len(hostnames)
    assert sorted([s.host_name for s in j.job_host_summaries.all()]) == sorted(hostnames)

    for s in j.job_host_summaries.all():
        assert host_map[s.host_name] == s.host_id
        assert s.ok == len(s.host_name)
        assert s.changed == 0
        assert s.dark == 0
        assert s.failures == 0
        assert s.ignored == 0
        assert s.processed == 0
        assert s.rescued == 0
        assert s.skipped == 0

    for host in Host.objects.all():
        assert host.last_job_id == j.id
        assert host.last_job_host_summary.host == host


@pytest.mark.django_db
def test_host_summary_generation_with_deleted_hosts():
    hostnames = [f'Host {i}' for i in range(10)]
    inv = Inventory()
    inv.save()
    Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=inv.id) for h in hostnames])
    j = Job(inventory=inv)
    j.save()
    host_map = dict((host.name, host.id) for host in inv.hosts.all())

    # delete half of the hosts during the playbook run
    for h in inv.hosts.all()[:5]:
        h.delete()

    JobEvent.create_from_data(
        job_id=j.pk,
        parent_uuid='abc123',
        event='playbook_on_stats',
        event_data={
            'ok': dict((hostname, len(hostname)) for hostname in hostnames),
            'changed': {},
            'dark': {},
            'failures': {},
            'ignored': {},
            'processed': {},
            'rescued': {},
            'skipped': {},
        },
        host_map=host_map,
    ).save()

    ids = sorted([s.host_id or -1 for s in j.job_host_summaries.order_by('id').all()])
    names = sorted([s.host_name for s in j.job_host_summaries.all()])
    assert ids == [-1, -1, -1, -1, -1, 6, 7, 8, 9, 10]
    assert names == ['Host 0', 'Host 1', 'Host 2', 'Host 3', 'Host 4', 'Host 5', 'Host 6', 'Host 7', 'Host 8', 'Host 9']


@pytest.mark.django_db
def test_host_summary_generation_with_limit():
    # Make an inventory with 10 hosts, run a playbook with a --limit
    # pointed at *one* host, and verify that *only* that host has an
    # associated JobHostSummary and an updated value for .last_job.
    hostnames = [f'Host {i}' for i in range(10)]
    inv = Inventory()
    inv.save()
    Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=inv.id) for h in hostnames])
    j = Job(inventory=inv)
    j.save()

    # host map is a data structure that tracks a mapping of host name --> ID
    # for the inventory, _regardless_ of whether or not there's a limit
    # applied to the actual playbook run
    host_map = dict((host.name, host.id) for host in inv.hosts.all())

    # by making the playbook_on_stats *only* include Host 1, we're emulating
    # the behavior of a `--limit=Host 1`
    matching_host = Host.objects.get(name='Host 1')
    JobEvent.create_from_data(
        job_id=j.pk,
        parent_uuid='abc123',
        event='playbook_on_stats',
        event_data={
            'ok': {matching_host.name: len(matching_host.name)},  # effectively, limit=Host 1
            'changed': {},
            'dark': {},
            'failures': {},
            'ignored': {},
            'processed': {},
            'rescued': {},
            'skipped': {},
        },
        host_map=host_map,
    ).save()

    # since the playbook_on_stats only references one host,
    # there should *only* be one JobHostSummary record (and it should
    # be related to the appropriate Host)
    assert JobHostSummary.objects.count() == 1
    for h in Host.objects.all():
        if h.name == 'Host 1':
            assert h.last_job_id == j.id
            assert h.last_job_host_summary_id == JobHostSummary.objects.first().id
        else:
            # all other hosts in the inventory should remain untouched
            assert h.last_job_id is None
            assert h.last_job_host_summary_id is None
        ok=None,
        changed=None,
        dark=None,
        failures=None,
        ignored=None,
        processed=None,
        rescued=None,
        skipped=None,
    ):
        JobEvent.create_from_data(
            job_id=self.job.pk,
            parent_uuid=parent_uuid,
            event=event,
            event_data={
                'ok': ok or {},
                'changed': changed or {},
                'dark': dark or {},
                'failures': failures or {},
                'ignored': ignored or {},
                'processed': processed or {},
                'rescued': rescued or {},
                'skipped': skipped or {},
            },
            host_map=self.host_map,
        ).save()
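[Editor's note] The refactor above folds several copy-pasted module-level tests into a TestEvents class whose _generate_hosts/_create_job_event helpers build the inventory and emit a single playbook_on_stats event. A hypothetical extra test, shown only to illustrate the helper API (not part of the change):

    def test_host_summary_generation_with_failures(self):
        self._generate_hosts(3)
        # any of the per-host stats buckets can be passed as a keyword argument
        self._create_job_event(failures=dict((h, len(h)) for h in self.hostnames))
        assert self.job.job_host_summaries.count() == 3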
@@ -20,3 +20,53 @@ def test_host_metrics_generation():
    date_today = now().strftime('%Y-%m-%d')
    result = HostMetric.objects.filter(first_automation__startswith=date_today).count()
    assert result == len(hostnames)


@pytest.mark.django_db
def test_soft_delete():
    hostnames = [f'Host to delete {i}' for i in range(2)]
    current_time = now()
    HostMetric.objects.bulk_create([HostMetric(hostname=h, last_automation=current_time, automated_counter=42) for h in hostnames])

    hm = HostMetric.objects.get(hostname="Host to delete 0")
    assert hm.last_deleted is None

    last_deleted = None
    for _ in range(3):
        # the 1st call soft deletes;
        # the 2nd/3rd calls have no further effect
        hm.soft_delete()
        if last_deleted is None:
            last_deleted = hm.last_deleted

        assert hm.deleted is True
        assert hm.deleted_counter == 1
        assert hm.last_deleted == last_deleted
        assert hm.automated_counter == 42

    # 2nd record is not touched
    hm = HostMetric.objects.get(hostname="Host to delete 1")
    assert hm.deleted is False
    assert hm.deleted_counter == 0
    assert hm.last_deleted is None
    assert hm.automated_counter == 42


@pytest.mark.django_db
def test_soft_restore():
    current_time = now()
    HostMetric.objects.create(hostname="Host 1", last_automation=current_time, deleted=True)
    HostMetric.objects.create(hostname="Host 2", last_automation=current_time, deleted=True, last_deleted=current_time)
    HostMetric.objects.create(hostname="Host 3", last_automation=current_time, deleted=False, last_deleted=current_time)
    HostMetric.objects.all().update(automated_counter=42, deleted_counter=10)

    # 1. deleted, last_deleted not null
    for hm in HostMetric.objects.all():
        for _ in range(3):
            hm.soft_restore()
        assert hm.deleted is False
        assert hm.automated_counter == 42 and hm.deleted_counter == 10
        if hm.hostname == "Host 1":
            assert hm.last_deleted is None
        else:
            assert hm.last_deleted == current_time
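[Editor's note] The assertions above pin down the HostMetric soft-deletion contract: soft_delete() is idempotent (the flag flips, deleted_counter increments, and last_deleted is stamped only on the first call), and soft_restore() clears the flag without touching the counters or last_deleted. A minimal sketch of methods that would satisfy these tests; the real model methods may differ:

    from django.utils.timezone import now

    def soft_delete(self):
        if not self.deleted:  # repeat calls are no-ops
            self.deleted = True
            self.deleted_counter = self.deleted_counter + 1
            self.last_deleted = now()
            self.save(update_fields=['deleted', 'deleted_counter', 'last_deleted'])

    def soft_restore(self):
        if self.deleted:  # counters and last_deleted stay as-is
            self.deleted = False
            self.save(update_fields=['deleted'])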
@@ -169,7 +169,8 @@ class TestInventorySourceInjectors:
        CLOUD_PROVIDERS constant contains the same names as what are
        defined within the injectors
        """
        assert set(CLOUD_PROVIDERS) == set(InventorySource.injectors.keys())
        # slight exception case for constructed, because it has a FQCN but is not a cloud source
        assert set(CLOUD_PROVIDERS) | set(['constructed']) == set(InventorySource.injectors.keys())

    @pytest.mark.parametrize('source,filename', [('ec2', 'aws_ec2.yml'), ('openstack', 'openstack.yml'), ('gce', 'gcp_compute.yml')])
    def test_plugin_filenames(self, source, filename):
@@ -1,6 +1,6 @@
import pytest

from awx.main.models import InstanceGroup
from awx.main.models import InstanceGroup, Inventory


@pytest.fixture(scope='function')
@@ -38,6 +38,16 @@ def test_instance_group_ordering(source_model):
    assert source_model.instance_groups.through.objects.count() == 0


@pytest.mark.django_db
@pytest.mark.parametrize('source_model', ['job_template', 'inventory', 'organization'], indirect=True)
def test_instance_group_bulk_add(source_model):
    groups = [InstanceGroup.objects.create(name='host-%d' % i) for i in range(5)]
    groups.reverse()
    with pytest.raises(RuntimeError) as err:
        source_model.instance_groups.add(*groups)
    assert 'Ordered many-to-many fields do not support multiple objects' in str(err)


@pytest.mark.django_db
@pytest.mark.parametrize('source_model', ['job_template', 'inventory', 'organization'], indirect=True)
def test_instance_group_middle_deletion(source_model):
@@ -66,3 +76,33 @@ def test_explicit_ordering(source_model):

    assert [g.name for g in source_model.instance_groups.all()] == ['host-4', 'host-3', 'host-2', 'host-1', 'host-0']
    assert [g.name for g in source_model.instance_groups.order_by('name').all()] == ['host-0', 'host-1', 'host-2', 'host-3', 'host-4']


@pytest.mark.django_db
def test_input_inventories_ordering():
    constructed_inventory = Inventory.objects.create(name='my_constructed', kind='constructed')
    input_inventories = [Inventory.objects.create(name='inv-%d' % i) for i in range(5)]
    input_inventories.reverse()
    for inv in input_inventories:
        constructed_inventory.input_inventories.add(inv)

    assert [g.name for g in constructed_inventory.input_inventories.all()] == ['inv-4', 'inv-3', 'inv-2', 'inv-1', 'inv-0']
    assert [(row.position, row.input_inventory.name) for row in constructed_inventory.input_inventories.through.objects.all()] == [
        (0, 'inv-4'),
        (1, 'inv-3'),
        (2, 'inv-2'),
        (3, 'inv-1'),
        (4, 'inv-0'),
    ]

    constructed_inventory.input_inventories.remove(input_inventories[0])
    assert [g.name for g in constructed_inventory.input_inventories.all()] == ['inv-3', 'inv-2', 'inv-1', 'inv-0']
    assert [(row.position, row.input_inventory.name) for row in constructed_inventory.input_inventories.through.objects.all()] == [
        (0, 'inv-3'),
        (1, 'inv-2'),
        (2, 'inv-1'),
        (3, 'inv-0'),
    ]

    constructed_inventory.input_inventories.clear()
    assert constructed_inventory.input_inventories.through.objects.count() == 0
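[Editor's note] test_instance_group_bulk_add documents that these ordered many-to-many relations reject multi-object add() calls, and test_input_inventories_ordering shows positions being renumbered compactly after a remove(). Per those tests, the supported pattern is one object per call:

    for ig in groups:
        source_model.instance_groups.add(ig)  # each add() appends at the next position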
@@ -94,7 +94,8 @@ def test_instance_dup(org_admin, organization, project, instance_factory, instan

    ig_all = instance_group_factory("all", instances=[i1, i2, i3])
    ig_dup = instance_group_factory("duplicates", instances=[i1])
    project.organization.instance_groups.add(ig_all, ig_dup)
    project.organization.instance_groups.add(ig_all)
    project.organization.instance_groups.add(ig_dup)
    actual_num_instances = Instance.objects.count()
    list_response = get(reverse('api:instance_list'), user=system_auditor)
    api_num_instances_auditor = list(list_response.data.items())[0][1]
@@ -0,0 +1,61 @@
import pytest
from awx.main.models import Inventory
from awx.api.versioning import reverse


@pytest.mark.django_db
def test_constructed_inventory_post(post, admin_user, organization):
    inv1 = Inventory.objects.create(name='dummy1', kind='constructed', organization=organization)
    inv2 = Inventory.objects.create(name='dummy2', kind='constructed', organization=organization)
    resp = post(
        url=reverse('api:inventory_input_inventories', kwargs={'pk': inv1.pk}),
        data={'id': inv2.pk},
        user=admin_user,
        expect=400,
    )
    assert resp.status_code == 400


@pytest.mark.django_db
def test_add_constructed_inventory_source(post, admin_user, constructed_inventory):
    resp = post(
        url=reverse('api:inventory_inventory_sources_list', kwargs={'pk': constructed_inventory.pk}),
        data={'name': 'dummy1', 'source': 'constructed'},
        user=admin_user,
        expect=400,
    )
    assert resp.status_code == 400


@pytest.mark.django_db
def test_add_constructed_inventory_host(post, admin_user, constructed_inventory):
    resp = post(
        url=reverse('api:inventory_hosts_list', kwargs={'pk': constructed_inventory.pk}),
        data={'name': 'dummy1'},
        user=admin_user,
        expect=400,
    )
    assert resp.status_code == 400


@pytest.mark.django_db
def test_add_constructed_inventory_group(post, admin_user, constructed_inventory):
    resp = post(
        reverse('api:inventory_groups_list', kwargs={'pk': constructed_inventory.pk}),
        data={'name': 'group-test'},
        user=admin_user,
        expect=400,
    )
    assert resp.status_code == 400


@pytest.mark.django_db
def test_edit_constructed_inventory_source(patch, admin_user, inventory_source_factory):
    inv_src = inventory_source_factory(name='dummy1', source='constructed')
    resp = patch(
        reverse('api:inventory_source_detail', kwargs={'pk': inv_src.pk}),
        data={'description': inv_src.name},
        user=admin_user,
        expect=400,
    )
    assert resp.status_code == 400
@@ -26,12 +26,12 @@ def test_python_and_js_licenses():
        return (is_gpl, is_lgpl)

    def find_embedded_source_version(path, name):
        for entry in os.listdir(path):
            # Check variations of '-' and '_' in filenames due to Python packaging name normalization
            for fname in [name, name.replace('-', '_')]:
                if entry.startswith(fname) and entry.endswith('.tar.gz'):
                    v = entry.split(name + '-')[1].split('.tar.gz')[0]
                    return v
        files = os.listdir(path)
        tgz_files = [f for f in files if f.endswith('.tar.gz')]
        for tgz in tgz_files:
            pkg_name = tgz.split('-')[0].split('_')[0]
            if pkg_name == name:
                return tgz.split('-')[1].split('.tar.gz')[0]
        return None

    list = {}
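[Editor's note] A worked example of the rewritten parsing above, using a hypothetical sdist filename:

    >>> tgz = 'requests-2.31.0.tar.gz'    # hypothetical filename
    >>> tgz.split('-')[0].split('_')[0]   # package name portion
    'requests'
    >>> tgz.split('-')[1].split('.tar.gz')[0]  # version portion
    '2.31.0'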
@@ -50,6 +50,7 @@ class TestApiRootView:
            'activity_stream',
            'workflow_job_templates',
            'workflow_jobs',
            'analytics',
        ]
        view = ApiVersionRootView()
        ret = view.get(mocker.MagicMock())
@@ -6,37 +6,35 @@ import time

import pytest

from awx.main.models import (
    Job,
    Inventory,
    Host,
)
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache

from django.utils.timezone import now

from datetime import timedelta


@pytest.fixture
def hosts(inventory):
def ref_time():
    return now() - timedelta(seconds=5)


@pytest.fixture
def hosts(ref_time):
    inventory = Inventory(id=5)
    return [
        Host(name='host1', ansible_facts={"a": 1, "b": 2}, inventory=inventory),
        Host(name='host2', ansible_facts={"a": 1, "b": 2}, inventory=inventory),
        Host(name='host3', ansible_facts={"a": 1, "b": 2}, inventory=inventory),
        Host(name=u'Iñtërnâtiônàlizætiøn', ansible_facts={"a": 1, "b": 2}, inventory=inventory),
        Host(name='host1', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=ref_time, inventory=inventory),
        Host(name='host2', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=ref_time, inventory=inventory),
        Host(name='host3', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=ref_time, inventory=inventory),
        Host(name=u'Iñtërnâtiônàlizætiøn', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=ref_time, inventory=inventory),
    ]


@pytest.fixture
def inventory():
    return Inventory(id=5)


@pytest.fixture
def job(mocker, hosts, inventory):
    j = Job(inventory=inventory, id=2)
    j._get_inventory_hosts = mocker.Mock(return_value=hosts)
    return j


def test_start_job_fact_cache(hosts, job, inventory, tmpdir):
def test_start_job_fact_cache(hosts, tmpdir):
    fact_cache = os.path.join(tmpdir, 'facts')
    last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
    last_modified = start_fact_cache(hosts, fact_cache, timeout=0)

    for host in hosts:
        filepath = os.path.join(fact_cache, host.name)
@@ -46,25 +44,43 @@ def test_start_job_fact_cache(hosts, job, inventory, tmpdir):
        assert os.path.getmtime(filepath) <= last_modified


def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
    job._get_inventory_hosts = mocker.Mock(
        return_value=[
            Host(
                name='../foo',
                ansible_facts={"a": 1, "b": 2},
            ),
        ]
    )
def test_fact_cache_with_invalid_path_traversal(tmpdir):
    hosts = [
        Host(
            name='../foo',
            ansible_facts={"a": 1, "b": 2},
        ),
    ]

    fact_cache = os.path.join(tmpdir, 'facts')
    job.start_job_fact_cache(fact_cache, timeout=0)
    start_fact_cache(hosts, fact_cache, timeout=0)
    # a file called "foo" should _not_ be written outside the facts dir
    assert os.listdir(os.path.join(fact_cache, '..')) == ['facts']


def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker, tmpdir):
def test_start_job_fact_cache_past_timeout(hosts, tmpdir):
    fact_cache = os.path.join(tmpdir, 'facts')
    last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
    # the hosts fixture was modified 5s ago, which is more than 2s
    last_modified = start_fact_cache(hosts, fact_cache, timeout=2)
    assert last_modified is None

    for host in hosts:
        assert not os.path.exists(os.path.join(fact_cache, host.name))


def test_start_job_fact_cache_within_timeout(hosts, tmpdir):
    fact_cache = os.path.join(tmpdir, 'facts')
    # the hosts fixture was modified 5s ago, which is less than 7s
    last_modified = start_fact_cache(hosts, fact_cache, timeout=7)
    assert last_modified

    for host in hosts:
        assert os.path.exists(os.path.join(fact_cache, host.name))


def test_finish_job_fact_cache_with_existing_data(hosts, mocker, tmpdir, ref_time):
    fact_cache = os.path.join(tmpdir, 'facts')
    last_modified = start_fact_cache(hosts, fact_cache, timeout=0)

    bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')

@@ -80,18 +96,19 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
    new_modification_time = time.time() + 3600
    os.utime(filepath, (new_modification_time, new_modification_time))

    job.finish_job_fact_cache(fact_cache, last_modified)
    finish_fact_cache(hosts, fact_cache, last_modified)

    for host in (hosts[0], hosts[2], hosts[3]):
        assert host.ansible_facts == {"a": 1, "b": 2}
        assert host.ansible_facts_modified is None
        assert host.ansible_facts_modified == ref_time
    assert hosts[1].ansible_facts == ansible_facts_new
    assert hosts[1].ansible_facts_modified > ref_time
    bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])


def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir):
    fact_cache = os.path.join(tmpdir, 'facts')
    last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
    last_modified = start_fact_cache(hosts, fact_cache, timeout=0)

    bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')

@@ -103,22 +120,23 @@ def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpd
    new_modification_time = time.time() + 3600
    os.utime(filepath, (new_modification_time, new_modification_time))

    job.finish_job_fact_cache(fact_cache, last_modified)
    finish_fact_cache(hosts, fact_cache, last_modified)

    bulk_update.assert_not_called()


def test_finish_job_fact_cache_clear(job, hosts, inventory, mocker, tmpdir):
def test_finish_job_fact_cache_clear(hosts, mocker, ref_time, tmpdir):
    fact_cache = os.path.join(tmpdir, 'facts')
    last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
    last_modified = start_fact_cache(hosts, fact_cache, timeout=0)

    bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')

    os.remove(os.path.join(fact_cache, hosts[1].name))
    job.finish_job_fact_cache(fact_cache, last_modified)
    finish_fact_cache(hosts, fact_cache, last_modified)

    for host in (hosts[0], hosts[2], hosts[3]):
        assert host.ansible_facts == {"a": 1, "b": 2}
        assert host.ansible_facts_modified is None
        assert host.ansible_facts_modified == ref_time
    assert hosts[1].ansible_facts == {}
    assert hosts[1].ansible_facts_modified > ref_time
    bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
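[Editor's note] The two new timeout tests pin down the staleness rule: the hosts fixture stamps ansible_facts_modified five seconds in the past, so timeout=2 writes no cache files and returns None, while timeout=7 (and timeout=0, which disables the check) writes one facts file per host. A minimal sketch of the per-host check those tests imply; the real logic lives in awx.main.tasks.facts and may differ:

    from datetime import timedelta
    from django.utils.timezone import now

    def facts_are_stale(host, timeout):
        if timeout == 0:
            return False  # timeout=0 disables staleness checking, per the tests
        return host.ansible_facts_modified < now() - timedelta(seconds=timeout)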
@@ -6,6 +6,7 @@ import urllib.parse as urlparse
from django.conf import settings

from awx.main.utils.reload import supervisor_service_command
from awx.main.dispatch.publish import task


def construct_rsyslog_conf_template(settings=settings):
@@ -114,6 +115,7 @@ def construct_rsyslog_conf_template(settings=settings):
    return tmpl


@task(queue='rsyslog_configurer')
def reconfigure_rsyslog():
    tmpl = construct_rsyslog_conf_template()
    # Write config to a temp file then move it to preserve atomicity
@@ -35,6 +35,7 @@ from cryptography import x509
from django.conf import settings
from django.utils.translation import gettext_lazy as _

from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS

MAX_INSTANCES = 9999999

@@ -382,8 +383,19 @@ class Licenser(object):

        current_instances = Host.objects.active_count()
        license_date = int(attrs.get('license_date', 0) or 0)
        automated_instances = HostMetric.objects.count()
        first_host = HostMetric.objects.only('first_automation').order_by('first_automation').first()

        subscription_model = getattr(settings, 'SUBSCRIPTION_USAGE_MODEL', '')
        if subscription_model == SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS:
            automated_instances = HostMetric.active_objects.count()
            first_host = HostMetric.active_objects.only('first_automation').order_by('first_automation').first()
            attrs['deleted_instances'] = HostMetric.objects.filter(deleted=True).count()
            attrs['reactivated_instances'] = HostMetric.active_objects.filter(deleted_counter__gte=1).count()
        else:
            automated_instances = 0
            first_host = HostMetric.objects.only('first_automation').order_by('first_automation').first()
            attrs['deleted_instances'] = 0
            attrs['reactivated_instances'] = 0

        if first_host:
            automated_since = int(first_host.first_automation.timestamp())
        else:
@@ -17,7 +17,7 @@ def supervisor_service_command(command, service='*', communicate=True):
    """
    args = ['supervisorctl']

    supervisor_config_path = os.getenv('SUPERVISOR_WEB_CONFIG_PATH', None)
    supervisor_config_path = os.getenv('SUPERVISOR_CONFIG_PATH', None)
    if supervisor_config_path:
        args.extend(['-c', supervisor_config_path])
@@ -1,208 +0,0 @@
import json
import logging
import asyncio

import aiohttp
from aiohttp import client_exceptions
from asgiref.sync import sync_to_async

from channels.layers import get_channel_layer

from django.conf import settings
from django.apps import apps
from django.core.serializers.json import DjangoJSONEncoder

from awx.main.analytics.broadcast_websocket import (
    BroadcastWebsocketStats,
    BroadcastWebsocketStatsManager,
)
import awx.main.analytics.subsystem_metrics as s_metrics

logger = logging.getLogger('awx.main.wsbroadcast')


def wrap_broadcast_msg(group, message: str):
    # TODO: Maybe wrap as "group","message" so that we don't need to
    # encode/decode as json.
    return json.dumps(dict(group=group, message=message), cls=DjangoJSONEncoder)


def unwrap_broadcast_msg(payload: dict):
    return (payload['group'], payload['message'])


@sync_to_async
def get_broadcast_hosts():
    Instance = apps.get_model('main', 'Instance')
    instances = (
        Instance.objects.exclude(hostname=Instance.objects.my_hostname())
        .exclude(node_type='execution')
        .exclude(node_type='hop')
        .order_by('hostname')
        .values('hostname', 'ip_address')
        .distinct()
    )
    return {i['hostname']: i['ip_address'] or i['hostname'] for i in instances}


def get_local_host():
    Instance = apps.get_model('main', 'Instance')
    return Instance.objects.my_hostname()


class WebsocketTask:
    def __init__(
        self,
        name,
        event_loop,
        stats: BroadcastWebsocketStats,
        remote_host: str,
        remote_port: int = settings.BROADCAST_WEBSOCKET_PORT,
        protocol: str = settings.BROADCAST_WEBSOCKET_PROTOCOL,
        verify_ssl: bool = settings.BROADCAST_WEBSOCKET_VERIFY_CERT,
        endpoint: str = 'broadcast',
    ):
        self.name = name
        self.event_loop = event_loop
        self.stats = stats
        self.remote_host = remote_host
        self.remote_port = remote_port
        self.endpoint = endpoint
        self.protocol = protocol
        self.verify_ssl = verify_ssl
        self.channel_layer = None
        self.subsystem_metrics = s_metrics.Metrics(instance_name=name)

    async def run_loop(self, websocket: aiohttp.ClientWebSocketResponse):
        raise RuntimeError("Implement me")

    async def connect(self, attempt):
        from awx.main.consumers import WebsocketSecretAuthHelper  # noqa

        logger.debug(f"Connection from {self.name} to {self.remote_host} attempt number {attempt}.")

        '''
        Can not put get_channel_layer() in the init code because it is in the init
        path of channel layers i.e. RedisChannelLayer() calls our init code.
        '''
        if not self.channel_layer:
            self.channel_layer = get_channel_layer()

        try:
            if attempt > 0:
                await asyncio.sleep(settings.BROADCAST_WEBSOCKET_RECONNECT_RETRY_RATE_SECONDS)
        except asyncio.CancelledError:
            logger.warning(f"Connection from {self.name} to {self.remote_host} cancelled")
            raise

        uri = f"{self.protocol}://{self.remote_host}:{self.remote_port}/websocket/{self.endpoint}/"
        timeout = aiohttp.ClientTimeout(total=10)

        secret_val = WebsocketSecretAuthHelper.construct_secret()
        try:
            async with aiohttp.ClientSession(headers={'secret': secret_val}, timeout=timeout) as session:
                async with session.ws_connect(uri, ssl=self.verify_ssl, heartbeat=20) as websocket:
                    logger.info(f"Connection from {self.name} to {self.remote_host} established.")
                    self.stats.record_connection_established()
                    attempt = 0
                    await self.run_loop(websocket)
        except asyncio.CancelledError:
            # TODO: Check if connected and disconnect
            # Possibly use run_until_complete() if disconnect is async
            logger.warning(f"Connection from {self.name} to {self.remote_host} cancelled.")
            self.stats.record_connection_lost()
            raise
        except client_exceptions.ClientConnectorError as e:
            logger.warning(f"Connection from {self.name} to {self.remote_host} failed: '{e}'.")
        except asyncio.TimeoutError:
            logger.warning(f"Connection from {self.name} to {self.remote_host} timed out.")
        except Exception as e:
            # Early on, this is our canary. I'm not sure what exceptions we can really encounter.
            logger.exception(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
        else:
            logger.warning(f"Connection from {self.name} to {self.remote_host} list.")

        self.stats.record_connection_lost()
        self.start(attempt=attempt + 1)

    def start(self, attempt=0):
        self.async_task = self.event_loop.create_task(self.connect(attempt=attempt))

    def cancel(self):
        self.async_task.cancel()


class BroadcastWebsocketTask(WebsocketTask):
    async def run_loop(self, websocket: aiohttp.ClientWebSocketResponse):
        async for msg in websocket:
            self.stats.record_message_received()

            if msg.type == aiohttp.WSMsgType.ERROR:
                break
            elif msg.type == aiohttp.WSMsgType.TEXT:
                try:
                    payload = json.loads(msg.data)
                except json.JSONDecodeError:
                    logmsg = "Failed to decode broadcast message"
                    if logger.isEnabledFor(logging.DEBUG):
                        logmsg = "{} {}".format(logmsg, payload)
                    logger.warning(logmsg)
                    continue
                (group, message) = unwrap_broadcast_msg(payload)
                if group == "metrics":
                    self.subsystem_metrics.store_metrics(message)
                    continue
                await self.channel_layer.group_send(group, {"type": "internal.message", "text": message})


class BroadcastWebsocketManager(object):
    def __init__(self):
        self.event_loop = asyncio.get_event_loop()
        '''
        {
            'hostname1': BroadcastWebsocketTask(),
            'hostname2': BroadcastWebsocketTask(),
            'hostname3': BroadcastWebsocketTask(),
        }
        '''
        self.broadcast_tasks = dict()
        self.local_hostname = get_local_host()
        self.stats_mgr = BroadcastWebsocketStatsManager(self.event_loop, self.local_hostname)

    async def run_per_host_websocket(self):
        while True:
            known_hosts = await get_broadcast_hosts()
            future_remote_hosts = known_hosts.keys()
            current_remote_hosts = self.broadcast_tasks.keys()
            deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
            new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)

            remote_addresses = {k: v.remote_host for k, v in self.broadcast_tasks.items()}
            for hostname, address in known_hosts.items():
                if hostname in self.broadcast_tasks and address != remote_addresses[hostname]:
                    deleted_remote_hosts.add(hostname)
                    new_remote_hosts.add(hostname)

            if deleted_remote_hosts:
                logger.warning(f"Removing {deleted_remote_hosts} from websocket broadcast list")
            if new_remote_hosts:
                logger.warning(f"Adding {new_remote_hosts} to websocket broadcast list")

            for h in deleted_remote_hosts:
                self.broadcast_tasks[h].cancel()
                del self.broadcast_tasks[h]
                self.stats_mgr.delete_remote_host_stats(h)

            for h in new_remote_hosts:
                stats = self.stats_mgr.new_remote_host_stats(h)
                broadcast_task = BroadcastWebsocketTask(name=self.local_hostname, event_loop=self.event_loop, stats=stats, remote_host=known_hosts[h])
                broadcast_task.start()
                self.broadcast_tasks[h] = broadcast_task

            await asyncio.sleep(settings.BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS)

    def start(self):
        self.stats_mgr.start()

        self.async_task = self.event_loop.create_task(self.run_per_host_websocket())
        return self.async_task
305
awx/main/wsrelay.py
Normal file
@@ -0,0 +1,305 @@
import json
import logging
import asyncio
from typing import Dict

import aiohttp
from aiohttp import client_exceptions

from channels.layers import get_channel_layer

from django.conf import settings
from django.apps import apps

import psycopg

from awx.main.analytics.broadcast_websocket import (
    RelayWebsocketStats,
    RelayWebsocketStatsManager,
)
import awx.main.analytics.subsystem_metrics as s_metrics

logger = logging.getLogger('awx.main.wsrelay')


def wrap_broadcast_msg(group, message: str):
    # TODO: Maybe wrap as "group","message" so that we don't need to
    # encode/decode as json.
    return dict(group=group, message=message)


def get_local_host():
    Instance = apps.get_model('main', 'Instance')
    return Instance.objects.my_hostname()


class WebsocketRelayConnection:
    def __init__(
        self,
        name,
        stats: RelayWebsocketStats,
        remote_host: str,
        remote_port: int = settings.BROADCAST_WEBSOCKET_PORT,
        protocol: str = settings.BROADCAST_WEBSOCKET_PROTOCOL,
        verify_ssl: bool = settings.BROADCAST_WEBSOCKET_VERIFY_CERT,
    ):
        self.name = name
        self.event_loop = asyncio.get_event_loop()
        self.stats = stats
        self.remote_host = remote_host
        self.remote_port = remote_port
        self.protocol = protocol
        self.verify_ssl = verify_ssl
        self.channel_layer = None
        self.subsystem_metrics = s_metrics.Metrics(instance_name=name)
        self.producers = dict()
        self.connected = False

    async def run_loop(self, websocket: aiohttp.ClientWebSocketResponse):
        raise RuntimeError("Implement me")

    async def connect(self):
        from awx.main.consumers import WebsocketSecretAuthHelper  # noqa

        logger.debug(f"Connection attempt from {self.name} to {self.remote_host}")

        '''
        Can not put get_channel_layer() in the init code because it is in the init
        path of channel layers i.e. RedisChannelLayer() calls our init code.
        '''
        if not self.channel_layer:
            self.channel_layer = get_channel_layer()

        uri = f"{self.protocol}://{self.remote_host}:{self.remote_port}/websocket/relay/"
        timeout = aiohttp.ClientTimeout(total=10)

        secret_val = WebsocketSecretAuthHelper.construct_secret()
        try:
            async with aiohttp.ClientSession(headers={'secret': secret_val}, timeout=timeout) as session:
                async with session.ws_connect(uri, ssl=self.verify_ssl, heartbeat=20) as websocket:
                    logger.info(f"Connection from {self.name} to {self.remote_host} established.")
                    self.stats.record_connection_established()
                    self.connected = True
                    await self.run_connection(websocket)
        except asyncio.CancelledError:
            # TODO: Check if connected and disconnect
            # Possibly use run_until_complete() if disconnect is async
            logger.warning(f"Connection from {self.name} to {self.remote_host} cancelled.")
        except client_exceptions.ClientConnectorError as e:
            logger.warning(f"Connection from {self.name} to {self.remote_host} failed: '{e}'.", exc_info=True)
        except asyncio.TimeoutError:
            logger.warning(f"Connection from {self.name} to {self.remote_host} timed out.")
        except Exception as e:
            # Early on, this is our canary. I'm not sure what exceptions we can really encounter.
            logger.warning(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.", exc_info=True)
        else:
            logger.debug(f"Connection from {self.name} to {self.remote_host} lost, but no exception was raised.")
        finally:
            self.connected = False
            self.stats.record_connection_lost()

    def start(self):
        self.async_task = self.event_loop.create_task(self.connect())
        return self.async_task

    def cancel(self):
        self.async_task.cancel()

    async def run_connection(self, websocket: aiohttp.ClientWebSocketResponse):
        # create a dedicated subsystem metric producer to handle local subsystem
        # metrics messages
        # the "metrics" group is not subscribed to in the typical fashion, so we
        # just explicitly create it
        producer = self.event_loop.create_task(self.run_producer("metrics", websocket, "metrics"))
        self.producers["metrics"] = {"task": producer, "subscriptions": {"metrics"}}
        async for msg in websocket:
            self.stats.record_message_received()

            if msg.type == aiohttp.WSMsgType.ERROR:
                break
            elif msg.type == aiohttp.WSMsgType.TEXT:
                try:
                    payload = json.loads(msg.data)
                except json.JSONDecodeError:
                    logmsg = "Failed to decode message from web node"
                    if logger.isEnabledFor(logging.DEBUG):
                        logmsg = "{} {}".format(logmsg, payload)
                    logger.warning(logmsg)
                    continue

                if payload.get("type") == "consumer.subscribe":
                    for group in payload['groups']:
                        name = f"{self.remote_host}-{group}"
                        origin_channel = payload['origin_channel']
                        if not self.producers.get(name):
                            producer = self.event_loop.create_task(self.run_producer(name, websocket, group))
                            self.producers[name] = {"task": producer, "subscriptions": {origin_channel}}
                            logger.debug(f"Producer {name} started.")
                        else:
                            self.producers[name]["subscriptions"].add(origin_channel)
                            logger.debug(f"Connection from {self.name} to {self.remote_host} added subscription to {group}.")

                if payload.get("type") == "consumer.unsubscribe":
                    for group in payload['groups']:
                        name = f"{self.remote_host}-{group}"
                        origin_channel = payload['origin_channel']
                        try:
                            self.producers[name]["subscriptions"].remove(origin_channel)
                            logger.debug(f"Unsubscribed {origin_channel} from {name}")
                        except KeyError:
                            logger.warning(f"Producer {name} not found.")

    async def run_producer(self, name, websocket, group):
        try:
            logger.info(f"Starting producer for {name}")

            consumer_channel = await self.channel_layer.new_channel()
            await self.channel_layer.group_add(group, consumer_channel)
            logger.debug(f"Producer {name} added to group {group} and is now awaiting messages.")

            while True:
                try:
                    msg = await asyncio.wait_for(self.channel_layer.receive(consumer_channel), timeout=10)
                    if not msg.get("needs_relay"):
                        # This is added in by emit_channel_notification(). It prevents us from looping
                        # in the event that we are sharing a redis with a web instance. We'll see the
                        # message once (it'll have needs_relay=True), we'll delete that, and then forward
                        # the message along. The web instance will add it back to the same channels group,
                        # but it won't have needs_relay=True, so we'll ignore it.
                        continue

                    # We need to copy the message because we're going to delete the needs_relay key
                    # and we don't want to modify the original message because other producers may
                    # still need to act on it. It seems weird, but it's necessary.
                    msg = dict(msg)
                    del msg["needs_relay"]
                except asyncio.TimeoutError:
                    current_subscriptions = self.producers[name]["subscriptions"]
                    if len(current_subscriptions) == 0:
                        logger.info(f"Producer {name} has no subscribers, shutting down.")
                        return

                    continue

                await websocket.send_json(wrap_broadcast_msg(group, msg))
        except ConnectionResetError:
            # This can be hit when a web node is scaling down and we try to write to it.
            # There's really nothing to do in this case and it's a fairly typical thing to happen.
            # We'll log it as debug, but it's not really a problem.
            logger.debug(f"Producer {name} connection reset.")
            pass
        except Exception:
            # Note, this is very intentional and important since we do not otherwise
            # ever check the result of this future. Without this line you will not see an error if
            # something goes wrong in here.
            logger.exception(f"Event relay producer {name} crashed")
        finally:
            await self.channel_layer.group_discard(group, consumer_channel)
            del self.producers[name]
|
||||
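For context on the needs_relay handshake above: the flag is set by the sender and stripped by the relay before forwarding, which is what keeps a shared redis from looping a message forever. A minimal sketch of the emitting side, assuming the standard Django Channels API; the real implementation is emit_channel_notification() in awx.main.consumers, and the group name and payload here are made up for illustration:

from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer

def emit_notification_sketch(group, payload):
    # Tag the message so a relay forwards it exactly once; the copy a web node
    # re-publishes into the same group will lack the flag and be skipped.
    message = dict(payload, needs_relay=True)
    async_to_sync(get_channel_layer().group_send)(group, message)

# Hypothetical usage; "jobs" and the payload shape are illustrative only.
emit_notification_sketch("jobs", {"type": "internal.message", "text": "job 1 changed"})
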
class WebSocketRelayManager(object):
    def __init__(self):
        self.local_hostname = get_local_host()
        self.relay_connections = dict()
        # hostname -> ip
        self.known_hosts: Dict[str, str] = dict()

    async def pg_consumer(self, conn):
        try:
            await conn.execute("LISTEN web_heartbeet")
            async for notif in conn.notifies():
                if notif is not None and notif.channel == "web_heartbeet":
                    try:
                        payload = json.loads(notif.payload)
                    except json.JSONDecodeError:
                        logmsg = "Failed to decode message from pg_notify channel `web_heartbeet`"
                        if logger.isEnabledFor(logging.DEBUG):
                            logmsg = "{} {}".format(logmsg, notif.payload)
                        logger.warning(logmsg)
                        continue

                    # Skip messages that come from the same host we are running on.
                    # In that case we share a redis, so there is no need to relay.
                    if payload.get("hostname") == self.local_hostname:
                        continue

                    if payload.get("action") == "online":
                        hostname = payload["hostname"]
                        ip = payload["ip"]
                        if ip is None:
                            # If we don't get an IP, just try the hostname; maybe it resolves.
                            ip = hostname
                        self.known_hosts[hostname] = ip
                        logger.debug(f"Web host {hostname} ({ip}) online heartbeat received.")
                    elif payload.get("action") == "offline":
                        hostname = payload["hostname"]
                        ip = self.known_hosts.pop(hostname, None)
                        logger.debug(f"Web host {hostname} ({ip}) offline heartbeat received.")
        except Exception as e:
            # This catch-all serves the same purpose as the one above: asyncio will eat
            # the exception, but we want to know about it.
            logger.exception(f"pg_consumer exception: {e}")

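The payloads pg_consumer handles are ordinary pg_notify messages on the web_heartbeet channel. A sketch of what a web node would send, with field names mirroring the handler above; the cursor is assumed to be an open psycopg cursor on the same database:

import json

def send_web_heartbeat(cursor, hostname, ip, action="online"):
    # `action` is "online" or "offline", matching the branches in pg_consumer.
    payload = json.dumps({"hostname": hostname, "ip": ip, "action": action})
    cursor.execute("SELECT pg_notify('web_heartbeet', %s)", (payload,))
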
    async def run(self):
        event_loop = asyncio.get_running_loop()

        stats_mgr = RelayWebsocketStatsManager(event_loop, self.local_hostname)
        stats_mgr.start()

        # Set up a pg_notify consumer that lets web nodes "provision" and "deprovision" themselves gracefully.
        database_conf = settings.DATABASES['default']
        async_conn = await psycopg.AsyncConnection.connect(
            dbname=database_conf['NAME'],
            host=database_conf['HOST'],
            user=database_conf['USER'],
            password=database_conf['PASSWORD'],
            port=database_conf['PORT'],
            **database_conf.get("OPTIONS", {}),
        )
        await async_conn.set_autocommit(True)
        event_loop.create_task(self.pg_consumer(async_conn))

        # Establish a websocket connection to /websocket/relay/ on all known API servers.
        while True:
            # logger.info("Current known hosts: {}".format(self.known_hosts))
            future_remote_hosts = self.known_hosts.keys()
            current_remote_hosts = self.relay_connections.keys()
            deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
            new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)

            # Handle the case where we get an advertisement from a host we already know
            # about, but with a different IP than the one we are currently connected to.
            for hostname, address in self.known_hosts.items():
                if hostname not in self.relay_connections:
                    # We've picked up a new hostname that we don't know about yet.
                    continue

                if address != self.relay_connections[hostname].remote_host:
                    deleted_remote_hosts.add(hostname)
                    new_remote_hosts.add(hostname)

            # Delete any hosts with closed connections.
            for hostname, relay_conn in self.relay_connections.items():
                if not relay_conn.connected:
                    deleted_remote_hosts.add(hostname)

            if deleted_remote_hosts:
                logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")

            if new_remote_hosts:
                logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")

            for h in deleted_remote_hosts:
                self.relay_connections[h].cancel()
                del self.relay_connections[h]
                self.known_hosts.pop(h, None)
                stats_mgr.delete_remote_host_stats(h)

            for h in new_remote_hosts:
                stats = stats_mgr.new_remote_host_stats(h)
                relay_connection = WebsocketRelayConnection(name=self.local_hostname, stats=stats, remote_host=self.known_hosts[h])
                relay_connection.start()
                self.relay_connections[h] = relay_connection

            await asyncio.sleep(settings.BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS)
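The manager above is driven by the run_wsrelay management command (its logger and handler are wired up in the settings changes below). A minimal sketch of such an entry point, assuming a plain asyncio runner; the actual command may differ:

import asyncio

def handle():
    relay_manager = WebSocketRelayManager()
    try:
        asyncio.run(relay_manager.run())
    except KeyboardInterrupt:
        logger.info("Terminating websocket relay")
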
@ -85,7 +85,11 @@ USE_L10N = True

USE_TZ = True

STATICFILES_DIRS = [os.path.join(BASE_DIR, 'ui', 'build', 'static'), os.path.join(BASE_DIR, 'static')]
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'ui', 'build', 'static'),
    os.path.join(BASE_DIR, 'ui_next', 'build'),
    os.path.join(BASE_DIR, 'static'),
]

# Absolute filesystem path to the directory where static files are collected via
# the collectstatic command.
@ -224,6 +228,9 @@ JOB_EVENT_MAX_QUEUE_SIZE = 10000
# The number of job events to migrate per-transaction when moving from int -> bigint
JOB_EVENT_MIGRATION_CHUNK_SIZE = 1000000

# The prefix of the redis key that stores metrics
SUBSYSTEM_METRICS_REDIS_KEY_PREFIX = "awx_metrics"

# Histogram buckets for the callback_receiver_batch_events_insert_db metric
SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS = [10, 50, 150, 350, 650, 2000]

@ -299,7 +306,12 @@ TEMPLATES = [
            ],
            'builtins': ['awx.main.templatetags.swagger'],
        },
        'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR, 'ui', 'build'), os.path.join(BASE_DIR, 'ui', 'public')],
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
            os.path.join(BASE_DIR, 'ui', 'build'),
            os.path.join(BASE_DIR, 'ui', 'public'),
            os.path.join(BASE_DIR, 'ui_next', 'build', 'awx'),
        ],
    },
]

@ -463,6 +475,7 @@ CELERYBEAT_SCHEDULE = {
    'receptor_reaper': {'task': 'awx.main.tasks.system.awx_receptor_workunit_reaper', 'schedule': timedelta(seconds=60)},
    'send_subsystem_metrics': {'task': 'awx.main.analytics.analytics_tasks.send_subsystem_metrics', 'schedule': timedelta(seconds=20)},
    'cleanup_images': {'task': 'awx.main.tasks.system.cleanup_images_and_files', 'schedule': timedelta(hours=3)},
    'cleanup_host_metrics': {'task': 'awx.main.tasks.system.cleanup_host_metrics', 'schedule': timedelta(days=1)},
}

# Django Caching Configuration
@ -751,6 +764,13 @@ CUSTOM_EXCLUDE_EMPTY_GROUPS = False
SCM_EXCLUDE_EMPTY_GROUPS = False
# SCM_INSTANCE_ID_VAR =

# -----------------
# -- Constructed --
# -----------------
CONSTRUCTED_INSTANCE_ID_VAR = 'remote_tower_id'

CONSTRUCTED_EXCLUDE_EMPTY_GROUPS = False

# ---------------------
# -- Activity Stream --
# ---------------------
@ -773,6 +793,7 @@ INSIGHTS_URL_BASE = "https://example.org"
INSIGHTS_AGENT_MIME = 'application/example'
# See https://github.com/ansible/awx-facts-playbooks
INSIGHTS_SYSTEM_ID_FILE = '/etc/redhat-access-insights/machine-id'
INSIGHTS_CERT_PATH = "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem"

TOWER_SETTINGS_MANIFEST = {}

@ -850,7 +871,10 @@ LOGGING = {
        'awx.main.commands.run_callback_receiver': {'handlers': ['callback_receiver']},  # level handled by dynamic_level_filter
        'awx.main.dispatch': {'handlers': ['dispatcher']},
        'awx.main.consumers': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'INFO'},
        'awx.main.wsbroadcast': {'handlers': ['wsbroadcast']},
        'awx.main.rsyslog_configurer': {'handlers': ['rsyslog_configurer']},
        'awx.main.cache_clear': {'handlers': ['cache_clear']},
        'awx.main.heartbeet': {'handlers': ['heartbeet']},
        'awx.main.wsrelay': {'handlers': ['wsrelay']},
        'awx.main.commands.inventory_import': {'handlers': ['inventory_import'], 'propagate': False},
        'awx.main.tasks': {'handlers': ['task_system', 'external_logger'], 'propagate': False},
        'awx.main.analytics': {'handlers': ['task_system', 'external_logger'], 'level': 'INFO', 'propagate': False},
@ -859,7 +883,7 @@ LOGGING = {
        'awx.main.signals': {'level': 'INFO'},  # very verbose debug-level logs
        'awx.api.permissions': {'level': 'INFO'},  # very verbose debug-level logs
        'awx.analytics': {'handlers': ['external_logger'], 'level': 'INFO', 'propagate': False},
        'awx.analytics.broadcast_websocket': {'handlers': ['console', 'file', 'wsbroadcast', 'external_logger'], 'level': 'INFO', 'propagate': False},
        'awx.analytics.broadcast_websocket': {'handlers': ['console', 'file', 'wsrelay', 'external_logger'], 'level': 'INFO', 'propagate': False},
        'awx.analytics.performance': {'handlers': ['console', 'file', 'tower_warnings', 'external_logger'], 'level': 'DEBUG', 'propagate': False},
        'awx.analytics.job_lifecycle': {'handlers': ['console', 'job_lifecycle'], 'level': 'DEBUG', 'propagate': False},
        'django_auth_ldap': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG'},
@ -877,10 +901,13 @@ handler_config = {
    'tower_warnings': {'filename': 'tower.log'},
    'callback_receiver': {'filename': 'callback_receiver.log'},
    'dispatcher': {'filename': 'dispatcher.log', 'formatter': 'dispatcher'},
    'wsbroadcast': {'filename': 'wsbroadcast.log'},
    'wsrelay': {'filename': 'wsrelay.log'},
    'task_system': {'filename': 'task_system.log'},
    'rbac_migrations': {'filename': 'tower_rbac_migrations.log'},
    'job_lifecycle': {'filename': 'job_lifecycle.log', 'formatter': 'job_lifecycle'},
    'rsyslog_configurer': {'filename': 'rsyslog_configurer.log'},
    'cache_clear': {'filename': 'cache_clear.log'},
    'heartbeet': {'filename': 'heartbeet.log'},
}

# If running on a VM, we log to files. When running in a container, we log to stdout.
@ -991,6 +1018,9 @@ BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS = 10
# How often the websocket process will generate stats
BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS = 5

# How often should web instances advertise themselves?
BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS = 15

DJANGO_GUID = {'GUID_HEADER_NAME': 'X-API-Request-Id'}

# Name of the default task queue
@ -1017,3 +1047,17 @@ AWX_MOUNT_ISOLATED_PATHS_ON_K8S = False

# This is overridden downstream via /etc/tower/conf.d/cluster_host_id.py
CLUSTER_HOST_ID = socket.gethostname()

UI_NEXT = True

# License compliance for total host count. Possible values:
# - '': No model - subscription not counted from Host Metrics
# - 'unique_managed_hosts': Compliant = automated - deleted hosts (using /api/v2/host_metrics/)
SUBSCRIPTION_USAGE_MODEL = ''

# Host metrics cleanup - last time the cleanup ran (soft-deleting records)
CLEANUP_HOST_METRICS_LAST_TS = None
# Host metrics cleanup - minimal interval between two cleanups, in days
CLEANUP_HOST_METRICS_INTERVAL = 30  # days
# Host metrics cleanup - soft-delete HostMetric records with last_automation < [threshold] (in months)
CLEANUP_HOST_METRICS_THRESHOLD = 12  # months
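Taken together, the beat entry above runs cleanup_host_metrics daily, while CLEANUP_HOST_METRICS_INTERVAL gates how often the task actually does work. A sketch of that gating under the assumption that the task compares the last-run timestamp against the interval; the real logic lives in awx.main.tasks.system:

from datetime import datetime, timedelta, timezone

def cleanup_is_due(last_ts, interval_days=30):
    # last_ts mirrors CLEANUP_HOST_METRICS_LAST_TS; interval_days mirrors
    # CLEANUP_HOST_METRICS_INTERVAL. None means the cleanup has never run.
    if last_ts is None:
        return True
    return datetime.now(timezone.utc) - last_ts >= timedelta(days=interval_days)
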
@ -148,6 +148,16 @@ register(
    placeholder=['username', 'email'],
)

register(
    'SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL',
    field_class=fields.BooleanField,
    default=False,
    label=_('Use Email address for usernames'),
    help_text=_('Enabling this setting will tell social auth to use the full Email as username instead of the full name'),
    category=_('Authentication'),
    category_slug='authentication',
)

###############################################################################
# LDAP AUTHENTICATION SETTINGS
###############################################################################
@ -84,6 +84,7 @@
    "displayKey",
    "sortedColumnKey",
    "maxHeight",
    "maxWidth",
    "role",
    "aria-haspopup",
    "dropDirection",
@ -97,7 +98,8 @@
    "data-cy",
    "fieldName",
    "splitButtonVariant",
    "pageKey"
    "pageKey",
    "textId"
  ],
  "ignore": [
    "Ansible",
BIN awx/ui/public/static/media/192.png (new file, 19 KiB)
BIN awx/ui/public/static/media/256.png (new file, 30 KiB)
BIN awx/ui/public/static/media/384.png (new file, 56 KiB)
BIN awx/ui/public/static/media/512.png (new file, 87 KiB)
BIN awx/ui/public/static/media/brand-logo.png (new file, 7.3 KiB)
232
awx/ui/public/static/media/brand-logo.svg
Normal file
@ -0,0 +1,232 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 21.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 500 500" style="enable-background:new 0 0 500 500;" xml:space="preserve">
<style type="text/css">
.st0{display:none;}
.st1{display:inline;fill:#ED1C24;}
.st2{fill:#42210B;}
.st3{fill:#FFFFFF;}
.st4{fill:#C69C6D;stroke:#8C6239;stroke-width:5;stroke-miterlimit:10;}
.st5{fill:#FFFFFF;stroke:#42210B;stroke-width:3;stroke-miterlimit:10;}
.st6{fill:#ED1C24;stroke:#8C6239;stroke-width:5;stroke-miterlimit:10;}
.st7{fill:#A67C52;}
.st8{fill:#ED1C24;}
</style>
<g class="st0">
<path class="st1" d="M319.8,169.3c1.5-14.2,13.7-27.2,29.9-31.9c-13.1,1.5-27.3-1.7-36-10c-8.7-8.3-10-21.9-1.4-30.1
c-12,6.7-28.1,8.1-41.4,3.4c-13.3-4.6-23.5-15.1-26.2-26.9c-2-8.8,0-17.9,2-26.7c-6.2,9.4-17.6,17.3-30.5,17.3
c-12.9,0.1-25.7-10.2-22.9-20.7c-5.5,7.8-11.4,15.9-21,20.2c-9.5,4.3-23.7,2.7-28.2-5.5c-1.6,10.8-7.5,22-19.1,27
c-9,3.9-21.5,2.2-28-3.8c5.7,11.4,4.3,25.3-4.1,35.6c-9.9,12.2-29.1,18.6-46.4,15.6c14.7,7.2,28.5,17.7,32.1,31.5
c3.7,13.8-7.1,30.7-24.1,31.7c13.6,3.1,28,7.4,35.6,17.2c7.6,9.8,2.9,26.4-11.1,28c12.8-2.6,27.4,3.9,31.9,14.2
c4.1,9.5-0.9,20.9-10.9,26.5c18.6-8.9,41-17.1,59.6-8.8c13.9,6.2,20.8,21.6,15.1,33.8c10.4-10.6,23-21.3,39.2-23.5
c12.8-1.8,27.5,4.6,31.9,14.1c-0.3-12.7,6.1-25.5,17.5-34c13.8-10.3,34.4-14,52-9.2c-11.1-7.8-14.9-22-8.9-33
c6-11,21.3-18,35.7-16.2C327.5,198.1,318.3,183.5,319.8,169.3z"/>
</g>
<g>
<g>
<g>
<path class="st2" d="M179.7,297.3c-10.1,3.2-20.3,6-30.6,8.4c-10.7,2.5-21.7,5-32.8,5.1C96,311.1,79.9,297.2,60,296.1
c-5.8-0.3-5.8,8.7,0,9c9.9,0.5,18.9,5.1,27.9,8.8c9.8,4,19.6,6.3,30.2,5.9c21.5-0.8,43.5-7.4,64-13.8
C187.6,304.3,185.2,295.6,179.7,297.3L179.7,297.3z"/>
</g>
</g>
<g>
<g>
<path class="st2" d="M322.2,194.8c17.9-8,36-18.5,44.3-37.2c4.2-9.3,6-19.2,7.2-29.3c1.5-11.7,2.5-23.4,3.7-35.2
c0.6-5.8-8.4-5.7-9,0c-1.1,10.3-2.1,20.6-3.3,30.9c-1.1,9.7-2.5,19.7-6.4,28.7c-7.5,17.5-24.6,26.8-41.2,34.2
C312.4,189.4,316.9,197.2,322.2,194.8L322.2,194.8z"/>
</g>
</g>
<g>

<ellipse transform="matrix(0.5541 -0.8324 0.8324 0.5541 -219.4917 376.0051)" class="st2" cx="241.2" cy="392.9" rx="65.5" ry="33.7"/>
</g>
<g>
<g>
<path class="st3" d="M224.1,442.5c22-11.5,38.7-31,47.1-54.3c2-5.5-6.7-7.8-8.7-2.4c-7.6,21.1-23.1,38.5-43,48.9
C214.4,437.4,218.9,445.1,224.1,442.5L224.1,442.5z"/>
</g>
</g>
<g>

<ellipse transform="matrix(0.9684 -0.2494 0.2494 0.9684 -66.4734 109.0276)" class="st2" cx="397" cy="316.8" rx="63.9" ry="32.9"/>
</g>
<g>
<g>
<path class="st3" d="M363.8,341.5c28.3,7,58.7-0.8,80.2-20.5c4.3-3.9-2.1-10.3-6.4-6.4c-19.1,17.5-46.4,24.4-71.5,18.2
C360.5,331.5,358.1,340.1,363.8,341.5L363.8,341.5z"/>
</g>
</g>
<path class="st4" d="M156.9,96c-25.4,4.5-32.9,20.2-45,46.9c-20.2,44.4,2,90.3,5.6,97.5c18.4,36.5,42.3,36.8,60,80.6
c8.6,21.2,4.6,25.2,13.1,37.5c20.4,29.2,63.7,36.1,91.9,33.8c40.3-3.3,91.5-28.8,108.8-82.5c17.1-53.2-6-112.1-41.2-131.2
c-25.3-13.7-44.9-0.5-71.2-20.6c-21.6-16.5-18.4-33.1-37.5-48.8C227.9,98.1,203.7,87.7,156.9,96z"/>
<ellipse transform="matrix(0.6622 -0.7494 0.7494 0.6622 65.2068 309.6339)" class="st2" cx="376" cy="82.5" rx="21" ry="15.5"/>
<g>
<g>
<path class="st3" d="M379.8,75.3c0.8,0.2-0.6-0.4-0.1-0.1c0.2,0.1,0.3,0.2,0.5,0.3c0.4,0.2-0.6-0.7-0.1-0.1
c0.1,0.1,0.7,0.8,0.2,0.2c-0.4-0.5,0,0,0.1,0.1c0.4,0.7,0,0.2,0-0.2c0,0.1,0.1,0.4,0.2,0.5c0.3,0.9-0.1-1,0-0.1
c0.1,2.3,2,4.6,4.5,4.5c2.3-0.1,4.6-2,4.5-4.5c-0.3-4.4-3-8.1-7.3-9.4c-2.2-0.7-5,0.8-5.5,3.1C376.1,72.2,377.4,74.5,379.8,75.3
L379.8,75.3z"/>
</g>
</g>

<ellipse transform="matrix(0.9999 -1.433736e-02 1.433736e-02 0.9999 -4.303 0.8051)" class="st2" cx="54" cy="300.5" rx="21" ry="15.5"/>
<g>
<g>
<path class="st3" d="M52.2,297.5c1.1-0.3,1.4-0.4,2.5,0c0.8,0.3,1.3,0.7,2,1.7c1.5,1.9,4.8,1.6,6.4,0c1.9-1.9,1.5-4.4,0-6.4
c-3.1-3.9-8.6-5.4-13.3-4C44.3,290.5,46.7,299.2,52.2,297.5L52.2,297.5z"/>
</g>
</g>
<g>
<g>
<path class="st2" d="M149.3,108.8c4.9-10.8-1.3-24.2-12.9-26.9c-1.9-0.4-2.7,2.4-0.8,2.9c9.6,2.3,15.3,13.5,11.2,22.5
C145.9,109,148.5,110.5,149.3,108.8L149.3,108.8z"/>
</g>
</g>
<g>
<g>
<path class="st2" d="M141.2,112.3c2.4-9.4-5.4-19.3-15.2-19c-1.9,0.1-1.9,3.1,0,3c7.8-0.2,14.2,7.6,12.3,15.2
C137.8,113.4,140.7,114.2,141.2,112.3L141.2,112.3z"/>
</g>
</g>
<g>
<g>
<path class="st2" d="M132.6,118c-1.1-8.3-10.9-13.4-18.2-9.1c-1.7,1-0.2,3.6,1.5,2.6c5.2-3,12.9,0.4,13.7,6.5
C129.8,119.9,132.8,119.9,132.6,118L132.6,118z"/>
</g>
</g>
<path class="st5" d="M215.5,166.5l34-73c0,0,35,0,46,21c7.5,14.3,8,39,8,39L215.5,166.5z"/>
<path class="st5" d="M208.2,170.5l-79.5-12.7c0,0-19.6,29-8.4,49.9c7.6,14.2,27.8,28.5,27.8,28.5L208.2,170.5z"/>
<path class="st2" d="M210.5,164.5l33-74c0,0-2.5-5.5-8-7s-12,0-12,0L210.5,164.5z"/>
<path class="st2" d="M207.4,165.3l-73.1-35c0,0-5.6,2.4-7.2,7.8c-1.6,5.5-0.3,12-0.3,12L207.4,165.3z"/>
<path d="M215.5,166.5L234,127c0,0,17-6,25.5,7.5c8.6,13.6-3.5,25.5-3.5,25.5L215.5,166.5z"/>
<path d="M206.7,170.9l-29.6,32c0,0-18,0.5-22-14.9c-4-15.6,11.1-23.2,11.1-23.2L206.7,170.9z"/>
<g>
<g>
<path class="st3" d="M243.4,139.1c-0.6,0.2-0.7,0.3-0.4,0.2c0.3-0.1,0.2-0.1-0.5,0.1c0.7,0-0.3,0-0.4-0.1c0.1,0,0.3,0.1,0.4,0.1
c0.3,0.1,0.2,0-0.4-0.2c0,0,0.6,0.3,0.6,0.3c0.5,0.2-0.9-0.6-0.1-0.1c0.6,0.4-0.3-0.5-0.1-0.1c0.3,0.5-0.3-1-0.1-0.2
c0.2,0.8,0-1,0-0.1c0,2.4,2.1,4.6,4.5,4.5c2.5-0.1,4.5-2,4.5-4.5c0-3-1.6-5.7-4.1-7.3c-2.6-1.7-5.6-1.6-8.4-0.4
c-2.2,0.9-2.8,4.3-1.6,6.2C238.7,139.7,241,140.1,243.4,139.1L243.4,139.1z"/>
</g>
</g>
<g>
<g>
<path class="st3" d="M173.5,176.4c-0.5-0.3-0.1,0,0.2,0.1c-0.7-0.6,0.3,0.5,0.1,0c-0.3-0.5,0.4,0.8,0.1,0.2
c-0.4-0.8,0.2,0.2,0,0.1c0,0,0-0.6,0-0.6c-0.1,0.1-0.1,1,0,0.3c-0.1,0.2-0.1,0.3-0.2,0.5c0.2-0.3,0.2-0.4,0-0.1
c-0.2,0.2-0.2,0.3-0.1,0.1c0.2-0.2,0.1-0.2-0.3,0.2c1.9-1.4,3-4,1.6-6.2c-1.2-1.9-4.1-3.1-6.2-1.6c-2.4,1.7-4,4.3-3.9,7.4
c0.1,3,1.6,5.7,4.1,7.3c2,1.2,5,0.5,6.2-1.6C176.3,180.4,175.7,177.7,173.5,176.4L173.5,176.4z"/>
</g>
</g>

<ellipse transform="matrix(0.862 -0.5069 0.5069 0.862 -88.3186 186.5516)" class="st6" cx="298.5" cy="255.5" rx="79.5" ry="68.5"/>
<g>
<g>
<path class="st7" d="M173.6,109.8c-2.1,2-3.9,4.6-3.6,7.6c0.3,3.5,2.8,6.6,6.6,6.7c6,0.2,11.5-7.7,8.2-13c-1-1.7-3.1-3.1-5.2-3
c-1.7,0.1-3.1,0.8-4.4,1.9c-2,1.8-2.8,5.2-1.9,7.7c2.4,6.6,11.8,5.9,13.8-0.7c0.7-2.5-0.9-5.6-3.5-6.2c-2.7-0.6-5.4,0.8-6.2,3.5
c0.6-2.1,3.1-2.6,4.6-1c0.8,0.9,1,1.8,0.8,2.8c0.2-0.5,0.1-0.4-0.1,0.3c-0.4,0.7-1,1.2-1.8,1.4c-0.9,0-1.8,0-2.7,0
c-1.8-0.6-2.5-1.6-2.3-3.1c-0.1-0.4-0.1-0.7,0.1-1c0.2-0.3,0.1-0.3-0.1,0.1c0.1-0.1,0.2-0.2,0.3-0.4c-0.2,0.3-0.5,0.5-0.7,0.8
c-0.1,0.1-0.2,0.2-0.3,0.3c-0.3,0.2-0.2,0.2,0.1-0.1c1.3,0.2,2.6,0.4,3.9,0.6c0.2,0.4,0.5,0.9,0.7,1.3c0.2,0.6-0.2,0.9-0.2,1.4
c0,0.4,0.4-0.5-0.1,0.1c0.3-0.4,0.6-0.7,1-1c1.9-1.8,2-5.3,0-7.1C178.7,107.9,175.6,107.8,173.6,109.8L173.6,109.8z"/>
</g>
</g>
<g>
<g>
<path class="st7" d="M151.2,248.6c-5.7,7,1.7,16.9,10,13.3c3.4-1.5,6.3-5,6.3-8.9c0-4.2-2.7-7.6-7-7.8c-3.1-0.1-5.8,3.3-4.8,6.3
c1.2,3.4,3.7,6.1,7.3,7c2.6,0.6,5.4-0.8,6.2-3.5c0.7-2.5-0.9-5.5-3.5-6.2c-1.7-0.4,0,0.1-0.2,0.1c-0.4,0-0.4-0.8-0.1-0.1
c-1.6,2.1-3.2,4.2-4.8,6.3c-2.4-0.1-2.8-1.1-3-2.6c0.1,0.7-0.1,0.2,0.1-0.1c0.7-0.9-0.5,0.5,0,0c-0.5,0.5-0.3,0.1-0.2,0.2
c0.1,0,0.6,0,0.7,0c0.4,0.1,0.5,0.4,0.8,0.6c0.2,0.3,0.2,0.2-0.1-0.2c0.1,0.1,0.1,0.3,0.2,0.4c0,1,0.1,1.1-0.7,2.1
c1.7-2.1,2-5,0-7.1C156.5,246.8,152.9,246.5,151.2,248.6L151.2,248.6z"/>
</g>
</g>
<g>
<g>
<path class="st7" d="M204.1,205.7c0.8,4.8,5.3,8.6,10.1,8.6c5.1,0,9.5-3.9,10.3-8.9c0.7-4.4-0.2-12.1-5.3-13.6
c-2.7-0.8-5.2,0.5-7,2.4c-1.1,1.2-1.5,1.7-3.1,1.2c0.7,2.8,1.5,5.6,2.2,8.4c0.2-0.2-0.5,0.2-0.5,0.2c6.3,1.4,8.9-8.2,2.7-9.6
c-3.5-0.8-6.6,0-9.3,2.4c-3,2.6-1.1,7.2,2.2,8.4c2.6,0.9,5.5,0.8,8-0.2c1.3-0.5,2.4-1.2,3.4-2.1c0.4-0.3,0.7-0.6,1-1
c0.2-0.3,0.4-0.5,0.6-0.7c0.4-0.4,0.3-0.4-0.5,0.3c-0.9,0-1.8,0-2.7,0c0.2,0.1,0.3,0.1,0.5,0.2c-0.7-0.4-1.5-0.9-2.2-1.3
c0.1,0.2,0.3,0.3,0.4,0.5c-0.4-0.7-0.9-1.5-1.3-2.2c0.4,1.2,0.8,2.5,1,3.7c0,0.4,0,0.8,0,1.2c0,0.5-0.5,0.9,0,0.4
c-0.8,0.6-0.9,0.2-1.1-0.9c-0.4-2.7-3.8-4.1-6.2-3.5C204.7,200.3,203.7,203,204.1,205.7L204.1,205.7z"/>
</g>
</g>
<g>
<g>
<path class="st7" d="M265.9,179.6c0.2,0.4,0.5,0.9,0.7,1.3c0.6,1.1,1.8,2,3,2.3c1.2,0.3,2.8,0.2,3.9-0.5c1.1-0.7,2-1.7,2.3-3
c0.3-1.4,0.1-2.6-0.5-3.9c-0.2-0.4-0.5-0.9-0.7-1.3c-0.6-1.1-1.8-2-3-2.3c-1.2-0.3-2.8-0.2-3.9,0.5c-1.1,0.7-2,1.7-2.3,3
C265.1,177.1,265.3,178.3,265.9,179.6L265.9,179.6z"/>
</g>
</g>
<g>
<g>
<path class="st7" d="M200.4,295.8c-6.1,1.6-8.1,8.6-5,13.7c2.8,4.7,9.1,7.2,14.3,5.4c4.9-1.7,7.8-7.1,6.3-12.2
c-0.8-2.7-2.7-4.8-5.3-5.8c-1.4-0.5-2.8-0.7-4.2-0.8c-0.1,0-0.9-0.1-0.9-0.1c0.2-0.4,1.2,2.5,0.9,0.7c0,0.9,0,1.8,0,2.7
c-0.1,0.1-0.1,0.1-0.2,0.2c3.1-5.6-5.5-10.7-8.6-5c-1.7,3-1.1,6.6,1.4,9c1.3,1.2,2.8,2,4.5,2.3c0.8,0.1,1.6,0.2,2.4,0.3
c0.4,0,0.7,0,1.1,0.1c0.2,0.1,0.1,0.1-0.2-0.1c0,0.1-0.6-0.5-0.6-0.5c-0.1-0.1-0.1-0.2,0-0.3c0.1-0.3,0.1-0.1-0.1,0.5
c-0.3-0.1,0.7-0.2-0.3-0.3c-0.9-0.1-1.1-0.6-1.8-0.9c0,0-0.2-0.3-0.3-0.3c0.3,0-0.8,1.2-0.8,1.2
C209.3,303.8,206.6,294.2,200.4,295.8L200.4,295.8z"/>
</g>
</g>
<g>
<g>
<path class="st7" d="M244.8,355.3c-4-6.2-11.2-2.3-12,3.9c-0.8,5.9,1.8,12,6.5,15.6c4.5,3.5,11.5,4.9,16.7,2.1
c6.4-3.3,5.4-9.8,4.9-15.9c-0.5-6.3-1.9-12-9.5-12.1c-5.1-0.1-13.1,0.2-14.5,6.4c-1.2,5.4,2.5,12.8,8.2,13.8
c6.2,1.1,11.2-5.5,7.8-11c-2.2-3.5-8.1-3.1-9.1,1.2c-1.1,4.4,0.5,8,4.1,10.6c5.2,3.8,10.2-4.8,5-8.6c0.2,0.2,0.4,0.5,0.5,0.7
c-3,0.4-6.1,0.8-9.1,1.2c-0.4-0.7,3.4-3.1,2.9-4.8c-0.8-2.6-1.7,1.4-1.9,1.1c0,0.1,5.2-0.1,5.6-0.4c0.7,0.1,0.8-0.1,0.2-0.6
c-0.4-0.7-0.5-0.8-0.4-0.3c-0.2,0.3,0.2,1.9,0.2,2.3c0.2,2,0.3,4,0.5,5.9c0.1,1.6,0.4,1.7-1.1,2c-1.3,0.2-2.9-0.3-4-0.9
c-1.4-0.8-2.5-2-3.1-3.5c-0.3-0.7-0.4-1.3-0.5-2c0-0.3-0.1-0.7,0-1c0.2-1.9-1.1-1.5-3.8,1.2c-1-0.8-2-1.5-3-2.3
c0.1,0.2,0.2,0.4,0.4,0.6C239.6,365.7,248.3,360.7,244.8,355.3L244.8,355.3z"/>
</g>
</g>
<g>
<g>
<path class="st7" d="M336.5,337.4c-2.4-1.5-5.1-2.5-7.9-1.8c-2.7,0.7-4.9,3.2-5.3,6c-0.9,6.4,6.3,8.3,11.2,8.4
c4.8,0.1,10.6-2.4,10.9-7.9c0.2-5.6-5.5-9.6-10.6-6.9c-5.7,3-0.7,11.6,5,8.6c-0.1,0.1-0.2,0.1-0.3,0.2c-0.9,0-1.8,0-2.7,0
c-2.1-0.4-1.4-4.8-0.3-4.3c0,0-1.3,0.3-1.3,0.3c-0.6,0-1.2,0-1.8-0.1c-0.5-0.1-1-0.2-1.5-0.4c-1.2-0.5-1-0.2,0.6,0.7
c0.2,0.8,0.5,1.7,0.7,2.5c-3.4,1.1-4.4,1.9-2.8,2.7c0.4,0.2,0.7,0.4,1.1,0.7C336.9,349.6,341.9,340.9,336.5,337.4L336.5,337.4z"
/>
</g>
</g>
<path class="st3" d="M224.3,256.5L252,273v-40l32,20v-38l28,17l4-28l23,12l-3-24c0,0-14-8-35.5-6.4c-11.6,0.9-24.3,6.8-33.5,11.4
c-14,7-23.7,18.9-31.2,29.1C227,238,224.3,256.5,224.3,256.5z"/>
<path class="st3" d="M372.9,248.9l-28.8-14.5l2.9,39.9l-33.3-17.7l2.7,37.9l-29.1-15l-2,28.2l-23.8-10.3l4.7,23.7
c0,0,14.5,7,35.9,3.8c11.5-1.7,23.7-8.5,32.6-13.8c13.5-8,22.3-20.5,29-31.2C371.5,267.5,372.9,248.9,372.9,248.9z"/>
</g>
<g>
<g>
<path class="st8" d="M235.2,121.6c8.5-3.1,23.2-0.1,27.8,8.4c2.3,4.4,4.5,9.9,4.5,14.9c0.1,5.5-2.7,10.5-5.3,15.3
c-1.5,2.8,2.8,5.4,4.3,2.5c3.1-5.8,6.3-11.9,6-18.7c-0.3-6-2.8-12.8-5.9-17.9c-6-9.5-22.6-13.1-32.7-9.4
C230.9,117.8,232.2,122.7,235.2,121.6L235.2,121.6z"/>
</g>
</g>
<g>
<g>
<path class="st8" d="M241.1,110.5c11.6-2.3,25.6,2.3,32.2,12.4c6.6,10.2,6.1,22.8,3.1,34.2c-1.3,5,6.4,7.1,7.7,2.1
c3.8-14.3,3.8-30.3-5.5-42.6c-8.9-11.7-25.5-16.6-39.6-13.8C233.9,103.8,236.1,111.5,241.1,110.5L241.1,110.5z"/>
</g>
</g>
<g>
<g>
<path class="st8" d="M245.4,97.5c7.8-1.8,15.5,0,22.9,2.8c7.2,2.7,15,6.1,20.3,11.8c10.7,11.7,9.5,29.3,8.7,44
c-0.3,6.4,9.7,6.4,10,0c1-17.9,1.2-38.5-12.7-52.1c-6.4-6.3-15.3-10.2-23.6-13.3c-9.1-3.4-18.6-4.9-28.2-2.8
C236.5,89.2,239.1,98.9,245.4,97.5L245.4,97.5z"/>
</g>
</g>
<g>
<g>
<path class="st8" d="M155.8,158.5c-13.1,4.8-14.2,21.6-10.1,33.1c4.3,12,15.2,20.6,28.2,20.5c3.2,0,3.2-5,0-5
c-9.9,0.1-18.6-5.9-22.6-14.9c-3.9-8.6-5.2-24.8,5.8-28.9C160.2,162.3,158.9,157.4,155.8,158.5L155.8,158.5z"/>
</g>
</g>
<g>
<g>
<path class="st8" d="M164.1,216.5c-11.4-2.2-18.8-11.4-22.7-21.9c-3.6-9.6-7.7-25.3,1.2-33.1c3.9-3.4-1.8-9-5.7-5.7
c-11.3,9.9-7.9,28.5-3.3,40.9c4.8,13,14.1,24.7,28.3,27.5C167,225.2,169.1,217.5,164.1,216.5L164.1,216.5z"/>
</g>
</g>
<g>
<g>
<path class="st8" d="M152,231.7c-27.3-13.3-38.1-46.5-23.3-73.2c3.1-5.6-5.5-10.7-8.6-5c-17.3,31.2-5.3,71.1,26.9,86.9
C152.7,243.1,157.8,234.5,152,231.7L152,231.7z"/>
</g>
</g>
</svg>
BIN awx/ui/public/static/media/brand-logo192.png (new file, 7.7 KiB)
BIN awx/ui/public/static/media/favicon.png (new file, 3.6 KiB)
232
awx/ui/public/static/media/favicon.svg
Normal file
@ -0,0 +1,232 @@
<!-- favicon.svg: content is identical, line for line, to brand-logo.svg above (12 KiB) -->
@ -6,6 +6,7 @@ import Config from './models/Config';
import CredentialInputSources from './models/CredentialInputSources';
import CredentialTypes from './models/CredentialTypes';
import Credentials from './models/Credentials';
import ConstructedInventories from './models/ConstructedInventories';
import Dashboard from './models/Dashboard';
import ExecutionEnvironments from './models/ExecutionEnvironments';
import Groups from './models/Groups';
@ -44,6 +45,7 @@ import WorkflowApprovalTemplates from './models/WorkflowApprovalTemplates';
import WorkflowJobTemplateNodes from './models/WorkflowJobTemplateNodes';
import WorkflowJobTemplates from './models/WorkflowJobTemplates';
import WorkflowJobs from './models/WorkflowJobs';
import HostMetrics from './models/HostMetrics';

const ActivityStreamAPI = new ActivityStream();
const AdHocCommandsAPI = new AdHocCommands();
@ -53,6 +55,7 @@ const ConfigAPI = new Config();
const CredentialInputSourcesAPI = new CredentialInputSources();
const CredentialTypesAPI = new CredentialTypes();
const CredentialsAPI = new Credentials();
const ConstructedInventoriesAPI = new ConstructedInventories();
const DashboardAPI = new Dashboard();
const ExecutionEnvironmentsAPI = new ExecutionEnvironments();
const GroupsAPI = new Groups();
@ -91,6 +94,7 @@ const WorkflowApprovalTemplatesAPI = new WorkflowApprovalTemplates();
const WorkflowJobTemplateNodesAPI = new WorkflowJobTemplateNodes();
const WorkflowJobTemplatesAPI = new WorkflowJobTemplates();
const WorkflowJobsAPI = new WorkflowJobs();
const HostMetricsAPI = new HostMetrics();

export {
  ActivityStreamAPI,
@ -101,6 +105,7 @@ export {
  CredentialInputSourcesAPI,
  CredentialTypesAPI,
  CredentialsAPI,
  ConstructedInventoriesAPI,
  DashboardAPI,
  ExecutionEnvironmentsAPI,
  GroupsAPI,
@ -139,4 +144,5 @@ export {
  WorkflowJobTemplateNodesAPI,
  WorkflowJobTemplatesAPI,
  WorkflowJobsAPI,
  HostMetricsAPI,
};
10
awx/ui/src/api/models/ConstructedInventories.js
Normal file
@ -0,0 +1,10 @@
import Base from '../Base';
import InstanceGroupsMixin from '../mixins/InstanceGroups.mixin';

class ConstructedInventories extends InstanceGroupsMixin(Base) {
  constructor(http) {
    super(http);
    this.baseUrl = 'api/v2/constructed_inventories/';
  }
}
export default ConstructedInventories;
10
awx/ui/src/api/models/HostMetrics.js
Normal file
@ -0,0 +1,10 @@
import Base from '../Base';

class HostMetrics extends Base {
  constructor(http) {
    super(http);
    this.baseUrl = 'api/v2/host_metrics/';
  }
}

export default HostMetrics;
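Both new models are thin wrappers over REST endpoints. For reference, the same data can be fetched outside the UI; a sketch using the requests library, with the host and token as placeholders:

import requests

base = "https://awx.example.com"  # placeholder host
headers = {"Authorization": "Bearer <token>"}  # placeholder token
for endpoint in ("api/v2/constructed_inventories/", "api/v2/host_metrics/"):
    # AWX list endpoints return a paginated body with a top-level "count".
    resp = requests.get(f"{base}/{endpoint}", headers=headers)
    resp.raise_for_status()
    print(endpoint, resp.json()["count"])
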
@ -13,6 +13,9 @@ class Inventories extends InstanceGroupsMixin(Base) {
    this.readGroups = this.readGroups.bind(this);
    this.readGroupsOptions = this.readGroupsOptions.bind(this);
    this.promoteGroup = this.promoteGroup.bind(this);
    this.readInputInventories = this.readInputInventories.bind(this);
    this.associateInventory = this.associateInventory.bind(this);
    this.disassociateInventory = this.disassociateInventory.bind(this);
  }

  readAccessList(id, params) {
@ -72,6 +75,12 @@ class Inventories extends InstanceGroupsMixin(Base) {
    });
  }

  readInputInventories(inventoryId, params) {
    return this.http.get(`${this.baseUrl}${inventoryId}/input_inventories/`, {
      params,
    });
  }

  readSources(inventoryId, params) {
    return this.http.get(`${this.baseUrl}${inventoryId}/inventory_sources/`, {
      params,
@ -130,6 +139,19 @@ class Inventories extends InstanceGroupsMixin(Base) {
      disassociate: true,
    });
  }

  associateInventory(id, inputInventoryId) {
    return this.http.post(`${this.baseUrl}${id}/input_inventories/`, {
      id: inputInventoryId,
    });
  }

  disassociateInventory(id, inputInventoryId) {
    return this.http.post(`${this.baseUrl}${id}/input_inventories/`, {
      id: inputInventoryId,
      disassociate: true,
    });
  }
}

export default Inventories;
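associateInventory and disassociateInventory follow AWX's standard related-resource convention: POST the related object's id to the sub-collection, and add disassociate: true to remove it. The same two calls sketched in Python; the ids, host, and token are placeholders:

import requests

url = "https://awx.example.com/api/v2/inventories/42/input_inventories/"
headers = {"Authorization": "Bearer <token>"}
# Associate input inventory 7 with inventory 42 ...
requests.post(url, json={"id": 7}, headers=headers).raise_for_status()
# ... and then disassociate it again.
requests.post(url, json={"id": 7, "disassociate": True}, headers=headers).raise_for_status()
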
@ -18,6 +18,10 @@ class Settings extends Base {
    return this.http.get(`${this.baseUrl}all/`);
  }

  readSystem() {
    return this.http.get(`${this.baseUrl}system/`);
  }

  updateCategory(category, data) {
    return this.http.patch(`${this.baseUrl}${category}/`, data);
  }
@ -1,5 +1,5 @@
import React, { useState, useEffect, useCallback } from 'react';
import { string, bool, func, oneOf } from 'prop-types';
import { string, bool, func, oneOf, shape } from 'prop-types';

import { t } from '@lingui/macro';
import { useField } from 'formik';
@ -38,6 +38,8 @@ function VariablesField({
  tooltip,
  initialMode,
  onModeChange,
  isRequired,
  validators,
}) {
  // track focus manually, because the Code Editor library doesn't wire
  // into Formik completely
@ -48,13 +50,22 @@ function VariablesField({
        return undefined;
      }
      try {
        parseVariableField(value);
        const parsedVariables = parseVariableField(value);
        if (validators) {
          const errorMessages = Object.keys(validators)
            .map((field) => validators[field](parsedVariables[field]))
            .filter((e) => e);

          if (errorMessages.length > 0) {
            return errorMessages;
          }
        }
      } catch (error) {
        return error.message;
      }
      return undefined;
    },
    [shouldValidate]
    [shouldValidate, validators]
  );
  const [field, meta, helpers] = useField({ name, validate });
  const [mode, setMode] = useState(() =>
@ -120,6 +131,7 @@ function VariablesField({
        setMode={handleModeChange}
        setShouldValidate={setShouldValidate}
        handleChange={handleChange}
        isRequired={isRequired}
      />
      <Modal
        variant="xlarge"
@ -157,7 +169,11 @@ function VariablesField({
      </Modal>
      {meta.error ? (
        <div className="pf-c-form__helper-text pf-m-error" aria-live="polite">
          {meta.error}
          {(Array.isArray(meta.error) ? meta.error : [meta.error]).map(
            (errorMessage) => (
              <p key={errorMessage}>{errorMessage}</p>
            )
          )}
        </div>
      ) : null}
    </div>
@ -171,12 +187,16 @@ VariablesField.propTypes = {
  promptId: string,
  initialMode: oneOf([YAML_MODE, JSON_MODE]),
  onModeChange: func,
  isRequired: bool,
  validators: shape({}),
};
VariablesField.defaultProps = {
  readOnly: false,
  promptId: null,
  initialMode: YAML_MODE,
  onModeChange: () => {},
  isRequired: false,
  validators: {},
};

function VariablesFieldInternals({
@ -192,6 +212,7 @@ function VariablesFieldInternals({
  onExpand,
  setShouldValidate,
  handleChange,
  isRequired,
}) {
  const [field, meta, helpers] = useField(name);

@ -213,6 +234,12 @@ function VariablesFieldInternals({
      <SplitItem>
        <label htmlFor={id} className="pf-c-form__label">
          <span className="pf-c-form__label-text">{label}</span>
          {isRequired && (
            <span className="pf-c-form__label-required" aria-hidden="true">
              {' '}
              *{' '}
            </span>
          )}
        </label>
        {tooltip && <Popover content={tooltip} id={`${id}-tooltip`} />}
      </SplitItem>
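The new validators prop maps a variable name to a check that runs against the parsed value, and every failing message is rendered as its own line. The same pattern sketched in Python for clarity; the names here are illustrative and PyYAML stands in for the component's YAML/JSON parser:

import yaml

def validate_variables(raw_text, validators):
    parsed = yaml.safe_load(raw_text) or {}
    # Run each field's validator against its parsed value; keep truthy messages.
    errors = [check(parsed.get(field)) for field, check in validators.items()]
    return [e for e in errors if e] or None

print(validate_variables(
    "days: -1",
    {"days": lambda v: "days must not be negative" if v is not None and v < 0 else None},
))
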
@ -57,6 +57,7 @@ function DataListToolbar({
  enableRelatedFuzzyFiltering,
  handleIsAnsibleFactsSelected,
  isFilterCleared,
  advancedSearchDisabled,
}) {
  const showExpandCollapse = onCompact && onExpand;
  const [isKebabOpen, setIsKebabOpen] = useState(false);
@ -86,6 +87,10 @@ function DataListToolbar({
    }),
    [setIsKebabModalOpen]
  );
  const columns = [...searchColumns];
  if (!advancedSearchDisabled) {
    columns.push({ name: t`Advanced`, key: 'advanced' });
  }
  return (
    <Toolbar
      id={`${qsConfig.namespace}-list-toolbar`}
@ -134,10 +139,7 @@ function DataListToolbar({
          <ToolbarItem>
            <Search
              qsConfig={qsConfig}
              columns={[
                ...searchColumns,
                { name: t`Advanced`, key: 'advanced' },
              ]}
              columns={columns}
              searchableKeys={searchableKeys}
              relatedSearchableKeys={relatedSearchableKeys}
              onSearch={onSearch}
@ -224,6 +226,7 @@ DataListToolbar.propTypes = {
  additionalControls: PropTypes.arrayOf(PropTypes.node),
  enableNegativeFiltering: PropTypes.bool,
  enableRelatedFuzzyFiltering: PropTypes.bool,
  advancedSearchDisabled: PropTypes.bool,
};

DataListToolbar.defaultProps = {
@ -243,6 +246,7 @@ DataListToolbar.defaultProps = {
  additionalControls: [],
  enableNegativeFiltering: true,
  enableRelatedFuzzyFiltering: true,
  advancedSearchDisabled: false,
};

export default DataListToolbar;
@ -38,7 +38,7 @@ const InventoryLookupField = ({ isDisabled }) => {
      error={inventoryMeta.error}
      validate={required(t`Select a value for this field`)}
      isDisabled={isDisabled}
      hideSmartInventories
      hideAdvancedInventories
      autoPopulate={!inventoryField.value?.id}
    />
  );

@ -84,6 +84,7 @@ const QS_CONFIG = getQSConfig(
    page: 1,
    page_size: 5,
    order_by: 'name',
    not__inventory__kind: 'constructed',
  },
  ['id', 'page', 'page_size', 'inventory']
);
@ -1,5 +1,5 @@
import React, { useCallback, useEffect } from 'react';
import { func, bool, string } from 'prop-types';
import { func, bool, string, oneOfType, arrayOf } from 'prop-types';
import { withRouter } from 'react-router-dom';
import { t } from '@lingui/macro';
import { InventoriesAPI } from 'api';
@ -23,7 +23,7 @@ function InventoryLookup({
  autoPopulate,
  fieldId,
  fieldName,
  hideSmartInventories,
  hideAdvancedInventories,
  history,
  isDisabled,
  isPromptableField,
@ -34,6 +34,7 @@ function InventoryLookup({
  required,
  validate,
  value,
  multiple,
}) {
  const autoPopulateLookup = useAutoPopulateLookup(onChange);

@ -45,8 +46,8 @@ function InventoryLookup({
  } = useRequest(
    useCallback(async () => {
      const params = parseQueryString(QS_CONFIG, history.location.search);
      const inventoryKindParams = hideSmartInventories
        ? { not__kind: 'smart' }
      const inventoryKindParams = hideAdvancedInventories
        ? { not__kind: ['smart', 'constructed'] }
        : {};
      const [{ data }, actionsResponse] = await Promise.all([
        InventoriesAPI.read(
@ -69,7 +70,10 @@ function InventoryLookup({
        ).map((val) => val.slice(0, -8)),
        searchableKeys: Object.keys(actionsResponse.data.actions?.GET || {})
          .filter((key) => {
            if (['kind', 'host_filter'].includes(key) && hideSmartInventories) {
            if (
              ['kind', 'host_filter'].includes(key) &&
              hideAdvancedInventories
            ) {
              return false;
            }
            return actionsResponse.data.actions?.GET[key].filterable;
@ -187,6 +191,7 @@ function InventoryLookup({
          onDebounce={checkInventoryName}
          fieldName={fieldName}
          validate={validate}
          multiple={multiple}
          onBlur={onBlur}
          required={required}
          isLoading={isLoading}
@ -227,6 +232,10 @@ function InventoryLookup({
            readOnly={!canDelete}
            selectItem={(item) => dispatch({ type: 'SELECT_ITEM', item })}
            deselectItem={(item) => dispatch({ type: 'DESELECT_ITEM', item })}
            sortSelectedItems={(selectedItems) =>
              dispatch({ type: 'SET_SELECTED_ITEMS', selectedItems })
            }
            isSelectedDraggable
          />
        )}
      />
@ -239,19 +248,19 @@ InventoryLookup.propTypes = {
  autoPopulate: bool,
  fieldId: string,
  fieldName: string,
  hideSmartInventories: bool,
  hideAdvancedInventories: bool,
  isDisabled: bool,
  onChange: func.isRequired,
  required: bool,
  validate: func,
  value: Inventory,
  value: oneOfType([Inventory, arrayOf(Inventory)]),
};

InventoryLookup.defaultProps = {
  autoPopulate: false,
  fieldId: 'inventory',
  fieldName: 'inventory',
  hideSmartInventories: false,
  hideAdvancedInventories: false,
  isDisabled: false,
  required: false,
  validate: () => {},
@ -70,14 +70,14 @@ describe('InventoryLookup', () => {
    await act(async () => {
      wrapper = mountWithContexts(
        <Formik>
          <InventoryLookup onChange={() => {}} hideSmartInventories />
          <InventoryLookup onChange={() => {}} hideAdvancedInventories />
        </Formik>
      );
    });
    wrapper.update();
    expect(InventoriesAPI.read).toHaveBeenCalledTimes(1);
    expect(InventoriesAPI.read).toHaveBeenCalledWith({
      not__kind: 'smart',
      not__kind: ['smart', 'constructed'],
      order_by: 'name',
      page: 1,
      page_size: 5,
@ -113,6 +113,9 @@ function ScheduleEdit({
        days: values.daysToKeep,
      });
    } else {
      if (typeof requestData.extra_data === 'string') {
        requestData.extra_data = JSON.parse(requestData.extra_data);
      }
      requestData.extra_data.days = values.daysToKeep;
    }
  }
@ -8,6 +8,7 @@ import useRequest, { useDismissableError } from 'hooks/useRequest';
import AlertModal from 'components/AlertModal';
import ErrorDetail from 'components/ErrorDetail';
import { useSession } from './Session';
import { SettingsAPI } from '../api';

// eslint-disable-next-line import/prefer-default-export
export const ConfigContext = React.createContext({});
@ -40,6 +41,11 @@ export const ConfigProvider = ({ children }) => {
        },
      },
    ] = await Promise.all([ConfigAPI.read(), MeAPI.read()]);
    let systemConfig = {};
    if (me?.is_superuser || me?.is_system_auditor) {
      const { data: systemConfigResults } = await SettingsAPI.readSystem();
      systemConfig = systemConfigResults;
    }

    const [
      {
@ -62,10 +68,21 @@ export const ConfigProvider = ({ children }) => {
        role_level: 'execution_environment_admin_role',
      }),
    ]);

    return { ...data, me, adminOrgCount, notifAdminCount, execEnvAdminCount };
    return {
      ...data,
      me,
      adminOrgCount,
      notifAdminCount,
      execEnvAdminCount,
      systemConfig,
    };
  }, []),
  { adminOrgCount: 0, notifAdminCount: 0, execEnvAdminCount: 0 }
  {
    adminOrgCount: 0,
    notifAdminCount: 0,
    execEnvAdminCount: 0,
    systemConfig: {},
  }
);

const { error, dismissError } = useDismissableError(configError);
@ -112,6 +129,7 @@ export const useUserProfile = () => {
    isOrgAdmin: config.adminOrgCount,
    isNotificationAdmin: config.notifAdminCount,
    isExecEnvAdmin: config.execEnvAdminCount,
    systemConfig: config.systemConfig,
  };
};
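readSystem() above maps to GET api/v2/settings/system/, which the provider only calls for superusers and system auditors. The equivalent request sketched in Python, with the host and token as placeholders:

import requests

resp = requests.get(
    "https://awx.example.com/api/v2/settings/system/",
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()
system_config = resp.json()  # what the provider stores as systemConfig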