Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 19:44:43 -03:30)
Compare commits
264 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 929ed3e09d |  |
|  | f83a005038 |  |
|  | f5c176701b |  |
|  | 8a4bffcd50 |  |
|  | c8cf28f266 |  |
|  | e0d6b138b0 |  |
|  | f94db36273 |  |
|  | 77c573d64a |  |
|  | 0badc2fff7 |  |
|  | 43d33281a5 |  |
|  | 6bea5dd294 |  |
|  | 1d442452b0 |  |
|  | ed259cf0aa |  |
|  | ba7b55bfe3 |  |
|  | 64efc2c006 |  |
|  | 028c48c409 |  |
|  | ee68dd00aa |  |
|  | 6f7d594d0f |  |
|  | cc5a73aeb9 |  |
|  | 04ca1cb1a3 |  |
|  | 94b9892a1b |  |
|  | 61f0edc5e8 |  |
|  | 24a903142a |  |
|  | 1471945b9e |  |
|  | 58147f8bc1 |  |
|  | cd6e8969d5 |  |
|  | c22c0a393c |  |
|  | efd9e22adc |  |
|  | acb6e3e6e9 |  |
|  | 31c35fd473 |  |
|  | 881ea1295f |  |
|  | 86aad8b910 |  |
|  | 8374b41e21 |  |
|  | f9dc5cd32b |  |
|  | b2497a84ad |  |
|  | 315018b274 |  |
|  | 033adcb8b8 |  |
|  | 54d50e2459 |  |
|  | cca3698d19 |  |
|  | bf892443df |  |
|  | 35e778b7ae |  |
|  | 5c03fa9e84 |  |
|  | 2b5fe0e6c4 |  |
|  | 7095e266a5 |  |
|  | a75cbe683c |  |
|  | 584fda5178 |  |
|  | 191a25cccc |  |
|  | 6267b264bf |  |
|  | e6bde23aea |  |
|  | 4857c5edcb |  |
|  | f4d848e596 |  |
|  | 1e7b7d1a30 |  |
|  | 67f7998ab9 |  |
|  | 0bdd873bd3 |  |
|  | cf51dc5cea |  |
|  | c29fda0385 |  |
|  | 203f5763ad |  |
|  | 0cbfd1129f |  |
|  | 208bde6215 |  |
|  | ac42604aa7 |  |
|  | a744f0d30f |  |
|  | 555b25321e |  |
|  | ec312358e2 |  |
|  | fa02fd8563 |  |
|  | eb648d9447 |  |
|  | 2143b60ebf |  |
|  | 4acae40d4a |  |
|  | 71500a6554 |  |
|  | 8e579b2e74 |  |
|  | 78195a4203 |  |
|  | 146fb720db |  |
|  | 9fd2c5ba16 |  |
|  | 27c15caddd |  |
|  | a7fe519063 |  |
|  | 8d20add2d5 |  |
|  | 4d339fe275 |  |
|  | 9bfbf8d556 |  |
|  | bcbf0bac8c |  |
|  | c64fec0d98 |  |
|  | 6fb57fb5fc |  |
|  | 224c3de2c9 |  |
|  | ce1f3009f9 |  |
|  | 4cd4845617 |  |
|  | afe4279e5f |  |
|  | 88f70253a5 |  |
|  | f397679cc5 |  |
|  | 3fc4baae06 |  |
|  | 65cee65fad |  |
|  | 17e3279f1c |  |
|  | a17c34f041 |  |
|  | 1819a7963a |  |
|  | 6a599695db |  |
|  | b0cdfe7625 |  |
|  | efabc05270 |  |
|  | c1a009d128 |  |
|  | 8142f5fb55 |  |
|  | 8241ebbc9a |  |
|  | 5f39b6276d |  |
|  | 8c0366675a |  |
|  | b40e8d15c0 |  |
|  | f28ad90bf3 |  |
|  | 8440e3f41d |  |
|  | 33b6da4456 |  |
|  | a54aab9717 |  |
|  | 38352063e8 |  |
|  | 855cb162b7 |  |
|  | fb97a79aca |  |
|  | 4a8c63c579 |  |
|  | 2740155877 |  |
|  | e67b5f57b4 |  |
|  | cfc8b485ee |  |
|  | 21c493724c |  |
|  | 658543c0fd |  |
|  | 562ba53833 |  |
|  | 17b8589ff2 |  |
|  | 2d2d7b14a9 |  |
|  | bcf911daf1 |  |
|  | 968c056057 |  |
|  | c48fbec30c |  |
|  | 0cdf57f31f |  |
|  | edaec8dfbb |  |
|  | eec4f8dcc2 |  |
|  | 5ef7dd894a |  |
|  | 7fd5a4e79a |  |
|  | b862434bec |  |
|  | f5a69d37dc |  |
|  | 54a1712767 |  |
|  | fe4440f7e9 |  |
|  | ddcbef8545 |  |
|  | fc2d877983 |  |
|  | 40be9607ee |  |
|  | 673579fe26 |  |
|  | 0ca024c929 |  |
|  | 07d08b57d1 |  |
|  | 9540ed4364 |  |
|  | 182d4d3098 |  |
|  | 95db251314 |  |
|  | 8ab81216f3 |  |
|  | 1e3cfdc986 |  |
|  | c64ec6bbf8 |  |
|  | 4566e7a2a6 |  |
|  | 3912f2b57c |  |
|  | fa61ec6b3c |  |
|  | 33567f8729 |  |
|  | 1c888ca58b |  |
|  | f98b92073d |  |
|  | 1d89ecaf4f |  |
|  | 7583525366 |  |
|  | b729377a2c |  |
|  | c7de869a07 |  |
|  | 459874e4b5 |  |
|  | 329df4c0b5 |  |
|  | b9389208dd |  |
|  | ce588a6af5 |  |
|  | 7223ab4d29 |  |
|  | 8135ac4883 |  |
|  | 16bd9b44dc |  |
|  | a857352e30 |  |
|  | 19da1ad263 |  |
|  | e346dbfc97 |  |
|  | dff43e973e |  |
|  | 27d56726a3 |  |
|  | 032341c7fc |  |
|  | a4de7fffaf |  |
|  | 804cf74cd8 |  |
|  | b817967377 |  |
|  | 768fe94088 |  |
|  | 8375141d67 |  |
|  | 5e228c4d98 |  |
|  | e30b4ca875 |  |
|  | 850d04b5c0 |  |
|  | 54cb303ac5 |  |
|  | 956cffe073 |  |
|  | d834519aae |  |
|  | d53d41b84a |  |
|  | a194dfdbbb |  |
|  | dc0256441f |  |
|  | 08a969bf8b |  |
|  | 2793b5b01e |  |
|  | 32200cd893 |  |
|  | 6fef4e1ab7 |  |
|  | ad07d31b9a |  |
|  | 5bb93e1f5d |  |
|  | 95f5188462 |  |
|  | 1d4a83e613 |  |
|  | 8f2ef6ce01 |  |
|  | c633313152 |  |
|  | 637b540a4d |  |
|  | 8dd4e68385 |  |
|  | dff3103d96 |  |
|  | 1bd71024e3 |  |
|  | 634c9892df |  |
|  | 7435458a7b |  |
|  | 2ebd4c72c1 |  |
|  | e05fdf9ebb |  |
|  | 1dc3b80c68 |  |
|  | 77cc3306a5 |  |
|  | f807b76044 |  |
|  | ac0f534208 |  |
|  | c738772cd5 |  |
|  | 79118cfbe2 |  |
|  | 445b2fef84 |  |
|  | 94038006aa |  |
|  | be9622d03f |  |
|  | 03fbeb2a27 |  |
|  | 30f08582ed |  |
|  | 6dd5fc937b |  |
|  | 83d340ab1f |  |
|  | b82318161c |  |
|  | 90081e4e6e |  |
|  | cd372e4c74 |  |
|  | b933155f07 |  |
|  | 51257a2f62 |  |
|  | 0d2ab5f61e |  |
|  | 4a62932ecd |  |
|  | e61d0c5cb7 |  |
|  | 7c57aebd46 |  |
|  | 98bb296c6a |  |
|  | e67923382a |  |
|  | 4de7cf0296 |  |
|  | 4db3c36ac3 |  |
|  | f5c00431bd |  |
|  | b5f77bfe4c |  |
|  | f137ff7d43 |  |
|  | 2c61e8f6de |  |
|  | 1e4b44e54f |  |
|  | c72cc6486c |  |
|  | 9ee7281b0b |  |
|  | 5f93ba7690 |  |
|  | 300f5a3a1f |  |
|  | 9f68ffc1cc |  |
|  | ae1fd5a814 |  |
|  | f1a987793c |  |
|  | 7c13d749b1 |  |
|  | d479237734 |  |
|  | bdd41c70af |  |
|  | c6eb7da68d |  |
|  | 311c44341e |  |
|  | d012362ade |  |
|  | ce2e41a6fa |  |
|  | 62c91aea4a |  |
|  | a0780aa287 |  |
|  | 0bc4702a26 |  |
|  | 6e46183ba6 |  |
|  | 8a86867f69 |  |
|  | c7a2a1b9f6 |  |
|  | 4e75e9438e |  |
|  | b75d0c1dad |  |
|  | aa69a493b6 |  |
|  | cb32f5b096 |  |
|  | 89e28d6d4a |  |
|  | f05ffa521a |  |
|  | a3aab7228d |  |
|  | 385c4a16db |  |
|  | 43c8cabaa6 |  |
|  | b85559fe13 |  |
|  | 09c176847d |  |
|  | 5ff2e7442c |  |
|  | 8c73a51730 |  |
|  | 812a4e53df |  |
|  | 8ea247123a |  |
|  | eda9bcbf62 |  |
|  | 664d19510f |  |
|  | e56c5dbfe3 |  |
.github/ISSUE_TEMPLATE.md (vendored): 4 changes

@@ -23,8 +23,8 @@ https://www.ansible.com/security

##### ENVIRONMENT
* AWX version: X.Y.Z
* AWX install method: openshift, minishift, docker on linux, docker for mac, boot2docker
* Ansible version: X.Y.Z
* AWX install method: operator, developer environment
* AWX deployment target: openshift, kubernetes, minikube
* Operating System:
* Web Browser:
.github/PULL_REQUEST_TEMPLATE.md (vendored): 1 change

@@ -17,7 +17,6 @@ the change does.

<!--- Name of the module/plugin/module/task -->
- API
- UI
- Installer

##### AWX VERSION
<!--- Paste verbatim output from `make VERSION` between quotes below -->
@@ -1,7 +1,7 @@

Coding Standards and Practices
==============================

This is not meant to be a style document so much as a practices document for ensuring performance and convention in the Ansible Tower API.
This is not meant to be a style document so much as a practices document for ensuring performance and convention in the AWX API.

Paginate Everything
===================
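The "Paginate Everything" practice shows up directly in the API responses: list endpoints wrap their data in `count`, `next`, `previous`, and `results` keys. As a hedged illustration (the host, credentials, and use of `jq` below are assumptions, not part of this changeset), a client walks a collection by following `next` until it is empty:

```
# Minimal sketch, assuming curl and jq are installed; replace the
# placeholder host and credentials with real values.
BASE=https://awx.example.org
URL="$BASE/api/v2/hosts/?page_size=200"
while [ -n "$URL" ]; do
  PAGE=$(curl -s -u admin:password "$URL")
  echo "$PAGE" | jq -r '.results[].name'
  # "next" is a relative URL for the following page, or null on the last one
  NEXT=$(echo "$PAGE" | jq -r '.next // empty')
  URL=${NEXT:+$BASE$NEXT}
done
```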
CHANGELOG.md: 12 changes

@@ -2,6 +2,18 @@

This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

# 19.1.0 (May 1, 2021)

- Custom inventory scripts have been removed from the API https://github.com/ansible/awx/pull/9822
  - Old scripts can be exported via `awx-manage export_custom_scripts`
- Fixed a bug where ad-hoc commands targeted against multiple hosts would run against only 1 host https://github.com/ansible/awx/pull/9973
- AWX will now look for a top-level requirements.yml when installing collections / roles in project updates https://github.com/ansible/awx/pull/9945
- Improved error handling when Container Group pods fail to launch https://github.com/ansible/awx/pull/10025
- Added ability to set server-side password policies using Django's AUTH_PASSWORD_VALIDATORS setting https://github.com/ansible/awx/pull/9999
- Bumped versions of Ansible Runner & AWX EE https://github.com/ansible/awx/pull/10013
  - If you have built any custom EEs on top of awx-ee 0.1.0, you will need to rebuild on top of 0.2.0.
- Remove legacy resource profiling code https://github.com/ansible/awx/pull/9883

# 19.0.0 (April 7, 2021)

- AWX now runs on Python 3.8 (https://github.com/ansible/awx/pull/8778/)
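The custom-script removal note above is actionable from the CLI before (or after) upgrading to 19.1.0. A minimal sketch, run inside the AWX environment; the changelog names only the command itself, so check `--help` for output options rather than guessing flags:

```
$ awx-manage export_custom_scripts --help
$ awx-manage export_custom_scripts
```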
INSTALL.md: 11 changes

@@ -32,6 +32,9 @@ If you don't have an existing OpenShift or Kubernetes cluster, minikube is a fas

To install minikube, follow the steps in their [documentation](https://minikube.sigs.k8s.io/docs/start/).

:warning: NOTE |
--- |
If you're about to install minikube or have already installed it, please be sure you're using [Minikube v1.18.1](https://github.com/kubernetes/minikube/releases/tag/v1.18.1). There's a [bug](https://github.com/ansible/awx-operator/issues/205) right now that will not allow you to run it using Minikube v1.19.x.

#### Starting minikube

Once you have installed minikube, run the following command to start it. You may wish to customize these options.

@@ -42,10 +45,12 @@ $ minikube start --cpus=4 --memory=8g --addons=ingress

#### Deploying the AWX Operator

For a comprehensive overview of features, see [README.md](https://github.com/ansible/awx-operator/blob/devel/README.md) in the awx-operator repo. The following steps are the bare minimum to get AWX up and running.

Start by going to https://github.com/ansible/awx-operator/releases and making note of the latest release. Replace `<tag>` in the URL below with the version you are deploying:

```
$ minikube kubectl -- apply -f https://raw.githubusercontent.com/ansible/awx-operator/devel/deploy/awx-operator.yaml
$ minikube kubectl -- apply -f https://raw.githubusercontent.com/ansible/awx-operator/<tag>/deploy/awx-operator.yaml
```

##### Verifying the Operator Deployment

@@ -73,6 +78,8 @@ spec:
tower_ingress_type: Ingress
```

> If a custom AWX image is needed, see [these docs](./docs/build_awx_image.md) on how to build and use it.

And then creating the AWX object in the Kubernetes API:

```
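To make the `<tag>` substitution concrete, here is a hedged sketch. The tag `0.9.0` is purely illustrative, not a recommendation; pick a real tag from the releases page. The second command is a quick sanity check that pods are coming up:

```
$ minikube kubectl -- apply -f https://raw.githubusercontent.com/ansible/awx-operator/0.9.0/deploy/awx-operator.yaml
$ minikube kubectl -- get pods
```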
Makefile: 12 changes

@@ -65,7 +65,8 @@ I18N_FLAG_FILE = .i18n_built
	receiver test test_unit test_coverage coverage_html \
	dev_build release_build sdist \
	ui-release ui-devel \
	VERSION docker-compose-sources
	VERSION docker-compose-sources \
	.git/hooks/pre-commit

clean-tmp:
	rm -rf tmp/

@@ -173,12 +174,7 @@ init:
	. $(VENV_BASE)/awx/bin/activate; \
	fi; \
	$(MANAGEMENT_COMMAND) provision_instance --hostname=$(COMPOSE_HOST); \
	$(MANAGEMENT_COMMAND) register_queue --queuename=tower --instance_percent=100;\
	if [ "$(AWX_GROUP_QUEUES)" == "tower,thepentagon" ]; then \
		$(MANAGEMENT_COMMAND) provision_instance --hostname=isolated; \
		$(MANAGEMENT_COMMAND) register_queue --queuename='thepentagon' --hostnames=isolated --controller=tower; \
		$(MANAGEMENT_COMMAND) generate_isolated_key > /awx_devel/awx/main/isolated/authorized_keys; \
	fi;
	$(MANAGEMENT_COMMAND) register_queue --queuename=tower --instance_percent=100;

# Refresh development environment after pulling new code.
refresh: clean requirements_dev version_file develop migrate

@@ -391,7 +387,7 @@ clean-ui:
	rm -rf $(UI_BUILD_FLAG_FILE)

awx/ui_next/node_modules:
	$(NPM_BIN) --prefix awx/ui_next --loglevel warn --ignore-scripts install
	$(NPM_BIN) --prefix awx/ui_next --loglevel warn install

$(UI_BUILD_FLAG_FILE):
	$(NPM_BIN) --prefix awx/ui_next --loglevel warn run compile-strings
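For reference, the trimmed `init` target above now boils down to two management commands. A hedged sketch, assuming `MANAGEMENT_COMMAND` resolves to `awx-manage` in the development environment and using an illustrative hostname:

```
# Equivalent of the simplified init recipe, run outside of make;
# the hostname value is a placeholder.
$ awx-manage provision_instance --hostname=awx_1
$ awx-manage register_queue --queuename=tower --instance_percent=100
```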
@@ -3,7 +3,7 @@

<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />

AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is the upstream project for [Tower](https://www.ansible.com/tower), a commercial derivative of AWX.
AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is one of the upstream projects for [Red Hat Ansible Automation Platform](https://www.ansible.com/products/automation-platform).

To install AWX, please view the [Install guide](./INSTALL.md).
@@ -85,7 +85,7 @@ def oauth2_getattribute(self, attr):
    # setting lookups for references to model classes (e.g.,
    # oauth2_settings.REFRESH_TOKEN_MODEL)
    # If we're doing an OAuth2 setting lookup *while running* a migration,
    # don't do our usual "Configure Tower in Tower" database setting lookup
    # don't do our usual database settings lookup
    val = settings.OAUTH2_PROVIDER.get(attr)
    if val is None:
        val = object.__getattribute__(self, attr)
@@ -77,7 +77,7 @@ register(
    required=False,
    default='',
    label=_('Login redirect override URL'),
    help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the Tower login page.'),
    help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.'),
    category=_('Authentication'),
    category_slug='authentication',
)
@@ -789,7 +789,7 @@ class RetrieveUpdateAPIView(RetrieveAPIView, generics.RetrieveUpdateAPIView):
        return super(RetrieveUpdateAPIView, self).partial_update(request, *args, **kwargs)

    def update_filter(self, request, *args, **kwargs):
        ''' scrub any fields the user cannot/should not put/patch, based on user context. This runs after read-only serialization filtering '''
        '''scrub any fields the user cannot/should not put/patch, based on user context. This runs after read-only serialization filtering'''
        pass
@@ -26,6 +26,9 @@ from awx.main.fields import JSONField, ImplicitRoleField
from awx.main.models import NotificationTemplate
from awx.main.tasks import AWXReceptorJob

# Polymorphic
from polymorphic.models import PolymorphicModel


class Metadata(metadata.SimpleMetadata):
    def get_field_info(self, field):

@@ -78,7 +81,9 @@ class Metadata(metadata.SimpleMetadata):
            field_info['help_text'] = field_help_text[field.field_name].format(verbose_name)

        if field.field_name == 'type':
            field_info['filterable'] = True
            # Only include model classes with `type` field.
            if issubclass(serializer.Meta.model, PolymorphicModel):
                field_info['filterable'] = True
            else:
                for model_field in serializer.Meta.model._meta.fields:
                    if field.field_name == model_field.name:
@@ -21,6 +21,7 @@ from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import validate_password as django_validate_password
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError
from django.db import models

@@ -43,7 +44,7 @@ from polymorphic.models import PolymorphicModel

# AWX
from awx.main.access import get_user_capabilities
from awx.main.constants import SCHEDULEABLE_PROVIDERS, ACTIVE_STATES, CENSOR_VALUE
from awx.main.constants import ACTIVE_STATES, CENSOR_VALUE
from awx.main.models import (
    ActivityStream,
    AdHocCommand,

@@ -51,7 +52,6 @@ from awx.main.models import (
    Credential,
    CredentialInputSource,
    CredentialType,
    CustomInventoryScript,
    ExecutionEnvironment,
    Group,
    Host,

@@ -92,6 +92,7 @@ from awx.main.models import (
    WorkflowJobTemplate,
    WorkflowJobTemplateNode,
    StdoutMaxBytesExceeded,
    CLOUD_INVENTORY_SOURCES,
)
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
from awx.main.models.rbac import get_roles_on_resource, role_summary_fields_generator

@@ -167,11 +168,9 @@ SUMMARIZABLE_FK_FIELDS = {
    'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
    'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
    'inventory_source': ('id', 'name', 'source', 'last_updated', 'status'),
    'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,
    'source_script': DEFAULT_SUMMARY_FIELDS,
    'role': ('id', 'role_field'),
    'notification_template': DEFAULT_SUMMARY_FIELDS,
    'instance_group': ('id', 'name', 'controller_id', 'is_container_group'),
    'instance_group': ('id', 'name', 'is_container_group'),
    'insights_credential': DEFAULT_SUMMARY_FIELDS,
    'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
    'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),

@@ -963,6 +962,7 @@ class UserSerializer(BaseSerializer):
        return ret

    def validate_password(self, value):
        django_validate_password(value)
        if not self.instance and value in (None, ''):
            raise serializers.ValidationError(_('Password required for new User.'))
        return value

@@ -1350,6 +1350,7 @@ class ProjectOptionsSerializer(BaseSerializer):
            'scm_branch',
            'scm_refspec',
            'scm_clean',
            'scm_track_submodules',
            'scm_delete_on_update',
            'credential',
            'timeout',

@@ -1384,6 +1385,8 @@ class ProjectOptionsSerializer(BaseSerializer):
            errors['scm_branch'] = _('SCM branch cannot be used with archive projects.')
        if attrs.get('scm_refspec') and scm_type != 'git':
            errors['scm_refspec'] = _('SCM refspec can only be used with git projects.')
        if attrs.get('scm_track_submodules') and scm_type != 'git':
            errors['scm_track_submodules'] = _('SCM track_submodules can only be used with git projects.')

        if errors:
            raise serializers.ValidationError(errors)

@@ -1412,6 +1415,19 @@ class ExecutionEnvironmentSerializer(BaseSerializer):
            res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential.pk})
        return res

    def validate_credential(self, value):
        if value and value.kind != 'registry':
            raise serializers.ValidationError(_('Only Container Registry credentials can be associated with an Execution Environment'))
        return value

    def validate(self, attrs):
        # prevent changing organization of ee. Unsetting (change to null) is allowed
        if self.instance:
            org = attrs.get('organization', None)
            if org and org.pk != self.instance.organization_id:
                raise serializers.ValidationError({"organization": _("Cannot change the organization of an execution environment")})
        return super(ExecutionEnvironmentSerializer, self).validate(attrs)


class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):

@@ -1497,7 +1513,7 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
        )

        if get_field_from_model_or_attrs('scm_type') == '':
            for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):
            for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_track_submodules', 'scm_clean'):
                if get_field_from_model_or_attrs(fd):
                    raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})
        return super(ProjectSerializer, self).validate(attrs)

@@ -1968,49 +1984,6 @@ class GroupVariableDataSerializer(BaseVariableDataSerializer):
        model = Group


class CustomInventoryScriptSerializer(BaseSerializer):

    script = serializers.CharField(trim_whitespace=False)
    show_capabilities = ['edit', 'delete', 'copy']
    capabilities_prefetch = [{'edit': 'admin'}]

    class Meta:
        model = CustomInventoryScript
        fields = ('*', "script", "organization")

    def validate_script(self, value):
        if not value.startswith("#!"):
            raise serializers.ValidationError(_('Script must begin with a hashbang sequence: i.e.... #!/usr/bin/env python'))
        return value

    def to_representation(self, obj):
        ret = super(CustomInventoryScriptSerializer, self).to_representation(obj)
        if obj is None:
            return ret
        request = self.context.get('request', None)
        if (
            request.user not in obj.admin_role
            and not request.user.is_superuser
            and not request.user.is_system_auditor
            and not (obj.organization is not None and request.user in obj.organization.auditor_role)
        ):
            ret['script'] = None
        return ret

    def get_related(self, obj):
        res = super(CustomInventoryScriptSerializer, self).get_related(obj)
        res.update(
            dict(
                object_roles=self.reverse('api:inventory_script_object_roles_list', kwargs={'pk': obj.pk}),
                copy=self.reverse('api:inventory_script_copy', kwargs={'pk': obj.pk}),
            )
        )

        if obj.organization:
            res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
        return res


class InventorySourceOptionsSerializer(BaseSerializer):
    credential = DeprecatedCredentialField(help_text=_('Cloud credential to use for inventory updates.'))

@@ -2019,7 +1992,6 @@ class InventorySourceOptionsSerializer(BaseSerializer):
            '*',
            'source',
            'source_path',
            'source_script',
            'source_vars',
            'credential',
            'enabled_var',

@@ -2037,8 +2009,6 @@ class InventorySourceOptionsSerializer(BaseSerializer):
        res = super(InventorySourceOptionsSerializer, self).get_related(obj)
        if obj.credential:  # TODO: remove when 'credential' field is removed
            res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential})
        if obj.source_script:
            res['source_script'] = self.reverse('api:inventory_script_detail', kwargs={'pk': obj.source_script.pk})
        return res

    def validate_source_vars(self, value):

@@ -2048,34 +2018,6 @@ class InventorySourceOptionsSerializer(BaseSerializer):
                raise serializers.ValidationError(_("`{}` is a prohibited environment variable".format(env_k)))
        return ret

    def validate(self, attrs):
        # TODO: Validate source
        errors = {}

        source = attrs.get('source', self.instance and self.instance.source or '')
        source_script = attrs.get('source_script', self.instance and self.instance.source_script or '')
        if source == 'custom':
            if source_script is None or source_script == '':
                errors['source_script'] = _("If 'source' is 'custom', 'source_script' must be provided.")
            else:
                try:
                    if not self.instance:
                        dest_inventory = attrs.get('inventory', None)
                        if not dest_inventory:
                            errors['inventory'] = _("Must provide an inventory.")
                    else:
                        dest_inventory = self.instance.inventory
                    if dest_inventory and source_script.organization != dest_inventory.organization:
                        errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.")
                except Exception:
                    errors['source_script'] = _("'source_script' doesn't exist.")
                    logger.exception('Problem processing source_script validation.')

        if errors:
            raise serializers.ValidationError(errors)

        return super(InventorySourceOptionsSerializer, self).validate(attrs)

    # TODO: remove when old 'credential' fields are removed
    def get_summary_fields(self, obj):
        summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)

@@ -4792,7 +4734,7 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
        return summary_fields

    def validate_unified_job_template(self, value):
        if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
        if type(value) == InventorySource and value.source not in CLOUD_INVENTORY_SOURCES:
            raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
        elif type(value) == Project and value.scm_type == '':
            raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))

@@ -4805,6 +4747,14 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
        )
        return value

    def validate(self, attrs):
        # if the schedule is being disabled, there's no need
        # validate the related UnifiedJobTemplate
        # see: https://github.com/ansible/awx/issues/8641
        if self.context['request'].method == 'PATCH' and attrs == {'enabled': False}:
            return attrs
        return super(ScheduleSerializer, self).validate(attrs)


class InstanceSerializer(BaseSerializer):
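The new `validate()` short-circuit above means a schedule can be switched off even when its related job template would no longer pass validation. A hedged sketch of the exact request shape it special-cases; the host, schedule id, and credentials are placeholders:

```
# Matches the PATCH + {"enabled": false} case handled in validate() above.
$ curl -s -u admin:password -X PATCH \
    -H "Content-Type: application/json" \
    -d '{"enabled": false}' \
    https://awx.example.org/api/v2/schedules/42/
```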
@@ -4868,10 +4818,6 @@ class InstanceGroupSerializer(BaseSerializer):
    )
    jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance group'), read_only=True)
    instances = serializers.SerializerMethodField()
    is_controller = serializers.BooleanField(help_text=_('Indicates whether instance group controls any other group'), read_only=True)
    is_isolated = serializers.BooleanField(
        help_text=_('Indicates whether instances in this group are isolated.' 'Isolated groups have a designated controller group.'), read_only=True
    )
    is_container_group = serializers.BooleanField(
        required=False,
        help_text=_('Indicates whether instances in this group are containerized.' 'Containerized groups have a designated Openshift or Kubernetes cluster.'),

@@ -4919,9 +4865,6 @@ class InstanceGroupSerializer(BaseSerializer):
            "jobs_running",
            "jobs_total",
            "instances",
            "controller",
            "is_controller",
            "is_isolated",
            "is_container_group",
            "credential",
            "policy_instance_percentage",

@@ -4935,8 +4878,6 @@ class InstanceGroupSerializer(BaseSerializer):
        res = super(InstanceGroupSerializer, self).get_related(obj)
        res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
        res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
        if obj.controller_id:
            res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})
        if obj.credential:
            res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})

@@ -4948,10 +4889,6 @@ class InstanceGroupSerializer(BaseSerializer):
                raise serializers.ValidationError(_('Duplicate entry {}.').format(instance_name))
            if not Instance.objects.filter(hostname=instance_name).exists():
                raise serializers.ValidationError(_('{} is not a valid hostname of an existing instance.').format(instance_name))
            if Instance.objects.get(hostname=instance_name).is_isolated():
                raise serializers.ValidationError(_('Isolated instances may not be added or removed from instances groups via the API.'))
            if self.instance and self.instance.controller_id is not None:
                raise serializers.ValidationError(_('Isolated instance group membership may not be managed via the API.'))
            if value and self.instance and self.instance.is_container_group:
                raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
        return value
@@ -62,7 +62,7 @@ class SwaggerSchemaView(APIView):
    renderer_classes = [CoreJSONRenderer, renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer]

    def get(self, request):
        generator = SuperUserSchemaGenerator(title='Ansible Tower API', patterns=None, urlconf=None)
        generator = SuperUserSchemaGenerator(title='Ansible Automation Platform controller API', patterns=None, urlconf=None)
        schema = generator.get_schema(request=request)
        # python core-api doesn't support the deprecation yet, so track it
        # ourselves and return it in a response header
@@ -1,16 +0,0 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.conf.urls import url

from awx.api.views import InventoryScriptList, InventoryScriptDetail, InventoryScriptObjectRolesList, InventoryScriptCopy


urls = [
    url(r'^$', InventoryScriptList.as_view(), name='inventory_script_list'),
    url(r'^(?P<pk>[0-9]+)/$', InventoryScriptDetail.as_view(), name='inventory_script_detail'),
    url(r'^(?P<pk>[0-9]+)/object_roles/$', InventoryScriptObjectRolesList.as_view(), name='inventory_script_object_roles_list'),
    url(r'^(?P<pk>[0-9]+)/copy/$', InventoryScriptCopy.as_view(), name='inventory_script_copy'),
]

__all__ = ['urls']
@@ -43,7 +43,6 @@ from .host import urls as host_urls
from .group import urls as group_urls
from .inventory_source import urls as inventory_source_urls
from .inventory_update import urls as inventory_update_urls
from .inventory_script import urls as inventory_script_urls
from .credential_type import urls as credential_type_urls
from .credential import urls as credential_urls
from .credential_input_source import urls as credential_input_source_urls

@@ -111,7 +110,6 @@ v2_urls = [
    url(r'^groups/', include(group_urls)),
    url(r'^inventory_sources/', include(inventory_source_urls)),
    url(r'^inventory_updates/', include(inventory_update_urls)),
    url(r'^inventory_scripts/', include(inventory_script_urls)),
    url(r'^credentials/', include(credential_urls)),
    url(r'^roles/', include(role_urls)),
    url(r'^job_templates/', include(job_template_urls)),
@@ -152,10 +152,6 @@ from awx.api.views.inventory import ( # noqa
    InventoryList,
    InventoryDetail,
    InventoryUpdateEventsList,
    InventoryScriptList,
    InventoryScriptDetail,
    InventoryScriptObjectRolesList,
    InventoryScriptCopy,
    InventoryList,
    InventoryDetail,
    InventoryActivityStreamList,
@@ -211,7 +207,7 @@ class DashboardView(APIView):
    swagger_topic = 'Dashboard'

    def get(self, request, format=None):
        ''' Show Dashboard Details '''
        '''Show Dashboard Details'''
        data = OrderedDict()
        data['related'] = {'jobs_graph': reverse('api:dashboard_jobs_graph_view', request=request)}
        user_inventory = get_user_queryset(request.user, models.Inventory)
@@ -422,14 +418,6 @@ class InstanceGroupDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAP
        data.pop('policy_instance_list', None)
        return super(InstanceGroupDetail, self).update_raw_data(data)

    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
        if instance.controller is not None:
            raise PermissionDenied(detail=_("Isolated Groups can not be removed from the API"))
        if instance.controlled_groups.count():
            raise PermissionDenied(detail=_("Instance Groups acting as a controller for an Isolated Group can not be removed from the API"))
        return super(InstanceGroupDetail, self).destroy(request, *args, **kwargs)


class InstanceGroupUnifiedJobsList(SubListAPIView):
@@ -546,7 +534,7 @@ class ScheduleUnifiedJobsList(SubListAPIView):


class AuthView(APIView):
    ''' List enabled single-sign-on endpoints '''
    '''List enabled single-sign-on endpoints'''

    authentication_classes = []
    permission_classes = (AllowAny,)
@@ -1237,7 +1225,7 @@ class UserDetail(RetrieveUpdateDestroyAPIView):
    serializer_class = serializers.UserSerializer

    def update_filter(self, request, *args, **kwargs):
        ''' make sure non-read-only fields that can only be edited by admins, are only edited by admins '''
        '''make sure non-read-only fields that can only be edited by admins, are only edited by admins'''
        obj = self.get_object()
        can_change = request.user.can_access(models.User, 'change', obj, request.data)
        can_admin = request.user.can_access(models.User, 'admin', obj, request.data)
@@ -1600,7 +1588,7 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie


class HostGroupsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
    ''' the list of groups a host is directly a member of '''
    '''the list of groups a host is directly a member of'''

    model = models.Group
    serializer_class = serializers.GroupSerializer

@@ -1622,7 +1610,7 @@ class HostGroupsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):


class HostAllGroupsList(SubListAPIView):
    ''' the list of all groups of which the host is directly or indirectly a member '''
    '''the list of all groups of which the host is directly or indirectly a member'''

    model = models.Group
    serializer_class = serializers.GroupSerializer

@@ -1862,7 +1850,7 @@ class GroupPotentialChildrenList(SubListAPIView):


class GroupHostsList(HostRelatedSearchMixin, ControlledByScmMixin, SubListCreateAttachDetachAPIView):
    ''' the list of hosts directly below a group '''
    '''the list of hosts directly below a group'''

    model = models.Host
    serializer_class = serializers.HostSerializer

@@ -1887,7 +1875,7 @@ class GroupHostsList(HostRelatedSearchMixin, ControlledByScmMixin, SubListCreate


class GroupAllHostsList(HostRelatedSearchMixin, SubListAPIView):
    ''' the list of all hosts below a group, even including subgroups '''
    '''the list of all hosts below a group, even including subgroups'''

    model = models.Host
    serializer_class = serializers.HostSerializer
@@ -4262,13 +4250,13 @@ class NotificationTemplateTest(GenericAPIView):

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        msg = "Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE)
        msg = "Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE)
        if obj.notification_type in ('email', 'pagerduty'):
            body = "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)
            body = "Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)
        elif obj.notification_type in ('webhook', 'grafana'):
            body = '{{"body": "Ansible Tower Test Notification {} {}"}}'.format(obj.id, settings.TOWER_URL_BASE)
            body = '{{"body": "Test Notification {} {}"}}'.format(obj.id, settings.TOWER_URL_BASE)
        else:
            body = {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}
            body = {"body": "Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}
        notification = obj.generate_notification(msg, body)

        if not notification:
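The rebranded test messages above are what this view emits when it is triggered. A hedged sketch of exercising it; the test sub-resource path, template id, and credentials below are assumptions for illustration:

```
# POSTing to the template's test sub-resource should fire a
# "Test Notification <id> <url>" message per the view above.
$ curl -s -u admin:password -X POST \
    https://awx.example.org/api/v2/notification_templates/5/test/
```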
@@ -25,8 +25,6 @@ from awx.main.models import (
    InstanceGroup,
    InventoryUpdateEvent,
    InventoryUpdate,
    InventorySource,
    CustomInventoryScript,
)
from awx.api.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, SubListAPIView, SubListAttachDetachAPIView, ResourceAccessList, CopyAPIView

@@ -36,7 +34,6 @@ from awx.api.serializers import (
    RoleSerializer,
    InstanceGroupSerializer,
    InventoryUpdateEventSerializer,
    CustomInventoryScriptSerializer,
    JobTemplateSerializer,
)
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, ControlledByScmMixin

@@ -58,55 +55,6 @@ class InventoryUpdateEventsList(SubListAPIView):
        return super(InventoryUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)


class InventoryScriptList(ListCreateAPIView):

    deprecated = True

    model = CustomInventoryScript
    serializer_class = CustomInventoryScriptSerializer


class InventoryScriptDetail(RetrieveUpdateDestroyAPIView):

    deprecated = True

    model = CustomInventoryScript
    serializer_class = CustomInventoryScriptSerializer

    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
        can_delete = request.user.can_access(self.model, 'delete', instance)
        if not can_delete:
            raise PermissionDenied(_("Cannot delete inventory script."))
        for inv_src in InventorySource.objects.filter(source_script=instance):
            inv_src.source_script = None
            inv_src.save()
        return super(InventoryScriptDetail, self).destroy(request, *args, **kwargs)


class InventoryScriptObjectRolesList(SubListAPIView):

    deprecated = True

    model = Role
    serializer_class = RoleSerializer
    parent_model = CustomInventoryScript
    search_fields = ('role_field', 'content_type__model')

    def get_queryset(self):
        po = self.get_parent_object()
        content_type = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=content_type, object_id=po.pk)


class InventoryScriptCopy(CopyAPIView):

    deprecated = True

    model = CustomInventoryScript
    copy_return_serializer_class = CustomInventoryScriptSerializer


class InventoryList(ListCreateAPIView):

    model = Inventory
@@ -32,7 +32,7 @@ class MetricsView(APIView):
    renderer_classes = [renderers.PlainTextRenderer, renderers.PrometheusJSONRenderer, renderers.BrowsableAPIRenderer]

    def get(self, request):
        ''' Show Metrics Details '''
        '''Show Metrics Details'''
        if request.user.is_superuser or request.user.is_system_auditor:
            metrics_to_show = ''
            if not request.query_params.get('subsystemonly', "0") == "1":
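The `subsystemonly` toggle in the view above can be exercised directly against the metrics endpoint (registered as `api:metrics_view` elsewhere in this changeset). A hedged sketch; the host and credentials are placeholders, and per the check above the request must come from a superuser or system auditor:

```
# Full metrics vs. subsystem-only metrics, per the query-param check above.
$ curl -s -u admin:password "https://awx.example.org/api/v2/metrics/"
$ curl -s -u admin:password "https://awx.example.org/api/v2/metrics/?subsystemonly=1"
```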
@@ -85,15 +85,6 @@ class InstanceGroupMembershipMixin(object):
        ig_obj.save(update_fields=['policy_instance_list'])
        return response

    def is_valid_relation(self, parent, sub, created=False):
        if sub.is_isolated():
            return {'error': _('Isolated instances may not be added or removed from instances groups via the API.')}
        if self.parent_model is InstanceGroup:
            ig_obj = self.get_parent_object()
            if ig_obj.controller_id is not None:
                return {'error': _('Isolated instance group membership may not be managed via the API.')}
        return None

    def unattach_validate(self, request):
        (sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
        if res:
@@ -43,7 +43,7 @@ class ApiRootView(APIView):

    @method_decorator(ensure_csrf_cookie)
    def get(self, request, format=None):
        ''' List supported API versions '''
        '''List supported API versions'''

        v2 = reverse('api:api_v2_root_view', kwargs={'version': 'v2'})
        data = OrderedDict()

@@ -78,7 +78,7 @@ class ApiVersionRootView(APIView):
    swagger_topic = 'Versioning'

    def get(self, request, format=None):
        ''' List top level resources '''
        '''List top level resources'''
        data = OrderedDict()
        data['ping'] = reverse('api:api_v2_ping_view', request=request)
        data['instances'] = reverse('api:instance_list', request=request)

@@ -100,7 +100,6 @@ class ApiVersionRootView(APIView):
        data['tokens'] = reverse('api:o_auth2_token_list', request=request)
        data['metrics'] = reverse('api:metrics_view', request=request)
        data['inventory'] = reverse('api:inventory_list', request=request)
        data['inventory_scripts'] = reverse('api:inventory_script_list', request=request)
        data['inventory_sources'] = reverse('api:inventory_source_list', request=request)
        data['inventory_updates'] = reverse('api:inventory_update_list', request=request)
        data['groups'] = reverse('api:group_list', request=request)
@@ -30,8 +30,8 @@ if MODE == 'production':
    except FileNotFoundError:
        pass
    except ValueError as e:
        logger.error("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.")
        raise Exception("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.") from e
        logger.error("Missing or incorrect metadata for controller version. Ensure controller was installed using the setup playbook.")
        raise Exception("Missing or incorrect metadata for controller version. Ensure controller was installed using the setup playbook.") from e


os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings")
@@ -4,7 +4,7 @@
# Django
from django.utils.module_loading import autodiscover_modules

# Tower
# AWX
from .registry import settings_registry

default_app_config = 'awx.conf.apps.ConfConfig'

@@ -4,7 +4,7 @@
# Django
from django.db.models import Q

# Tower
# AWX
from awx.main.access import BaseAccess, register_access
from awx.conf.models import Setting

@@ -2,7 +2,7 @@
from django.conf import settings
from django.utils.translation import ugettext_lazy as _

# Tower
# AWX
from awx.conf import fields, register
from awx.conf import settings_registry

@@ -11,5 +11,5 @@ def _get_validated_license_data():


def get_license():
    """Return a dictionary representing the active license on this Tower instance."""
    """Return a dictionary representing the active license on this instance."""
    return _get_validated_license_data()

@@ -7,7 +7,7 @@ import json
# Django
from django.db import models

# Tower
# AWX
from awx.main.models.base import CreatedModifiedModel, prevent_search
from awx.main.fields import JSONField
from awx.main.utils import encrypt_field

@@ -92,11 +92,7 @@ class SettingsRegistry(object):
                continue
            if kwargs.get('category_slug', None) in slugs_to_ignore:
                continue
            if (
                read_only in {True, False}
                and kwargs.get('read_only', False) != read_only
                and setting not in ('INSTALL_UUID', 'AWX_ISOLATED_PRIVATE_KEY', 'AWX_ISOLATED_PUBLIC_KEY')
            ):
            if read_only in {True, False} and kwargs.get('read_only', False) != read_only and setting != 'INSTALL_UUID':
                # Note: Doesn't catch fields that set read_only via __init__;
                # read-only field kwargs should always include read_only=True.
                continue
@@ -1,7 +1,7 @@
# Django REST Framework
from rest_framework import serializers

# Tower
# AWX
from awx.api.fields import VerbatimField
from awx.api.serializers import BaseSerializer
from awx.conf.models import Setting

@@ -28,7 +28,7 @@ class SettingSerializer(BaseSerializer):


class SettingCategorySerializer(serializers.Serializer):
    """Serialize setting category """
    """Serialize setting category"""

    url = serializers.CharField(read_only=True)
    slug = serializers.CharField(read_only=True)

@@ -81,10 +81,8 @@ class SettingSingletonSerializer(serializers.Serializer):
            if self.instance and not hasattr(self.instance, key):
                continue
            extra_kwargs = {}
            # Make LICENSE and AWX_ISOLATED_KEY_GENERATION read-only here;
            # LICENSE is only updated via /api/v2/config/
            # AWX_ISOLATED_KEY_GENERATION is only set/unset via the setup playbook
            if key in ('LICENSE', 'AWX_ISOLATED_KEY_GENERATION'):
            # Make LICENSE read-only here; LICENSE is only updated via /api/v2/config/
            if key == 'LICENSE':
                extra_kwargs['read_only'] = True
            field = settings_registry.get_setting_field(key, mixin_class=SettingFieldMixin, for_user=bool(category_slug == 'user'), **extra_kwargs)
            fields[key] = field
@@ -20,7 +20,7 @@ from rest_framework.fields import empty, SkipField

import cachetools

# Tower
# AWX
from awx.main.utils import encrypt_field, decrypt_field
from awx.conf import settings_registry
from awx.conf.models import Setting

@@ -350,13 +350,8 @@ class SettingsWrapper(UserSettingsHolder):
        if value is empty:
            setting = None
            setting_id = None
            if not field.read_only or name in (
                # these values are read-only - however - we *do* want
                # to fetch their value from the database
                'INSTALL_UUID',
                'AWX_ISOLATED_PRIVATE_KEY',
                'AWX_ISOLATED_PUBLIC_KEY',
            ):
            # this value is read-only, however we *do* want to fetch its value from the database
            if not field.read_only or name == 'INSTALL_UUID':
                setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
                if setting:
                    if getattr(field, 'encrypted', False):
@@ -8,7 +8,7 @@ from django.db.models.signals import post_save, pre_delete, post_delete
from django.core.cache import cache
from django.dispatch import receiver

# Tower
# AWX
from awx.conf import settings_registry
from awx.conf.models import Setting


@@ -21,7 +21,7 @@ from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status

# Tower
# AWX
from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdateDestroyAPIView
from awx.api.permissions import IsSuperUser
from awx.api.versioning import reverse
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -34,7 +34,6 @@ from awx.main.models import (
    Credential,
    CredentialType,
    CredentialInputSource,
    CustomInventoryScript,
    ExecutionEnvironment,
    Group,
    Host,

@@ -465,7 +464,7 @@ class BaseAccess(object):
            if display_method == 'schedule':
                user_capabilities['schedule'] = user_capabilities['start']
                continue
            elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CustomInventoryScript, CredentialInputSource)):
            elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CredentialInputSource)):
                user_capabilities['delete'] = user_capabilities['edit']
                continue
            elif display_method == 'copy' and isinstance(obj, (Group, Host)):

@@ -1031,7 +1030,7 @@ class InventorySourceAccess(NotificationAttachMixin, BaseAccess):

    model = InventorySource
    select_related = ('created_by', 'modified_by', 'inventory')
    prefetch_related = ('credentials__credential_type', 'last_job', 'source_script', 'source_project')
    prefetch_related = ('credentials__credential_type', 'last_job', 'source_project')

    def filtered_queryset(self):
        return self.model.objects.filter(inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role'))

@@ -1093,7 +1092,7 @@ class InventoryUpdateAccess(BaseAccess):
        'modified_by',
        'inventory_source',
    )
    prefetch_related = ('unified_job_template', 'instance_group', 'credentials__credential_type', 'inventory', 'source_script')
    prefetch_related = ('unified_job_template', 'instance_group', 'credentials__credential_type', 'inventory')

    def filtered_queryset(self):
        return self.model.objects.filter(inventory_source__inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role'))

@@ -2627,7 +2626,7 @@ class LabelAccess(BaseAccess):
        return self.model.objects.filter(
            Q(organization__in=Organization.accessible_pk_qs(self.user, 'read_role'))
            | Q(unifiedjobtemplate_labels__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
        )
        ).distinct()

    @check_superuser
    def can_add(self, data):

@@ -2671,7 +2670,6 @@ class ActivityStreamAccess(BaseAccess):
        'role',
        'actor',
        'schedule',
        'custom_inventory_script',
        'unified_job_template',
        'workflow_job_template_node',
    )

@@ -2755,33 +2753,6 @@ class ActivityStreamAccess(BaseAccess):
        return False


class CustomInventoryScriptAccess(BaseAccess):

    model = CustomInventoryScript
    prefetch_related = ('created_by', 'modified_by', 'organization')

    def filtered_queryset(self):
        return self.model.accessible_objects(self.user, 'read_role').all()

    @check_superuser
    def can_add(self, data):
        if not data:  # So the browseable API will work
            return Organization.accessible_objects(self.user, 'admin_role').exists()
        return self.check_related('organization', Organization, data, mandatory=True)

    @check_superuser
    def can_admin(self, obj, data=None):
        return self.check_related('organization', Organization, data, obj=obj) and self.user in obj.admin_role

    @check_superuser
    def can_change(self, obj, data):
        return self.can_admin(obj, data=data)

    @check_superuser
    def can_delete(self, obj):
        return self.can_admin(obj)


class RoleAccess(BaseAccess):
    """
    - I can see roles when
@@ -36,7 +36,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
"""


def trivial_slicing(key, since, until):
def trivial_slicing(key, since, until, last_gather):
    if since is not None:
        return [(since, until)]

@@ -45,11 +45,11 @@ def trivial_slicing(key, since, until):
    horizon = until - timedelta(weeks=4)
    last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
    last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
    last_entry = max(last_entries.get(key) or settings.AUTOMATION_ANALYTICS_LAST_GATHER or horizon, horizon)
    last_entry = max(last_entries.get(key) or last_gather, horizon)
    return [(last_entry, until)]


def four_hour_slicing(key, since, until):
def four_hour_slicing(key, since, until, last_gather):
    if since is not None:
        last_entry = since
    else:

@@ -58,7 +58,7 @@ def four_hour_slicing(key, since, until):
        horizon = until - timedelta(weeks=4)
        last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
        last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
        last_entry = max(last_entries.get(key) or settings.AUTOMATION_ANALYTICS_LAST_GATHER or horizon, horizon)
        last_entry = max(last_entries.get(key) or last_gather, horizon)

    start, end = last_entry, None
    while start < until:

@@ -67,15 +67,14 @@ def four_hour_slicing(key, since, until):
        start = end


def events_slicing(key, since, until):
def events_slicing(key, since, until, last_gather):
    from awx.conf.models import Setting

    last_gather = settings.AUTOMATION_ANALYTICS_LAST_GATHER
    last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
    last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
    horizon = until - timedelta(weeks=4)

    lower = since or last_gather or horizon
    lower = since or last_gather
    if not since and last_entries.get(key):
        lower = horizon
    pk_values = models.JobEvent.objects.filter(created__gte=lower, created__lte=until).aggregate(Min('pk'), Max('pk'))
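These slicing helpers decide how much history each analytics collector ships relative to the last gather. The gather itself can also be kicked off by hand with the management command documented for AWX/Tower; a hedged sketch (verify the flag spelling with `--help` before relying on it):

```
# Trigger an analytics gather outside of the scheduled task.
$ awx-manage gather_analytics --ship
```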
@@ -135,7 +134,6 @@ def counts(since, **kwargs):
        models.WorkflowJobTemplate,
        models.Host,
        models.Schedule,
        models.CustomInventoryScript,
        models.NotificationTemplate,
    ):
        counts[camelcase_to_underscore(cls.__name__)] = cls.objects.count()

@@ -221,17 +219,11 @@ def projects_by_scm_type(since, **kwargs):
    return counts


def _get_isolated_datetime(last_check):
    if last_check:
        return last_check.isoformat()
    return last_check


@register('instance_info', '1.0', description=_('Cluster topology and capacity'))
@register('instance_info', '1.1', description=_('Cluster topology and capacity'))
def instance_info(since, include_hostnames=False, **kwargs):
    info = {}
    instances = models.Instance.objects.values_list('hostname').values(
        'uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'hostname', 'last_isolated_check', 'enabled'
        'uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'hostname', 'enabled'
    )
    for instance in instances:
        consumed_capacity = sum(x.task_impact for x in models.UnifiedJob.objects.filter(execution_node=instance['hostname'], status__in=('running', 'waiting')))

@@ -242,7 +234,6 @@ def instance_info(since, include_hostnames=False, **kwargs):
            'cpu': instance['cpu'],
            'memory': instance['memory'],
            'managed_by_policy': instance['managed_by_policy'],
            'last_isolated_check': _get_isolated_datetime(instance['last_isolated_check']),
            'enabled': instance['enabled'],
            'consumed_capacity': consumed_capacity,
            'remaining_capacity': instance['capacity'] - consumed_capacity,
@@ -347,35 +338,35 @@ def _copy_table(table, query, path):
@register('events_table', '1.2', format='csv', description=_('Automation task records'), expensive=events_slicing)
def events_table(since, full_path, until, **kwargs):
    def query(event_data):
        return f'''COPY (SELECT main_jobevent.id,
                          main_jobevent.created,
                          main_jobevent.modified,
                          main_jobevent.uuid,
                          main_jobevent.parent_uuid,
                          main_jobevent.event,
                          {event_data}->'task_action' AS task_action,
                          (CASE WHEN event = 'playbook_on_stats' THEN event_data END) as playbook_on_stats,
                          main_jobevent.failed,
                          main_jobevent.changed,
                          main_jobevent.playbook,
                          main_jobevent.play,
                          main_jobevent.task,
                          main_jobevent.role,
                          main_jobevent.job_id,
                          main_jobevent.host_id,
                          main_jobevent.host_name,
                          CAST({event_data}->>'start' AS TIMESTAMP WITH TIME ZONE) AS start,
                          CAST({event_data}->>'end' AS TIMESTAMP WITH TIME ZONE) AS end,
                          {event_data}->'duration' AS duration,
                          {event_data}->'res'->'warnings' AS warnings,
                          {event_data}->'res'->'deprecations' AS deprecations
                   FROM main_jobevent
                   WHERE (main_jobevent.id > {since} AND main_jobevent.id <= {until})
                   ORDER BY main_jobevent.id ASC) TO STDOUT WITH CSV HEADER'''

    try:
        return _copy_table(table='events', query=query("main_jobevent.event_data::json"), path=full_path)
-    except UntranslatableCharacter as exc:
+    except UntranslatableCharacter:
        return _copy_table(table='events', query=query("replace(main_jobevent.event_data::text, '\\u0000', '')::json"), path=full_path)

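The try/except above encodes a two-step export: PostgreSQL's ::json cast rejects text containing NUL (\u0000), which surfaces as the UntranslatableCharacter error (an AWX-side exception for that database failure), so the fallback re-runs the COPY with a replace() that scrubs the NULs first. A standalone sketch of the same pattern (editor's illustration, assuming a psycopg2 connection; copy_expert and rollback are real psycopg2 APIs, while the table and column names are just the ones visible in this diff):

    import psycopg2

    def copy_events_csv(conn, out_file, since, until):
        plain = "main_jobevent.event_data::json"
        scrubbed = "replace(main_jobevent.event_data::text, '\\u0000', '')::json"
        for cast in (plain, scrubbed):
            sql = (f"COPY (SELECT {cast}->'task_action' FROM main_jobevent "
                   f"WHERE id > {since} AND id <= {until}) TO STDOUT WITH CSV")
            try:
                with conn.cursor() as cur:
                    cur.copy_expert(sql, out_file)  # stream the CSV straight to a file object
                return
            except psycopg2.DataError:
                conn.rollback()  # the failed COPY aborts the transaction; reset before retrying
        raise RuntimeError("events export failed even after stripping NUL bytes")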
@@ -386,22 +377,22 @@ def unified_jobs_table(since, full_path, until, **kwargs):
                   django_content_type.model,
                   main_unifiedjob.organization_id,
                   main_organization.name as organization_name,
                   main_job.inventory_id,
                   main_inventory.name as inventory_name,
                   main_unifiedjob.created,
                   main_unifiedjob.name,
                   main_unifiedjob.unified_job_template_id,
                   main_unifiedjob.launch_type,
                   main_unifiedjob.schedule_id,
                   main_unifiedjob.execution_node,
                   main_unifiedjob.controller_node,
                   main_unifiedjob.cancel_flag,
                   main_unifiedjob.status,
                   main_unifiedjob.failed,
                   main_unifiedjob.started,
                   main_unifiedjob.finished,
                   main_unifiedjob.elapsed,
                   main_unifiedjob.job_explanation,
                   main_unifiedjob.instance_group_id,
                   main_unifiedjob.installed_collections,
                   main_unifiedjob.ansible_version
@@ -422,21 +413,21 @@ def unified_job_template_table(since, full_path, **kwargs):

@register('unified_job_template_table', '1.0', format='csv', description=_('Data on job templates'))
def unified_job_template_table(since, full_path, **kwargs):
    unified_job_template_query = '''COPY (SELECT main_unifiedjobtemplate.id,
                   main_unifiedjobtemplate.polymorphic_ctype_id,
                   django_content_type.model,
                   main_unifiedjobtemplate.created,
                   main_unifiedjobtemplate.modified,
                   main_unifiedjobtemplate.created_by_id,
                   main_unifiedjobtemplate.modified_by_id,
                   main_unifiedjobtemplate.name,
                   main_unifiedjobtemplate.current_job_id,
                   main_unifiedjobtemplate.last_job_id,
                   main_unifiedjobtemplate.last_job_failed,
                   main_unifiedjobtemplate.last_job_run,
                   main_unifiedjobtemplate.next_job_run,
                   main_unifiedjobtemplate.next_schedule_id,
                   main_unifiedjobtemplate.status
            FROM main_unifiedjobtemplate, django_content_type
            WHERE main_unifiedjobtemplate.polymorphic_ctype_id = django_content_type.id
            ORDER BY main_unifiedjobtemplate.id ASC) TO STDOUT WITH CSV HEADER'''

@@ -447,15 +438,15 @@ def unified_job_template_table(since, full_path, **kwargs):
def workflow_job_node_table(since, full_path, until, **kwargs):
    workflow_job_node_query = '''COPY (SELECT main_workflowjobnode.id,
                   main_workflowjobnode.created,
                   main_workflowjobnode.modified,
                   main_workflowjobnode.job_id,
                   main_workflowjobnode.unified_job_template_id,
                   main_workflowjobnode.workflow_job_id,
                   main_workflowjobnode.inventory_id,
                   success_nodes.nodes AS success_nodes,
                   failure_nodes.nodes AS failure_nodes,
                   always_nodes.nodes AS always_nodes,
                   main_workflowjobnode.do_not_run,
                   main_workflowjobnode.all_parents_must_converge
            FROM main_workflowjobnode
            LEFT JOIN (
@@ -483,12 +474,12 @@ def workflow_job_node_table(since, full_path, until, **kwargs):

@register('workflow_job_template_node_table', '1.0', format='csv', description=_('Data on workflows'))
def workflow_job_template_node_table(since, full_path, **kwargs):
    workflow_job_template_node_query = '''COPY (SELECT main_workflowjobtemplatenode.id,
                   main_workflowjobtemplatenode.created,
                   main_workflowjobtemplatenode.modified,
                   main_workflowjobtemplatenode.unified_job_template_id,
                   main_workflowjobtemplatenode.workflow_job_template_id,
                   main_workflowjobtemplatenode.inventory_id,
                   success_nodes.nodes AS success_nodes,
                   failure_nodes.nodes AS failure_nodes,
                   always_nodes.nodes AS always_nodes,

@@ -116,6 +116,51 @@ def package(target, data, timestamp):
    return None


def calculate_collection_interval(since, until):
    _now = now()

    # Make sure that the endpoints are not in the future.
    if until is not None and until > _now:
        until = _now
        logger.warning(f"End of the collection interval is in the future, setting to {_now}.")
    if since is not None and since > _now:
        since = _now
        logger.warning(f"Start of the collection interval is in the future, setting to {_now}.")

    # The value of `until` needs to be concrete, so resolve it. If it wasn't passed in,
    # set it to `now`, but only if that isn't more than 4 weeks ahead of a passed-in
    # `since` parameter.
    if since is not None:
        if until is not None:
            if until > since + timedelta(weeks=4):
                until = since + timedelta(weeks=4)
                logger.warning(f"End of the collection interval is greater than 4 weeks from start, setting end to {until}.")
        else:  # until is None
            until = min(since + timedelta(weeks=4), _now)
    elif until is None:
        until = _now

    if since and since >= until:
        logger.warning("Start of the collection interval is later than the end, ignoring request.")
        raise ValueError

    # The ultimate beginning of the interval needs to be compared to 4 weeks prior to
    # `until`, but we want to keep `since` empty if it wasn't passed in because we use that
    # case to know whether to use the bookkeeping settings variables to decide the start of
    # the interval.
    horizon = until - timedelta(weeks=4)
    if since is not None and since < horizon:
        since = horizon
        logger.warning(f"Start of the collection interval is more than 4 weeks prior to {until}, setting to {horizon}.")

    last_gather = settings.AUTOMATION_ANALYTICS_LAST_GATHER or horizon
    if last_gather < horizon:
        last_gather = horizon
        logger.warning(f"Last analytics run was more than 4 weeks prior to {until}, using {horizon} instead.")

    return since, until, last_gather
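Concretely, the clamping works like this (editor's sketch, not part of the diff; it assumes an AWX Django context, since the function reads AUTOMATION_ANALYTICS_LAST_GATHER from settings):

    from datetime import timedelta
    from django.utils.timezone import now

    start = now() - timedelta(weeks=10)
    since, until, last_gather = calculate_collection_interval(start, now())

    # the window is capped at 4 weeks by pulling `until` toward `since`;
    # `since` itself keeps the caller's starting point
    assert until == since + timedelta(weeks=4)

    # an inverted interval is refused outright
    calculate_collection_interval(now(), now() - timedelta(days=1))  # raises ValueError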

def gather(dest=None, module=None, subset=None, since=None, until=None, collection_type='scheduled'):
    """
    Gather all defined metrics and write them as JSON files in a .tgz
@@ -148,30 +193,12 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
    from awx.main.analytics import collectors
    from awx.main.signals import disable_activity_stream

-    _now = now()
-
-    # Make sure that the endpoints are not in the future.
-    until = None if until is None else min(until, _now)
-    since = None if since is None else min(since, _now)
-
-    if since and not until:
-        # If `since` is explicit but not `until`, `since` should be used to calculate the 4-week limit
-        until = min(since + timedelta(weeks=4), _now)
-    else:
-        until = _now if until is None else until
-
-    horizon = until - timedelta(weeks=4)
-    if since is not None:
-        # Make sure the start isn't more than 4 weeks prior to `until`.
-        since = max(since, horizon)
-
-    if since and since >= until:
-        logger.warning("Start of the collection interval is later than the end, ignoring request.")
-        return None
-
-    logger.debug("Last analytics run was: {}".format(settings.AUTOMATION_ANALYTICS_LAST_GATHER))
-    # LAST_GATHER time should always get truncated to less than 4 weeks back.
-    last_gather = max(settings.AUTOMATION_ANALYTICS_LAST_GATHER or horizon, horizon)
+    try:
+        since, until, last_gather = calculate_collection_interval(since, until)
+    except ValueError:
+        return None

    last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
    last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
@@ -201,7 +228,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
            key = func.__awx_analytics_key__
            filename = f'{key}.json'
            try:
-                last_entry = max(last_entries.get(key) or last_gather, horizon)
+                last_entry = max(last_entries.get(key) or last_gather, until - timedelta(weeks=4))
                results = (func(since or last_entry, collection_type=collection_type, until=until), func.__awx_analytics_version__)
                json.dumps(results)  # throwaway check to see if the data is json-serializable
                data[filename] = results
@@ -233,9 +260,9 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
            # allowed to be None, and will fall back to LAST_ENTRIES[key] or to
            # LAST_GATHER (truncated appropriately to match the 4-week limit).
            if func.__awx_expensive__:
-                slices = func.__awx_expensive__(key, since, until)
+                slices = func.__awx_expensive__(key, since, until, last_gather)
            else:
-                slices = collectors.trivial_slicing(key, since, until)
+                slices = collectors.trivial_slicing(key, since, until, last_gather)

            for start, end in slices:
                files = func(start, full_path=gather_dir, until=end)
@@ -259,9 +286,10 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
                    tgzfile = package(dest.parent, payload, until)
                    if tgzfile is not None:
                        tarfiles.append(tgzfile)
-                        if not ship(tgzfile):
-                            slice_succeeded, succeeded = False, False
-                            break
+                        if collection_type != 'dry-run':
+                            if not ship(tgzfile):
+                                slice_succeeded, succeeded = False, False
+                                break

        if slice_succeeded and collection_type != 'dry-run':
            with disable_activity_stream():
@@ -278,6 +306,14 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
            os.remove(fpath)
    with disable_activity_stream():
+        if not settings.AUTOMATION_ANALYTICS_LAST_GATHER or until > settings.AUTOMATION_ANALYTICS_LAST_GATHER:
+            # `AUTOMATION_ANALYTICS_LAST_GATHER` is set whether collection succeeds or fails;
+            # if collection fails because of a persistent, underlying issue and we do not set last_gather,
+            # we risk the collectors hitting an increasingly greater workload while the underlying issue
+            # remains unresolved. Put simply, if collection fails, we just move on.
+
+            # All that said, `AUTOMATION_ANALYTICS_LAST_GATHER` plays a much smaller role in determining
+            # what is actually collected than it used to; collectors now mostly rely on their respective entry
+            # under `last_entries` to determine what should be collected.
+            settings.AUTOMATION_ANALYTICS_LAST_GATHER = until

    shutil.rmtree(dest, ignore_errors=True)  # clean up individual artifact files
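Taken together with the new 'dry-run' branches above, a local collection run that never ships anything looks roughly like this (editor's sketch; the import path is assumed, and `dest` is passed as a pathlib.Path because the body above calls `dest.parent`):

    import pathlib
    from awx.main.analytics.core import gather  # assumed import path

    # exercises every collector and packages tarballs under dest,
    # but skips ship() and leaves the bookkeeping settings untouched
    gather(dest=pathlib.Path('/tmp/analytics'), collection_type='dry-run')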
@@ -1,5 +1,5 @@
from django.conf import settings
-from prometheus_client import PROCESS_COLLECTOR, PLATFORM_COLLECTOR, GC_COLLECTOR, CollectorRegistry, Gauge, Info, generate_latest
+from prometheus_client import CollectorRegistry, Gauge, Info, generate_latest

from awx.conf.license import get_license
from awx.main.utils import get_awx_version
@@ -31,7 +31,6 @@ def metrics():
        registry=REGISTRY,
    )
    SCHEDULE_COUNT = Gauge('awx_schedules_total', 'Number of schedules', registry=REGISTRY)
-    INV_SCRIPT_COUNT = Gauge('awx_inventory_scripts_total', 'Number of invetory scripts', registry=REGISTRY)
    USER_SESSIONS = Gauge(
        'awx_sessions_total',
        'Number of sessions',
@@ -41,8 +40,8 @@ def metrics():
        registry=REGISTRY,
    )
    CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs', registry=REGISTRY)
-    RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the Tower system', registry=REGISTRY)
-    PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the Tower system', registry=REGISTRY)
+    RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the system', registry=REGISTRY)
+    PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the system', registry=REGISTRY)
    STATUS = Gauge(
        'awx_status_total',
        'Status of Job launched',
@@ -54,7 +53,7 @@ def metrics():

    INSTANCE_CAPACITY = Gauge(
        'awx_instance_capacity',
-        'Capacity of each node in a Tower system',
+        'Capacity of each node in the system',
        [
            'hostname',
            'instance_uuid',
@@ -63,7 +62,7 @@ def metrics():
    )
    INSTANCE_CPU = Gauge(
        'awx_instance_cpu',
-        'CPU cores on each node in a Tower system',
+        'CPU cores on each node in the system',
        [
            'hostname',
            'instance_uuid',
@@ -72,7 +71,7 @@ def metrics():
    )
    INSTANCE_MEMORY = Gauge(
        'awx_instance_memory',
-        'RAM (Kb) on each node in a Tower system',
+        'RAM (Kb) on each node in the system',
        [
            'hostname',
            'instance_uuid',
@@ -81,7 +80,7 @@ def metrics():
    )
    INSTANCE_INFO = Info(
        'awx_instance',
-        'Info about each node in a Tower system',
+        'Info about each node in the system',
        [
            'hostname',
            'instance_uuid',
@@ -108,7 +107,7 @@ def metrics():
    )
    INSTANCE_CONSUMED_CAPACITY = Gauge(
        'awx_instance_consumed_capacity',
-        'Consumed capacity of each node in a Tower system',
+        'Consumed capacity of each node in the system',
        [
            'hostname',
            'instance_uuid',
@@ -117,7 +116,7 @@ def metrics():
    )
    INSTANCE_REMAINING_CAPACITY = Gauge(
        'awx_instance_remaining_capacity',
-        'Remaining capacity of each node in a Tower system',
+        'Remaining capacity of each node in the system',
        [
            'hostname',
            'instance_uuid',
@@ -160,7 +159,6 @@ def metrics():
    HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])

    SCHEDULE_COUNT.set(current_counts['schedule'])
-    INV_SCRIPT_COUNT.set(current_counts['custom_inventory_script'])
    CUSTOM_VENVS.set(current_counts['custom_virtualenvs'])

    USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
@@ -186,7 +184,6 @@ def metrics():
            INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid).info(
                {
                    'enabled': str(instance_data[uuid]['enabled']),
-                    'last_isolated_check': getattr(instance_data[uuid], 'last_isolated_check', 'None'),
                    'managed_by_policy': str(instance_data[uuid]['managed_by_policy']),
                    'version': instance_data[uuid]['version'],
                }
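The pattern throughout this module is: build a fresh CollectorRegistry, attach Gauges to it, set their values from current_counts, then render the result with generate_latest. A minimal standalone version of that flow (editor's illustration using the real prometheus_client API; the metric values and hostname are invented):

    from prometheus_client import CollectorRegistry, Gauge, generate_latest

    REGISTRY = CollectorRegistry()
    RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the system', registry=REGISTRY)
    CAPACITY = Gauge('awx_instance_capacity', 'Capacity of each node in the system', ['hostname'], registry=REGISTRY)

    RUNNING_JOBS.set(3)
    CAPACITY.labels(hostname='node1.example.org').set(57)

    print(generate_latest(REGISTRY).decode())  # Prometheus exposition text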
awx/main/conf.py (161 changes)
@@ -6,9 +6,8 @@ from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework import serializers
from rest_framework.fields import FloatField

-# Tower
+# AWX
from awx.conf import fields, register, register_validate
from awx.main.models import ExecutionEnvironment

@@ -59,8 +58,8 @@ register(
    field_class=fields.URLField,
    schemes=('http', 'https'),
    allow_plain_hostname=True,  # Allow hostname only without TLD.
-    label=_('Base URL of the Tower host'),
-    help_text=_('This setting is used by services like notifications to render ' 'a valid url to the Tower host.'),
+    label=_('Base URL of the service'),
+    help_text=_('This setting is used by services like notifications to render ' 'a valid url to the service.'),
    category=_('System'),
    category_slug='system',
)
@@ -85,8 +84,8 @@ register(
    field_class=fields.StringListField,
    label=_('Proxy IP Allowed List'),
    help_text=_(
-        "If Tower is behind a reverse proxy/load balancer, use this setting "
-        "to configure the proxy IP addresses from which Tower should trust "
+        "If the service is behind a reverse proxy/load balancer, use this setting "
+        "to configure the proxy IP addresses from which the service should trust "
        "custom REMOTE_HOST_HEADERS header values. "
        "If this setting is an empty list (the default), the headers specified by "
        "REMOTE_HOST_HEADERS will be trusted unconditionally')"
@@ -173,7 +172,7 @@ register(
register(
    'INSTALL_UUID',
    field_class=fields.CharField,
-    label=_('Unique identifier for an AWX/Tower installation'),
+    label=_('Unique identifier for an installation'),
    category=_('System'),
    category_slug='system',
    read_only=True,
@@ -224,7 +223,7 @@ register(
    help_text=_(
        'Ansible allows variable substitution via the Jinja2 templating '
        'language for --extra-vars. This poses a potential security '
-        'risk where Tower users with the ability to specify extra vars at job '
+        'risk where users with the ability to specify extra vars at job '
        'launch time can use Jinja2 templates to run arbitrary Python. It is '
        'recommended that this value be set to "template" or "never".'
    ),
@@ -236,11 +235,7 @@ register(
    'AWX_ISOLATION_BASE_PATH',
    field_class=fields.CharField,
    label=_('Job execution path'),
-    help_text=_(
-        'The directory in which Tower will create new temporary '
-        'directories for job execution and isolation '
-        '(such as credential files and custom inventory scripts).'
-    ),
+    help_text=_('The directory in which the service will create new temporary directories for job execution and isolation (such as credential files).'),
    category=_('Jobs'),
    category_slug='jobs',
)
@@ -255,138 +250,6 @@ register(
    category_slug='jobs',
)

register(
    'AWX_ISOLATED_CHECK_INTERVAL',
    field_class=fields.IntegerField,
    min_value=0,
    label=_('Isolated status check interval'),
    help_text=_('The number of seconds to sleep between status checks for jobs running on isolated instances.'),
    category=_('Jobs'),
    category_slug='jobs',
    unit=_('seconds'),
)

register(
    'AWX_ISOLATED_LAUNCH_TIMEOUT',
    field_class=fields.IntegerField,
    min_value=0,
    label=_('Isolated launch timeout'),
    help_text=_(
        'The timeout (in seconds) for launching jobs on isolated instances. '
        'This includes the time needed to copy source control files (playbooks) to the isolated instance.'
    ),
    category=_('Jobs'),
    category_slug='jobs',
    unit=_('seconds'),
)

register(
    'AWX_ISOLATED_CONNECTION_TIMEOUT',
    field_class=fields.IntegerField,
    min_value=0,
    default=10,
    label=_('Isolated connection timeout'),
    help_text=_(
        'Ansible SSH connection timeout (in seconds) to use when communicating with isolated instances. '
        'Value should be substantially greater than expected network latency.'
    ),
    category=_('Jobs'),
    category_slug='jobs',
    unit=_('seconds'),
)

register(
    'AWX_ISOLATED_HOST_KEY_CHECKING',
    field_class=fields.BooleanField,
    label=_('Isolated host key checking'),
    help_text=_('When set to True, AWX will enforce strict host key checking for communication with isolated nodes.'),
    category=_('Jobs'),
    category_slug='jobs',
    default=False,
)

register(
    'AWX_ISOLATED_KEY_GENERATION',
    field_class=fields.BooleanField,
    default=True,
    label=_('Generate RSA keys for isolated instances'),
    help_text=_(
        'If set, a random RSA key will be generated and distributed to '
        'isolated instances. To disable this behavior and manage authentication '
        'for isolated instances outside of Tower, disable this setting.'
    ),  # noqa
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'AWX_ISOLATED_PRIVATE_KEY',
    field_class=fields.CharField,
    default='',
    allow_blank=True,
    encrypted=True,
    read_only=True,
    label=_('The RSA private key for SSH traffic to isolated instances'),
    help_text=_('The RSA private key for SSH traffic to isolated instances'),  # noqa
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'AWX_ISOLATED_PUBLIC_KEY',
    field_class=fields.CharField,
    default='',
    allow_blank=True,
    read_only=True,
    label=_('The RSA public key for SSH traffic to isolated instances'),
    help_text=_('The RSA public key for SSH traffic to isolated instances'),  # noqa
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'AWX_RESOURCE_PROFILING_ENABLED',
    field_class=fields.BooleanField,
    default=False,
    label=_('Enable detailed resource profiling on all playbook runs'),
    help_text=_('If set, detailed resource profiling data will be collected on all jobs. ' 'This data can be gathered with `sosreport`.'),  # noqa
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'AWX_RESOURCE_PROFILING_CPU_POLL_INTERVAL',
    field_class=FloatField,
    default='0.25',
    label=_('Interval (in seconds) between polls for cpu usage.'),
    help_text=_('Interval (in seconds) between polls for cpu usage. ' 'Setting this lower than the default will affect playbook performance.'),
    category=_('Jobs'),
    category_slug='jobs',
    required=False,
)

register(
    'AWX_RESOURCE_PROFILING_MEMORY_POLL_INTERVAL',
    field_class=FloatField,
    default='0.25',
    label=_('Interval (in seconds) between polls for memory usage.'),
    help_text=_('Interval (in seconds) between polls for memory usage. ' 'Setting this lower than the default will affect playbook performance.'),
    category=_('Jobs'),
    category_slug='jobs',
    required=False,
)

register(
    'AWX_RESOURCE_PROFILING_PID_POLL_INTERVAL',
    field_class=FloatField,
    default='0.25',
    label=_('Interval (in seconds) between polls for PID count.'),
    help_text=_('Interval (in seconds) between polls for PID count. ' 'Setting this lower than the default will affect playbook performance.'),
    category=_('Jobs'),
    category_slug='jobs',
    required=False,
)

register(
    'AWX_TASK_ENV',
    field_class=fields.KeyValueField,
@@ -403,7 +266,7 @@ register(
    field_class=fields.BooleanField,
    default=False,
    label=_('Gather data for Automation Analytics'),
-    help_text=_('Enables Tower to gather data on automation and send it to Red Hat.'),
+    help_text=_('Enables the service to gather data on automation and send it to Red Hat Insights.'),
    category=_('System'),
    category_slug='system',
)
@@ -674,8 +537,8 @@ register(
    field_class=fields.CharField,
    allow_blank=True,
    default='',
-    label=_('Cluster-wide Tower unique identifier.'),
-    help_text=_('Useful to uniquely identify Tower instances.'),
+    label=_('Cluster-wide unique identifier.'),
+    help_text=_('Useful to uniquely identify instances.'),
    category=_('Logging'),
    category_slug='logging',
)
@@ -710,7 +573,7 @@ register(
    label=_('Enable/disable HTTPS certificate verification'),
    help_text=_(
        'Flag to control enable/disable of certificate verification'
-        ' when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, Tower\'s'
+        ' when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the'
        ' log handler will verify certificate sent by external log aggregator'
        ' before establishing connection.'
    ),
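Every entry in conf.py goes through the same register() helper from awx.conf, so adding a setting is one more call in the same shape (editor's sketch; the setting name here is invented, and the keyword arguments simply mirror the calls above):

    from django.utils.translation import ugettext_lazy as _

    from awx.conf import fields, register

    register(
        'MY_EXAMPLE_TIMEOUT',  # hypothetical setting, not part of AWX
        field_class=fields.IntegerField,
        min_value=0,
        default=30,
        label=_('Example timeout'),
        help_text=_('How long to wait before giving up.'),
        category=_('Jobs'),
        category_slug='jobs',
        unit=_('seconds'),
    )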
@@ -7,7 +7,6 @@ from django.utils.translation import ugettext_lazy as _

__all__ = [
    'CLOUD_PROVIDERS',
-    'SCHEDULEABLE_PROVIDERS',
    'PRIVILEGE_ESCALATION_METHODS',
    'ANSI_SGR_PATTERN',
    'CAN_CANCEL',
@@ -16,10 +15,6 @@ __all__ = [
]

CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'tower')
-SCHEDULEABLE_PROVIDERS = CLOUD_PROVIDERS + (
-    'custom',
-    'scm',
-)
PRIVILEGE_ESCALATION_METHODS = [
    ('sudo', _('Sudo')),
    ('su', _('Su')),
@@ -18,7 +18,7 @@ def reap_job(j, status):
    j.start_args = ''  # blank field to remove encrypted passwords
    j.job_explanation += ' '.join(
        (
-            'Task was marked as running in Tower but was not present in',
+            'Task was marked as running but was not present in',
            'the job queue, so it has been marked as failed.',
        )
    )
@@ -37,7 +37,7 @@ def reap(instance=None, status='failed', excluded_uuids=[]):
    if me is None:
        (changed, me) = Instance.objects.get_or_register()
        if changed:
-            logger.info("Registered tower node '{}'".format(me.hostname))
+            logger.info("Registered node '{}'".format(me.hostname))
    now = tz_now()
    workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
    jobs = UnifiedJob.objects.filter(
awx/main/isolated/.gitignore (1 change, vendored)
@@ -1 +0,0 @@
-authorized_keys
@@ -1,365 +0,0 @@
import fnmatch
import json
import os
import shutil
import stat
import tempfile
import time
import logging
import datetime

from django.conf import settings
import ansible_runner

import awx
from awx.main.utils import get_system_task_capacity

logger = logging.getLogger('awx.isolated.manager')
playbook_logger = logging.getLogger('awx.isolated.manager.playbooks')


def set_pythonpath(venv_libdir, env):
    env.pop('PYTHONPATH', None)  # default to none if no python_ver matches
    for version in os.listdir(venv_libdir):
        if fnmatch.fnmatch(version, 'python[23].*'):
            if os.path.isdir(os.path.join(venv_libdir, version)):
                env['PYTHONPATH'] = os.path.join(venv_libdir, version, "site-packages") + ":"
                break


class IsolatedManager(object):
    def __init__(self, event_handler, canceled_callback=None, check_callback=None):
        """
        :param event_handler: a callable used to persist event data from isolated nodes
        :param canceled_callback: a callable - which returns `True` or `False`
                                  - signifying if the job has been prematurely
                                    canceled
        """
        self.event_handler = event_handler
        self.canceled_callback = canceled_callback
        self.check_callback = check_callback
        self.started_at = None
        self.captured_command_artifact = False
        self.instance = None

    def build_inventory(self, hosts):
        inventory = '\n'.join(['{} ansible_ssh_user={}'.format(host, settings.AWX_ISOLATED_USERNAME) for host in hosts])

        return inventory

    def build_runner_params(self, hosts, verbosity=1):
        env = dict(os.environ.items())
        env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
        env['ANSIBLE_HOST_KEY_CHECKING'] = str(settings.AWX_ISOLATED_HOST_KEY_CHECKING)
        env['ANSIBLE_LIBRARY'] = os.path.join(os.path.dirname(awx.__file__), 'plugins', 'isolated')
        env['ANSIBLE_COLLECTIONS_PATHS'] = settings.AWX_ANSIBLE_COLLECTIONS_PATHS
        set_pythonpath(os.path.join(settings.ANSIBLE_VENV_PATH, 'lib'), env)

        def finished_callback(runner_obj):
            if runner_obj.status == 'failed' and runner_obj.config.playbook != 'check_isolated.yml':
                # failed for clean_isolated.yml just means the playbook hasn't
                # exited on the isolated host
                stdout = runner_obj.stdout.read()
                playbook_logger.error(stdout)
            elif runner_obj.status == 'timeout':
                # this means that the default idle timeout of
                # (2 * AWX_ISOLATED_CONNECTION_TIMEOUT) was exceeded
                # (meaning, we tried to sync with an isolated node, and we got
                # no new output for 2 * AWX_ISOLATED_CONNECTION_TIMEOUT seconds)
                # this _usually_ means SSH key auth from the controller ->
                # isolated didn't work, and ssh is hung waiting on interactive
                # input e.g.,
                #
                # awx@isolated's password:
                stdout = runner_obj.stdout.read()
                playbook_logger.error(stdout)
            else:
                playbook_logger.info(runner_obj.stdout.read())

        return {
            'project_dir': os.path.abspath(os.path.join(os.path.dirname(awx.__file__), 'playbooks')),
            'inventory': self.build_inventory(hosts),
            'envvars': env,
            'finished_callback': finished_callback,
            'verbosity': verbosity,
            'cancel_callback': self.canceled_callback,
            'settings': {
                'job_timeout': settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
                'suppress_ansible_output': True,
            },
        }

    def path_to(self, *args):
        return os.path.join(self.private_data_dir, *args)

    def run_management_playbook(self, playbook, private_data_dir, idle_timeout=None, **kw):
        iso_dir = tempfile.mkdtemp(prefix=playbook, dir=private_data_dir)
        params = self.runner_params.copy()
        params.get('envvars', dict())['ANSIBLE_CALLBACK_WHITELIST'] = 'profile_tasks'
        params['playbook'] = playbook
        params['private_data_dir'] = iso_dir
        if idle_timeout:
            params['settings']['idle_timeout'] = idle_timeout
        else:
            params['settings'].pop('idle_timeout', None)
        params.update(**kw)
        if all([getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True, getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', None)]):
            params['ssh_key'] = settings.AWX_ISOLATED_PRIVATE_KEY
        return ansible_runner.interface.run(**params)

    def dispatch(self, playbook=None, module=None, module_args=None):
        """
        Ship the runner payload to a remote host for isolated execution.
        """
        self.handled_events = set()
        self.started_at = time.time()

        # exclude certain files from the rsync
        rsync_exclude = [
            # don't rsync source control metadata (it can be huge!)
            '- /project/.git',
            '- /project/.svn',
            # don't rsync job events that are in the process of being written
            '- /artifacts/job_events/*-partial.json.tmp',
            # don't rsync the ssh_key FIFO
            '- /env/ssh_key',
            # don't rsync kube config files
            '- .kubeconfig*',
        ]

        for filename, data in (['.rsync-filter', '\n'.join(rsync_exclude)],):
            path = self.path_to(filename)
            with open(path, 'w') as f:
                f.write(data)
            os.chmod(path, stat.S_IRUSR)

        extravars = {
            'src': self.private_data_dir,
            'dest': settings.AWX_ISOLATION_BASE_PATH,
            'ident': self.ident,
            'job_id': self.instance.id,
        }
        if playbook:
            extravars['playbook'] = playbook
        if module and module_args:
            extravars['module'] = module
            extravars['module_args'] = module_args

        logger.debug('Starting job {} on isolated host with `run_isolated.yml` playbook.'.format(self.instance.id))
        runner_obj = self.run_management_playbook(
            'run_isolated.yml', self.private_data_dir, idle_timeout=max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT), extravars=extravars
        )

        if runner_obj.status == 'failed':
            self.instance.result_traceback = runner_obj.stdout.read()
            self.instance.save(update_fields=['result_traceback'])
            return 'error', runner_obj.rc

        return runner_obj.status, runner_obj.rc

    def check(self, interval=None):
        """
        Repeatedly poll the isolated node to determine if the job has run.

        On success, copy job artifacts to the controlling node.
        On failure, continue to poll the isolated node (until the job timeout
        is exceeded).

        For a completed job run, this function returns (status, rc),
        representing the status and return code of the isolated
        `ansible-playbook` run.

        :param interval: an interval (in seconds) to wait between status polls
        """
        interval = interval if interval is not None else settings.AWX_ISOLATED_CHECK_INTERVAL
        extravars = {'src': self.private_data_dir, 'job_id': self.instance.id}
        status = 'failed'
        rc = None
        last_check = time.time()

        while status == 'failed':
            canceled = self.canceled_callback() if self.canceled_callback else False
            if not canceled and time.time() - last_check < interval:
                # If the job isn't canceled, but we haven't waited `interval` seconds, wait longer
                time.sleep(1)
                continue

            if canceled:
                logger.warning('Isolated job {} was manually canceled.'.format(self.instance.id))

            logger.debug('Checking on isolated job {} with `check_isolated.yml`.'.format(self.instance.id))
            time_start = datetime.datetime.now()
            runner_obj = self.run_management_playbook('check_isolated.yml', self.private_data_dir, extravars=extravars)
            time_end = datetime.datetime.now()
            time_diff = time_end - time_start
            logger.debug('Finished checking on isolated job {} with `check_isolated.yml` took {} seconds.'.format(self.instance.id, time_diff.total_seconds()))
            status, rc = runner_obj.status, runner_obj.rc

            if self.check_callback is not None and not self.captured_command_artifact:
                command_path = self.path_to('artifacts', self.ident, 'command')
                # If the configuration artifact has been synced back, update the model
                if os.path.exists(command_path):
                    try:
                        with open(command_path, 'r') as f:
                            data = json.load(f)
                        self.check_callback(data)
                        self.captured_command_artifact = True
                    except json.decoder.JSONDecodeError:  # Just in case it's not fully here yet.
                        pass

            self.consume_events()

            last_check = time.time()

        if status == 'successful':
            status_path = self.path_to('artifacts', self.ident, 'status')
            rc_path = self.path_to('artifacts', self.ident, 'rc')
            if os.path.exists(status_path):
                with open(status_path, 'r') as f:
                    status = f.readline()
                with open(rc_path, 'r') as f:
                    rc = int(f.readline())
            else:
                # if there's no status file, it means that runner _probably_
                # exited with a traceback (which should be logged to
                # daemon.log) Record it so we can see how runner failed.
                daemon_path = self.path_to('daemon.log')
                if os.path.exists(daemon_path):
                    with open(daemon_path, 'r') as f:
                        self.instance.result_traceback = f.read()
                        self.instance.save(update_fields=['result_traceback'])
                else:
                    logger.error('Failed to rsync daemon.log (is ansible-runner installed on the isolated host?)')
                status = 'failed'
                rc = 1

        # consume events one last time just to be sure we didn't miss anything
        # in the final sync
        self.consume_events()

        return status, rc

    def consume_events(self):
        # discover new events and ingest them
        events_path = self.path_to('artifacts', self.ident, 'job_events')

        # it's possible that `events_path` doesn't exist *yet*, because runner
        # hasn't actually written any events yet (if you ran e.g., a sleep 30)
        # only attempt to consume events if any were rsynced back
        if os.path.exists(events_path):
            for event in set(os.listdir(events_path)) - self.handled_events:
                path = os.path.join(events_path, event)
                if os.path.exists(path) and os.path.isfile(path):
                    try:
                        event_data = json.load(open(os.path.join(events_path, event), 'r'))
                    except json.decoder.JSONDecodeError:
                        # This means the event we got back isn't valid JSON
                        # that can happen if runner is still partially
                        # writing an event file while it's rsyncing
                        # these event writes are _supposed_ to be atomic
                        # but it doesn't look like they actually are in
                        # practice
                        # in this scenario, just ignore this event and try it
                        # again on the next sync
                        continue
                    self.event_handler(event_data)
                    self.handled_events.add(event)

    def cleanup(self):
        extravars = {
            'private_data_dir': self.private_data_dir,
            'cleanup_dirs': [
                self.private_data_dir,
            ],
        }
        logger.debug('Cleaning up job {} on isolated host with `clean_isolated.yml` playbook.'.format(self.instance.id))
        self.run_management_playbook('clean_isolated.yml', self.private_data_dir, extravars=extravars)

    @classmethod
    def update_capacity(cls, instance, task_result):
        instance.version = 'ansible-runner-{}'.format(task_result['version'])

        if instance.capacity == 0 and task_result['capacity_cpu']:
            logger.warning('Isolated instance {} has re-joined.'.format(instance.hostname))
        instance.cpu = int(task_result['cpu'])
        instance.memory = int(task_result['mem'])
        instance.cpu_capacity = int(task_result['capacity_cpu'])
        instance.mem_capacity = int(task_result['capacity_mem'])
        instance.capacity = get_system_task_capacity(
            scale=instance.capacity_adjustment, cpu_capacity=int(task_result['capacity_cpu']), mem_capacity=int(task_result['capacity_mem'])
        )
        instance.save(update_fields=['cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity', 'version', 'modified'])

    def health_check(self, instance_qs):
        """
        :param instance_qs: List of Django objects representing the
                            isolated instances to manage
        Runs playbook that will
        - determine if instance is reachable
        - find the instance capacity
        - clean up orphaned private files
        Performs save on each instance to update its capacity.
        """
        instance_qs = [i for i in instance_qs if i.enabled]
        if not len(instance_qs):
            return
        try:
            private_data_dir = tempfile.mkdtemp(prefix='awx_iso_heartbeat_', dir=settings.AWX_ISOLATION_BASE_PATH)
            self.runner_params = self.build_runner_params([instance.hostname for instance in instance_qs])
            self.runner_params['private_data_dir'] = private_data_dir
            self.runner_params['forks'] = len(instance_qs)
            runner_obj = self.run_management_playbook('heartbeat_isolated.yml', private_data_dir)

            for instance in instance_qs:
                task_result = {}
                try:
                    task_result = runner_obj.get_fact_cache(instance.hostname)
                except Exception:
                    logger.exception('Failed to read status from isolated instances')
                if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
                    task_result = {
                        'cpu': task_result['awx_cpu'],
                        'mem': task_result['awx_mem'],
                        'capacity_cpu': task_result['awx_capacity_cpu'],
                        'capacity_mem': task_result['awx_capacity_mem'],
                        'version': task_result['awx_capacity_version'],
                    }
                    IsolatedManager.update_capacity(instance, task_result)
                    logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
                elif instance.capacity == 0:
                    logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(instance.hostname))
                else:
                    logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
                    if instance.is_lost(isolated=True):
                        instance.capacity = 0
                        instance.save(update_fields=['capacity'])
                        logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(instance.hostname, instance.modified))
        finally:
            if os.path.exists(private_data_dir):
                shutil.rmtree(private_data_dir)

    def run(self, instance, private_data_dir, playbook, module, module_args, ident=None):
        """
        Run a job on an isolated host.

        :param instance: a `model.Job` instance
        :param private_data_dir: an absolute path on the local file system
                                 where job-specific data should be written
                                 (i.e., `/tmp/awx_N_xyz/`)
        :param playbook: the playbook to run
        :param module: the module to run
        :param module_args: the module args to use

        For a completed job run, this function returns (status, rc),
        representing the status and return code of the isolated
        `ansible-playbook` run.
        """
        self.ident = ident
        self.instance = instance
        self.private_data_dir = private_data_dir
        self.runner_params = self.build_runner_params([instance.execution_node], verbosity=min(5, self.instance.verbosity))

        status, rc = self.dispatch(playbook, module, module_args)
        if status == 'successful':
            status, rc = self.check()
        return status, rc
@@ -10,7 +10,7 @@ from awx.main.utils.pglock import advisory_lock

class Command(BaseCommand):
    """
-    Deprovision a Tower cluster node
+    Deprovision a cluster node
    """

    help = 'Remove instance from the database. ' 'Specify `--hostname` to use this command.'
awx/main/management/commands/export_custom_scripts.py (new file, 36 lines)
@@ -0,0 +1,36 @@
import tempfile
import tarfile
import stat
import os

from awx.main.models.inventory import CustomInventoryScript

from django.core.management.base import BaseCommand
from django.utils.text import slugify


class Command(BaseCommand):

    help = 'Export custom inventory scripts into a tarfile.'

    def add_arguments(self, parser):
        parser.add_argument('--filename', dest='filename', type=str, default='custom_scripts.tar', help='Filename of the output tar file')

    def handle(self, **options):
        tar_filename = options.get('filename')

        with tempfile.TemporaryDirectory() as tmpdirname:
            with tarfile.open(tar_filename, "w") as tar:

                for cis in CustomInventoryScript.objects.all():
                    # naming convention similar to project paths
                    slug_name = slugify(str(cis.name)).replace(u'-', u'_')
                    script_filename = u'_%d__%s' % (int(cis.pk), slug_name)
                    script_path = os.path.join(tmpdirname, script_filename)

                    with open(script_path, 'w') as f:
                        f.write(cis.script)
                    os.chmod(script_path, stat.S_IRWXU)
                    tar.add(script_path, arcname=script_filename)

        print('Dump of old custom inventory scripts at {}'.format(tar_filename))
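Because the new command plugs into Django's management framework, it can be invoked from the CLI (awx-manage export_custom_scripts --filename custom_scripts.tar) or programmatically (editor's sketch; call_command is the standard Django API, and the filename is arbitrary):

    from django.core.management import call_command

    # writes every CustomInventoryScript to ./legacy_scripts.tar
    call_command('export_custom_scripts', filename='legacy_scripts.tar')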
@@ -1,38 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
import datetime
from django.utils.encoding import smart_str

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from django.conf import settings
from django.core.management.base import BaseCommand

from awx.conf.models import Setting


class Command(BaseCommand):
    """Generate and store a randomized RSA key for SSH traffic to isolated instances"""

    help = 'Generates and stores a randomized RSA key for SSH traffic to isolated instances'

    def handle(self, *args, **kwargs):
        if getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', False):
            print(settings.AWX_ISOLATED_PUBLIC_KEY)
            return

        key = rsa.generate_private_key(public_exponent=65537, key_size=4096, backend=default_backend())
        Setting.objects.create(
            key='AWX_ISOLATED_PRIVATE_KEY',
            value=key.private_bytes(
                encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption()
            ),
        ).save()
        pemfile = Setting.objects.create(
            key='AWX_ISOLATED_PUBLIC_KEY',
            value=smart_str(key.public_key().public_bytes(encoding=serialization.Encoding.OpenSSH, format=serialization.PublicFormat.OpenSSH))
            + " generated-by-awx@%s" % datetime.datetime.utcnow().isoformat(),
        )
        pemfile.save()
        print(pemfile.value)
@@ -147,9 +147,6 @@ class Command(BaseCommand):
        parser.add_argument('--overwrite', dest='overwrite', action='store_true', default=False, help='overwrite the destination hosts and groups')
        parser.add_argument('--overwrite-vars', dest='overwrite_vars', action='store_true', default=False, help='overwrite (rather than merge) variables')
        parser.add_argument('--keep-vars', dest='keep_vars', action='store_true', default=False, help='DEPRECATED legacy option, has no effect')
-        parser.add_argument(
-            '--custom', dest='custom', action='store_true', default=False, help='DEPRECATED indicates a custom inventory script, no longer used'
-        )
        parser.add_argument('--source', dest='source', type=str, default=None, metavar='s', help='inventory directory, file, or script to load')
        parser.add_argument(
            '--enabled-var',

@@ -10,7 +10,6 @@ class Ungrouped(object):
    name = 'ungrouped'
    policy_instance_percentage = None
    policy_instance_minimum = None
-    controller = None

    @property
    def instances(self):
@@ -18,7 +17,7 @@ class Ungrouped(object):

    @property
    def capacity(self):
-        return sum([x.capacity for x in self.instances])
+        return sum(x.capacity for x in self.instances)


class Command(BaseCommand):
@@ -38,8 +37,6 @@ class Command(BaseCommand):
            fmt += ' policy={0.policy_instance_percentage}%'
        if instance_group.policy_instance_minimum:
            fmt += ' policy>={0.policy_instance_minimum}'
-        if instance_group.controller:
-            fmt += ' controller={0.controller.name}'
        print((fmt + ']').format(instance_group))
        for x in instance_group.instances.all():
            color = '\033[92m'
@@ -48,8 +45,6 @@ class Command(BaseCommand):
        if x.enabled is False:
            color = '\033[90m[DISABLED] '
        fmt = '\t' + color + '{0.hostname} capacity={0.capacity} version={1}'
-        if x.last_isolated_check:
-            fmt += ' last_isolated_check="{0.last_isolated_check:%Y-%m-%d %H:%M:%S}"'
        if x.capacity:
            fmt += ' heartbeat="{0.modified:%Y-%m-%d %H:%M:%S}"'
        print((fmt + '\033[0m').format(x, x.version or '?'))

@@ -1,13 +1,11 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

-from uuid import uuid4
-from django.conf import settings
-from django.core.management.base import BaseCommand, CommandError
-from django.db import transaction
-
from awx.main.models import Instance
+from django.conf import settings
+
+from django.db import transaction
+from django.core.management.base import BaseCommand, CommandError


class Command(BaseCommand):
@@ -20,7 +18,6 @@ class Command(BaseCommand):

    def add_arguments(self, parser):
        parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname used during provisioning')
-        parser.add_argument('--is-isolated', dest='is_isolated', action='store_true', help='Specify whether the instance is isolated')

    def _register_hostname(self, hostname):
        if not hostname:
@@ -36,10 +33,7 @@ class Command(BaseCommand):
    def handle(self, **options):
        if not options.get('hostname'):
            raise CommandError("Specify `--hostname` to use this command.")
-        if options['is_isolated']:
-            self.uuid = str(uuid4())
-        else:
-            self.uuid = settings.SYSTEM_UUID
+        self.uuid = settings.SYSTEM_UUID
        self.changed = False
        self._register_hostname(options.get('hostname'))
        if self.changed:

@@ -16,8 +16,7 @@ from awx.main.utils.encryption import encrypt_field, decrypt_field, encrypt_valu

class Command(BaseCommand):
    """
-    Regenerate a new SECRET_KEY value and re-encrypt every secret in the
-    Tower database.
+    Regenerate a new SECRET_KEY value and re-encrypt every secret in the database.
    """

    @transaction.atomic

@@ -17,10 +17,9 @@ class InstanceNotFound(Exception):


class RegisterQueue:
-    def __init__(self, queuename, controller, instance_percent, inst_min, hostname_list, is_container_group=None):
+    def __init__(self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None):
        self.instance_not_found_err = None
        self.queuename = queuename
-        self.controller = controller
        self.instance_percent = instance_percent
        self.instance_min = inst_min
        self.hostname_list = hostname_list
@@ -46,20 +45,6 @@ class RegisterQueue:

        return (ig, created, changed)

-    def update_instance_group_controller(self, ig):
-        changed = False
-        control_ig = None
-
-        if self.controller:
-            control_ig = InstanceGroup.objects.filter(name=self.controller).first()
-
-        if control_ig and ig.controller_id != control_ig.pk:
-            ig.controller = control_ig
-            ig.save()
-            changed = True
-
-        return (control_ig, changed)
-
    def add_instances_to_group(self, ig):
        changed = False

@@ -88,26 +73,20 @@ class RegisterQueue:
        with advisory_lock('cluster_policy_lock'):
            with transaction.atomic():
                changed2 = False
-                changed3 = False
                (ig, created, changed1) = self.get_create_update_instance_group()
                if created:
                    print("Creating instance group {}".format(ig.name))
                elif not created:
                    print("Instance Group already registered {}".format(ig.name))

-                if self.controller:
-                    (ig_ctrl, changed2) = self.update_instance_group_controller(ig)
-                    if changed2:
-                        print("Set controller group {} on {}.".format(self.controller, self.queuename))
-
                try:
-                    (instances, changed3) = self.add_instances_to_group(ig)
+                    (instances, changed2) = self.add_instances_to_group(ig)
                    for i in instances:
                        print("Added instance {} to {}".format(i.hostname, ig.name))
                except InstanceNotFound as e:
                    self.instance_not_found_err = e

-                if any([changed1, changed2, changed3]):
+                if changed1 or changed2:
                    print('(changed: True)')

@@ -117,7 +96,6 @@ class Command(BaseCommand):
        parser.add_argument(
            '--hostnames', dest='hostnames', type=str, help='Comma-Delimited Hosts to add to the Queue (will not remove already assigned instances)'
        )
-        parser.add_argument('--controller', dest='controller', type=str, default='', help='The controlling group (makes this an isolated group)')
        parser.add_argument(
            '--instance_percent', dest='instance_percent', type=int, default=0, help='The percentage of active instances that will be assigned to this group'
        ),
@@ -133,14 +111,13 @@ class Command(BaseCommand):
        queuename = options.get('queuename')
        if not queuename:
            raise CommandError("Specify `--queuename` to use this command.")
-        ctrl = options.get('controller')
        inst_per = options.get('instance_percent')
        instance_min = options.get('instance_minimum')
        hostname_list = []
        if options.get('hostnames'):
            hostname_list = options.get('hostnames').split(",")

-        rq = RegisterQueue(queuename, ctrl, inst_per, instance_min, hostname_list)
+        rq = RegisterQueue(queuename, inst_per, instance_min, hostname_list)
        rq.register()
        if rq.instance_not_found_err:
            print(rq.instance_not_found_err.message)

@@ -10,7 +10,6 @@ from datetime import datetime as dt

from django.core.management.base import BaseCommand
from django.db import connection
-from django.db.models import Q
from django.db.migrations.executor import MigrationExecutor

from awx.main.analytics.broadcast_websocket import (
@@ -140,7 +139,7 @@ class Command(BaseCommand):
            data[family.name] = family.samples[0].value

        me = Instance.objects.me()
-        hostnames = [i.hostname for i in Instance.objects.exclude(Q(hostname=me.hostname) | Q(rampart_groups__controller__isnull=False))]
+        hostnames = [i.hostname for i in Instance.objects.exclude(hostname=me.hostname)]

        host_stats = Command.get_connection_status(me, hostnames, data)
        lines = Command._format_lines(host_stats)

@@ -1,47 +0,0 @@
import os
import shutil
import sys
import tempfile

from django.conf import settings
from django.core.management.base import BaseCommand, CommandError

import ansible_runner

from awx.main.isolated.manager import set_pythonpath


class Command(BaseCommand):
    """Tests SSH connectivity between a controller and target isolated node"""

    help = 'Tests SSH connectivity between a controller and target isolated node'

    def add_arguments(self, parser):
        parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname of an isolated node')

    def handle(self, *args, **options):
        hostname = options.get('hostname')
        if not hostname:
            raise CommandError("--hostname is a required argument")

        try:
            path = tempfile.mkdtemp(prefix='awx_isolated_ssh', dir=settings.AWX_ISOLATION_BASE_PATH)
            ssh_key = None
            if all([getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True, getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', None)]):
                ssh_key = settings.AWX_ISOLATED_PRIVATE_KEY
            env = dict(os.environ.items())
            env['ANSIBLE_HOST_KEY_CHECKING'] = str(settings.AWX_ISOLATED_HOST_KEY_CHECKING)
            set_pythonpath(os.path.join(settings.ANSIBLE_VENV_PATH, 'lib'), env)
            res = ansible_runner.interface.run(
                private_data_dir=path,
                host_pattern='all',
                inventory='{} ansible_ssh_user={}'.format(hostname, settings.AWX_ISOLATED_USERNAME),
                module='shell',
                module_args='ansible-runner --version',
                envvars=env,
                verbosity=3,
                ssh_key=ssh_key,
            )
            sys.exit(res.rc)
        finally:
            shutil.rmtree(path)
@@ -142,7 +142,7 @@ class InstanceManager(models.Manager):
            pod_ip = os.environ.get('MY_POD_IP')
            registered = self.register(ip_address=pod_ip)
            is_container_group = settings.IS_K8S
-            RegisterQueue('tower', None, 100, 0, [], is_container_group).register()
+            RegisterQueue('tower', 100, 0, [], is_container_group).register()
            return registered
        else:
            return (False, self.me())
@@ -155,9 +155,6 @@ class InstanceManager(models.Manager):
        # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
        return "tower"

-    def all_non_isolated(self):
-        return self.exclude(rampart_groups__controller__isnull=False)
-

class InstanceGroupManager(models.Manager):
    """A custom manager class for the Instance model.
@@ -2,12 +2,8 @@
# Python
from __future__ import unicode_literals

# Django
from django.db import migrations, models

# AWX
from awx.main.migrations import ActivityStreamDisabledMigration
import awx.main.fields


class Migration(ActivityStreamDisabledMigration):

@@ -1,10 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

# AWX
from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations import _credentialtypes as credentialtypes

from django.db import migrations, models


@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations
from awx.main.migrations import ActivityStreamDisabledMigration


@@ -3,7 +3,7 @@
from __future__ import unicode_literals

# Django
-from django.db import migrations, models
+from django.db import migrations

# AWX
from awx.main.migrations import _migration_utils as migration_utils

@@ -2,7 +2,6 @@
# Generated by Django 1.11.7 on 2017-12-11 16:40
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


@@ -3,8 +3,7 @@
from __future__ import unicode_literals

import awx.main.fields
from django.conf import settings
-from django.db import migrations, models
+from django.db import migrations
import django.db.models.deletion


@@ -2,10 +2,7 @@
# Generated by Django 1.11.7 on 2018-02-27 17:58
from __future__ import unicode_literals

-import awx.main.fields
-from django.conf import settings
-from django.db import migrations, models
-import django.db.models.deletion
+from django.db import migrations

# TODO: Squash all of these migrations with '0024_v330_add_oauth_activity_stream_registrar'

@@ -4,7 +4,7 @@ from __future__ import unicode_literals
# AWX
from awx.main.migrations import _credentialtypes as credentialtypes

-from django.db import migrations, models
+from django.db import migrations


class Migration(migrations.Migration):

@@ -2,8 +2,6 @@
# Generated by Django 1.11.11 on 2018-03-16 20:25
from __future__ import unicode_literals

import awx.main.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion

@@ -4,9 +4,7 @@ from __future__ import unicode_literals

import awx.main.fields
import awx.main.models.activity_stream
from django.conf import settings
-from django.db import migrations, models
import django.db.models.deletion
+from django.db import migrations


class Migration(migrations.Migration):

@@ -2,10 +2,7 @@
# Generated by Django 1.11.11 on 2018-05-23 20:17
from __future__ import unicode_literals

import awx.main.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

@@ -3,7 +3,7 @@
from __future__ import unicode_literals
from uuid import uuid4

-from django.db import migrations, models
+from django.db import migrations
from django.utils.timezone import now

@@ -2,7 +2,7 @@
# Generated by Django 1.11.20 on 2019-05-06 15:20
from __future__ import unicode_literals

-from django.db import migrations, models
+from django.db import migrations
from awx.main.fields import OrderedManyToManyField

@@ -2,8 +2,6 @@

from django.db import migrations, models

import awx


class Migration(migrations.Migration):

@@ -7,7 +7,6 @@ import django.db.models.deletion

from awx.main.migrations._rbac import (
    rebuild_role_parentage,
    rebuild_role_hierarchy,
    migrate_ujt_organization,
    migrate_ujt_organization_backward,
    restore_inventory_admins,

@@ -3,15 +3,11 @@
import logging

import awx.main.fields
from awx.main.utils.encryption import encrypt_field, decrypt_field

from django.db import migrations, models
from django.utils.timezone import now
import django.db.models.deletion

from awx.main.migrations import _galaxy as galaxy
from awx.main.models import CredentialType as ModernCredentialType
from awx.main.utils.common import set_current_apps

logger = logging.getLogger('awx.main.migrations')
awx/main/migrations/0136_scm_track_submodules.py (new file, 23 lines)
@@ -0,0 +1,23 @@
+# Generated by Django 2.2.16 on 2021-04-13 19:21
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0135_schedule_sort_fallback_to_id'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='project',
+            name='scm_track_submodules',
+            field=models.BooleanField(default=False, help_text='Track submodules latest commits on defined branch.'),
+        ),
+        migrations.AddField(
+            model_name='projectupdate',
+            name='scm_track_submodules',
+            field=models.BooleanField(default=False, help_text='Track submodules latest commits on defined branch.'),
+        ),
+    ]
@@ -0,0 +1,17 @@
+# Generated by Django 2.2.16 on 2021-04-13 19:51
+
+from django.db import migrations
+
+# AWX migration utils
+from awx.main.migrations._inventory_source import delete_custom_inv_source
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0136_scm_track_submodules'),
+    ]
+
+    operations = [
+        migrations.RunPython(delete_custom_inv_source),
+    ]
awx/main/migrations/0138_custom_inventory_scripts_removal.py (new file, 84 lines)
@@ -0,0 +1,84 @@
+# Generated by Django 2.2.16 on 2021-04-13 19:51
+
+from django.db import migrations, models
+
+from awx.main.migrations._rbac import delete_all_custom_script_roles
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0137_custom_inventory_scripts_removal_data'),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name='activitystream',
+            name='custom_inventory_script',
+        ),
+        migrations.RemoveField(
+            model_name='inventorysource',
+            name='source_script',
+        ),
+        migrations.RemoveField(
+            model_name='inventoryupdate',
+            name='source_script',
+        ),
+        migrations.AlterField(
+            model_name='inventorysource',
+            name='source',
+            field=models.CharField(
+                choices=[
+                    ('file', 'File, Directory or Script'),
+                    ('scm', 'Sourced from a Project'),
+                    ('ec2', 'Amazon EC2'),
+                    ('gce', 'Google Compute Engine'),
+                    ('azure_rm', 'Microsoft Azure Resource Manager'),
+                    ('vmware', 'VMware vCenter'),
+                    ('satellite6', 'Red Hat Satellite 6'),
+                    ('openstack', 'OpenStack'),
+                    ('rhv', 'Red Hat Virtualization'),
+                    ('tower', 'Ansible Tower'),
+                ],
+                default=None,
+                max_length=32,
+            ),
+        ),
+        migrations.AlterField(
+            model_name='inventoryupdate',
+            name='source',
+            field=models.CharField(
+                choices=[
+                    ('file', 'File, Directory or Script'),
+                    ('scm', 'Sourced from a Project'),
+                    ('ec2', 'Amazon EC2'),
+                    ('gce', 'Google Compute Engine'),
+                    ('azure_rm', 'Microsoft Azure Resource Manager'),
+                    ('vmware', 'VMware vCenter'),
+                    ('satellite6', 'Red Hat Satellite 6'),
+                    ('openstack', 'OpenStack'),
+                    ('rhv', 'Red Hat Virtualization'),
+                    ('tower', 'Ansible Tower'),
+                ],
+                default=None,
+                max_length=32,
+            ),
+        ),
+        migrations.AlterUniqueTogether(
+            name='custominventoryscript',
+            unique_together=set(),
+        ),
+        migrations.RemoveField(
+            model_name='custominventoryscript',
+            name='admin_role',
+        ),
+        migrations.RemoveField(
+            model_name='custominventoryscript',
+            name='organization',
+        ),
+        migrations.RemoveField(
+            model_name='custominventoryscript',
+            name='read_role',
+        ),
+        migrations.RunPython(delete_all_custom_script_roles),
+    ]
awx/main/migrations/0139_isolated_removal.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+# Generated by Django 2.2.16 on 2021-04-21 15:02
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0138_custom_inventory_scripts_removal'),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name='instance',
+            name='last_isolated_check',
+        ),
+        migrations.RemoveField(
+            model_name='instancegroup',
+            name='controller',
+        ),
+        migrations.AlterField(
+            model_name='unifiedjob',
+            name='controller_node',
+            field=models.TextField(blank=True, default='', editable=False, help_text='The instance that managed the execution environment.'),
+        ),
+    ]
awx/main/migrations/0140_rename.py (new file, 90 lines)
@@ -0,0 +1,90 @@
+# Generated by Django 2.2.16 on 2021-04-27 18:07
+
+import awx.main.fields
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0139_isolated_removal'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='credential',
+            name='credential_type',
+            field=models.ForeignKey(
+                help_text='Specify the type of credential you want to create. Refer to the documentation for details on each type.',
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name='credentials',
+                to='main.CredentialType',
+            ),
+        ),
+        migrations.AlterField(
+            model_name='credential',
+            name='inputs',
+            field=awx.main.fields.CredentialInputField(
+                blank=True, default=dict, help_text='Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.'
+            ),
+        ),
+        migrations.AlterField(
+            model_name='credentialtype',
+            name='injectors',
+            field=awx.main.fields.CredentialTypeInjectorField(
+                blank=True, default=dict, help_text='Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax.'
+            ),
+        ),
+        migrations.AlterField(
+            model_name='credentialtype',
+            name='inputs',
+            field=awx.main.fields.CredentialTypeInputField(
+                blank=True, default=dict, help_text='Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.'
+            ),
+        ),
+        migrations.AlterField(
+            model_name='inventorysource',
+            name='enabled_value',
+            field=models.TextField(
+                blank=True,
+                default='',
+                help_text='Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2020-08-04T18:13:04+00:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled',
+            ),
+        ),
+        migrations.AlterField(
+            model_name='inventorysource',
+            name='host_filter',
+            field=models.TextField(blank=True, default='', help_text='Regex where only matching hosts will be imported.'),
+        ),
+        migrations.AlterField(
+            model_name='inventoryupdate',
+            name='enabled_value',
+            field=models.TextField(
+                blank=True,
+                default='',
+                help_text='Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2020-08-04T18:13:04+00:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled',
+            ),
+        ),
+        migrations.AlterField(
+            model_name='inventoryupdate',
+            name='host_filter',
+            field=models.TextField(blank=True, default='', help_text='Regex where only matching hosts will be imported.'),
+        ),
+        migrations.AlterField(
+            model_name='job',
+            name='use_fact_cache',
+            field=models.BooleanField(
+                default=False,
+                help_text='If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.',
+            ),
+        ),
+        migrations.AlterField(
+            model_name='jobtemplate',
+            name='use_fact_cache',
+            field=models.BooleanField(
+                default=False,
+                help_text='If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.',
+            ),
+        ),
+    ]
@@ -1,8 +1,6 @@
import random
import logging

from django.db import migrations, models
-from django.utils.timezone import now, timedelta
+from django.utils.timezone import now

logger = logging.getLogger('awx.main.migrations')
@@ -27,7 +27,7 @@ def migrate_galaxy_settings(apps, schema_editor):
    galaxy_type = CredentialType.objects.get(kind='galaxy')
    private_galaxy_url = Setting.objects.filter(key='PRIMARY_GALAXY_URL').first()

-    # by default, prior versions of AWX/Tower automatically pulled content
+    # by default, prior versions of AWX automatically pulled content
    # from galaxy.ansible.com
    public_galaxy_enabled = True
    public_galaxy_setting = Setting.objects.filter(key='PUBLIC_GALAXY_ENABLED').first()
@@ -1,9 +1,6 @@
import logging

from uuid import uuid4

from django.utils.encoding import smart_text
from django.utils.timezone import now

from awx.main.utils.common import set_current_apps
from awx.main.utils.common import parse_yaml_or_json
@@ -93,3 +90,22 @@ def delete_cloudforms_inv_source(apps, schema_editor):
    if ct:
        ct.credentials.all().delete()
        ct.delete()
+
+
+def delete_custom_inv_source(apps, schema_editor):
+    set_current_apps(apps)
+    InventorySource = apps.get_model('main', 'InventorySource')
+    InventoryUpdate = apps.get_model('main', 'InventoryUpdate')
+    ct, deletions = InventoryUpdate.objects.filter(source='custom').delete()
+    if ct:
+        logger.info('deleted {}'.format((ct, deletions)))
+        update_ct = deletions['main.InventoryUpdate']
+        if update_ct:
+            logger.info('Deleted {} custom inventory script sources.'.format(update_ct))
+    ct, deletions = InventorySource.objects.filter(source='custom').delete()
+    if ct:
+        logger.info('deleted {}'.format((ct, deletions)))
+        src_ct = deletions['main.InventorySource']
+        if src_ct:
+            logger.info('Deleted {} custom inventory script updates.'.format(src_ct))
+    logger.warning('Custom inventory scripts have been removed, see awx-manage export_custom_scripts')
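
delete_custom_inv_source runs inside a RunPython migration, so it resolves models through the historical app registry rather than importing them directly. A minimal sketch of that pattern, with a hypothetical cleanup function (the dependency name is taken from this diff):

    from django.db import migrations

    def forwards(apps, schema_editor):
        # apps.get_model returns the historical model state for this migration,
        # not the live class from awx.main.models
        InventorySource = apps.get_model('main', 'InventorySource')
        InventorySource.objects.filter(source='custom').delete()

    class Migration(migrations.Migration):
        dependencies = [('main', '0136_scm_track_submodules')]
        operations = [migrations.RunPython(forwards, migrations.RunPython.noop)]
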
@@ -28,7 +28,6 @@ def create_roles(apps, schema_editor):
            'Inventory',
            'Project',
            'Credential',
-            'CustomInventoryScript',
            'JobTemplate',
        ]
    ]
@@ -48,6 +47,21 @@ def delete_all_user_roles(apps, schema_editor):
        role.delete()


+def delete_all_custom_script_roles(apps, schema_editor):
+    ContentType = apps.get_model('contenttypes', "ContentType")
+    Role = apps.get_model('main', "Role")
+    try:
+        cis_type = ContentType.objects.get(model='custominventoryscript')
+    except ContentType.DoesNotExist:
+        return
+    role_ct = 0
+    for role in Role.objects.filter(content_type=cis_type).iterator():
+        role.delete()
+        role_ct += 1
+    if role_ct:
+        logger.debug('Deleted {} roles corresponding to custom inventory sources.'.format(role_ct))
+
+
UNIFIED_ORG_LOOKUPS = {
    # Job Templates had an implicit organization via their project
    'jobtemplate': 'project',
@@ -12,7 +12,7 @@ from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate, StdoutM
from awx.main.models.organization import Organization, Profile, Team, UserSessionMembership # noqa
from awx.main.models.credential import Credential, CredentialType, CredentialInputSource, ManagedCredentialType, build_safe_env # noqa
from awx.main.models.projects import Project, ProjectUpdate # noqa
-from awx.main.models.inventory import CustomInventoryScript, Group, Host, Inventory, InventorySource, InventoryUpdate, SmartInventoryMembership # noqa
+from awx.main.models.inventory import Group, Host, Inventory, InventorySource, InventoryUpdate, SmartInventoryMembership # noqa
from awx.main.models.jobs import ( # noqa
    Job,
    JobHostSummary,
@@ -224,7 +224,6 @@ activity_stream_registrar.connect(AdHocCommand)
# activity_stream_registrar.connect(JobEvent)
# activity_stream_registrar.connect(Profile)
activity_stream_registrar.connect(Schedule)
-activity_stream_registrar.connect(CustomInventoryScript)
activity_stream_registrar.connect(NotificationTemplate)
activity_stream_registrar.connect(Notification)
activity_stream_registrar.connect(Label)
@@ -1,7 +1,7 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

-# Tower
+# AWX
from awx.api.versioning import reverse
from awx.main.fields import JSONField
from awx.main.models.base import accepts_json
@@ -74,7 +74,6 @@ class ActivityStream(models.Model):
    unified_job = models.ManyToManyField("UnifiedJob", blank=True, related_name='activity_stream_as_unified_job+')
    ad_hoc_command = models.ManyToManyField("AdHocCommand", blank=True)
    schedule = models.ManyToManyField("Schedule", blank=True)
-    custom_inventory_script = models.ManyToManyField("CustomInventoryScript", blank=True)
    execution_environment = models.ManyToManyField("ExecutionEnvironment", blank=True)
    notification_template = models.ManyToManyField("NotificationTemplate", blank=True)
    notification = models.ManyToManyField("Notification", blank=True)
@@ -146,10 +146,6 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):

        return RunAdHocCommand

-    @classmethod
-    def supports_isolation(cls):
-        return True
-
    @property
    def is_container_group_task(self):
        return bool(self.instance_group and self.instance_group.is_container_group)
@@ -62,7 +62,7 @@ PROJECT_UPDATE_JOB_TYPE_CHOICES = [
    (PERM_INVENTORY_CHECK, _('Check')),
]

-CLOUD_INVENTORY_SOURCES = list(CLOUD_PROVIDERS) + ['scm', 'custom']
+CLOUD_INVENTORY_SOURCES = list(CLOUD_PROVIDERS) + ['scm']

VERBOSITY_CHOICES = [
    (0, '0 (Normal)'),
@@ -354,7 +354,7 @@ class PrimordialModel(HasEditsMixin, CreatedModifiedModel):


class CommonModel(PrimordialModel):
-    ''' a base model where the name is unique '''
+    '''a base model where the name is unique'''

    class Meta:
        abstract = True
@@ -366,7 +366,7 @@ class CommonModel(PrimordialModel):


class CommonModelNameNotUnique(PrimordialModel):
-    ''' a base model where the name is not unique '''
+    '''a base model where the name is not unique'''

    class Meta:
        abstract = True
@@ -89,7 +89,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
        related_name='credentials',
        null=False,
        on_delete=models.CASCADE,
-        help_text=_('Specify the type of credential you want to create. Refer ' 'to the Ansible Tower documentation for details on each type.'),
+        help_text=_('Specify the type of credential you want to create. Refer ' 'to the documentation for details on each type.'),
    )
    managed_by_tower = models.BooleanField(default=False, editable=False)
    organization = models.ForeignKey(
@@ -101,7 +101,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
        related_name='credentials',
    )
    inputs = CredentialInputField(
-        blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the Ansible Tower documentation for example syntax.')
+        blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.')
    )
    admin_role = ImplicitRoleField(
        parent_role=[
@@ -295,6 +295,15 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
            return True
        return field_name in self.inputs and self.inputs[field_name] not in ('', None)

+    def has_inputs(self, field_names=()):
+        for name in field_names:
+            if name in self.inputs:
+                if self.inputs[name] in ('', None):
+                    return False
+            else:
+                raise ValueError('{} is not an input field'.format(name))
+        return True
+
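
The new Credential.has_inputs helper returns True only when every named field is present and non-empty, and raises on names that are not input fields at all; the registry-auth code added later in this diff relies on it. A small illustrative sketch (the credential object and configure_registry_auth helper are hypothetical):

    cred = instance.execution_environment.credential
    try:
        if cred.has_inputs(field_names=('host', 'username', 'password')):
            configure_registry_auth(cred)  # hypothetical helper
    except ValueError as exc:
        # raised when a name is not an input field of this credential type
        logger.warning('registry credential misconfigured: %s', exc)
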
    def _get_dynamic_input(self, field_name):
        for input_source in self.input_sources.all():
            if input_source.input_field_name == field_name:
@@ -334,12 +343,12 @@ class CredentialType(CommonModelNameNotUnique):
    managed_by_tower = models.BooleanField(default=False, editable=False)
    namespace = models.CharField(max_length=1024, null=True, default=None, editable=False)
    inputs = CredentialTypeInputField(
-        blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the Ansible Tower documentation for example syntax.')
+        blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.')
    )
    injectors = CredentialTypeInjectorField(
        blank=True,
        default=dict,
-        help_text=_('Enter injectors using either JSON or YAML syntax. ' 'Refer to the Ansible Tower documentation for example syntax.'),
+        help_text=_('Enter injectors using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.'),
    )

    @classmethod
@@ -743,7 +752,7 @@ ManagedCredentialType(
            'help_text': ugettext_noop(
                'OpenStack domains define administrative boundaries. '
                'It is only needed for Keystone v3 authentication '
-                'URLs. Refer to Ansible Tower documentation for '
+                'URLs. Refer to the documentation for '
                'common scenarios.'
            ),
        },
@@ -1023,9 +1032,7 @@ ManagedCredentialType(
            'label': ugettext_noop('OAuth Token'),
            'type': 'string',
            'secret': True,
-            'help_text': ugettext_noop(
-                'An OAuth token to use to authenticate to Tower with.' 'This should not be set if username/password are being used.'
-            ),
+            'help_text': ugettext_noop('An OAuth token to use to authenticate with.' 'This should not be set if username/password are being used.'),
        },
        {'id': 'verify_ssl', 'label': ugettext_noop('Verify SSL'), 'type': 'boolean', 'secret': False},
    ],
@@ -1097,16 +1104,16 @@ ManagedCredentialType(
        },
        {
            'id': 'password',
-            'label': ugettext_noop('Password'),
+            'label': ugettext_noop('Password or Token'),
            'type': 'string',
            'secret': True,
+            'help_text': ugettext_noop('A password or token used to authenticate with'),
        },
        {
-            'id': 'token',
-            'label': ugettext_noop('Access Token'),
-            'type': 'string',
-            'secret': True,
-            'help_text': ugettext_noop('A token to use to authenticate with. ' 'This should not be set if username/password are being used.'),
+            'id': 'verify_ssl',
+            'label': ugettext_noop('Verify SSL'),
+            'type': 'boolean',
+            'default': True,
        },
    ],
    'required': ['host'],
@@ -1,7 +1,6 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

import random
from decimal import Decimal

from django.core.validators import MinValueValidator
@@ -63,10 +62,6 @@ class Instance(HasPolicyEditsMixin, BaseModel):
    )
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
-    last_isolated_check = models.DateTimeField(
-        null=True,
-        editable=False,
-    )
    version = models.CharField(max_length=120, blank=True)
    capacity = models.PositiveIntegerField(
        default=100,
@@ -128,20 +123,12 @@ class Instance(HasPolicyEditsMixin, BaseModel):
    def jobs_total(self):
        return UnifiedJob.objects.filter(execution_node=self.hostname).count()

-    def is_lost(self, ref_time=None, isolated=False):
+    def is_lost(self, ref_time=None):
        if ref_time is None:
            ref_time = now()
        grace_period = 120
-        if isolated:
-            grace_period = settings.AWX_ISOLATED_PERIODIC_CHECK * 2
        return self.modified < ref_time - timedelta(seconds=grace_period)

-    def is_controller(self):
-        return Instance.objects.filter(rampart_groups__controller__instances=self).exists()
-
-    def is_isolated(self):
-        return self.rampart_groups.filter(controller__isnull=False).exists()
-
    def refresh_capacity(self):
        if settings.IS_K8S:
            self.capacity = self.cpu = self.memory = self.cpu_capacity = self.mem_capacity = 0 # noqa
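
With the isolated grace period gone, is_lost reduces to a single 120-second heartbeat window. A minimal standalone sketch of the check, assuming a timezone-aware modified timestamp as in the model above:

    from datetime import timedelta
    from django.utils.timezone import now

    def is_lost(modified, ref_time=None, grace_period=120):
        # an instance is "lost" when its last heartbeat (modified) is older
        # than the reference time minus the grace period
        if ref_time is None:
            ref_time = now()
        return modified < ref_time - timedelta(seconds=grace_period)
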
@@ -185,15 +172,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
        editable=False,
        help_text=_('Instances that are members of this InstanceGroup'),
    )
-    controller = models.ForeignKey(
-        'InstanceGroup',
-        related_name='controlled_groups',
-        help_text=_('Instance Group to remotely control this group.'),
-        editable=False,
-        default=None,
-        null=True,
-        on_delete=models.CASCADE,
-    )
    is_container_group = models.BooleanField(default=False)
    credential = models.ForeignKey(
        'Credential',
@@ -215,7 +193,7 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
        default=[], blank=True, help_text=_("List of exact-match Instances that will always be automatically assigned to this group")
    )

-    POLICY_FIELDS = frozenset(('policy_instance_list', 'policy_instance_minimum', 'policy_instance_percentage', 'controller'))
+    POLICY_FIELDS = frozenset(('policy_instance_list', 'policy_instance_minimum', 'policy_instance_percentage'))

    def get_absolute_url(self, request=None):
        return reverse('api:instance_group_detail', kwargs={'pk': self.pk}, request=request)
@@ -232,14 +210,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
    def jobs_total(self):
        return UnifiedJob.objects.filter(instance_group=self).count()

-    @property
-    def is_controller(self):
-        return self.controlled_groups.exists()
-
-    @property
-    def is_isolated(self):
-        return bool(self.controller)
-
    '''
    RelatedJobsMixin
    '''
@@ -271,9 +241,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
            largest_instance = i
        return largest_instance

-    def choose_online_controller_node(self):
-        return random.choice(list(self.controller.instances.filter(capacity__gt=0, enabled=True).values_list('hostname', flat=True)))
-
    def set_default_policy_fields(self):
        self.policy_instance_list = []
        self.policy_instance_minimum = 0
@@ -52,7 +52,7 @@ from awx.main.utils import _inventory_updates
from awx.main.utils.safe_yaml import sanitize_jinja


-__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'CustomInventoryScript', 'SmartInventoryMembership']
+__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership']

logger = logging.getLogger('awx.main.models.inventory')
@@ -821,7 +821,6 @@ class InventorySourceOptions(BaseModel):
        ('openstack', _('OpenStack')),
        ('rhv', _('Red Hat Virtualization')),
        ('tower', _('Ansible Tower')),
-        ('custom', _('Custom Script')),
    ]

    # From the options of the Django management base command
@@ -845,13 +844,6 @@ class InventorySourceOptions(BaseModel):
        blank=True,
        default='',
    )
-    source_script = models.ForeignKey(
-        'CustomInventoryScript',
-        null=True,
-        default=None,
-        blank=True,
-        on_delete=models.SET_NULL,
-    )
    source_vars = models.TextField(
        blank=True,
        default='',
@@ -885,14 +877,14 @@ class InventorySourceOptions(BaseModel):
            '}'
            'The host would be marked enabled. If power_state where any '
            'value other than powered_on then the host would be disabled '
-            'when imported into Tower. If the key is not found then the '
+            'when imported. If the key is not found then the '
            'host will be enabled'
        ),
    )
    host_filter = models.TextField(
        blank=True,
        default='',
-        help_text=_('Regex where only matching hosts will be imported into Tower.'),
+        help_text=_('Regex where only matching hosts will be imported.'),
    )
    overwrite = models.BooleanField(
        default=False,
@@ -1328,7 +1320,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin):
    class Meta:
        app_label = 'main'
-        unique_together = [('name', 'organization')]
        ordering = ('name',)

    script = prevent_search(
@@ -1338,21 +1329,6 @@ class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin):
            help_text=_('Inventory script contents'),
        )
    )
-    organization = models.ForeignKey(
-        'Organization',
-        related_name='custom_inventory_scripts',
-        help_text=_('Organization owning this inventory script'),
-        blank=False,
-        null=True,
-        on_delete=models.SET_NULL,
-    )
-
-    admin_role = ImplicitRoleField(
-        parent_role='organization.admin_role',
-    )
-    read_role = ImplicitRoleField(
-        parent_role=['organization.auditor_role', 'organization.member_role', 'admin_role'],
-    )
-
    def get_absolute_url(self, request=None):
        return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)
@@ -162,7 +162,7 @@ class JobOptions(BaseModel):
    use_fact_cache = models.BooleanField(
        default=False,
        help_text=_(
-            "If enabled, Tower will act as an Ansible Fact Cache Plugin; persisting "
+            "If enabled, the service will act as an Ansible Fact Cache Plugin; persisting "
            "facts at the end of a playbook run to the database and caching facts for use by Ansible."
        ),
    )
@@ -587,10 +587,6 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana

        return RunJob

-    @classmethod
-    def supports_isolation(cls):
-        return True
-
    def _global_timeout_setting(self):
        return 'DEFAULT_JOB_TIMEOUT'
@@ -759,7 +755,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana

    @property
    def can_run_containerized(self):
-        return any([ig for ig in self.preferred_instance_groups if ig.is_container_group])
+        return True

    @property
    def is_container_group_task(self):
@@ -114,6 +114,13 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
    def _get_related_jobs(self):
        return UnifiedJob.objects.non_polymorphic().filter(organization=self)

+    def create_default_galaxy_credential(self):
+        from awx.main.models import Credential
+
+        public_galaxy_credential = Credential.objects.filter(managed_by_tower=True, name='Ansible Galaxy').first()
+        if public_galaxy_credential not in self.galaxy_credentials.all():
+            self.galaxy_credentials.add(public_galaxy_credential)
+

class OrganizationGalaxyCredentialMembership(models.Model):
@@ -115,6 +115,10 @@ class ProjectOptions(models.Model):
        default=False,
        help_text=_('Delete the project before syncing.'),
    )
+    scm_track_submodules = models.BooleanField(
+        default=False,
+        help_text=_('Track submodules latest commits on defined branch.'),
+    )
    credential = models.ForeignKey(
        'Credential',
        related_name='%(class)ss',
@@ -4,6 +4,7 @@
# Python
from io import StringIO
import datetime
import decimal
import codecs
import json
import logging
@@ -587,7 +588,7 @@ class UnifiedJob(
        blank=True,
        default='',
        editable=False,
-        help_text=_("The instance that managed the isolated execution environment."),
+        help_text=_("The instance that managed the execution environment."),
    )
    notifications = models.ManyToManyField(
        'Notification',
@@ -736,10 +737,6 @@ class UnifiedJob(
    def _get_task_class(cls):
        raise NotImplementedError # Implement in subclasses.

-    @classmethod
-    def supports_isolation(cls):
-        return False
-
    @property
    def can_run_containerized(self):
        return False
@@ -842,15 +839,16 @@ class UnifiedJob(
        if 'finished' not in update_fields:
            update_fields.append('finished')

+        dq = decimal.Decimal('1.000')
+        if self.elapsed is None:
+            self.elapsed = decimal.Decimal(0.0).quantize(dq)
+
        # If we have a start and finished time, and haven't already calculated
        # out the time that elapsed, do so.
-        if self.started and self.finished and not self.elapsed:
+        if self.started and self.finished and self.elapsed == 0.0:
            td = self.finished - self.started
-            elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
-        else:
-            elapsed = 0.0
-        if self.elapsed != elapsed:
-            self.elapsed = str(elapsed)
+            elapsed = decimal.Decimal(td.total_seconds())
+            self.elapsed = elapsed.quantize(dq)
            if 'elapsed' not in update_fields:
                update_fields.append('elapsed')
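
The rewritten elapsed computation stores a Decimal quantized to three places instead of a stringified float. A small runnable sketch of the behavior, using two placeholder datetimes:

    import decimal
    from datetime import datetime, timedelta

    dq = decimal.Decimal('1.000')
    started = datetime(2021, 4, 27, 12, 0, 0)
    finished = started + timedelta(seconds=5, microseconds=250000)

    td = finished - started
    elapsed = decimal.Decimal(td.total_seconds()).quantize(dq)
    print(elapsed)  # 5.250 -- three decimal places, matching the '1.000' quantizer
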
@@ -1222,7 +1220,7 @@ class UnifiedJob(
        raise NotImplementedError # Implement in subclass.

    def websocket_emit_data(self):
-        ''' Return extra data that should be included when submitting data to the browser over the websocket connection '''
+        '''Return extra data that should be included when submitting data to the browser over the websocket connection'''
        websocket_data = dict(type=self.job_type_name)
        if self.spawned_by_workflow:
            websocket_data.update(dict(workflow_job_id=self.workflow_job_id, workflow_node_id=self.workflow_node_id))
@@ -1400,12 +1398,11 @@ class UnifiedJob(
    @property
    def preferred_instance_groups(self):
        """
-        Return Instance/Rampart Groups preferred by this unified job templates
+        Return Instance/Rampart Groups preferred by this unified job template
        """
        if not self.unified_job_template:
            return []
-        template_groups = [x for x in self.unified_job_template.instance_groups.all()]
-        return template_groups
+        return list(self.unified_job_template.instance_groups.all())

    @property
    def global_instance_groups(self):
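
The simplified property just materializes the related queryset; list() forces evaluation exactly as the removed comprehension did. A one-line sketch of the equivalence, assuming a Django related manager named instance_groups:

    template_groups = [x for x in template.instance_groups.all()]  # old form
    template_groups = list(template.instance_groups.all())         # new form, same result
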
@@ -1465,9 +1462,6 @@ class UnifiedJob(
    def get_queue_name(self):
        return self.controller_node or self.execution_node or get_local_queuename()

-    def is_isolated(self):
-        return bool(self.controller_node)
-
    @property
    def is_container_group_task(self):
        return False
@@ -9,7 +9,7 @@ from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _

from awx.main.notifications.base import AWXBaseEmailBackend
-from awx.main.utils import get_awx_version
+from awx.main.utils import get_awx_http_client_headers
from awx.main.notifications.custom_notification_base import CustomNotificationBase

logger = logging.getLogger('awx.main.notifications.webhook_backend')
@@ -61,9 +61,6 @@ class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase):

    def send_messages(self, messages):
        sent_messages = 0
-        self.headers['Content-Type'] = 'application/json'
-        if 'User-Agent' not in self.headers:
-            self.headers['User-Agent'] = "Tower {}".format(get_awx_version())
        if self.http_method.lower() not in ['put', 'post']:
            raise ValueError("HTTP method must be either 'POST' or 'PUT'.")
        chosen_method = getattr(requests, self.http_method.lower(), None)
@@ -75,7 +72,7 @@ class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase):
                "{}".format(m.recipients()[0]),
                auth=auth,
                data=json.dumps(m.body, ensure_ascii=False).encode('utf-8'),
-                headers=self.headers,
+                headers=get_awx_http_client_headers(),
                verify=(not self.disable_ssl_verification),
            )
            if r.status_code >= 400:
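
The webhook backend stops building Content-Type and User-Agent headers inline and delegates to a shared helper. A hypothetical sketch of the shape such a helper could take (the real get_awx_http_client_headers lives in awx.main.utils and may differ in detail):

    from awx.main.utils import get_awx_version

    def get_awx_http_client_headers():
        # hypothetical: centralize the headers every outbound AWX HTTP call shares
        return {
            'Content-Type': 'application/json',
            'User-Agent': 'AWX {}'.format(get_awx_version()),
        }
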
@@ -2,7 +2,7 @@ from collections import deque


class SimpleDAG(object):
-    ''' A simple implementation of a directed acyclic graph '''
+    '''A simple implementation of a directed acyclic graph'''

    def __init__(self):
        self.nodes = []
@@ -1,5 +1,4 @@
import collections
import time
import logging
from base64 import b64encode

@@ -6,7 +6,6 @@ from datetime import timedelta
import logging
import uuid
import json
-import random
from types import SimpleNamespace

# Django
@@ -253,14 +252,6 @@ class TaskManager:
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

-        controller_node = None
-        if task.supports_isolation() and rampart_group.controller_id:
-            try:
-                controller_node = rampart_group.choose_online_controller_node()
-            except IndexError:
-                logger.debug("No controllers available in group {} to run {}".format(rampart_group.name, task.log_format))
-                return
-
        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
@@ -277,38 +268,24 @@ class TaskManager:
            task.send_notification_templates('running')
            logger.debug('Transitioning %s to running status.', task.log_format)
            schedule_task_manager()
-        elif not task.supports_isolation() and rampart_group.controller_id:
-            # non-Ansible jobs on isolated instances run on controller
-            task.instance_group = rampart_group.controller
-            task.execution_node = random.choice(list(rampart_group.controller.instances.all().values_list('hostname', flat=True)))
-            logger.debug('Submitting isolated {} to queue {} on node {}.'.format(task.log_format, task.instance_group.name, task.execution_node))
-        elif controller_node:
-            task.instance_group = rampart_group
-            task.execution_node = instance.hostname
-            task.controller_node = controller_node
-            logger.debug('Submitting isolated {} to queue {} controlled by {}.'.format(task.log_format, task.execution_node, controller_node))
        elif rampart_group.is_container_group:
            # find one real, non-containerized instance with capacity to
            # act as the controller for k8s API interaction
            match = None
-            for group in InstanceGroup.objects.all():
-                if group.is_container_group or group.controller_id:
-                    continue
+            for group in InstanceGroup.objects.filter(is_container_group=False):
                match = group.fit_task_to_most_remaining_capacity_instance(task, group.instances.all())
                if match:
                    break
            task.instance_group = rampart_group
            if match is None:
                logger.warn('No available capacity to run containerized <{}>.'.format(task.log_format))
+            elif task.can_run_containerized and any(ig.is_container_group for ig in task.preferred_instance_groups):
+                task.controller_node = match.hostname
            else:
-                if task.supports_isolation():
-                    task.controller_node = match.hostname
-                else:
-                    # project updates and inventory updates don't *actually* run in pods,
-                    # so just pick *any* non-isolated, non-containerized host and use it
-                    # as the execution node
-                    task.execution_node = match.hostname
-                    logger.debug('Submitting containerized {} to queue {}.'.format(task.log_format, task.execution_node))
+                # project updates and inventory updates don't *actually* run in pods, so
+                # just pick *any* non-containerized host and use it as the execution node
+                task.execution_node = match.hostname
+                logger.debug('Submitting containerized {} to queue {}.'.format(task.log_format, task.execution_node))
        else:
            task.instance_group = rampart_group
            if instance is not None:
@@ -11,5 +11,5 @@ logger = logging.getLogger('awx.main.scheduler')

@task(queue=get_local_queuename)
def run_task_manager():
-    logger.debug("Running Tower task manager.")
+    logger.debug("Running task manager.")
    TaskManager().schedule()
@@ -378,7 +378,6 @@ def model_serializer_mapping():
        models.Group: serializers.GroupSerializer,
        models.InstanceGroup: serializers.InstanceGroupSerializer,
        models.InventorySource: serializers.InventorySourceSerializer,
-        models.CustomInventoryScript: serializers.CustomInventoryScriptSerializer,
        models.Credential: serializers.CredentialSerializer,
        models.Team: serializers.TeamSerializer,
        models.Project: serializers.ProjectSerializer,
@@ -27,12 +27,13 @@ import socket
import threading
import concurrent.futures
from base64 import b64encode
+import subprocess

# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
from django.db.models.fields.related import ForeignKey
-from django.utils.timezone import now, timedelta
+from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
@@ -83,7 +84,6 @@ from awx.main.models import (
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
-from awx.main.isolated import manager as isolated_manager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
@@ -169,8 +169,6 @@ def dispatch_startup():
    #
    apply_cluster_membership_policies()
    cluster_node_heartbeat()
-    if Instance.objects.me().is_controller():
-        awx_isolated_heartbeat()
    Metrics().clear_values()

    # Update Tower's rsyslog.conf file based on loggins settings in the db
@@ -204,13 +202,8 @@ def apply_cluster_membership_policies():
        started_compute = time.time()
        all_instances = list(Instance.objects.order_by('id'))
        all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
-        iso_hostnames = set([])
-        for ig in all_groups:
-            if ig.controller_id is not None:
-                iso_hostnames.update(ig.policy_instance_list)
-
-        considered_instances = [inst for inst in all_instances if inst.hostname not in iso_hostnames]
-        total_instances = len(considered_instances)
+        total_instances = len(all_instances)
        actual_groups = []
        actual_instances = []
        Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
@@ -231,18 +224,12 @@ def apply_cluster_membership_policies():
            if group_actual.instances:
                logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))

-            if ig.controller_id is None:
-                actual_groups.append(group_actual)
-            else:
-                # For isolated groups, _only_ apply the policy_instance_list
-                # do not add to in-memory list, so minimum rules not applied
-                logger.debug('Committing instances to isolated group {}'.format(ig.name))
-                ig.instances.set(group_actual.instances)
+            actual_groups.append(group_actual)

        # Process Instance minimum policies next, since it represents a concrete lower bound to the
        # number of instances to make available to instance groups
-        actual_instances = [Node(obj=i, groups=[]) for i in considered_instances if i.managed_by_policy]
-        logger.debug("Total non-isolated instances:{} available for policy: {}".format(total_instances, len(actual_instances)))
+        actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
+        logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            policy_min_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
@@ -284,7 +271,7 @@ def apply_cluster_membership_policies():
        logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
        return

-    # On a differential basis, apply instances to non-isolated groups
+    # On a differential basis, apply instances to groups
    with transaction.atomic():
        for g in actual_groups:
            if g.obj.is_container_group:
@@ -380,7 +367,7 @@ def gather_analytics():
    from rest_framework.fields import DateTimeField

    last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
-    last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather else None
+    last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
    gather_time = now()

    if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
@@ -396,11 +383,29 @@ def purge_old_stdout_files():
            logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT, f)))


+@task(queue=get_local_queuename)
+def cleanup_execution_environment_images():
+    if settings.IS_K8S:
+        return
+    process = subprocess.run('podman images --filter="dangling=true" --format json'.split(" "), capture_output=True)
+    if process.returncode != 0:
+        logger.debug("Cleanup execution environment images: could not get list of images")
+        return
+    if len(process.stdout) > 0:
+        images_system = json.loads(process.stdout)
+        for e in images_system:
+            image_name = e["Id"]
+            logger.debug(f"Cleanup execution environment images: deleting {image_name}")
+            process = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
+            if process.returncode != 0:
+                logger.debug(f"Failed to delete image {image_name}")
+
+
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
    logger.debug("Cluster node heartbeat task.")
    nowtime = now()
-    instance_list = list(Instance.objects.all_non_isolated())
+    instance_list = list(Instance.objects.all())
    this_inst = None
    lost_instances = []
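
The new cleanup task shells out to podman and parses its JSON listing of dangling images. A stripped-down sketch of the same loop, runnable anywhere podman is on PATH:

    import json
    import subprocess

    # list dangling images as JSON, then force-remove each one
    proc = subprocess.run(
        ['podman', 'images', '--filter', 'dangling=true', '--format', 'json'],
        capture_output=True,
    )
    if proc.returncode == 0 and proc.stdout:
        for image in json.loads(proc.stdout):
            subprocess.run(['podman', 'rmi', image['Id'], '-f'], stdout=subprocess.DEVNULL)
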
@@ -467,6 +472,9 @@ def cluster_node_heartbeat():

@task(queue=get_local_queuename)
def awx_k8s_reaper():
+    if not settings.RECEPTOR_RELEASE_WORK:
+        return
+
    from awx.main.scheduler.kubernetes import PodManager # prevent circular import

    for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
@@ -481,30 +489,6 @@ def awx_k8s_reaper():
            logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))


-@task(queue=get_local_queuename)
-def awx_isolated_heartbeat():
-    local_hostname = settings.CLUSTER_HOST_ID
-    logger.debug("Controlling node checking for any isolated management tasks.")
-    poll_interval = settings.AWX_ISOLATED_PERIODIC_CHECK
-    # Get isolated instances not checked since poll interval - some buffer
-    nowtime = now()
-    accept_before = nowtime - timedelta(seconds=(poll_interval - 10))
-    isolated_instance_qs = Instance.objects.filter(
-        rampart_groups__controller__instances__hostname=local_hostname,
-    )
-    isolated_instance_qs = isolated_instance_qs.filter(last_isolated_check__lt=accept_before) | isolated_instance_qs.filter(last_isolated_check=None)
-    # Fast pass of isolated instances, claiming the nodes to update
-    with transaction.atomic():
-        for isolated_instance in isolated_instance_qs:
-            isolated_instance.last_isolated_check = nowtime
-            # Prevent modified time from being changed, as in normal heartbeat
-            isolated_instance.save(update_fields=['last_isolated_check'])
-    # Slow pass looping over isolated IGs and their isolated instances
-    if len(isolated_instance_qs) > 0:
-        logger.debug("Managing isolated instances {}.".format(','.join([inst.hostname for inst in isolated_instance_qs])))
-        isolated_manager.IsolatedManager(CallbackQueueDispatcher.dispatch).health_check(isolated_instance_qs)
-
-
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
    with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
@@ -834,16 +818,10 @@ class BaseTask(object):
        """
        return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))

-    def build_execution_environment_params(self, instance):
+    def build_execution_environment_params(self, instance, private_data_dir):
        if settings.IS_K8S:
            return {}

-        if instance.execution_environment_id is None:
-            from awx.main.signals import disable_activity_stream
-
-            with disable_activity_stream():
-                self.instance = instance = self.update_model(instance.pk, execution_environment=instance.resolve_execution_environment())
-
        image = instance.execution_environment.image
        params = {
            "container_image": image,
@@ -851,6 +829,23 @@ class BaseTask(object):
            "container_options": ['--user=root'],
        }

+        if instance.execution_environment.credential:
+            cred = instance.execution_environment.credential
+            if cred.has_inputs(field_names=('host', 'username', 'password')):
+                path = os.path.split(private_data_dir)[0]
+                with open(path + '/auth.json', 'w') as authfile:
+                    os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR)
+
+                    host = cred.get_input('host')
+                    username = cred.get_input('username')
+                    password = cred.get_input('password')
+                    token = "{}:{}".format(username, password)
+                    auth_data = {'auths': {host: {'auth': b64encode(token.encode('ascii')).decode()}}}
+                    authfile.write(json.dumps(auth_data, indent=4))
+                    params["container_options"].append(f'--authfile={authfile.name}')
+            else:
+                raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
+
        pull = instance.execution_environment.pull
        if pull:
            params['container_options'].append(f'--pull={pull}')
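
The registry-auth branch above writes a podman-style auth.json beside the private data dir; the format is the standard containers auth config. A sketch of the file it produces, with placeholder credentials:

    import json
    from base64 import b64encode

    host, username, password = 'registry.example.com', 'user', 'secret'  # placeholders
    token = '{}:{}'.format(username, password)
    auth_data = {'auths': {host: {'auth': b64encode(token.encode('ascii')).decode()}}}
    print(json.dumps(auth_data, indent=4))
    # handed to ansible-runner via: container_options += ['--authfile=/path/to/auth.json']
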
@@ -958,36 +953,6 @@ class BaseTask(object):
        Build ansible yaml file filled with extra vars to be passed via -e@file.yml
        """

-    def build_params_resource_profiling(self, instance, private_data_dir):
-        resource_profiling_params = {}
-        if self.should_use_resource_profiling(instance):
-            cpu_poll_interval = settings.AWX_RESOURCE_PROFILING_CPU_POLL_INTERVAL
-            mem_poll_interval = settings.AWX_RESOURCE_PROFILING_MEMORY_POLL_INTERVAL
-            pid_poll_interval = settings.AWX_RESOURCE_PROFILING_PID_POLL_INTERVAL
-
-            results_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
-            if not os.path.isdir(results_dir):
-                os.makedirs(results_dir, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
-            # FIXME: develop some better means of referencing paths inside containers
-            container_results_dir = os.path.join('/runner', 'artifacts/playbook_profiling')
-
-            logger.debug(
-                'Collected the following resource profiling intervals: cpu: {} mem: {} pid: {}'.format(cpu_poll_interval, mem_poll_interval, pid_poll_interval)
-            )
-
-            resource_profiling_params.update(
-                {
-                    'resource_profiling': True,
-                    'resource_profiling_base_cgroup': 'ansible-runner',
-                    'resource_profiling_cpu_poll_interval': cpu_poll_interval,
-                    'resource_profiling_memory_poll_interval': mem_poll_interval,
-                    'resource_profiling_pid_poll_interval': pid_poll_interval,
-                    'resource_profiling_results_dir': container_results_dir,
-                }
-            )
-
-        return resource_profiling_params
-
    def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
        env_path = os.path.join(private_data_dir, 'env')
        try:
@@ -1014,7 +979,7 @@ class BaseTask(object):
        else:
            env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")

-    def build_env(self, instance, private_data_dir, isolated, private_data_files=None):
+    def build_env(self, instance, private_data_dir, private_data_files=None):
        """
        Build environment dictionary for ansible-playbook.
        """
@@ -1029,13 +994,31 @@ class BaseTask(object):

         env['AWX_PRIVATE_DATA_DIR'] = private_data_dir

-        return env
-
-    def should_use_resource_profiling(self, job):
-        """
-        Return whether this task should use resource profiling
-        """
-        return False
+        ee_cred = self.instance.execution_environment.credential
+        if ee_cred:
+            verify_ssl = ee_cred.get_input('verify_ssl')
+            if not verify_ssl:
+                pdd_wrapper_path = os.path.split(private_data_dir)[0]
+                registries_conf_path = os.path.join(pdd_wrapper_path, 'registries.conf')
+                host = ee_cred.get_input('host')
+
+                with open(registries_conf_path, 'w') as registries_conf:
+                    os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)
+
+                    lines = [
+                        '[[registry]]',
+                        'location = "{}"'.format(host),
+                        'insecure = true',
+                    ]
+
+                    registries_conf.write('\n'.join(lines))
+
+                # Podman >= 3.1.0
+                env['CONTAINERS_REGISTRIES_CONF'] = registries_conf_path
+                # Podman < 3.1.0
+                env['REGISTRIES_CONFIG_PATH'] = registries_conf_path
+
+        return env

     def build_inventory(self, instance, private_data_dir):
         script_params = dict(hostvars=True, towervars=True)
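When verify_ssl is disabled on the EE credential, build_env above points Podman at a generated registries.conf that marks the registry insecure. A sketch reproducing that file and the environment handoff (host and path values are illustrative):

    import os
    import stat

    host = 'registry.example.com'  # stand-in for ee_cred.get_input('host')
    path = '/tmp/registries.conf'  # AWX writes this next to the private data dir
    with open(path, 'w') as f:
        os.chmod(f.name, stat.S_IRUSR | stat.S_IWUSR)
        f.write('\n'.join(['[[registry]]', 'location = "{}"'.format(host), 'insecure = true']))
    os.environ['CONTAINERS_REGISTRIES_CONF'] = path  # read by Podman >= 3.1.0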
@@ -1117,7 +1100,7 @@ class BaseTask(object):
         """
         instance.log_lifecycle("post_run")

-    def final_run_hook(self, instance, status, private_data_dir, fact_modification_times, isolated_manager_instance=None):
+    def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
         """
         Hook for any steps to run after job/task is marked as complete.
         """
@@ -1258,16 +1241,10 @@ class BaseTask(object):
                 if k in job_env:
                     job_env[k] = v
             self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
-
-    def check_handler(self, config):
-        """
-        IsolatedManager callback triggered by the repeated checks of the isolated node
-        """
-        job_env = build_safe_env(config['env'])
-        for k, v in self.safe_cred_env.items():
-            if k in job_env:
-                job_env[k] = v
-        self.instance = self.update_model(self.instance.pk, job_args=json.dumps(config['command']), job_cwd=config['cwd'], job_env=job_env)
+        elif status_data['status'] == 'error':
+            result_traceback = status_data.get('result_traceback', None)
+            if result_traceback:
+                self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)

     @with_path_cleanup
     def run(self, pk, **kwargs):
@@ -1276,6 +1253,12 @@ class BaseTask(object):
         """
         self.instance = self.model.objects.get(pk=pk)

+        if self.instance.execution_environment_id is None:
+            from awx.main.signals import disable_activity_stream
+
+            with disable_activity_stream():
+                self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
+
         # self.instance because of the update_model pattern and when it's used in callback handlers
         self.instance = self.update_model(pk, status='running', start_args='')  # blank field to remove encrypted passwords
         self.instance.websocket_emit_status("running")
@@ -1290,7 +1273,6 @@ class BaseTask(object):
         self.safe_env = {}
         self.safe_cred_env = {}
         private_data_dir = None
-        isolated_manager_instance = None

         # store a reference to the parent workflow job (if any) so we can include
         # it in event data JSON
@@ -1298,7 +1280,6 @@ class BaseTask(object):
             self.parent_workflow_job_id = self.instance.get_workflow_job().id

         try:
-            isolated = self.instance.is_isolated()
             self.instance.send_notification_templates("running")
             private_data_dir = self.build_private_data_dir(self.instance)
             self.pre_run_hook(self.instance, private_data_dir)
@@ -1332,8 +1313,7 @@ class BaseTask(object):
             passwords = self.build_passwords(self.instance, kwargs)
             self.build_extra_vars_file(self.instance, private_data_dir)
             args = self.build_args(self.instance, private_data_dir, passwords)
-            resource_profiling_params = self.build_params_resource_profiling(self.instance, private_data_dir)
-            env = self.build_env(self.instance, private_data_dir, isolated, private_data_files=private_data_files)
+            env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
             self.safe_env = build_safe_env(env)

             credentials = self.build_credentials_list(self.instance)
@@ -1359,7 +1339,6 @@ class BaseTask(object):
                 'settings': {
                     'job_timeout': self.get_instance_timeout(self.instance),
                     'suppress_ansible_output': True,
-                    **resource_profiling_params,
                 },
             }

@@ -1394,6 +1373,7 @@ class BaseTask(object):
                 )
             else:
                 receptor_job = AWXReceptorJob(self, params)
+                self.unit_id = receptor_job.unit_id
                 res = receptor_job.run()

                 if not res:
@@ -1434,7 +1414,7 @@ class BaseTask(object):
         self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)

         try:
-            self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times, isolated_manager_instance=isolated_manager_instance)
+            self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
         except Exception:
             logger.exception('{} Final run hook errored.'.format(self.instance.log_format))

@@ -1519,11 +1499,11 @@ class RunJob(BaseTask):

         return passwords

-    def build_env(self, job, private_data_dir, isolated=False, private_data_files=None):
+    def build_env(self, job, private_data_dir, private_data_files=None):
         """
         Build environment dictionary for ansible-playbook.
         """
-        env = super(RunJob, self).build_env(job, private_data_dir, isolated=isolated, private_data_files=private_data_files)
+        env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
         if private_data_files is None:
             private_data_files = {}
         # Set environment variables needed for inventory and job event
@@ -1534,10 +1514,9 @@ class RunJob(BaseTask):
         env['PROJECT_REVISION'] = job.project.scm_revision
         env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
         env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
-        if not isolated:
-            if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
-                env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
-            env['AWX_HOST'] = settings.TOWER_URL_BASE
+        if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
+            env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
+        env['AWX_HOST'] = settings.TOWER_URL_BASE

         # Create a directory for ControlPath sockets that is unique to each job
         cp_dir = os.path.join(private_data_dir, 'cp')
@@ -1702,17 +1681,11 @@ class RunJob(BaseTask):
             d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
         return d

-    def should_use_resource_profiling(self, job):
-        """
-        Return whether this task should use resource profiling
-        """
-        return settings.AWX_RESOURCE_PROFILING_ENABLED
-
-    def build_execution_environment_params(self, instance):
+    def build_execution_environment_params(self, instance, private_data_dir):
         if settings.IS_K8S:
             return {}

-        params = super(RunJob, self).build_execution_environment_params(instance)
+        params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
         # If this has an insights agent and it is not already mounted then show it
         insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
         if instance.use_fact_cache and os.path.exists(insights_dir):
@@ -1774,9 +1747,6 @@ class RunJob(BaseTask):
         if sync_needs:
             pu_ig = job.instance_group
             pu_en = job.execution_node
-            if job.is_isolated() is True:
-                pu_ig = pu_ig.controller
-                pu_en = settings.CLUSTER_HOST_ID

             sync_metafields = dict(
                 launch_type="sync",
@@ -1831,7 +1801,7 @@ class RunJob(BaseTask):
         # ran inside of the event saving code
         update_smart_memberships_for_inventory(job.inventory)

-    def final_run_hook(self, job, status, private_data_dir, fact_modification_times, isolated_manager_instance=None):
+    def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
         super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
         if not private_data_dir:
             # If there's no private data dir, that means we didn't get into the
@@ -1843,8 +1813,6 @@ class RunJob(BaseTask):
                 os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
                 fact_modification_times,
             )
-            if isolated_manager_instance and not job.is_container_group_task:
-                isolated_manager_instance.cleanup()

         try:
             inventory = job.inventory
@@ -1908,11 +1876,11 @@ class RunProjectUpdate(BaseTask):
             passwords['scm_password'] = project_update.credential.get_input('password', default='')
         return passwords

-    def build_env(self, project_update, private_data_dir, isolated=False, private_data_files=None):
+    def build_env(self, project_update, private_data_dir, private_data_files=None):
         """
         Build environment dictionary for ansible-playbook.
         """
-        env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, isolated=isolated, private_data_files=private_data_files)
+        env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
         env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
         env['ANSIBLE_ASK_PASS'] = str(False)
         env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
@@ -2026,6 +1994,7 @@ class RunProjectUpdate(BaseTask):
             'scm_url': scm_url,
             'scm_branch': scm_branch,
             'scm_clean': project_update.scm_clean,
+            'scm_track_submodules': project_update.scm_track_submodules,
             'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
             'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
         }
@@ -2323,11 +2292,11 @@ class RunProjectUpdate(BaseTask):
         if status == 'successful' and instance.launch_type != 'sync':
             self._update_dependent_inventories(instance, dependent_inventory_sources)

-    def build_execution_environment_params(self, instance):
+    def build_execution_environment_params(self, instance, private_data_dir):
         if settings.IS_K8S:
             return {}

-        params = super(RunProjectUpdate, self).build_execution_environment_params(instance)
+        params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
         project_path = instance.get_project_path(check_if_exists=False)
         cache_path = instance.get_cache_path()
         params.setdefault('container_volume_mounts', [])
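The params dict extended here feeds ansible-runner's process-isolation settings; container_volume_mounts entries are host:container[:options] strings, so the project and cache directories end up bind-mounted at the same paths inside the EE container. A sketch of the resulting shape (the paths and the :Z SELinux label are illustrative assumptions, not values from this diff):

    params = {
        'container_image': 'quay.io/ansible/awx-ee:latest',  # illustrative image
        'container_options': ['--user=root'],
    }
    params.setdefault('container_volume_mounts', [])
    project_path = '/var/lib/awx/projects/_8__demo'  # illustrative
    params['container_volume_mounts'].append('{0}:{0}:Z'.format(project_path))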
@@ -2366,14 +2335,14 @@ class RunInventoryUpdate(BaseTask):
         injector = InventorySource.injectors[inventory_update.source]()
         return injector.build_private_data(inventory_update, private_data_dir)

-    def build_env(self, inventory_update, private_data_dir, isolated, private_data_files=None):
+    def build_env(self, inventory_update, private_data_dir, private_data_files=None):
         """Build environment dictionary for ansible-inventory.

         Most environment variables related to credentials or configuration
         are accomplished by the inventory source injectors (in this method)
         or custom credential type injectors (in main run method).
         """
-        env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, isolated, private_data_files=private_data_files)
+        env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)

         if private_data_files is None:
             private_data_files = {}
@@ -2391,7 +2360,7 @@ class RunInventoryUpdate(BaseTask):
         # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
         env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'

-        if inventory_update.source in ['scm', 'custom']:
+        if inventory_update.source == 'scm':
             for env_k in inventory_update.source_vars_dict:
                 if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
                     env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
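For scm sources, source_vars are copied straight into the job environment unless the key is already set or appears in the INV_ENV_VARIABLE_BLOCKED setting. A minimal sketch of that filter with stand-in values:

    INV_ENV_VARIABLE_BLOCKED = ('PATH', 'PYTHONPATH')  # stand-in for the AWX setting
    env = {'FOO': 'already-set'}
    source_vars_dict = {'FOO': 'ignored', 'PATH': 'ignored', 'ANSIBLE_VERBOSITY': '3'}
    for env_k in source_vars_dict:
        if str(env_k) not in env and str(env_k) not in INV_ENV_VARIABLE_BLOCKED:
            env[str(env_k)] = str(source_vars_dict[env_k])
    # env gains only ANSIBLE_VERBOSITY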
@@ -2492,16 +2461,7 @@ class RunInventoryUpdate(BaseTask):
             rel_path = injector.filename
         elif src == 'scm':
             rel_path = os.path.join('project', inventory_update.source_path)
-        elif src == 'custom':
-            handle, inventory_path = tempfile.mkstemp(dir=private_data_dir)
-            f = os.fdopen(handle, 'w')
-            if inventory_update.source_script is None:
-                raise RuntimeError('Inventory Script does not exist')
-            f.write(inventory_update.source_script.script)
-            f.close()
-            os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
-
-            rel_path = os.path.split(inventory_path)[-1]
         return rel_path

     def build_cwd(self, inventory_update, private_data_dir):
@@ -2699,11 +2659,11 @@ class RunAdHocCommand(BaseTask):
                 passwords[field] = value
         return passwords

-    def build_env(self, ad_hoc_command, private_data_dir, isolated=False, private_data_files=None):
+    def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
         """
         Build environment dictionary for ansible.
         """
-        env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, isolated=isolated, private_data_files=private_data_files)
+        env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
         # Set environment variables needed for inventory and ad hoc event
         # callbacks to work.
         env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
@@ -2712,14 +2672,6 @@ class RunAdHocCommand(BaseTask):
         env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
         env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'

-        # Create a directory for ControlPath sockets that is unique to each
-        # ad hoc command
-        cp_dir = os.path.join(private_data_dir, 'cp')
-        if not os.path.exists(cp_dir):
-            os.mkdir(cp_dir, 0o700)
-        # FIXME: more elegant way to manage this path in container
-        env['ANSIBLE_SSH_CONTROL_PATH'] = '/runner/cp'
-
         return env

     def build_args(self, ad_hoc_command, private_data_dir, passwords):
@@ -2817,11 +2769,6 @@ class RunAdHocCommand(BaseTask):
             d[r'Password:\s*?$'] = 'ssh_password'
         return d

-    def final_run_hook(self, adhoc_job, status, private_data_dir, fact_modification_times, isolated_manager_instance=None):
-        super(RunAdHocCommand, self).final_run_hook(adhoc_job, status, private_data_dir, fact_modification_times)
-        if isolated_manager_instance:
-            isolated_manager_instance.cleanup()
-

 @task(queue=get_local_queuename)
 class RunSystemJob(BaseTask):
@@ -2830,7 +2777,7 @@ class RunSystemJob(BaseTask):
     event_model = SystemJobEvent
     event_data_key = 'system_job_id'

-    def build_execution_environment_params(self, system_job):
+    def build_execution_environment_params(self, system_job, private_data_dir):
         return {}

     def build_args(self, system_job, private_data_dir, passwords):
@@ -2863,8 +2810,8 @@ class RunSystemJob(BaseTask):
         os.chmod(path, stat.S_IRUSR)
         return path

-    def build_env(self, instance, private_data_dir, isolated=False, private_data_files=None):
-        base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, isolated=isolated, private_data_files=private_data_files)
+    def build_env(self, instance, private_data_dir, private_data_files=None):
+        base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
         # TODO: this is able to run by turning off isolation
         # the goal is to run it in a container instead
         env = dict(os.environ.items())
@@ -2946,7 +2893,7 @@ class AWXReceptorJob:
         self.unit_id = None

         if self.task and not self.task.instance.is_container_group_task:
-            execution_environment_params = self.task.build_execution_environment_params(self.task.instance)
+            execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
             self.runner_params['settings'].update(execution_environment_params)

     def run(self):
@@ -2998,6 +2945,8 @@ class AWXReceptorJob:
         # TODO: There should be a more efficient way of getting this information
         receptor_work_list = receptor_ctl.simple_command("work list")
         detail = receptor_work_list[self.unit_id]['Detail']
+        state_name = receptor_work_list[self.unit_id]['StateName']

         if 'exceeded quota' in detail:
             logger.warn(detail)
             log_name = self.task.instance.log_format
@@ -3005,6 +2954,11 @@ class AWXReceptorJob:
             self.task.update_model(self.task.instance.pk, status='pending')
             return

-        return res
+        # If ansible-runner ran, but an error occurred at runtime, the traceback information
+        # is saved via the status_handler passed in to the processor.
+        if state_name == 'Succeeded':
+            return res
+
+        raise RuntimeError(detail)
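The quota and traceback handling above keys off receptorctl's work-list output, where each work unit reports a StateName and a Detail string. A sketch of the same status check against a local receptor socket (the socket path and unit id are illustrative):

    from receptorctl.socket_interface import ReceptorControl

    receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')  # illustrative path
    work_list = receptor_ctl.simple_command('work list')
    unit = work_list['AbCdEf01']  # illustrative unit id
    if 'exceeded quota' in unit['Detail']:
        pass  # requeue, as the hunk above does by resetting status to 'pending'
    elif unit['StateName'] != 'Succeeded':
        raise RuntimeError(unit['Detail'])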
@@ -3069,6 +3023,11 @@ class AWXReceptorJob:
             if self.task.cancel_callback():
                 result = namedtuple('result', ['status', 'rc'])
                 return result('canceled', 1)

+            if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
+                self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
+                self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)
+
             time.sleep(1)

     @property
@@ -3079,13 +3038,15 @@ class AWXReceptorJob:
         ee = get_default_execution_environment()

         default_pod_spec = get_default_pod_spec()
-        default_pod_spec['spec']['containers'][0]['image'] = ee.image

         pod_spec_override = {}
         if self.task and self.task.instance.instance_group.pod_spec_override:
             pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
         pod_spec = {**default_pod_spec, **pod_spec_override}

+        pod_spec['spec']['containers'][0]['image'] = ee.image
+        pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
+
         if self.task:
             pod_spec['metadata'] = deepmerge(
                 pod_spec.get('metadata', {}),
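Note the merge semantics in the pod_spec hunk: pod_spec_override is shallow-merged with {**default, **override}, so a top-level key such as spec in the override replaces the default value wholesale; only metadata gets the deeper deepmerge treatment. That is why the image and args are re-applied to the merged spec afterward rather than set on the default beforehand. A small sketch of the pitfall (specs abbreviated and illustrative):

    default_pod_spec = {'spec': {'containers': [{'image': 'default-ee'}]}}
    pod_spec_override = {'spec': {'serviceAccountName': 'custom-sa'}}
    pod_spec = {**default_pod_spec, **pod_spec_override}
    # The override's 'spec' replaced the default one, dropping 'containers':
    assert 'containers' not in pod_spec['spec']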
@@ -12,7 +12,6 @@ def test_empty():
         "active_sessions": 0,
         "active_host_count": 0,
         "credential": 0,
-        "custom_inventory_script": 0,
         "custom_virtualenvs": 0,  # dev env ansible3
         "host": 0,
         "inventory": 0,
@@ -48,7 +47,6 @@ def test_database_counts(organization_factory, job_template_factory, workflow_jo
         rrule="DTSTART;TZID=America/New_York:20300504T150000",
         unified_job_template=jt.job_template,
     ).save()
-    models.CustomInventoryScript(organization=objs.organization).save()

     counts = collectors.counts(None)
     for key in (
@@ -62,7 +60,6 @@ def test_database_counts(organization_factory, job_template_factory, workflow_jo
         "workflow_job_template",
         "host",
         "schedule",
-        "custom_inventory_script",
     ):
         assert counts[key] == 1
Some files were not shown because too many files have changed in this diff.