Mirror of https://github.com/ansible/awx.git (synced 2026-02-09 05:24:42 -03:30)

Compare commits (317 commits)
CHANGELOG.md (19 changed lines)
@@ -2,6 +2,25 @@

This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

## 9.2.0 (Feb 12, 2020)
- Added the ability to configure the convergence behavior of workflow nodes https://github.com/ansible/awx/issues/3054
- AWX now allows for a configurable global limit for fork count (per-job run). The default maximum is 200. https://github.com/ansible/awx/pull/5604
- Added the ability to specify AZURE_PUBLIC_CLOUD (for e.g., Azure Government KeyVault support) for the Azure credential plugin https://github.com/ansible/awx/issues/5138
- Added support for several additional parameters for Satellite dynamic inventory https://github.com/ansible/awx/pull/5598
- Added a new field to jobs for tracking the date/time a job is cancelled https://github.com/ansible/awx/pull/5610
- Made a series of additional optimizations to the callback receiver to further improve stdout write speed for running playbooks https://github.com/ansible/awx/pull/5677 https://github.com/ansible/awx/pull/5739
- Updated AWX to be compatible with Helm 3.x (https://github.com/ansible/awx/pull/5776)
- Optimized AWX's job dependency/scheduling code to drastically improve processing time in scenarios where there are many pending jobs scheduled simultaneously https://github.com/ansible/awx/issues/5154
- Fixed a bug which could cause SCM authentication details (basic auth passwords) to be reported to external loggers in certain failure scenarios (e.g., when a git clone fails and ansible itself prints an error message to stdout) https://github.com/ansible/awx/pull/5812
- Fixed a k8s installer bug that caused installs to fail in certain situations https://github.com/ansible/awx/issues/5574
- Fixed a number of issues that caused analytics gathering and reporting to run more often than necessary https://github.com/ansible/awx/pull/5721
- Fixed a bug in the AWX CLI that prevented JSON-type settings from saving properly https://github.com/ansible/awx/issues/5528
- Improved support for fetching custom virtualenv dependencies when AWX is installed behind a proxy https://github.com/ansible/awx/pull/5805
- Updated the bundled version of openstacksdk to address a known issue https://github.com/ansible/awx/issues/5821
- Updated the bundled vmware_inventory plugin to the latest version to address a bug https://github.com/ansible/awx/pull/5668
- Fixed a bug that can cause inventory updates to fail to properly save their output when run within a workflow https://github.com/ansible/awx/pull/5666
- Removed a number of pre-computed fields from the Host and Group models to improve AWX performance. As part of this change, inventory group UIs throughout the interface no longer display status icons https://github.com/ansible/awx/pull/5448

## 9.1.1 (Jan 14, 2020)

- Fixed a bug that caused database migrations on Kubernetes installs to hang https://github.com/ansible/awx/pull/5579
@@ -2,96 +2,8 @@

## Introduction

Upgrades using Django migrations are not expected to work in AWX. As a result, to upgrade to a new version, it is necessary to export resources from the old AWX node and import them into a freshly-installed node with the new version. The recommended way to do this is to use the tower-cli send/receive feature.
Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.

This tool does __not__ support export/import of the following:
* Logs/history
* Credential passwords
* LDAP/AWX config
Users who wish to upgrade modern AWX installations should follow the instructions at:

### Install & Configure Tower-CLI

In terminal, pip install tower-cli (if you do not have pip already, install [here](https://pip.pypa.io/en/stable/installing/)):
```
$ pip install --upgrade ansible-tower-cli
```

The AWX host URL, user, and password must be set for the AWX instance to be exported:
```
$ tower-cli config host http://<old-awx-host.example.com>
$ tower-cli config username <user>
$ tower-cli config password <pass>
```

For more information on installing tower-cli look [here](http://tower-cli.readthedocs.io/en/latest/quickstart.html).


### Export Resources

Export all objects

```$ tower-cli receive --all > assets.json```


### Teardown Old AWX

Clean up remnants of the old AWX install:

```docker rm -f $(docker ps -aq)``` # remove all old awx containers

```make clean-ui``` # clean up ui artifacts


### Install New AWX version

If you are installing AWX as a dev container, pull down the latest code or version you want from GitHub, build
the image locally, then start the container

```
git pull # retrieve latest AWX changes from repository
make docker-compose-build # build AWX image
make docker-compose # run container
```
For other install methods, refer to the [Install.md](https://github.com/ansible/awx/blob/devel/INSTALL.md).


### Import Resources

Configure tower-cli for your new AWX host as shown earlier. Import from a JSON file named assets.json

```
$ tower-cli config host http://<new-awx-host.example.com>
$ tower-cli config username <user>
$ tower-cli config password <pass>
$ tower-cli send assets.json
```

--------------------------------------------------------------------------------

## Additional Info

If you have two running AWX hosts, it is possible to copy all assets from one instance to another

```$ tower-cli receive --tower-host old-awx-host.example.com --all | tower-cli send --tower-host new-awx-host.example.com```


#### More Granular Exports:

Export all credentials

```$ tower-cli receive --credential all > credentials.json```
> Note: This exports the credentials with blank strings for passwords and secrets

Export a credential named "My Credential"

```$ tower-cli receive --credential "My Credential"```

#### More Granular Imports:

You could import anything except an organization defined in a JSON file named assets.json

```$ tower-cli send --prevent organization assets.json```
https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions
Makefile (2 changed lines)
@@ -122,7 +122,7 @@ clean-api:
rm -rf awx/projects

clean-awxkit:
rm -rf awxkit/*.egg-info awxkit/.tox
rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*

# convenience target to assert environment variables are defined
guard-%:
@@ -9,7 +9,7 @@ from functools import reduce
# Django
from django.core.exceptions import FieldError, ValidationError
from django.db import models
from django.db.models import Q
from django.db.models import Q, CharField, IntegerField, BooleanField
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
from django.contrib.contenttypes.models import ContentType

@@ -63,19 +63,19 @@ class TypeFilterBackend(BaseFilterBackend):
raise ParseError(*e.args)


def get_field_from_path(model, path):
def get_fields_from_path(model, path):
'''
Given a Django ORM lookup path (possibly over multiple models)
Returns the last field in the line, and also the revised lookup path
Returns the fields in the line, and also the revised lookup path
ex., given
model=Organization
path='project__timeout'
returns tuple of field at the end of the line as well as a corrected
path, for special cases we do substitutions
(<IntegerField for timeout>, 'project__timeout')
returns tuple of fields traversed as well and a corrected path,
for special cases we do substitutions
([<IntegerField for timeout>], 'project__timeout')
'''
# Store of all the fields used to detect repeats
field_set = set([])
field_list = []
new_parts = []
for name in path.split('__'):
if model is None:

@@ -111,13 +111,24 @@ def get_field_from_path(model, path):
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
elif getattr(field, '__prevent_search__', False):
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
if field in field_set:
if field in field_list:
# Field traversed twice, could create infinite JOINs, DoSing Tower
raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name))
field_set.add(field)
field_list.append(field)
model = getattr(field, 'related_model', None)

return field, '__'.join(new_parts)
return field_list, '__'.join(new_parts)


def get_field_from_path(model, path):
'''
Given a Django ORM lookup path (possibly over multiple models)
Returns the last field in the line, and the revised lookup path
ex.
(<IntegerField for timeout>, 'project__timeout')
'''
field_list, new_path = get_fields_from_path(model, path)
return (field_list[-1], new_path)


class FieldLookupBackend(BaseFilterBackend):

@@ -133,7 +144,11 @@ class FieldLookupBackend(BaseFilterBackend):
'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'in',
'isnull', 'search')

def get_field_from_lookup(self, model, lookup):
# A list of fields that we know can be filtered on without the possiblity
# of introducing duplicates
NO_DUPLICATES_WHITELIST = (CharField, IntegerField, BooleanField)

def get_fields_from_lookup(self, model, lookup):

if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
path, suffix = lookup.rsplit('__', 1)

@@ -147,11 +162,16 @@ class FieldLookupBackend(BaseFilterBackend):
# FIXME: Could build up a list of models used across relationships, use
# those lookups combined with request.user.get_queryset(Model) to make
# sure user cannot query using objects he could not view.
field, new_path = get_field_from_path(model, path)
field_list, new_path = get_fields_from_path(model, path)

new_lookup = new_path
new_lookup = '__'.join([new_path, suffix])
return field, new_lookup
return field_list, new_lookup

def get_field_from_lookup(self, model, lookup):
'''Method to match return type of single field, if needed.'''
field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
return (field_list[-1], new_lookup)

def to_python_related(self, value):
value = force_text(value)

@@ -182,7 +202,10 @@ class FieldLookupBackend(BaseFilterBackend):
except UnicodeEncodeError:
raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup)

field, new_lookup = self.get_field_from_lookup(model, lookup)
field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
field = field_list[-1]

needs_distinct = (not all(isinstance(f, self.NO_DUPLICATES_WHITELIST) for f in field_list))

# Type names are stored without underscores internally, but are presented and
# and serialized over the API containing underscores so we remove `_`

@@ -211,10 +234,10 @@ class FieldLookupBackend(BaseFilterBackend):
for rm_field in related_model._meta.fields:
if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
return value, new_lookups
return value, new_lookups, needs_distinct
else:
value = self.value_to_python_for_field(field, value)
return value, new_lookup
return value, new_lookup, needs_distinct

def filter_queryset(self, request, queryset, view):
try:

@@ -225,6 +248,7 @@ class FieldLookupBackend(BaseFilterBackend):
chain_filters = []
role_filters = []
search_filters = {}
needs_distinct = False
# Can only have two values: 'AND', 'OR'
# If 'AND' is used, an iterm must satisfy all condition to show up in the results.
# If 'OR' is used, an item just need to satisfy one condition to appear in results.

@@ -256,9 +280,12 @@ class FieldLookupBackend(BaseFilterBackend):
search_filter_relation = 'AND'
values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
for value in values:
search_value, new_keys = self.value_to_python(queryset.model, key, force_text(value))
search_value, new_keys, _ = self.value_to_python(queryset.model, key, force_text(value))
assert isinstance(new_keys, list)
search_filters[search_value] = new_keys
# by definition, search *only* joins across relations,
# so it _always_ needs a .distinct()
needs_distinct = True
continue

# Custom chain__ and or__ filters, mutually exclusive (both can

@@ -282,7 +309,9 @@ class FieldLookupBackend(BaseFilterBackend):
for value in values:
if q_int:
value = int(value)
value, new_key = self.value_to_python(queryset.model, key, value)
value, new_key, distinct = self.value_to_python(queryset.model, key, value)
if distinct:
needs_distinct = True
if q_chain:
chain_filters.append((q_not, new_key, value))
elif q_or:

@@ -332,7 +361,9 @@ class FieldLookupBackend(BaseFilterBackend):
else:
q = Q(**{k:v})
queryset = queryset.filter(q)
queryset = queryset.filter(*args).distinct()
queryset = queryset.filter(*args)
if needs_distinct:
queryset = queryset.distinct()
return queryset
except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
raise ParseError(e.args[0])
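The change above makes `.distinct()` conditional: it is only applied when a lookup traverses something other than a plain CharField/IntegerField/BooleanField, because only joins across relations can fan out into duplicate rows. A minimal sketch of the effect, using the `Organization`/`project__timeout` example from the docstring (import path assumed):

```
# Sketch: why a lookup that traverses a to-many relation needs .distinct().
# The JOIN returns one row per matching related object, so an organization
# with three matching projects would otherwise appear three times.
from awx.main.models import Organization  # import path assumed

qs = Organization.objects.filter(project__timeout__gt=0)

# FieldLookupBackend now only skips deduplication when every traversed field
# is a scalar on the whitelist; anything else gets:
qs = qs.distinct()
```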
@@ -98,26 +98,19 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'total_hosts',
|
||||
'hosts_with_active_failures',
|
||||
'total_groups',
|
||||
'groups_with_active_failures',
|
||||
'has_inventory_sources',
|
||||
'total_inventory_sources',
|
||||
'inventory_sources_with_failures',
|
||||
'organization_id',
|
||||
'kind',
|
||||
'insights_credential_id',),
|
||||
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
|
||||
'has_inventory_sources'),
|
||||
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
|
||||
'total_hosts',
|
||||
'hosts_with_active_failures',
|
||||
'total_groups',
|
||||
'groups_with_active_failures',
|
||||
'has_inventory_sources'),
|
||||
'host': DEFAULT_SUMMARY_FIELDS,
|
||||
'group': DEFAULT_SUMMARY_FIELDS,
|
||||
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
|
||||
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
|
||||
'job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job': DEFAULT_SUMMARY_FIELDS,
|
||||
@@ -125,7 +118,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'workflow_approval': DEFAULT_SUMMARY_FIELDS + ('timeout',),
|
||||
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
|
||||
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
|
||||
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
|
||||
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error', 'canceled_on'),
|
||||
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
|
||||
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
|
||||
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
|
||||
@@ -139,7 +132,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'insights_credential': DEFAULT_SUMMARY_FIELDS,
|
||||
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'webhook_credential': DEFAULT_SUMMARY_FIELDS,
|
||||
'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'),
|
||||
'credential_type': DEFAULT_SUMMARY_FIELDS,
|
||||
}
|
||||
@@ -719,7 +712,7 @@ class UnifiedJobSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = UnifiedJob
|
||||
fields = ('*', 'unified_job_template', 'launch_type', 'status',
|
||||
'failed', 'started', 'finished', 'elapsed', 'job_args',
|
||||
'failed', 'started', 'finished', 'canceled_on', 'elapsed', 'job_args',
|
||||
'job_cwd', 'job_env', 'job_explanation',
|
||||
'execution_node', 'controller_node',
|
||||
'result_traceback', 'event_processing_finished')
|
||||
@@ -1549,20 +1542,15 @@ class InventorySerializer(BaseSerializerWithVariables):
|
||||
'admin', 'adhoc',
|
||||
{'copy': 'organization.inventory_admin'}
|
||||
]
|
||||
groups_with_active_failures = serializers.IntegerField(
|
||||
read_only=True,
|
||||
min_value=0,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release')
|
||||
)
|
||||
|
||||
|
||||
class Meta:
|
||||
model = Inventory
|
||||
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
|
||||
'total_hosts', 'hosts_with_active_failures', 'total_groups',
|
||||
'groups_with_active_failures', 'has_inventory_sources',
|
||||
'total_inventory_sources', 'inventory_sources_with_failures',
|
||||
'insights_credential', 'pending_deletion',)
|
||||
'has_inventory_sources', 'total_inventory_sources',
|
||||
'inventory_sources_with_failures', 'insights_credential',
|
||||
'pending_deletion',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(InventorySerializer, self).get_related(obj)
|
||||
@@ -1644,6 +1632,9 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
show_capabilities = ['edit', 'delete']
|
||||
capabilities_prefetch = ['inventory.admin']
|
||||
|
||||
has_active_failures = serializers.SerializerMethodField()
|
||||
has_inventory_sources = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = Host
|
||||
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
|
||||
@@ -1757,6 +1748,14 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
ret['last_job_host_summary'] = None
|
||||
return ret
|
||||
|
||||
def get_has_active_failures(self, obj):
|
||||
return bool(
|
||||
obj.last_job_host_summary and obj.last_job_host_summary.failed
|
||||
)
|
||||
|
||||
def get_has_inventory_sources(self, obj):
|
||||
return obj.inventory_sources.exists()
|
||||
|
||||
|
||||
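With the pre-computed Host/Group counters removed, `has_active_failures` and `has_inventory_sources` on hosts become read-only values derived from related rows at serialization time, as shown above. A standalone sketch of that DRF pattern (the serializer below is illustrative, not AWX's actual class):

```
# Illustrative SerializerMethodField pattern: compute a boolean on read
# from a related row instead of storing a denormalized column.
from rest_framework import serializers

class HostSummarySerializer(serializers.Serializer):
    name = serializers.CharField()
    has_active_failures = serializers.SerializerMethodField()

    def get_has_active_failures(self, obj):
        summary = getattr(obj, 'last_job_host_summary', None)
        return bool(summary and summary.failed)
```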
class AnsibleFactsSerializer(BaseSerializer):
class Meta:

@@ -1769,17 +1768,10 @@ class AnsibleFactsSerializer(BaseSerializer):
class GroupSerializer(BaseSerializerWithVariables):
show_capabilities = ['copy', 'edit', 'delete']
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)

class Meta:
model = Group
fields = ('*', 'inventory', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources')
fields = ('*', 'inventory', 'variables')

def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)

@@ -2823,7 +2815,7 @@ class JobTemplateMixin(object):
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
optimized_qs = uj_qs.non_polymorphic()
return [{
'id': x.id, 'status': x.status, 'finished': x.finished,
'id': x.id, 'status': x.status, 'finished': x.finished, 'canceled_on': x.canceled_on,
# Make type consistent with API top-level key, for instance workflow_job
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
} for x in optimized_qs[:10]]

@@ -3685,7 +3677,7 @@ class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes', 'all_parents_must_converge',)

def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)

@@ -3724,8 +3716,8 @@ class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
class Meta:
model = WorkflowJobNode
fields = ('*', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'do_not_run',)
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'all_parents_must_converge', 'do_not_run',)

def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)

@@ -3833,7 +3825,7 @@ class JobEventSerializer(BaseSerializer):
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')

@@ -3842,13 +3834,9 @@ class JobEventSerializer(BaseSerializer):
res.update(dict(
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
))
if obj.parent_id:
res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
if obj.host_id:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
if obj.hosts.exists():
res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
return res

def get_summary_fields(self, obj):
@@ -81,7 +81,8 @@ from awx.main.utils import (
getattrd,
get_pk_from_dict,
schedule_task_manager,
ignore_inventory_computed_fields
ignore_inventory_computed_fields,
set_environ
)
from awx.main.utils.encryption import encrypt_value
from awx.main.utils.filters import SmartFilter

@@ -204,20 +205,15 @@ class DashboardView(APIView):
'failed': ec2_inventory_failed.count()}

user_groups = get_user_queryset(request.user, models.Group)
groups_job_failed = (
models.Group.objects.filter(hosts_with_active_failures__gt=0) | models.Group.objects.filter(groups_with_active_failures__gt=0)
).count()
groups_inventory_failed = models.Group.objects.filter(inventory_sources__last_job_failed=True).count()
data['groups'] = {'url': reverse('api:group_list', request=request),
'failures_url': reverse('api:group_list', request=request) + "?has_active_failures=True",
'total': user_groups.count(),
'job_failed': groups_job_failed,
'inventory_failed': groups_inventory_failed}

user_hosts = get_user_queryset(request.user, models.Host)
user_hosts_failed = user_hosts.filter(has_active_failures=True)
user_hosts_failed = user_hosts.filter(last_job_host_summary__failed=True)
data['hosts'] = {'url': reverse('api:host_list', request=request),
'failures_url': reverse('api:host_list', request=request) + "?has_active_failures=True",
'failures_url': reverse('api:host_list', request=request) + "?last_job_host_summary__failed=True",
'total': user_hosts.count(),
'failed': user_hosts_failed.count()}
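Since the dashboard can no longer read the dropped `has_active_failures` column, host failures are now derived from the last job host summary, as the hunk above shows. A short sketch of the equivalent query and the filter the new failures_url exposes (model import path assumed):

```
# Sketch: counting failed hosts via the related summary row.
from awx.main.models import Host  # import path assumed

failed_hosts = Host.objects.filter(last_job_host_summary__failed=True)
print(failed_hosts.count())

# The same lookup is what the dashboard's failures_url now points at:
#   /api/v2/hosts/?last_job_host_summary__failed=True
```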
@@ -1611,7 +1607,8 @@ class HostInsights(GenericAPIView):

def _call_insights_api(self, url, session, headers):
try:
res = session.get(url, headers=headers, timeout=120)
with set_environ(**settings.AWX_TASK_ENV):
res = session.get(url, headers=headers, timeout=120)
except requests.exceptions.SSLError:
raise BadGateway(_('SSLError while trying to connect to {}').format(url))
except requests.exceptions.Timeout:

@@ -2150,7 +2147,7 @@ class InventorySourceHostsList(HostRelatedSearchMixin, SubListDestroyAPIView):
host__inventory_sources=inv_source
).delete()
r = super(InventorySourceHostsList, self).perform_list_destroy(instance_list)
update_inventory_computed_fields.delay(inv_source.inventory_id, True)
update_inventory_computed_fields.delay(inv_source.inventory_id)
return r


@@ -2177,7 +2174,7 @@ class InventorySourceGroupsList(SubListDestroyAPIView):
group__inventory_sources=inv_source
).delete()
r = super(InventorySourceGroupsList, self).perform_list_destroy(instance_list)
update_inventory_computed_fields.delay(inv_source.inventory_id, True)
update_inventory_computed_fields.delay(inv_source.inventory_id)
return r


@@ -3268,7 +3265,7 @@ class WorkflowJobRelaunch(GenericAPIView):
jt = obj.job_template
if not jt:
raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.'))
elif not jt.inventory or min(jt.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
elif not obj.inventory or min(obj.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
raise ParseError(_('Cannot relaunch sliced workflow job after slice count has changed.'))
new_workflow_job = obj.create_relaunch_workflow_job()
new_workflow_job.signal_start()

@@ -3819,6 +3816,12 @@ class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
relationship = 'hosts'
name = _('Job Event Hosts List')

def get_queryset(self):
parent_event = self.get_parent_object()
self.check_parent_access(parent_event)
qs = self.request.user.get_queryset(self.model).filter(job_events_as_primary_host=parent_event)
return qs


class BaseJobEventsList(NoTruncateMixin, SubListAPIView):

@@ -3841,8 +3844,7 @@ class HostJobEventsList(BaseJobEventsList):
def get_queryset(self):
parent_obj = self.get_parent_object()
self.check_parent_access(parent_obj)
qs = self.request.user.get_queryset(self.model).filter(
Q(host=parent_obj) | Q(hosts=parent_obj)).distinct()
qs = self.request.user.get_queryset(self.model).filter(host=parent_obj)
return qs


@@ -3858,9 +3860,7 @@ class JobJobEventsList(BaseJobEventsList):
def get_queryset(self):
job = self.get_parent_object()
self.check_parent_access(job)
qs = job.job_events
qs = qs.select_related('host')
qs = qs.prefetch_related('hosts', 'children')
qs = job.job_events.select_related('host').order_by('start_line')
return qs.all()
@@ -20,6 +20,7 @@ from rest_framework import status
import requests

from awx.api.generics import APIView
from awx.conf.registry import settings_registry
from awx.main.ha import is_ha_environment
from awx.main.utils import (
get_awx_version,

@@ -37,6 +38,7 @@ from awx.main.models import (
InstanceGroup,
JobTemplate,
)
from awx.main.utils import set_environ

logger = logging.getLogger('awx.api.views.root')

@@ -190,7 +192,8 @@ class ApiV2SubscriptionView(APIView):
data['rh_password'] = settings.REDHAT_PASSWORD
try:
user, pw = data.get('rh_username'), data.get('rh_password')
validated = get_licenser().validate_rh(user, pw)
with set_environ(**settings.AWX_TASK_ENV):
validated = get_licenser().validate_rh(user, pw)
if user:
settings.REDHAT_USERNAME = data['rh_username']
if pw:

@@ -202,10 +205,15 @@ class ApiV2SubscriptionView(APIView):
getattr(getattr(exc, 'response', None), 'status_code', None) == 401
):
msg = _("The provided credentials are invalid (HTTP 401).")
if isinstance(exc, (ValueError, OSError)) and exc.args:
elif isinstance(exc, requests.exceptions.ProxyError):
msg = _("Unable to connect to proxy server.")
elif isinstance(exc, requests.exceptions.ConnectionError):
msg = _("Could not connect to subscription service.")
elif isinstance(exc, (ValueError, OSError)) and exc.args:
msg = exc.args[0]
logger.exception(smart_text(u"Invalid license submitted."),
extra=dict(actor=request.user.username))
else:
logger.exception(smart_text(u"Invalid license submitted."),
extra=dict(actor=request.user.username))
return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)

return Response(validated)

@@ -302,7 +310,8 @@ class ApiV2ConfigView(APIView):
# If the license is valid, write it to the database.
if license_data_validated['valid_key']:
settings.LICENSE = license_data
settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
if not settings_registry.is_setting_read_only('TOWER_URL_BASE'):
settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
return Response(license_data_validated)

logger.warning(smart_text(u"Invalid license submitted."),
@@ -11,7 +11,7 @@ from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.fields import ( # noqa
BooleanField, CharField, ChoiceField, DictField, EmailField,
BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField,
IntegerField, ListField, NullBooleanField
)
@@ -1,14 +1,11 @@
# Python
from collections import namedtuple
import contextlib
import logging
import re
import sys
import threading
import time
import traceback
import urllib.parse
from io import StringIO

# Django
from django.conf import LazySettings

@@ -28,8 +25,6 @@ from awx.conf import settings_registry
from awx.conf.models import Setting
from awx.conf.migrations._reencrypt import decrypt_field as old_decrypt_field

import cachetools

# FIXME: Gracefully handle when settings are accessed before the database is
# ready (or during migrations).

@@ -91,42 +86,11 @@ def _ctit_db_wrapper(trans_safe=False):
transaction.set_rollback(False)
yield
except DBError:
# We want the _full_ traceback with the context
# First we get the current call stack, which constitutes the "top",
# it has the context up to the point where the context manager is used
top_stack = StringIO()
traceback.print_stack(file=top_stack)
top_lines = top_stack.getvalue().strip('\n').split('\n')
top_stack.close()
# Get "bottom" stack from the local error that happened
# inside of the "with" block this wraps
exc_type, exc_value, exc_traceback = sys.exc_info()
bottom_stack = StringIO()
traceback.print_tb(exc_traceback, file=bottom_stack)
bottom_lines = bottom_stack.getvalue().strip('\n').split('\n')
# Glue together top and bottom where overlap is found
bottom_cutoff = 0
for i, line in enumerate(bottom_lines):
if line in top_lines:
# start of overlapping section, take overlap from bottom
top_lines = top_lines[:top_lines.index(line)]
bottom_cutoff = i
break
bottom_lines = bottom_lines[bottom_cutoff:]
tb_lines = top_lines + bottom_lines

tb_string = '\n'.join(
['Traceback (most recent call last):'] +
tb_lines +
['{}: {}'.format(exc_type.__name__, str(exc_value))]
)
bottom_stack.close()
# Log the combined stack
if trans_safe:
if 'check_migrations' not in sys.argv:
logger.debug('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
if 'migrate' not in sys.argv and 'check_migrations' not in sys.argv:
logger.exception('Database settings are not available, using defaults.')
else:
logger.debug('Error modifying something related to database settings.\n{}'.format(tb_string))
logger.exception('Error modifying something related to database settings.')
finally:
if trans_safe and is_atomic and rollback_set:
transaction.set_rollback(rollback_set)

@@ -138,12 +102,13 @@ def filter_sensitive(registry, key, value):
return value


# settings.__getattr__ is called *constantly*, and the LOG_AGGREGATOR_ ones are
# so ubiquitous when external logging is enabled that they should kept in memory
# with a short TTL to avoid even having to contact memcached
# the primary use case for this optimization is the callback receiver
# when external logging is enabled
LOGGING_SETTINGS_CACHE = cachetools.TTLCache(maxsize=50, ttl=1)
class TransientSetting(object):

__slots__ = ('pk', 'value')

def __init__(self, pk, value):
self.pk = pk
self.value = value


class EncryptedCacheProxy(object):

@@ -173,7 +138,6 @@ class EncryptedCacheProxy(object):
def get(self, key, **kwargs):
value = self.cache.get(key, **kwargs)
value = self._handle_encryption(self.decrypter, key, value)
logger.debug('cache get(%r, %r) -> %r', key, empty, filter_sensitive(self.registry, key, value))
return value

def set(self, key, value, log=True, **kwargs):

@@ -196,8 +160,6 @@ class EncryptedCacheProxy(object):
self.set(key, value, log=False, **kwargs)

def _handle_encryption(self, method, key, value):
TransientSetting = namedtuple('TransientSetting', ['pk', 'value'])

if value is not empty and self.registry.is_setting_encrypted(key):
# If the setting exists in the database, we'll use its primary key
# as part of the AES key when encrypting/decrypting

@@ -447,17 +409,11 @@ class SettingsWrapper(UserSettingsHolder):
return self._get_default('SETTINGS_MODULE')

def __getattr__(self, name):
if name.startswith('LOG_AGGREGATOR_'):
cached = LOGGING_SETTINGS_CACHE.get(name)
if cached:
return cached
value = empty
if name in self.all_supported_settings:
with _ctit_db_wrapper(trans_safe=True):
value = self._get_local(name)
if value is not empty:
if name.startswith('LOG_AGGREGATOR_'):
LOGGING_SETTINGS_CACHE[name] = value
return value
value = self._get_default(name)
# sometimes users specify RabbitMQ passwords that contain
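The settings-wrapper change above short-circuits `__getattr__` for `LOG_AGGREGATOR_*` names through a one-second `cachetools.TTLCache`, trading at most one second of staleness for far fewer cache/database round-trips in the callback receiver. A standalone sketch of the same pattern:

```
# Standalone sketch of the short-TTL cache pattern used for LOG_AGGREGATOR_*
# settings lookups; expensive_lookup stands in for the real settings read.
import cachetools

LOGGING_SETTINGS_CACHE = cachetools.TTLCache(maxsize=50, ttl=1)

def get_setting(name, expensive_lookup):
    """Return a cached value for up to one second before re-reading it."""
    if name.startswith('LOG_AGGREGATOR_'):
        cached = LOGGING_SETTINGS_CACHE.get(name)
        if cached is not None:
            return cached
    value = expensive_lookup(name)
    if name.startswith('LOG_AGGREGATOR_'):
        LOGGING_SETTINGS_CACHE[name] = value
    return value
```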
@@ -307,7 +307,7 @@ class BaseAccess(object):

return True # User has access to both, permission check passed

def check_license(self, add_host_name=None, feature=None, check_expiration=True):
def check_license(self, add_host_name=None, feature=None, check_expiration=True, quiet=False):
validation_info = get_licenser().validate()
if validation_info.get('license_type', 'UNLICENSED') == 'open':
return

@@ -317,8 +317,10 @@ class BaseAccess(object):
validation_info['time_remaining'] = 99999999
validation_info['grace_period_remaining'] = 99999999

report_violation = lambda message: logger.error(message)

if quiet:
report_violation = lambda message: None
else:
report_violation = lambda message: logger.warning(message)
if (
validation_info.get('trial', False) is True or
validation_info['instance_count'] == 10 # basic 10 license
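`check_license()` gains a `quiet` flag and downgrades violation reports from `logger.error` to `logger.warning`; callers that probe the license on a hot path can pass `quiet=True` to suppress the log noise entirely. A hedged usage sketch — the call site below is illustrative, not a specific AWX caller:

```
# Illustrative call site: validate the license without logging violations.
from awx.main.access import JobTemplateAccess  # import path assumed

access = JobTemplateAccess(some_user)          # some_user is a placeholder
access.check_license(quiet=True)               # no warning/error log output
```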
@@ -907,7 +909,7 @@ class HostAccess(BaseAccess):
model = Host
select_related = ('created_by', 'modified_by', 'inventory',
'last_job__job_template', 'last_job_host_summary__job',)
prefetch_related = ('groups',)
prefetch_related = ('groups', 'inventory_sources')

def filtered_queryset(self):
return self.model.objects.filter(inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role'))

@@ -2238,7 +2240,7 @@ class JobEventAccess(BaseAccess):
'''

model = JobEvent
prefetch_related = ('hosts', 'job__job_template', 'host',)
prefetch_related = ('job__job_template', 'host',)

def filtered_queryset(self):
return self.model.objects.filter(
@@ -15,7 +15,7 @@ from awx.conf.license import get_license
from awx.main.models import Job
from awx.main.access import access_registry
from awx.main.models.ha import TowerAnalyticsState
from awx.main.utils import get_awx_http_client_headers
from awx.main.utils import get_awx_http_client_headers, set_environ


__all__ = ['register', 'gather', 'ship', 'table_version']

@@ -169,12 +169,13 @@ def ship(path):
s = requests.Session()
s.headers = get_awx_http_client_headers()
s.headers.pop('Content-Type')
response = s.post(url,
files=files,
verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
auth=(rh_user, rh_password),
headers=s.headers,
timeout=(31, 31))
with set_environ(**settings.AWX_TASK_ENV):
response = s.post(url,
files=files,
verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
auth=(rh_user, rh_password),
headers=s.headers,
timeout=(31, 31))
if response.status_code != 202:
return logger.exception('Upload failed with status {}, {}'.format(response.status_code,
response.text))
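Analytics shipping, the subscription check, and the Insights call are all now wrapped in `set_environ(**settings.AWX_TASK_ENV)`, so proxy-related environment variables apply to these outbound HTTP requests. A minimal sketch of what a set_environ-style context manager could look like — the real implementation lives in awx.main.utils and may differ:

```
# Hedged sketch: temporarily apply environment variables (e.g. HTTPS_PROXY
# from AWX_TASK_ENV) for the duration of a block, then restore the originals.
import os
from contextlib import contextmanager

@contextmanager
def set_environ(**environ):
    original = {k: os.environ.get(k) for k in environ}
    os.environ.update({k: str(v) for k, v in environ.items()})
    try:
        yield
    finally:
        for k, v in original.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v
```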
@@ -616,6 +616,18 @@ register(
category_slug='jobs',
)

register(
'MAX_FORKS',
field_class=fields.IntegerField,
allow_null=False,
default=200,
label=_('Maximum number of forks per job.'),
help_text=_('Saving a Job Template with more than this number of forks will result in an error. '
'When set to 0, no limit is applied.'),
category=_('Jobs'),
category_slug='jobs',
)
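With `MAX_FORKS` registered (default 200, 0 meaning no limit), saving a job template with a larger fork count is rejected. The enforcement code is not part of this diff, so the following is only an illustrative sketch of the check the setting's help text describes:

```
# Illustrative sketch of a MAX_FORKS check; the actual serializer-level
# validation is not shown in this diff.
from django.conf import settings
from rest_framework import serializers

def validate_forks(forks):
    limit = getattr(settings, 'MAX_FORKS', 200)
    if limit and forks > limit:
        raise serializers.ValidationError(
            f'Job Template forks ({forks}) exceeds the maximum of {limit}.'
        )
    return forks
```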
register(
'LOG_AGGREGATOR_HOST',
field_class=fields.CharField,

@@ -787,6 +799,28 @@ register(
)


register(
'AUTOMATION_ANALYTICS_LAST_GATHER',
field_class=fields.DateTimeField,
label=_('Last gather date for Automation Analytics.'),
allow_null=True,
category=_('System'),
category_slug='system'
)


register(
'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
field_class=fields.IntegerField,
label=_('Automation Analytics Gather Interval'),
help_text=_('Interval (in seconds) between data gathering.'),
default=14400, # every 4 hours
min_value=1800, # every 30 minutes
category=_('System'),
category_slug='system'
)


def logging_validate(serializer, attrs):
if not serializer.instance or \
not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or \

@@ -811,10 +845,7 @@ def galaxy_validate(serializer, attrs):
to save settings which obviously break all project updates.
"""
prefix = 'PRIMARY_GALAXY_'

from awx.main.constants import GALAXY_SERVER_FIELDS
if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
return attrs
errors = {}

def _new_value(setting_name):
if setting_name in attrs:

@@ -823,10 +854,22 @@ def galaxy_validate(serializer, attrs):
return ''
return getattr(serializer.instance, setting_name, '')

if not _new_value('PRIMARY_GALAXY_URL'):
if _new_value('PUBLIC_GALAXY_ENABLED') is False:
msg = _('A URL for Primary Galaxy must be defined before disabling public Galaxy.')
# put error in both keys because UI has trouble with errors in toggles
for key in ('PRIMARY_GALAXY_URL', 'PUBLIC_GALAXY_ENABLED'):
errors.setdefault(key, [])
errors[key].append(msg)
raise serializers.ValidationError(errors)

from awx.main.constants import GALAXY_SERVER_FIELDS
if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
return attrs

galaxy_data = {}
for subfield in GALAXY_SERVER_FIELDS:
galaxy_data[subfield] = _new_value('{}{}'.format(prefix, subfield.upper()))
errors = {}
if not galaxy_data['url']:
for k, v in galaxy_data.items():
if v:
@@ -3,6 +3,16 @@ from .plugin import CredentialPlugin
from django.utils.translation import ugettext_lazy as _
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
from azure.common.credentials import ServicePrincipalCredentials
from msrestazure import azure_cloud


# https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py
clouds = [
vars(azure_cloud)[n]
for n in dir(azure_cloud)
if n.startswith("AZURE_") and n.endswith("_CLOUD")
]
default_cloud = vars(azure_cloud)["AZURE_PUBLIC_CLOUD"]


azure_keyvault_inputs = {

@@ -24,6 +34,12 @@ azure_keyvault_inputs = {
'id': 'tenant',
'label': _('Tenant ID'),
'type': 'string'
}, {
'id': 'cloud_name',
'label': _('Cloud Environment'),
'help_text': _('Specify which azure cloud environment to use.'),
'choices': list(set([default_cloud.name] + [c.name for c in clouds])),
'default': default_cloud.name
}],
'metadata': [{
'id': 'secret_field',

@@ -42,6 +58,7 @@ azure_keyvault_inputs = {

def azure_keyvault_backend(**kwargs):
url = kwargs['url']
[cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)]

def auth_callback(server, resource, scope):
credentials = ServicePrincipalCredentials(

@@ -49,7 +66,7 @@ def azure_keyvault_backend(**kwargs):
client_id = kwargs['client'],
secret = kwargs['secret'],
tenant = kwargs['tenant'],
resource = "https://vault.azure.net",
resource = f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}",
)
token = credentials.token
return token['token_type'], token['access_token']
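The credential plugin now enumerates the cloud environments shipped with msrestazure and derives the Key Vault AAD resource from the selected cloud's DNS suffix instead of hard-coding the public-cloud URL. A short sketch of that lookup; the cloud name and suffix values depend on the installed msrestazure and are shown here as examples:

```
# Sketch: pick a cloud environment by name and derive the Key Vault resource,
# mirroring the lookup above (suffix values depend on msrestazure).
from msrestazure import azure_cloud

clouds = [vars(azure_cloud)[n] for n in dir(azure_cloud)
          if n.startswith("AZURE_") and n.endswith("_CLOUD")]

[cloud] = [c for c in clouds if c.name == 'AzureUSGovernment']  # example name
# keyvault_dns is e.g. '.vault.usgovcloudapi.net'; drop the leading dot label
resource = f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}"
print(resource)
```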
awx/main/dispatch/periodic.py (new file, 52 lines)
@@ -0,0 +1,52 @@
import logging
import threading
import time

from django.conf import settings
from django.db import connections
from schedule import Scheduler

from awx.main.dispatch.worker import TaskWorker

logger = logging.getLogger('awx.main.dispatch.periodic')


class Scheduler(Scheduler):

def run_continuously(self):
cease_continuous_run = threading.Event()
idle_seconds = max(
1,
min(self.jobs).period.total_seconds() / 2
)

class ScheduleThread(threading.Thread):
@classmethod
def run(cls):
while not cease_continuous_run.is_set():
try:
for conn in connections.all():
# If the database connection has a hiccup, re-establish a new
# connection
conn.close_if_unusable_or_obsolete()
self.run_pending()
except Exception:
logger.exception(
'encountered an error while scheduling periodic tasks'
)
time.sleep(idle_seconds)
logger.debug('periodic thread exiting...')

thread = ScheduleThread()
thread.daemon = True
thread.start()
return cease_continuous_run


def run_continuously():
scheduler = Scheduler()
for task in settings.CELERYBEAT_SCHEDULE.values():
apply_async = TaskWorker.resolve_callable(task['task']).apply_async
total_seconds = task['schedule'].total_seconds()
scheduler.every(total_seconds).seconds.do(apply_async)
return scheduler.run_continuously()
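The new module registers every entry of `CELERYBEAT_SCHEDULE` with the `schedule` library and runs them on a daemon thread, returning a `threading.Event` that stops the loop when set. A hedged sketch of the settings shape it consumes and how it is started; the task path and interval below are examples, not AWX's real schedule:

```
# Illustrative CELERYBEAT_SCHEDULE entry shape consumed by run_continuously().
from datetime import timedelta

CELERYBEAT_SCHEDULE = {
    'cleanup': {
        'task': 'awx.main.tasks.purge_old_stdout_files',  # assumed task path
        'schedule': timedelta(seconds=3600),
    },
}

# Starting the loop returns a threading.Event; setting it stops the thread:
#   from awx.main.dispatch import periodic
#   stop_event = periodic.run_continuously()
#   ...
#   stop_event.set()
```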
@@ -72,9 +72,6 @@ class PoolWorker(object):
if not body.get('uuid'):
body['uuid'] = str(uuid4())
uuid = body['uuid']
logger.debug('delivered {} to worker[{}] qsize {}'.format(
uuid, self.pid, self.qsize
))
self.managed_tasks[uuid] = body
self.queue.put(body, block=True, timeout=5)
self.messages_sent += 1

@@ -61,7 +61,7 @@ class AWXConsumer(ConsumerMixin):
])

def control(self, body, message):
logger.warn(body)
logger.warn('Consumer received control message {}'.format(body))
control = body.get('control')
if control in ('status', 'running'):
producer = Producer(

@@ -148,7 +148,6 @@ class BaseWorker(object):
finally:
if 'uuid' in body:
uuid = body['uuid']
logger.debug('task {} is finished'.format(uuid))
finished.put(uuid)
logger.warn('worker exiting gracefully pid:{}'.format(os.getpid()))
@@ -1,10 +1,15 @@
import cProfile
import logging
import os
import pstats
import signal
import tempfile
import time
import traceback
from queue import Empty as QueueEmpty

from django.utils.timezone import now as tz_now
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError, IntegrityError

@@ -32,6 +37,7 @@ class CallbackBrokerWorker(BaseWorker):
'''

MAX_RETRIES = 2
prof = None

def __init__(self):
self.buff = {}

@@ -42,6 +48,26 @@ class CallbackBrokerWorker(BaseWorker):
except QueueEmpty:
return {'event': 'FLUSH'}

def toggle_profiling(self, *args):
if self.prof:
self.prof.disable()
filename = f'callback-{os.getpid()}.pstats'
filepath = os.path.join(tempfile.gettempdir(), filename)
with open(filepath, 'w') as f:
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
pstats.Stats(self.prof).dump_stats(filepath + '.raw')
self.prof = False
logger.error(f'profiling is disabled, wrote {filepath}')
else:
self.prof = cProfile.Profile()
self.prof.enable()
logger.error('profiling is enabled')

def work_loop(self, *args, **kw):
if settings.AWX_CALLBACK_PROFILE:
signal.signal(signal.SIGUSR1, self.toggle_profiling)
return super(CallbackBrokerWorker, self).work_loop(*args, **kw)

def flush(self, force=False):
now = tz_now()
if (
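With `AWX_CALLBACK_PROFILE` enabled, sending `SIGUSR1` to a callback-receiver worker toggles cProfile on and off; the second signal writes a text report plus a raw pstats dump into the system temp directory. A small sketch of toggling and reading the dump — the PID and temp path below are examples, and the file name follows the callback-<pid>.pstats pattern used above:

```
# Example: toggle profiling on a running callback worker, then read the dump.
import os
import pstats
import signal

worker_pid = 12345                      # example PID of a callback worker
os.kill(worker_pid, signal.SIGUSR1)     # first signal: start profiling
# ... let it process events for a while, then:
os.kill(worker_pid, signal.SIGUSR1)     # second signal: stop and write files

stats = pstats.Stats(f'/tmp/callback-{worker_pid}.pstats.raw')  # example path
stats.sort_stats('cumulative').print_stats(20)
```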
@@ -370,33 +370,32 @@ class IsolatedManager(object):
private_data_dir
)

if runner_obj.status == 'successful':
for instance in instance_qs:
task_result = {}
try:
task_result = runner_obj.get_fact_cache(instance.hostname)
except Exception:
logger.exception('Failed to read status from isolated instances')
if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
task_result = {
'cpu': task_result['awx_cpu'],
'mem': task_result['awx_mem'],
'capacity_cpu': task_result['awx_capacity_cpu'],
'capacity_mem': task_result['awx_capacity_mem'],
'version': task_result['awx_capacity_version']
}
IsolatedManager.update_capacity(instance, task_result)
logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
elif instance.capacity == 0:
logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
instance.hostname))
else:
logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
if instance.is_lost(isolated=True):
instance.capacity = 0
instance.save(update_fields=['capacity'])
logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
instance.hostname, instance.modified))
for instance in instance_qs:
task_result = {}
try:
task_result = runner_obj.get_fact_cache(instance.hostname)
except Exception:
logger.exception('Failed to read status from isolated instances')
if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
task_result = {
'cpu': task_result['awx_cpu'],
'mem': task_result['awx_mem'],
'capacity_cpu': task_result['awx_capacity_cpu'],
'capacity_mem': task_result['awx_capacity_mem'],
'version': task_result['awx_capacity_version']
}
IsolatedManager.update_capacity(instance, task_result)
logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
elif instance.capacity == 0:
logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
instance.hostname))
else:
logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
if instance.is_lost(isolated=True):
instance.capacity = 0
instance.save(update_fields=['capacity'])
logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
instance.hostname, instance.modified))
finally:
if os.path.exists(private_data_dir):
shutil.rmtree(private_data_dir)
@@ -9,6 +9,13 @@ class Command(BaseCommand):

    def handle(self, *args, **options):
        with connection.cursor() as cursor:
            start = {}
            for relation in (
                'main_jobevent', 'main_inventoryupdateevent',
                'main_projectupdateevent', 'main_adhoccommandevent'
            ):
                cursor.execute(f"SELECT MAX(id) FROM {relation};")
                start[relation] = cursor.fetchone()[0] or 0
            clear = False
            while True:
                lines = []
@@ -17,19 +24,15 @@ class Command(BaseCommand):
                    'main_projectupdateevent', 'main_adhoccommandevent'
                ):
                    lines.append(relation)
                    for label, interval in (
                        ('last minute: ', '1 minute'),
                        ('last 5 minutes:', '5 minutes'),
                        ('last hour: ', '1 hour'),
                    ):
                        cursor.execute(
                            f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE modified > now() - '{interval}'::interval;"
                        )
                        events = cursor.fetchone()[0] or 0
                        lines.append(f'↳ {label} {events}')
                    minimum = start[relation]
                    cursor.execute(
                        f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE id > {minimum} AND modified > now() - '1 minute'::interval;"
                    )
                    events = cursor.fetchone()[0] or 0
                    lines.append(f'↳ last minute {events}')
                lines.append('')
                if clear:
                    for i in range(20):
                    for i in range(12):
                        sys.stdout.write('\x1b[1A\x1b[2K')
                for l in lines:
                    print(l)
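The loop above estimates event throughput from the id sequence rather than counting rows: MAX(id) - MIN(id) over rows modified in the window approximates how many events landed in that window, and the id > {minimum} guard keeps rows that existed before the command started out of the delta. A standalone sketch of the same query, written against any DB-API cursor (the cursor and table name are assumptions for illustration):

def events_last_minute(cursor, relation, baseline):
    # baseline is the MAX(id) captured when monitoring started; restricting the
    # delta to id > baseline keeps pre-existing rows out of the estimate.
    cursor.execute(
        f"SELECT MAX(id) - MIN(id) FROM {relation} "
        f"WHERE id > %s AND modified > now() - '1 minute'::interval;",
        (baseline,),
    )
    return cursor.fetchone()[0] or 0

# Usage sketch against the AWX database:
#     cursor.execute("SELECT MAX(id) FROM main_jobevent;")
#     baseline = cursor.fetchone()[0] or 0
#     print(events_last_minute(cursor, 'main_jobevent', baseline))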
@@ -16,13 +16,10 @@ from awx.main.models import (
    Job, AdHocCommand, ProjectUpdate, InventoryUpdate,
    SystemJob, WorkflowJob, Notification
)
from awx.main.signals import ( # noqa
    emit_update_inventory_on_created_or_deleted,
    emit_update_inventory_computed_fields,
from awx.main.signals import (
    disable_activity_stream,
    disable_computed_fields
)
from django.db.models.signals import post_save, post_delete, m2m_changed # noqa


class Command(BaseCommand):
@@ -921,11 +921,14 @@ class Command(BaseCommand):
        available_instances = license_info.get('available_instances', 0)
        free_instances = license_info.get('free_instances', 0)
        time_remaining = license_info.get('time_remaining', 0)
        hard_error = license_info.get('trial', False) is True or license_info['instance_count'] == 10
        new_count = Host.objects.active_count()
        if time_remaining <= 0 and not license_info.get('demo', False):
            logger.error(LICENSE_EXPIRED_MESSAGE)
            if license_info.get('trial', False) is True:
        if time_remaining <= 0:
            if hard_error:
                logger.error(LICENSE_EXPIRED_MESSAGE)
                raise CommandError("License has expired!")
            else:
                logger.warning(LICENSE_EXPIRED_MESSAGE)
        # special check for tower-type inventory sources
        # but only if running the plugin
        TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']
@@ -938,15 +941,11 @@ class Command(BaseCommand):
            'new_count': new_count,
            'available_instances': available_instances,
        }
        if license_info.get('demo', False):
            logger.error(DEMO_LICENSE_MESSAGE % d)
        else:
            if hard_error:
                logger.error(LICENSE_MESSAGE % d)
                if (
                    license_info.get('trial', False) is True or
                    license_info['instance_count'] == 10 # basic 10 license
                ):
                    raise CommandError('License count exceeded!')
            else:
                logger.warning(LICENSE_MESSAGE % d)

    def check_org_host_limit(self):
        license_info = get_licenser().validate()
@@ -16,6 +16,7 @@ from awx.main.dispatch.control import Control
from awx.main.dispatch.kombu import Connection
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
from awx.main.dispatch import periodic

logger = logging.getLogger('awx.main.dispatch')

@@ -36,71 +37,6 @@ class Command(BaseCommand):
                            help=('cause the dispatcher to recycle all of its worker processes;'
                                  'running jobs will run to completion first'))

    def beat(self):
        from celery import Celery
        from celery.beat import PersistentScheduler
        from celery.apps import beat

        class AWXScheduler(PersistentScheduler):

            def __init__(self, *args, **kwargs):
                self.ppid = os.getppid()
                super(AWXScheduler, self).__init__(*args, **kwargs)

            def setup_schedule(self):
                super(AWXScheduler, self).setup_schedule()
                self.update_from_dict(settings.CELERYBEAT_SCHEDULE)

            def tick(self, *args, **kwargs):
                if os.getppid() != self.ppid:
                    # if the parent PID changes, this process has been orphaned
                    # via e.g., segfault or sigkill, we should exit too
                    raise SystemExit()
                return super(AWXScheduler, self).tick(*args, **kwargs)

            def apply_async(self, entry, producer=None, advance=True, **kwargs):
                for conn in connections.all():
                    # If the database connection has a hiccup, re-establish a new
                    # connection
                    conn.close_if_unusable_or_obsolete()
                task = TaskWorker.resolve_callable(entry.task)
                result, queue = task.apply_async()

                class TaskResult(object):
                    id = result['uuid']

                return TaskResult()

        sched_file = '/var/lib/awx/beat.db'
        app = Celery()
        app.conf.BROKER_URL = settings.BROKER_URL
        app.conf.CELERY_TASK_RESULT_EXPIRES = False

        # celery in py3 seems to have a bug where the celerybeat schedule
        # shelve can become corrupted; we've _only_ seen this in Ubuntu and py36
        # it can be avoided by detecting and removing the corrupted file
        # at some point, we'll just stop using celerybeat, because it's clearly
        # buggy, too -_-
        #
        # https://github.com/celery/celery/issues/4777
        sched = AWXScheduler(schedule_filename=sched_file, app=app)
        try:
            sched.setup_schedule()
        except Exception:
            logger.exception('{} is corrupted, removing.'.format(sched_file))
            sched._remove_db()
        finally:
            try:
                sched.close()
            except Exception:
                logger.exception('{} failed to sync/close'.format(sched_file))

        beat.Beat(
            30,
            app,
            schedule=sched_file, scheduler_cls=AWXScheduler
        ).run()

    def handle(self, *arg, **options):
        if options.get('status'):
            print(Control('dispatcher').status())
@@ -116,9 +52,10 @@ class Command(BaseCommand):
        # for the DB and memcached connections (that way lies race conditions)
        django_connection.close()
        django_cache.close()
        beat = Process(target=self.beat)
        beat.daemon = True
        beat.start()

        # spawn a daemon thread to periodically enqueues scheduled tasks
        # (like the node heartbeat)
        cease_continuous_run = periodic.run_continuously()

        reaper.reap()
        consumer = None
@@ -152,6 +89,7 @@ class Command(BaseCommand):
            )
            consumer.run()
        except KeyboardInterrupt:
            cease_continuous_run.set()
            logger.debug('Terminating Task Dispatcher')
            if consumer:
                consumer.stop()
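The dispatcher hunks above replace the forked celery beat process with periodic.run_continuously(), a daemon thread that enqueues scheduled tasks and is stopped via the returned event on shutdown. The periodic module itself is not part of this diff, so the following is only a minimal sketch of that general pattern; enqueue_due_tasks and the 30-second interval are assumptions for illustration.

import threading

def run_continuously(interval=30):
    # Returns an Event; setting it stops the background scheduler thread,
    # mirroring how the dispatcher calls cease_continuous_run.set() on exit.
    cease = threading.Event()

    def enqueue_due_tasks():
        pass  # placeholder for publishing periodic work such as the node heartbeat

    def loop():
        while not cease.is_set():
            enqueue_due_tasks()
            cease.wait(interval)  # sleep, but wake immediately when stopped

    thread = threading.Thread(target=loop, daemon=True)
    thread.start()
    return cease

stop_event = run_continuously()
# ... main process runs the consumer ...
stop_event.set()  # shut the scheduler thread down cleanly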
@@ -3,15 +3,17 @@ from uuid import uuid4

from django.db import migrations

from awx.main.models import Instance


def _generate_new_uuid_for_iso_nodes(apps, schema_editor):
    Instance = apps.get_model('main', 'Instance')
    for instance in Instance.objects.all():
        if instance.is_isolated():
        # The below code is a copy paste of instance.is_isolated()
        # We can't call is_isolated because we are using the "old" version
        # of the Instance definition.
        if instance.rampart_groups.filter(controller__isnull=False).exists():
            instance.uuid = str(uuid4())
            instance.save()



class Migration(migrations.Migration):
awx/main/migrations/0102_v370_unifiedjob_canceled.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 2.2.4 on 2019-11-25 20:53

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0101_v370_generate_new_uuids_for_iso_nodes'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='canceled_on',
            field=models.DateTimeField(db_index=True, default=None, editable=False, help_text='The date and time when the cancel request was sent.', null=True),
        ),
    ]

awx/main/migrations/0103_v370_remove_computed_fields.py (new file, 52 lines)
@@ -0,0 +1,52 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-21 17:35
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0102_v370_unifiedjob_canceled'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='group',
            name='groups_with_active_failures',
        ),
        migrations.RemoveField(
            model_name='group',
            name='has_active_failures',
        ),
        migrations.RemoveField(
            model_name='group',
            name='has_inventory_sources',
        ),
        migrations.RemoveField(
            model_name='group',
            name='hosts_with_active_failures',
        ),
        migrations.RemoveField(
            model_name='group',
            name='total_groups',
        ),
        migrations.RemoveField(
            model_name='group',
            name='total_hosts',
        ),
        migrations.RemoveField(
            model_name='host',
            name='has_active_failures',
        ),
        migrations.RemoveField(
            model_name='host',
            name='has_inventory_sources',
        ),
        migrations.AlterField(
            model_name='jobhostsummary',
            name='failed',
            field=models.BooleanField(db_index=True, default=False, editable=False),
        ),
    ]

awx/main/migrations/0104_v370_cleanup_old_scan_jts.py (new file, 24 lines)
@@ -0,0 +1,24 @@
# Generated by Django 2.2.8 on 2020-01-15 20:01

from django.db import migrations, models


def cleanup_scan_jts(apps, schema_editor):
    JobTemplate = apps.get_model('main', 'JobTemplate')
    JobTemplate.objects.filter(job_type='scan').update(job_type='run')


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0103_v370_remove_computed_fields'),
    ]

    operations = [
        migrations.RunPython(cleanup_scan_jts),
        migrations.AlterField(
            model_name='jobtemplate',
            name='job_type',
            field=models.CharField(choices=[('run', 'Run'), ('check', 'Check')], default='run', max_length=64),
        ),
    ]

@@ -0,0 +1,21 @@
# Generated by Django 2.2.8 on 2020-01-15 18:01

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0104_v370_cleanup_old_scan_jts'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='jobevent',
            name='parent',
        ),
        migrations.RemoveField(
            model_name='jobevent',
            name='hosts',
        ),
    ]

@@ -0,0 +1,17 @@
# Generated by Django 2.2.8 on 2020-01-27 12:39

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0105_v370_remove_jobevent_parent_and_hosts'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='inventory',
            name='groups_with_active_failures',
        ),
    ]

@@ -0,0 +1,23 @@
# Generated by Django 2.2.4 on 2020-01-08 22:11

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0106_v370_remove_inventory_groups_with_active_failures'),
    ]

    operations = [
        migrations.AddField(
            model_name='workflowjobnode',
            name='all_parents_must_converge',
            field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='all_parents_must_converge',
            field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
        ),
    ]

@@ -0,0 +1,18 @@
# Generated by Django 2.2.8 on 2020-02-06 16:43

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0107_v370_workflow_convergence_api_toggle'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='dependencies_processed',
            field=models.BooleanField(default=False, editable=False, help_text='If True, the task manager has already processed potential dependencies for this job.'),
        ),
    ]
@@ -1136,7 +1136,7 @@ ManagedCredentialType(
        'help_text': ugettext_noop('The OpenShift or Kubernetes API Endpoint to authenticate with.')
    },{
        'id': 'bearer_token',
        'label': ugettext_noop('API authentication bearer token.'),
        'label': ugettext_noop('API authentication bearer token'),
        'type': 'string',
        'secret': True,
    },{

@@ -360,11 +360,10 @@ class BasePlaybookEvent(CreatedModifiedModel):
                value = force_text(event_data.get(field, '')).strip()
                if value != getattr(self, field):
                    setattr(self, field, value)
        if isinstance(self, JobEvent):
            analytics_logger.info(
                'Event data saved.',
                extra=dict(python_objects=dict(job_event=self))
            )
        analytics_logger.info(
            'Event data saved.',
            extra=dict(python_objects=dict(job_event=self))
        )

    @classmethod
    def create_from_data(cls, **kwargs):
@@ -450,19 +449,6 @@ class JobEvent(BasePlaybookEvent):
        default='',
        editable=False,
    )
    hosts = models.ManyToManyField(
        'Host',
        related_name='job_events',
        editable=False,
    )
    parent = models.ForeignKey(
        'self',
        related_name='children',
        null=True,
        default=None,
        on_delete=models.SET_NULL,
        editable=False,
    )
    parent_uuid = models.CharField(
        max_length=1024,
        default='',
@@ -617,6 +603,7 @@ class BaseCommandEvent(CreatedModifiedModel):
        kwargs.pop('created', None)

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
        kwargs.pop('workflow_job_id', None)
        event = cls(**kwargs)
        event._update_from_event_data()
        return event

@@ -4,7 +4,6 @@
# Python
import datetime
import time
import itertools
import logging
import re
import copy
@@ -123,12 +122,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Total number of groups in this inventory.'),
    )
    groups_with_active_failures = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Number of groups in this inventory with active failures.'),
    )
    has_inventory_sources = models.BooleanField(
        default=False,
        editable=False,
@@ -339,139 +332,17 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):

        return data

    def update_host_computed_fields(self):
        '''
        Update computed fields for all hosts in this inventory.
        '''
        hosts_to_update = {}
        hosts_qs = self.hosts
        # Define queryset of all hosts with active failures.
        hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__failed=True).values_list('pk', flat=True)
        # Find all hosts that need the has_active_failures flag set.
        hosts_to_set = hosts_qs.filter(has_active_failures=False, pk__in=hosts_with_active_failures)
        for host_pk in hosts_to_set.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_active_failures'] = True
        # Find all hosts that need the has_active_failures flag cleared.
        hosts_to_clear = hosts_qs.filter(has_active_failures=True).exclude(pk__in=hosts_with_active_failures)
        for host_pk in hosts_to_clear.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_active_failures'] = False
        # Define queryset of all hosts with cloud inventory sources.
        hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)
        # Find all hosts that need the has_inventory_sources flag set.
        hosts_to_set = hosts_qs.filter(has_inventory_sources=False, pk__in=hosts_with_cloud_inventory)
        for host_pk in hosts_to_set.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_inventory_sources'] = True
        # Find all hosts that need the has_inventory_sources flag cleared.
        hosts_to_clear = hosts_qs.filter(has_inventory_sources=True).exclude(pk__in=hosts_with_cloud_inventory)
        for host_pk in hosts_to_clear.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_inventory_sources'] = False
        # Now apply updates to hosts where needed (in batches).
        all_update_pks = list(hosts_to_update.keys())

        def _chunk(items, chunk_size):
            for i, group in itertools.groupby(enumerate(items), lambda x: x[0] // chunk_size):
                yield (g[1] for g in group)

        for update_pks in _chunk(all_update_pks, 500):
            for host in hosts_qs.filter(pk__in=update_pks):
                host_updates = hosts_to_update[host.pk]
                for field, value in host_updates.items():
                    setattr(host, field, value)
                host.save(update_fields=host_updates.keys())

    def update_group_computed_fields(self):
        '''
        Update computed fields for all active groups in this inventory.
        '''
        group_children_map = self.get_group_children_map()
        group_hosts_map = self.get_group_hosts_map()
        active_host_pks = set(self.hosts.values_list('pk', flat=True))
        failed_host_pks = set(self.hosts.filter(last_job_host_summary__failed=True).values_list('pk', flat=True))
        # active_group_pks = set(self.groups.values_list('pk', flat=True))
        failed_group_pks = set() # Update below as we check each group.
        groups_with_cloud_pks = set(self.groups.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True))
        groups_to_update = {}

        # Build list of group pks to check, starting with the groups at the
        # deepest level within the tree.
        root_group_pks = set(self.root_groups.values_list('pk', flat=True))
        group_depths = {} # pk: max_depth

        def update_group_depths(group_pk, current_depth=0):
            max_depth = group_depths.get(group_pk, -1)
            # Arbitrarily limit depth to avoid hitting Python recursion limit (which defaults to 1000).
            if current_depth > 100:
                return
            if current_depth > max_depth:
                group_depths[group_pk] = current_depth
            for child_pk in group_children_map.get(group_pk, set()):
                update_group_depths(child_pk, current_depth + 1)
        for group_pk in root_group_pks:
            update_group_depths(group_pk)
        group_pks_to_check = [x[1] for x in sorted([(v,k) for k,v in group_depths.items()], reverse=True)]

        for group_pk in group_pks_to_check:
            # Get all children and host pks for this group.
            parent_pks_to_check = set([group_pk])
            parent_pks_checked = set()
            child_pks = set()
            host_pks = set()
            while parent_pks_to_check:
                for parent_pk in list(parent_pks_to_check):
                    c_ids = group_children_map.get(parent_pk, set())
                    child_pks.update(c_ids)
                    parent_pks_to_check.remove(parent_pk)
                    parent_pks_checked.add(parent_pk)
                    parent_pks_to_check.update(c_ids - parent_pks_checked)
                    h_ids = group_hosts_map.get(parent_pk, set())
                    host_pks.update(h_ids)
            # Define updates needed for this group.
            group_updates = groups_to_update.setdefault(group_pk, {})
            group_updates.update({
                'total_hosts': len(active_host_pks & host_pks),
                'has_active_failures': bool(failed_host_pks & host_pks),
                'hosts_with_active_failures': len(failed_host_pks & host_pks),
                'total_groups': len(child_pks),
                'groups_with_active_failures': len(failed_group_pks & child_pks),
                'has_inventory_sources': bool(group_pk in groups_with_cloud_pks),
            })
            if group_updates['has_active_failures']:
                failed_group_pks.add(group_pk)

        # Now apply updates to each group as needed (in batches).
        all_update_pks = list(groups_to_update.keys())
        for offset in range(0, len(all_update_pks), 500):
            update_pks = all_update_pks[offset:(offset + 500)]
            for group in self.groups.filter(pk__in=update_pks):
                group_updates = groups_to_update[group.pk]
                for field, value in list(group_updates.items()):
                    if getattr(group, field) != value:
                        setattr(group, field, value)
                    else:
                        group_updates.pop(field)
                if group_updates:
                    group.save(update_fields=group_updates.keys())

    def update_computed_fields(self, update_groups=True, update_hosts=True):
    def update_computed_fields(self):
        '''
        Update model fields that are computed from database relationships.
        '''
        logger.debug("Going to update inventory computed fields, pk={0}".format(self.pk))
        start_time = time.time()
        if update_hosts:
            self.update_host_computed_fields()
        if update_groups:
            self.update_group_computed_fields()
        active_hosts = self.hosts
        failed_hosts = active_hosts.filter(has_active_failures=True)
        failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
        active_groups = self.groups
        if self.kind == 'smart':
            active_groups = active_groups.none()
        failed_groups = active_groups.filter(has_active_failures=True)
        if self.kind == 'smart':
            active_inventory_sources = self.inventory_sources.none()
        else:
@@ -482,7 +353,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
            'total_hosts': active_hosts.count(),
            'hosts_with_active_failures': failed_hosts.count(),
            'total_groups': active_groups.count(),
            'groups_with_active_failures': failed_groups.count(),
            'has_inventory_sources': bool(active_inventory_sources.count()),
            'total_inventory_sources': active_inventory_sources.count(),
            'inventory_sources_with_failures': failed_inventory_sources.count(),
@@ -545,7 +415,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        if (self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and
                connection.vendor != 'sqlite'):
            # Minimal update of host_count for smart inventory host filter changes
            self.update_computed_fields(update_groups=False, update_hosts=False)
            self.update_computed_fields()

    def delete(self, *args, **kwargs):
        self._update_host_smart_inventory_memeberships()
@@ -631,18 +501,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
        editable=False,
        on_delete=models.SET_NULL,
    )
    has_active_failures = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether the last job failed for this host.'),
    )
    has_inventory_sources = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether this host was created/updated from any external inventory sources.'),
    )
    inventory_sources = models.ManyToManyField(
        'InventorySource',
        related_name='hosts',
@@ -673,34 +531,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
    def get_absolute_url(self, request=None):
        return reverse('api:host_detail', kwargs={'pk': self.pk}, request=request)

    def update_computed_fields(self, update_inventory=True, update_groups=True):
        '''
        Update model fields that are computed from database relationships.
        '''
        has_active_failures = bool(self.last_job_host_summary and
                                   self.last_job_host_summary.failed)
        active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
        computed_fields = {
            'has_active_failures': has_active_failures,
            'has_inventory_sources': bool(active_inventory_sources.count()),
        }
        for field, value in computed_fields.items():
            if getattr(self, field) != value:
                setattr(self, field, value)
            else:
                computed_fields.pop(field)
        if computed_fields:
            self.save(update_fields=computed_fields.keys())
        # Groups and inventory may also need to be updated when host fields
        # change.
        # NOTE: I think this is no longer needed
        # if update_groups:
        #     for group in self.all_groups:
        #         group.update_computed_fields()
        # if update_inventory:
        #     self.inventory.update_computed_fields(update_groups=False,
        #                                           update_hosts=False)
        # Rebuild summary fields cache
    variables_dict = VarsDictProperty('variables')

    @property
@@ -815,42 +645,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
        blank=True,
        help_text=_('Hosts associated directly with this group.'),
    )
    total_hosts = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Total number of hosts directly or indirectly in this group.'),
    )
    has_active_failures = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether this group has any hosts with active failures.'),
    )
    hosts_with_active_failures = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Number of hosts in this group with active failures.'),
    )
    total_groups = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Total number of child groups contained within this group.'),
    )
    groups_with_active_failures = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Number of child groups within this group that have active failures.'),
    )
    has_inventory_sources = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether this group was created/updated from any external inventory sources.'),
    )
    inventory_sources = models.ManyToManyField(
        'InventorySource',
        related_name='groups',
@@ -925,32 +719,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
            mark_actual()
        activity_stream_delete(None, self)

    def update_computed_fields(self):
        '''
        Update model fields that are computed from database relationships.
        '''
        active_hosts = self.all_hosts
        failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
        active_groups = self.all_children
        # FIXME: May not be accurate unless we always update groups depth-first.
        failed_groups = active_groups.filter(has_active_failures=True)
        active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
        computed_fields = {
            'total_hosts': active_hosts.count(),
            'has_active_failures': bool(failed_hosts.count()),
            'hosts_with_active_failures': failed_hosts.count(),
            'total_groups': active_groups.count(),
            'groups_with_active_failures': failed_groups.count(),
            'has_inventory_sources': bool(active_inventory_sources.count()),
        }
        for field, value in computed_fields.items():
            if getattr(self, field) != value:
                setattr(self, field, value)
            else:
                computed_fields.pop(field)
        if computed_fields:
            self.save(update_fields=computed_fields.keys())

    variables_dict = VarsDictProperty('variables')

    def get_all_parents(self, except_pks=None):
@@ -1556,7 +1324,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
            self.update()
        if not getattr(_inventory_updates, 'is_updating', False):
            if self.inventory is not None:
                self.inventory.update_computed_fields(update_groups=False, update_hosts=False)
                self.inventory.update_computed_fields()

    def _get_current_status(self):
        if self.source:
@@ -2616,6 +2384,9 @@ class satellite6(PluginFileInjector):
        group_patterns = '[]'
        group_prefix = 'foreman_'
        want_hostcollections = 'False'
        want_ansible_ssh_host = 'False'
        rich_params = 'False'
        want_facts = 'True'
        foreman_opts = dict(inventory_update.source_vars_dict.items())
        foreman_opts.setdefault('ssl_verify', 'False')
        for k, v in foreman_opts.items():
@@ -2625,6 +2396,12 @@ class satellite6(PluginFileInjector):
                group_prefix = v
            elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
                want_hostcollections = v
            elif k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
                want_ansible_ssh_host = v
            elif k == 'satellite6_rich_params' and isinstance(v, bool):
                rich_params = v
            elif k == 'satellite6_want_facts' and isinstance(v, bool):
                want_facts = v
            else:
                cp.set(section, k, str(v))

@@ -2636,9 +2413,11 @@ class satellite6(PluginFileInjector):
        section = 'ansible'
        cp.add_section(section)
        cp.set(section, 'group_patterns', group_patterns)
        cp.set(section, 'want_facts', 'True')
        cp.set(section, 'want_facts', str(want_facts))
        cp.set(section, 'want_hostcollections', str(want_hostcollections))
        cp.set(section, 'group_prefix', group_prefix)
        cp.set(section, 'want_ansible_ssh_host', str(want_ansible_ssh_host))
        cp.set(section, 'rich_params', str(rich_params))

        section = 'cache'
        cp.add_section(section)
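The injector change above threads three more source_vars through to the generated ini ([ansible] section): want_ansible_ssh_host, rich_params, and want_facts, instead of hard-coding want_facts to True. A rough sketch of the resulting section, built with configparser directly; the source_vars dict here is made up for illustration and is not the injector's real input handling.

import configparser
import io

source_vars = {
    'satellite6_want_facts': False,
    'satellite6_want_ansible_ssh_host': True,
    'satellite6_rich_params': True,
}

cp = configparser.ConfigParser()
cp.add_section('ansible')
cp.set('ansible', 'group_patterns', '[]')
cp.set('ansible', 'want_facts', str(source_vars.get('satellite6_want_facts', True)))
cp.set('ansible', 'want_hostcollections', str(source_vars.get('satellite6_want_hostcollections', False)))
cp.set('ansible', 'group_prefix', 'foreman_')
cp.set('ansible', 'want_ansible_ssh_host', str(source_vars.get('satellite6_want_ansible_ssh_host', False)))
cp.set('ansible', 'rich_params', str(source_vars.get('satellite6_rich_params', False)))

buf = io.StringIO()
cp.write(buf)
print(buf.getvalue())  # shows the [ansible] section the inventory script would read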
@@ -13,6 +13,7 @@ from urllib.parse import urljoin

# Django
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
#from django.core.cache import cache
from django.utils.encoding import smart_str
@@ -28,7 +29,7 @@ from awx.api.versioning import reverse
from awx.main.models.base import (
    BaseModel, CreatedModifiedModel,
    prevent_search, accepts_json,
    JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
    JOB_TYPE_CHOICES, NEW_JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
    VarsDictProperty
)
from awx.main.models.events import JobEvent, SystemJobEvent
@@ -204,6 +205,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
        app_label = 'main'
        ordering = ('name',)

    job_type = models.CharField(
        max_length=64,
        choices=NEW_JOB_TYPE_CHOICES,
        default='run',
    )
    host_config_key = prevent_search(models.CharField(
        max_length=1024,
        blank=True,
@@ -293,6 +299,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
    def resources_needed_to_start(self):
        return [fd for fd in ['project', 'inventory'] if not getattr(self, '{}_id'.format(fd))]

    def clean_forks(self):
        if settings.MAX_FORKS > 0 and self.forks > settings.MAX_FORKS:
            raise ValidationError(_(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.'))
        return self.forks

    def create_job(self, **kwargs):
        '''
        Create a new job based on this template.
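clean_forks() above enforces a global ceiling: when MAX_FORKS is set to a positive value, any job template asking for more forks fails validation, and a value of 0 leaves forks unlimited. A self-contained sketch of the same check; the function and exception names here are stand-ins, not calls into the real model.

class ForksLimitError(ValueError):
    pass

def clean_forks(forks, max_forks):
    # Mirrors the template validation: a limit of 0 means "no limit".
    if max_forks > 0 and forks > max_forks:
        raise ForksLimitError(f'Maximum number of forks ({max_forks}) exceeded.')
    return forks

print(clean_forks(10, 0))    # 10 -- limit disabled
print(clean_forks(10, 50))   # 10 -- under the limit
# clean_forks(100, 50) would raise ForksLimitError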
@@ -1060,7 +1071,7 @@ class JobHostSummary(CreatedModifiedModel):
    processed = models.PositiveIntegerField(default=0, editable=False)
    rescued = models.PositiveIntegerField(default=0, editable=False)
    skipped = models.PositiveIntegerField(default=0, editable=False)
    failed = models.BooleanField(default=False, editable=False)
    failed = models.BooleanField(default=False, editable=False, db_index=True)

    def __str__(self):
        host = getattr_dne(self, 'host')
@@ -1095,7 +1106,6 @@ class JobHostSummary(CreatedModifiedModel):
            update_fields.append('last_job_host_summary_id')
        if update_fields:
            self.host.save(update_fields=update_fields)
        #self.host.update_computed_fields()


class SystemJobOptions(BaseModel):

@@ -274,7 +274,7 @@ class JobNotificationMixin(object):
            {'playbook_counts': ['play_count', 'task_count']},
            {'summary_fields': [{'inventory': ['id', 'name', 'description', 'has_active_failures',
                                               'total_hosts', 'hosts_with_active_failures', 'total_groups',
                                               'groups_with_active_failures', 'has_inventory_sources',
                                               'has_inventory_sources',
                                               'total_inventory_sources', 'inventory_sources_with_failures',
                                               'organization_id', 'kind']},
            {'project': ['id', 'name', 'description', 'status', 'scm_type']},
@@ -327,7 +327,6 @@ class JobNotificationMixin(object):
                     'username': 'admin'},
        'instance_group': {'id': 1, 'name': 'tower'},
        'inventory': {'description': 'Sample inventory description',
                      'groups_with_active_failures': 0,
                      'has_active_failures': False,
                      'has_inventory_sources': False,
                      'hosts_with_active_failures': 0,

@@ -623,6 +623,11 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        editable=False,
        help_text=_("The date and time the job was queued for starting."),
    )
    dependencies_processed = models.BooleanField(
        default=False,
        editable=False,
        help_text=_("If True, the task manager has already processed potential dependencies for this job.")
    )
    finished = models.DateTimeField(
        null=True,
        default=None,
@@ -630,6 +635,13 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        help_text=_("The date and time the job finished execution."),
        db_index=True,
    )
    canceled_on = models.DateTimeField(
        null=True,
        default=None,
        editable=False,
        help_text=_("The date and time when the cancel request was sent."),
        db_index=True,
    )
    elapsed = models.DecimalField(
        max_digits=12,
        decimal_places=3,
@@ -833,7 +845,12 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
                self.unified_job_template = self._get_parent_instance()
                if 'unified_job_template' not in update_fields:
                    update_fields.append('unified_job_template')


        if self.cancel_flag and not self.canceled_on:
            # Record the 'canceled' time.
            self.canceled_on = now()
            if 'canceled_on' not in update_fields:
                update_fields.append('canceled_on')
        # Okay; we're done. Perform the actual save.
        result = super(UnifiedJob, self).save(*args, **kwargs)
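Taken together with the canceled_on column added in migration 0102, the save() change above stamps the cancel time exactly once: the first save that sees cancel_flag set with no canceled_on records the current time and makes sure the field is in update_fields. A small sketch of that idempotent stamping outside the ORM; FakeJob and stamp_cancel are illustrative names, not AWX code.

from datetime import datetime, timezone

def stamp_cancel(job, update_fields):
    # job is any object with cancel_flag / canceled_on attributes; repeated
    # saves after the first cancel leave the original timestamp untouched.
    if job.cancel_flag and not job.canceled_on:
        job.canceled_on = datetime.now(timezone.utc)
        if 'canceled_on' not in update_fields:
            update_fields.append('canceled_on')
    return update_fields

class FakeJob:
    cancel_flag = True
    canceled_on = None

job = FakeJob()
print(stamp_cancel(job, []), job.canceled_on)  # ['canceled_on'] plus the recorded time
print(stamp_cancel(job, []))                   # [] -- already stamped, nothing to update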
@@ -997,6 +1014,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
                dir=settings.JOBOUTPUT_ROOT,
                encoding='utf-8'
            )
            from awx.main.tasks import purge_old_stdout_files # circular import
            purge_old_stdout_files.apply_async()

            # Before the addition of event-based stdout, older versions of
            # awx stored stdout as raw text blobs in a certain database column

@@ -79,6 +79,11 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
        symmetrical=False,
        related_name='%(class)ss_always',
    )
    all_parents_must_converge = models.BooleanField(
        default=False,
        help_text=_("If enabled then the node will only run if all of the parent nodes "
                    "have met the criteria to reach this node")
    )
    unified_job_template = models.ForeignKey(
        'UnifiedJobTemplate',
        related_name='%(class)ss',
@@ -102,7 +107,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
        '''
        return ['workflow_job', 'unified_job_template',
                'extra_data', 'survey_passwords',
                'inventory', 'credentials', 'char_prompts']
                'inventory', 'credentials', 'char_prompts', 'all_parents_must_converge']

    def create_workflow_job_node(self, **kwargs):
        '''
@@ -130,7 +135,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
    FIELDS_TO_PRESERVE_AT_COPY = [
        'unified_job_template', 'workflow_job_template', 'success_nodes', 'failure_nodes',
        'always_nodes', 'credentials', 'inventory', 'extra_data', 'survey_passwords',
        'char_prompts'
        'char_prompts', 'all_parents_must_converge'
    ]
    REENCRYPTION_BLACKLIST_AT_COPY = ['extra_data', 'survey_passwords']

@@ -89,8 +89,8 @@ class SimpleDAG(object):
                run_status(n['node_object']),
                color
            )
        for label, edges in self.node_from_edges_by_label.iteritems():
            for from_node, to_nodes in edges.iteritems():
        for label, edges in self.node_from_edges_by_label.items():
            for from_node, to_nodes in edges.items():
                for to_node in to_nodes:
                    doc += "%s -> %s [ label=\"%s\" ];\n" % (
                        run_status(self.nodes[from_node]['node_object']),
@@ -140,36 +140,36 @@ class SimpleDAG(object):
    def find_ord(self, obj):
        return self.node_obj_to_node_index.get(obj, None)

    def _get_dependencies_by_label(self, node_index, label):
    def _get_children_by_label(self, node_index, label):
        return [self.nodes[index] for index in
                self.node_from_edges_by_label.get(label, {})
                    .get(node_index, [])]

    def get_dependencies(self, obj, label=None):
    def get_children(self, obj, label=None):
        this_ord = self.find_ord(obj)
        nodes = []
        if label:
            return self._get_dependencies_by_label(this_ord, label)
            return self._get_children_by_label(this_ord, label)
        else:
            nodes = []
            for l in self.node_from_edges_by_label.keys():
                nodes.extend(self._get_dependencies_by_label(this_ord, l))
                nodes.extend(self._get_children_by_label(this_ord, l))
            return nodes

    def _get_dependents_by_label(self, node_index, label):
    def _get_parents_by_label(self, node_index, label):
        return [self.nodes[index] for index in
                self.node_to_edges_by_label.get(label, {})
                    .get(node_index, [])]

    def get_dependents(self, obj, label=None):
    def get_parents(self, obj, label=None):
        this_ord = self.find_ord(obj)
        nodes = []
        if label:
            return self._get_dependents_by_label(this_ord, label)
            return self._get_parents_by_label(this_ord, label)
        else:
            nodes = []
            for l in self.node_to_edges_by_label.keys():
                nodes.extend(self._get_dependents_by_label(this_ord, l))
                nodes.extend(self._get_parents_by_label(this_ord, l))
            return nodes

    def get_root_nodes(self):
@@ -188,7 +188,7 @@ class SimpleDAG(object):
        while stack:
            node_obj = stack.pop()

            children = [node['node_object'] for node in self.get_dependencies(node_obj)]
            children = [node['node_object'] for node in self.get_children(node_obj)]
            children_to_add = list(filter(lambda node_obj: node_obj not in node_objs_visited, children))

            if children_to_add:
@@ -212,7 +212,7 @@ class SimpleDAG(object):
            if obj.id in obj_ids_processed:
                return

            for child in self.get_dependencies(obj):
            for child in self.get_children(obj):
                visit(child)
            obj_ids_processed.add(obj.id)
            nodes_sorted.appendleft(node)

@@ -55,7 +55,7 @@ class WorkflowDAG(SimpleDAG):

    def _are_relevant_parents_finished(self, node):
        obj = node['node_object']
        parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
        parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
        for p in parent_nodes:
            if p.do_not_run is True:
                continue
@@ -69,33 +69,55 @@ class WorkflowDAG(SimpleDAG):
                return False
        return True

    def _all_parents_met_convergence_criteria(self, node):
        # This function takes any node and checks that all it's parents have met their criteria to run the child.
        # This returns a boolean and is really only useful if the node is an ALL convergence node and is
        # intended to be used in conjuction with the node property `all_parents_must_converge`
        obj = node['node_object']
        parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
        for p in parent_nodes:
            #node has a status
            if p.job and p.job.status in ["successful", "failed"]:
                if p.job and p.job.status == "successful":
                    status = "success_nodes"
                elif p.job and p.job.status == "failed":
                    status = "failure_nodes"
                #check that the nodes status matches either a pathway of the same status or is an always path.
                if (p not in [node['node_object'] for node in self.get_parents(obj, status)]
                        and p not in [node['node_object'] for node in self.get_parents(obj, "always_nodes")]):
                    return False
        return True

    def bfs_nodes_to_run(self):
        nodes = self.get_root_nodes()
        nodes_found = []
        node_ids_visited = set()

        for index, n in enumerate(nodes):
            obj = n['node_object']
            if obj.id in node_ids_visited:
                continue
            node_ids_visited.add(obj.id)

            if obj.do_not_run is True:
                continue

            if obj.job:
            elif obj.job:
                if obj.job.status in ['failed', 'error', 'canceled']:
                    nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
                                 self.get_dependencies(obj, 'always_nodes'))
                    nodes.extend(self.get_children(obj, 'failure_nodes') +
                                 self.get_children(obj, 'always_nodes'))
                elif obj.job.status == 'successful':
                    nodes.extend(self.get_dependencies(obj, 'success_nodes') +
                                 self.get_dependencies(obj, 'always_nodes'))
                    nodes.extend(self.get_children(obj, 'success_nodes') +
                                 self.get_children(obj, 'always_nodes'))
            elif obj.unified_job_template is None:
                nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
                             self.get_dependencies(obj, 'always_nodes'))
                nodes.extend(self.get_children(obj, 'failure_nodes') +
                             self.get_children(obj, 'always_nodes'))
            else:
                if self._are_relevant_parents_finished(n):
                # This catches root nodes or ANY convergence nodes
                if not obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
                    nodes_found.append(n)
                # This catches ALL convergence nodes
                elif obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
                    if self._all_parents_met_convergence_criteria(n):
                        nodes_found.append(n)

        return [n['node_object'] for n in nodes_found]

    def cancel_node_jobs(self):
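The bfs changes above split convergence handling in two: an ANY node runs as soon as its relevant parents are finished, while a node with all_parents_must_converge additionally requires every finished parent to have reached it along a matching path (a successful parent via a success or always edge, a failed parent via a failure or always edge). A toy sketch of that extra check over plain dicts, independent of the DAG classes and their real data structures:

def all_parents_met_convergence_criteria(parents):
    # parents: list of dicts like {'status': 'successful', 'edges_to_child': {'success_nodes'}}
    # Each finished parent must reach the child through an edge that matches
    # its outcome, or through an 'always_nodes' edge.
    for p in parents:
        if p['status'] == 'successful':
            wanted = 'success_nodes'
        elif p['status'] == 'failed':
            wanted = 'failure_nodes'
        else:
            continue  # parent not finished yet; the finished-check handles that case
        if wanted not in p['edges_to_child'] and 'always_nodes' not in p['edges_to_child']:
            return False
    return True

parents = [
    {'status': 'successful', 'edges_to_child': {'success_nodes'}},
    {'status': 'failed', 'edges_to_child': {'success_nodes'}},  # wrong path for a failure
]
print(all_parents_met_convergence_criteria(parents))  # False -- the child stays un-run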
@@ -135,8 +157,8 @@ class WorkflowDAG(SimpleDAG):

        for node in failed_nodes:
            obj = node['node_object']
            if (len(self.get_dependencies(obj, 'failure_nodes')) +
                    len(self.get_dependencies(obj, 'always_nodes'))) == 0:
            if (len(self.get_children(obj, 'failure_nodes')) +
                    len(self.get_children(obj, 'always_nodes'))) == 0:
                if obj.unified_job_template is None:
                    res = True
                    failed_unified_job_template_node_ids.append(str(obj.id))
@@ -190,35 +212,48 @@ class WorkflowDAG(SimpleDAG):
                pass
            elif p.job:
                if p.job.status == 'successful':
                    if node in (self.get_dependencies(p, 'success_nodes') +
                                self.get_dependencies(p, 'always_nodes')):
                    if node in (self.get_children(p, 'success_nodes') +
                                self.get_children(p, 'always_nodes')):
                        return False
                elif p.job.status in ['failed', 'error', 'canceled']:
                    if node in (self.get_dependencies(p, 'failure_nodes') +
                                self.get_dependencies(p, 'always_nodes')):
                    if node in (self.get_children(p, 'failure_nodes') +
                                self.get_children(p, 'always_nodes')):
                        return False
                else:
                    return False
            elif p.do_not_run is False and p.unified_job_template is None:
                if node in (self.get_dependencies(p, 'failure_nodes') +
                            self.get_dependencies(p, 'always_nodes')):
            elif not p.do_not_run and p.unified_job_template is None:
                if node in (self.get_children(p, 'failure_nodes') +
                            self.get_children(p, 'always_nodes')):
                    return False
            else:
                return False
        return True


    r'''
    determine if the current node is a convergence node by checking if all the
    parents are finished then checking to see if all parents meet the needed
    path criteria to run the convergence child.
    (i.e. parent must fail, parent must succeed, etc. to proceed)

    Return a list object
    '''
    def mark_dnr_nodes(self):
        root_nodes = self.get_root_nodes()
        nodes_marked_do_not_run = []

        for node in self.sort_nodes_topological():
            obj = node['node_object']

            if obj.do_not_run is False and not obj.job and node not in root_nodes:
                parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
                if self._are_all_nodes_dnr_decided(parent_nodes):
                    if self._should_mark_node_dnr(node, parent_nodes):
            parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
            if not obj.do_not_run and not obj.job and node not in root_nodes:
                if obj.all_parents_must_converge:
                    if any(p.do_not_run for p in parent_nodes) or not self._all_parents_met_convergence_criteria(node):
                        obj.do_not_run = True
                        nodes_marked_do_not_run.append(node)
                else:
                    if self._are_all_nodes_dnr_decided(parent_nodes):
                        if self._should_mark_node_dnr(node, parent_nodes):
                            obj.do_not_run = True
                            nodes_marked_do_not_run.append(node)

        return [n['node_object'] for n in nodes_marked_do_not_run]

@@ -23,6 +23,7 @@ from awx.main.models import (
    Project,
    ProjectUpdate,
    SystemJob,
    UnifiedJob,
    WorkflowApproval,
    WorkflowJob,
    WorkflowJobTemplate
@@ -74,21 +75,6 @@ class TaskManager():
                           key=lambda task: task.created)
        return all_tasks


    def get_latest_project_update_tasks(self, all_sorted_tasks):
        project_ids = set()
        for task in all_sorted_tasks:
            if isinstance(task, Job):
                project_ids.add(task.project_id)
        return ProjectUpdate.objects.filter(id__in=project_ids)

    def get_latest_inventory_update_tasks(self, all_sorted_tasks):
        inventory_ids = set()
        for task in all_sorted_tasks:
            if isinstance(task, Job):
                inventory_ids.add(task.inventory_id)
        return InventoryUpdate.objects.filter(id__in=inventory_ids)

    def get_running_workflow_jobs(self):
        graph_workflow_jobs = [wf for wf in
                               WorkflowJob.objects.filter(status='running')]
@@ -200,9 +186,6 @@ class TaskManager():
            schedule_task_manager()
        return result

    def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
        return [{'type': j.model_to_str(), 'id': j.id} for j in job_obj.dependent_jobs.all()]

    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        from awx.main.tasks import handle_work_error, handle_work_success

@@ -364,10 +347,6 @@ class TaskManager():
|
||||
def should_update_inventory_source(self, job, latest_inventory_update):
|
||||
now = tz_now()
|
||||
|
||||
# Already processed dependencies for this job
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
|
||||
if latest_inventory_update is None:
|
||||
return True
|
||||
'''
|
||||
@@ -393,8 +372,6 @@ class TaskManager():
|
||||
|
||||
def should_update_related_project(self, job, latest_project_update):
|
||||
now = tz_now()
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
|
||||
if latest_project_update is None:
|
||||
return True
|
||||
@@ -426,18 +403,21 @@ class TaskManager():
|
||||
return True
|
||||
return False
|
||||
|
||||
def generate_dependencies(self, task):
|
||||
dependencies = []
|
||||
if type(task) is Job:
|
||||
def generate_dependencies(self, undeped_tasks):
|
||||
created_dependencies = []
|
||||
for task in undeped_tasks:
|
||||
dependencies = []
|
||||
if not type(task) is Job:
|
||||
continue
|
||||
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
|
||||
if task.project is not None and task.project.scm_update_on_launch is True:
|
||||
latest_project_update = self.get_latest_project_update(task)
|
||||
if self.should_update_related_project(task, latest_project_update):
|
||||
project_task = self.create_project_update(task)
|
||||
created_dependencies.append(project_task)
|
||||
dependencies.append(project_task)
|
||||
else:
|
||||
if latest_project_update.status in ['waiting', 'pending', 'running']:
|
||||
dependencies.append(latest_project_update)
|
||||
dependencies.append(latest_project_update)
|
||||
|
||||
# Inventory created 2 seconds behind job
|
||||
try:
|
||||
@@ -452,56 +432,20 @@ class TaskManager():
|
||||
latest_inventory_update = self.get_latest_inventory_update(inventory_source)
|
||||
if self.should_update_inventory_source(task, latest_inventory_update):
|
||||
inventory_task = self.create_inventory_update(task, inventory_source)
|
||||
created_dependencies.append(inventory_task)
|
||||
dependencies.append(inventory_task)
|
||||
else:
|
||||
if latest_inventory_update.status in ['waiting', 'pending', 'running']:
|
||||
dependencies.append(latest_inventory_update)
|
||||
dependencies.append(latest_inventory_update)
|
||||
|
||||
if len(dependencies) > 0:
|
||||
self.capture_chain_failure_dependencies(task, dependencies)
|
||||
return dependencies
|
||||
|
||||
def process_dependencies(self, dependent_task, dependency_tasks):
|
||||
for task in dependency_tasks:
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("Dependent {} is blocked from running".format(task.log_format))
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
found_acceptable_queue = False
|
||||
idle_instance_that_fits = None
|
||||
for rampart_group in preferred_instance_groups:
|
||||
if idle_instance_that_fits is None:
|
||||
idle_instance_that_fits = rampart_group.find_largest_idle_instance()
|
||||
if not rampart_group.is_containerized and self.get_remaining_capacity(rampart_group.name) <= 0:
|
||||
logger.debug("Skipping group {} capacity <= 0".format(rampart_group.name))
|
||||
continue
|
||||
|
||||
execution_instance = rampart_group.fit_task_to_most_remaining_capacity_instance(task)
|
||||
if execution_instance:
|
||||
logger.debug("Starting dependent {} in group {} instance {}".format(
|
||||
task.log_format, rampart_group.name, execution_instance.hostname))
|
||||
elif not execution_instance and idle_instance_that_fits:
|
||||
if not rampart_group.is_containerized:
|
||||
execution_instance = idle_instance_that_fits
|
||||
logger.debug("Starting dependent {} in group {} on idle instance {}".format(
|
||||
task.log_format, rampart_group.name, execution_instance.hostname))
|
||||
if execution_instance or rampart_group.is_containerized:
|
||||
self.graph[rampart_group.name]['graph'].add_job(task)
|
||||
tasks_to_fail = [t for t in dependency_tasks if t != task]
|
||||
tasks_to_fail += [dependent_task]
|
||||
self.start_task(task, rampart_group, tasks_to_fail, execution_instance)
|
||||
found_acceptable_queue = True
|
||||
break
|
||||
else:
|
||||
logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
|
||||
rampart_group.name, task.log_format, task.task_impact))
|
||||
if not found_acceptable_queue:
|
||||
logger.debug("Dependent {} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
|
||||
UnifiedJob.objects.filter(pk__in = [task.pk for task in undeped_tasks]).update(dependencies_processed=True)
|
||||
return created_dependencies
|
||||
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
|
||||
for task in pending_tasks:
|
||||
self.process_dependencies(task, self.generate_dependencies(task))
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("{} is blocked from running".format(task.log_format))
|
||||
continue
|
||||
@@ -574,13 +518,6 @@ class TaskManager():
|
||||
def calculate_capacity_consumed(self, tasks):
|
||||
self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)
|
||||
|
||||
def would_exceed_capacity(self, task, instance_group):
|
||||
current_capacity = self.graph[instance_group]['consumed_capacity']
|
||||
capacity_total = self.graph[instance_group]['capacity_total']
|
||||
if current_capacity == 0:
|
||||
return False
|
||||
return (task.task_impact + current_capacity > capacity_total)
|
||||
|
||||
def consume_capacity(self, task, instance_group):
|
||||
logger.debug('{} consumed {} capacity units from {} with prior total of {}'.format(
|
||||
task.log_format, task.task_impact, instance_group,
|
||||
@@ -598,6 +535,9 @@ class TaskManager():
|
||||
self.process_running_tasks(running_tasks)

pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']
undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
dependencies = self.generate_dependencies(undeped_tasks)
self.process_pending_tasks(dependencies)
self.process_pending_tasks(pending_tasks)
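The hunk above makes dependency generation a one-time pass per job. A minimal sketch of the intended flow, with hypothetical helper names rather than the real TaskManager API:

def schedule_cycle(pending_jobs, generate_dependencies, process_pending, mark_processed):
    # Only jobs that have never had their dependencies generated get the expensive pass.
    fresh = [job for job in pending_jobs if not job.dependencies_processed]
    dependency_updates = generate_dependencies(fresh)  # project/inventory updates
    process_pending(dependency_updates)                # try to start the dependencies first
    process_pending(pending_jobs)                      # then the jobs themselves
    mark_processed(fresh)                              # in the diff this happens at the end of generate_dependencies()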
|
||||
def _schedule(self):
|
||||
|
||||
@@ -10,6 +10,7 @@ import pkg_resources
|
||||
import sys
|
||||
|
||||
# Django
|
||||
from django.db import connection
|
||||
from django.conf import settings
|
||||
from django.db.models.signals import (
|
||||
pre_save,
|
||||
@@ -71,41 +72,6 @@ def get_current_user_or_none():
|
||||
return u
|
||||
|
||||
|
||||
def emit_update_inventory_computed_fields(sender, **kwargs):
|
||||
logger.debug("In update inventory computed fields")
|
||||
if getattr(_inventory_updates, 'is_updating', False):
|
||||
return
|
||||
instance = kwargs['instance']
|
||||
if sender == Group.hosts.through:
|
||||
sender_name = 'group.hosts'
|
||||
elif sender == Group.parents.through:
|
||||
sender_name = 'group.parents'
|
||||
elif sender == Host.inventory_sources.through:
|
||||
sender_name = 'host.inventory_sources'
|
||||
elif sender == Group.inventory_sources.through:
|
||||
sender_name = 'group.inventory_sources'
|
||||
else:
|
||||
sender_name = str(sender._meta.verbose_name)
|
||||
if kwargs['signal'] == post_save:
|
||||
if sender == Job:
|
||||
return
|
||||
sender_action = 'saved'
|
||||
elif kwargs['signal'] == post_delete:
|
||||
sender_action = 'deleted'
|
||||
elif kwargs['signal'] == m2m_changed and kwargs['action'] in ('post_add', 'post_remove', 'post_clear'):
|
||||
sender_action = 'changed'
|
||||
else:
|
||||
return
|
||||
logger.debug('%s %s, updating inventory computed fields: %r %r',
|
||||
sender_name, sender_action, sender, kwargs)
|
||||
try:
|
||||
inventory = instance.inventory
|
||||
except Inventory.DoesNotExist:
|
||||
pass
|
||||
else:
|
||||
update_inventory_computed_fields.delay(inventory.id, True)
|
||||
|
||||
|
||||
def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
|
||||
if getattr(_inventory_updates, 'is_updating', False):
|
||||
return
|
||||
@@ -124,7 +90,9 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
|
||||
pass
|
||||
else:
|
||||
if inventory is not None:
|
||||
update_inventory_computed_fields.delay(inventory.id, True)
|
||||
connection.on_commit(
|
||||
lambda: update_inventory_computed_fields.delay(inventory.id)
|
||||
)
|
||||
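Deferring the dispatch with connection.on_commit(), as above, means the task only fires once the surrounding transaction commits, so the worker never reads rows that were rolled back or are not yet visible. A hedged sketch (the wrapper function and its argument are illustrative, not AWX code):

from django.db import connection, transaction

def save_and_recompute(inventory):
    with transaction.atomic():
        inventory.save()
        # queued here, executed only after a successful COMMIT
        connection.on_commit(lambda: update_inventory_computed_fields.delay(inventory.id))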
|
||||
|
||||
def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwargs):
|
||||
@@ -207,10 +175,6 @@ def connect_computed_field_signals():
|
||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.parents.through)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
|
||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
|
||||
@@ -347,10 +311,6 @@ def disable_computed_fields():
|
||||
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
||||
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.parents.through)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
|
||||
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Job)
|
||||
|
||||
@@ -52,6 +52,7 @@ import ansible_runner
|
||||
from awx import __version__ as awx_application_version
|
||||
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.redact import UriCleaner
|
||||
from awx.main.models import (
|
||||
Schedule, TowerScheduleState, Instance, InstanceGroup,
|
||||
UnifiedJob, Notification,
|
||||
@@ -337,17 +338,31 @@ def send_notifications(notification_list, job_id=None):
|
||||
|
||||
@task()
|
||||
def gather_analytics():
|
||||
from awx.conf.models import Setting
|
||||
from rest_framework.fields import DateTimeField
|
||||
if not settings.INSIGHTS_TRACKING_STATE:
|
||||
return
|
||||
try:
|
||||
tgz = analytics.gather()
|
||||
if not tgz:
|
||||
return
|
||||
logger.debug('gathered analytics: {}'.format(tgz))
|
||||
analytics.ship(tgz)
|
||||
finally:
|
||||
if os.path.exists(tgz):
|
||||
os.remove(tgz)
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
if last_gather:
last_time = DateTimeField().to_internal_value(last_gather.value)
else:
last_time = None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
with advisory_lock('gather_analytics_lock', wait=False) as acquired:
if acquired is False:
logger.debug('Not gathering analytics, another task holds lock')
return
try:
tgz = analytics.gather()
if not tgz:
return
logger.info('gathered analytics: {}'.format(tgz))
analytics.ship(tgz)
settings.AUTOMATION_ANALYTICS_LAST_GATHER = gather_time
finally:
if os.path.exists(tgz):
os.remove(tgz)
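A standalone sketch of the guard pattern the new gather_analytics() uses above: run at most once per interval, and let only one cluster node win via a non-blocking lock. All names below are illustrative, not AWX APIs:

from datetime import datetime, timezone

def maybe_gather(last_time, interval_seconds, try_lock, do_gather):
    """last_time: datetime or None; try_lock: returns True only when the advisory
    lock is acquired; do_gather: the expensive gather-and-ship work."""
    gather_time = datetime.now(timezone.utc)
    if last_time and (gather_time - last_time).total_seconds() <= interval_seconds:
        return None   # gathered recently enough, nothing to do
    if not try_lock():
        return None   # another node is already gathering
    do_gather()
    return gather_time   # caller persists this as the new last-gather timestamp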
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@@ -499,7 +514,7 @@ def awx_periodic_scheduler():
|
||||
|
||||
invalid_license = False
|
||||
try:
|
||||
access_registry[Job](None).check_license()
|
||||
access_registry[Job](None).check_license(quiet=True)
|
||||
except PermissionDenied as e:
|
||||
invalid_license = e
|
||||
|
||||
@@ -588,7 +603,7 @@ def handle_work_error(task_id, *args, **kwargs):
|
||||
|
||||
|
||||
@task()
|
||||
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
|
||||
def update_inventory_computed_fields(inventory_id):
|
||||
'''
|
||||
Signal handler and wrapper around inventory.update_computed_fields to
|
||||
prevent unnecessary recursive calls.
|
||||
@@ -599,7 +614,7 @@ def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
|
||||
return
|
||||
i = i[0]
|
||||
try:
|
||||
i.update_computed_fields(update_hosts=should_update_hosts)
|
||||
i.update_computed_fields()
|
||||
except DatabaseError as e:
|
||||
if 'did not affect any rows' in str(e):
|
||||
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
|
||||
@@ -642,7 +657,7 @@ def update_host_smart_inventory_memberships():
|
||||
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
|
||||
# Update computed fields for changed inventories outside atomic action
|
||||
for smart_inventory in changed_inventories:
|
||||
smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
|
||||
smart_inventory.update_computed_fields()
|
||||
|
||||
|
||||
@task()
|
||||
@@ -1130,6 +1145,23 @@ class BaseTask(object):
|
||||
else:
|
||||
event_data['host_name'] = ''
|
||||
event_data['host_id'] = ''
|
||||
|
||||
if isinstance(self, RunProjectUpdate):
# it's common for Ansible's SCM modules to print
# error messages on failure that contain the plaintext
# basic auth credentials (username + password)
# it's also common for the nested event data itself (['res']['...'])
# to contain unredacted text on failure
# this is a _little_ expensive to filter
# with regex, but project updates don't have many events,
# so it *should* have a negligible performance impact
try:
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
except json.JSONDecodeError:
pass
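The comment above gives the rationale; a small hedged example of the JSON round-trip (the payload and URL are made up, and the exact redacted form depends on UriCleaner):

import json
from awx.main.redact import UriCleaner

event = {'res': {'msg': 'fatal: could not read from https://deploy:s3cret@git.example.com/repo.git'}}
cleaned = json.loads(UriCleaner.remove_sensitive(json.dumps(event)))
# the plaintext password should no longer appear anywhere in the nested payload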
|
||||
should_write_event = False
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
self.dispatcher.dispatch(event_data)
|
||||
@@ -1656,8 +1688,12 @@ class RunJob(BaseTask):
|
||||
args.append('--vault-id')
|
||||
args.append('{}@prompt'.format(vault_id))
|
||||
|
||||
if job.forks: # FIXME: Max limit?
|
||||
args.append('--forks=%d' % job.forks)
|
||||
if job.forks:
|
||||
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
|
||||
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
|
||||
args.append('--forks=%d' % settings.MAX_FORKS)
|
||||
else:
|
||||
args.append('--forks=%d' % job.forks)
|
||||
if job.force_handlers:
|
||||
args.append('--force-handlers')
|
||||
if job.limit:
|
||||
@@ -1868,7 +1904,7 @@ class RunJob(BaseTask):
|
||||
except Inventory.DoesNotExist:
|
||||
pass
|
||||
else:
|
||||
update_inventory_computed_fields.delay(inventory.id, True)
|
||||
update_inventory_computed_fields.delay(inventory.id)
|
||||
|
||||
|
||||
@task()
|
||||
@@ -1977,8 +2013,9 @@ class RunProjectUpdate(BaseTask):
|
||||
continue
|
||||
env_key = ('ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key)).upper()
|
||||
env[env_key] = server[key]
|
||||
# now set the precedence of galaxy servers
|
||||
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
|
||||
if galaxy_servers:
|
||||
# now set the precedence of galaxy servers
|
||||
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
|
||||
return env
|
||||
|
||||
def _build_scm_url_extra_vars(self, project_update):
|
||||
@@ -2851,4 +2888,4 @@ def deep_copy_model_obj(
|
||||
), permission_check_func[2])
|
||||
permission_check_func(creater, copy_mapping.values())
|
||||
if isinstance(new_obj, Inventory):
|
||||
update_inventory_computed_fields.delay(new_obj.id, True)
|
||||
update_inventory_computed_fields.delay(new_obj.id)
|
||||
|
||||
@@ -10,6 +10,8 @@ group_patterns = foo_group_patterns
|
||||
want_facts = True
|
||||
want_hostcollections = True
|
||||
group_prefix = foo_group_prefix
|
||||
want_ansible_ssh_host = True
|
||||
rich_params = True
|
||||
|
||||
[cache]
|
||||
path = /tmp
|
||||
|
||||
@@ -2,6 +2,9 @@ from django.db import connection
|
||||
from django.db.models.signals import post_migrate
|
||||
from django.apps import apps
|
||||
from django.conf import settings
|
||||
from unittest import mock
|
||||
|
||||
import contextlib
|
||||
|
||||
|
||||
def app_post_migration(sender, app_config, **kwargs):
|
||||
@@ -23,3 +26,13 @@ if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
|
||||
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def immediate_on_commit():
|
||||
"""
|
||||
Context manager executing transaction.on_commit() hooks immediately as
|
||||
if the connection was in auto-commit mode.
|
||||
"""
|
||||
def on_commit(func):
|
||||
func()
|
||||
with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch:
|
||||
yield patch
|
||||
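A hedged usage sketch for the helper above (a fuller version appears in the signal tests later in this diff); the fixture and import paths are assumptions:

import pytest
from awx.main.models import Job
from awx.main.tasks import update_inventory_computed_fields

@pytest.mark.django_db
def test_delete_recomputes_inventory(mocker, inventory):
    job = Job.objects.create(name='fake-job', inventory=inventory)
    with immediate_on_commit():
        with mocker.patch.object(update_inventory_computed_fields, 'delay'):
            job.delete()
            update_inventory_computed_fields.delay.assert_called_once_with(inventory.id)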
|
||||
@@ -153,7 +153,8 @@ def test_summary_fields_recent_jobs(job_template, admin_user, get):
|
||||
'id': job.id,
|
||||
'status': 'failed',
|
||||
'finished': job.finished,
|
||||
'type': 'job'
|
||||
'canceled_on': None,
|
||||
'type': 'job'
|
||||
} for job in jobs[-10:][::-1]]
|
||||
|
||||
|
||||
|
||||
@@ -264,18 +264,6 @@ def test_job_launch_fails_without_credential_access(job_template_prompts, runtim
|
||||
dict(credentials=runtime_data['credentials']), rando, expect=403)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.job_runtime_vars
|
||||
def test_job_block_scan_job_type_change(job_template_prompts, post, admin_user):
|
||||
job_template = job_template_prompts(True)
|
||||
|
||||
# Assure that changing the type of a scan job blocks the launch
|
||||
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
|
||||
dict(job_type='scan'), admin_user, expect=400)
|
||||
|
||||
assert 'job_type' in response.data
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_launch_JT_with_validation(machine_credential, credential, deploy_jobtemplate):
|
||||
deploy_jobtemplate.extra_vars = '{"job_template_var": 3}'
|
||||
|
||||
@@ -118,6 +118,22 @@ def test_extra_credential_unique_type_xfail(get, post, organization_factory, job
|
||||
assert response.data.get('count') == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_create_with_forks_exceeding_maximum_xfail(alice, post, project, inventory, settings):
|
||||
project.use_role.members.add(alice)
|
||||
inventory.use_role.members.add(alice)
|
||||
settings.MAX_FORKS = 10
|
||||
response = post(reverse('api:job_template_list'), {
|
||||
'name': 'Some name',
|
||||
'project': project.id,
|
||||
'inventory': inventory.id,
|
||||
'playbook': 'helloworld.yml',
|
||||
'forks': 11,
|
||||
}, alice)
|
||||
assert response.status_code == 400
|
||||
assert 'Maximum number of forks (10) exceeded' in str(response.data)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_attach_extra_credential(get, post, organization_factory, job_template_factory, credential):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
import pytest
|
||||
import base64
|
||||
import contextlib
|
||||
import json
|
||||
from unittest import mock
|
||||
|
||||
from django.db import connection
|
||||
from django.test.utils import override_settings
|
||||
@@ -12,22 +10,11 @@ from awx.main.utils.encryption import decrypt_value, get_encryption_key
|
||||
from awx.api.versioning import reverse, drf_reverse
|
||||
from awx.main.models.oauth import (OAuth2Application as Application,
|
||||
OAuth2AccessToken as AccessToken)
|
||||
from awx.main.tests.functional import immediate_on_commit
|
||||
from awx.sso.models import UserEnterpriseAuth
|
||||
from oauth2_provider.models import RefreshToken
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def immediate_on_commit():
|
||||
"""
|
||||
Context manager executing transaction.on_commit() hooks immediately as
|
||||
if the connection was in auto-commit mode.
|
||||
"""
|
||||
def on_commit(func):
|
||||
func()
|
||||
with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch:
|
||||
yield patch
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_personal_access_token_creation(oauth_application, post, alice):
|
||||
url = drf_reverse('api:oauth_authorization_root_view') + 'token/'
|
||||
|
||||
@@ -125,9 +125,9 @@ def project_playbooks():
|
||||
@pytest.fixture
|
||||
def run_computed_fields_right_away(request):
|
||||
|
||||
def run_me(inventory_id, should_update_hosts=True):
|
||||
def run_me(inventory_id):
|
||||
i = Inventory.objects.get(id=inventory_id)
|
||||
i.update_computed_fields(update_hosts=should_update_hosts)
|
||||
i.update_computed_fields()
|
||||
|
||||
mocked = mock.patch(
|
||||
'awx.main.signals.update_inventory_computed_fields.delay',
|
||||
|
||||
@@ -11,6 +11,7 @@ from awx.main.signals import (
|
||||
# AWX models
|
||||
from awx.main.models.organization import Organization
|
||||
from awx.main.models import ActivityStream, Job
|
||||
from awx.main.tests.functional import immediate_on_commit
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -34,9 +35,10 @@ class TestComputedFields:
|
||||
|
||||
def test_computed_fields_normal_use(self, mocker, inventory):
|
||||
job = Job.objects.create(name='fake-job', inventory=inventory)
|
||||
with mocker.patch.object(update_inventory_computed_fields, 'delay'):
|
||||
job.delete()
|
||||
update_inventory_computed_fields.delay.assert_called_once_with(inventory.id, True)
|
||||
with immediate_on_commit():
|
||||
with mocker.patch.object(update_inventory_computed_fields, 'delay'):
|
||||
job.delete()
|
||||
update_inventory_computed_fields.delay.assert_called_once_with(inventory.id)
|
||||
|
||||
def test_disable_computed_fields(self, mocker, inventory):
|
||||
job = Job.objects.create(name='fake-job', inventory=inventory)
|
||||
|
||||
@@ -48,7 +48,6 @@ class TestJobNotificationMixin(object):
|
||||
'username': str},
|
||||
'instance_group': {'id': int, 'name': str},
|
||||
'inventory': {'description': str,
|
||||
'groups_with_active_failures': int,
|
||||
'has_active_failures': bool,
|
||||
'has_inventory_sources': bool,
|
||||
'hosts_with_active_failures': int,
|
||||
|
||||
@@ -283,13 +283,13 @@ class TestTaskImpact:
|
||||
|
||||
def test_limit_task_impact(self, job_host_limit, run_computed_fields_right_away):
|
||||
job = job_host_limit(5, 2)
|
||||
job.inventory.refresh_from_db() # FIXME: computed fields operates on reloaded inventory
|
||||
job.inventory.update_computed_fields()
|
||||
assert job.inventory.total_hosts == 5
|
||||
assert job.task_impact == 2 + 1 # forks becomes constraint
|
||||
|
||||
def test_host_task_impact(self, job_host_limit, run_computed_fields_right_away):
|
||||
job = job_host_limit(3, 5)
|
||||
job.inventory.refresh_from_db() # FIXME: computed fields operates on reloaded inventory
|
||||
job.inventory.update_computed_fields()
|
||||
assert job.task_impact == 3 + 1 # hosts becomes constraint
|
||||
|
||||
def test_shard_task_impact(self, slice_job_factory, run_computed_fields_right_away):
|
||||
@@ -304,6 +304,7 @@ class TestTaskImpact:
|
||||
len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
|
||||
for i in range(3)
|
||||
] == [1, 1, 1]
|
||||
jobs[0].inventory.update_computed_fields()
|
||||
assert [job.task_impact for job in jobs] == [2, 2, 2] # plus one base task impact
|
||||
# Uneven distribution - first job takes the extra host
|
||||
jobs[0].inventory.hosts.create(name='remainder_foo')
|
||||
@@ -311,5 +312,5 @@ class TestTaskImpact:
|
||||
len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
|
||||
for i in range(3)
|
||||
] == [2, 1, 1]
|
||||
jobs[0].inventory.refresh_from_db() # FIXME: computed fields operates on reloaded inventory
|
||||
jobs[0].inventory.update_computed_fields()
|
||||
assert [job.task_impact for job in jobs] == [3, 2, 2]
|
||||
|
||||
@@ -67,7 +67,7 @@ def test_multi_group_with_shared_dependency(instance_factory, default_instance_g
|
||||
pu = p.project_updates.first()
|
||||
TaskManager.start_task.assert_called_once_with(pu,
|
||||
default_instance_group,
|
||||
[j1],
|
||||
[j1,j2],
|
||||
default_instance_group.instances.all()[0])
|
||||
pu.finished = pu.created + timedelta(seconds=1)
|
||||
pu.status = "successful"
|
||||
@@ -193,7 +193,7 @@ def test_instance_group_basic_policies(instance_factory, instance_group_factory)
|
||||
ig2 = InstanceGroup.objects.get(id=ig2.id)
|
||||
ig3 = InstanceGroup.objects.get(id=ig3.id)
|
||||
assert len(ig0.instances.all()) == 1
|
||||
assert i0 in ig0.instances.all()
|
||||
assert i0 in ig0.instances.all()
|
||||
assert len(InstanceGroup.objects.get(id=ig1.id).instances.all()) == 2
|
||||
assert i1 in ig1.instances.all()
|
||||
assert i2 in ig1.instances.all()
|
||||
|
||||
@@ -6,7 +6,7 @@ from datetime import timedelta
|
||||
from awx.main.scheduler import TaskManager
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main.utils import encrypt_field
|
||||
from awx.main.models import WorkflowJobTemplate, JobTemplate
|
||||
from awx.main.models import WorkflowJobTemplate, JobTemplate, Job
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -307,8 +307,8 @@ def test_shared_dependencies_launch(default_instance_group, job_template_factory
|
||||
TaskManager().schedule()
|
||||
pu = p.project_updates.first()
|
||||
iu = ii.inventory_updates.first()
|
||||
TaskManager.start_task.assert_has_calls([mock.call(pu, default_instance_group, [iu, j1], instance),
|
||||
mock.call(iu, default_instance_group, [pu, j1], instance)])
|
||||
TaskManager.start_task.assert_has_calls([mock.call(iu, default_instance_group, [j1, j2, pu], instance),
|
||||
mock.call(pu, default_instance_group, [j1, j2, iu], instance)])
|
||||
pu.status = "successful"
|
||||
pu.finished = pu.created + timedelta(seconds=1)
|
||||
pu.save()
|
||||
@@ -383,3 +383,35 @@ def test_job_not_blocking_inventory_update(default_instance_group, job_template_
|
||||
dependency_graph = DependencyGraph(None)
|
||||
dependency_graph.add_job(job)
|
||||
assert not dependency_graph.is_job_blocked(inventory_update)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_generate_dependencies_only_once(job_template_factory):
|
||||
objects = job_template_factory('jt', organization='org1')
|
||||
|
||||
job = objects.job_template.create_job()
|
||||
job.status = "pending"
|
||||
job.name = "job_gen_dep"
|
||||
job.save()
|
||||
|
||||
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
# job starts with dependencies_processed as False
|
||||
assert not job.dependencies_processed
|
||||
# run one cycle of ._schedule() to generate dependencies
|
||||
TaskManager()._schedule()
|
||||
|
||||
# make sure dependencies_processed is now True
|
||||
job = Job.objects.filter(name="job_gen_dep")[0]
|
||||
assert job.dependencies_processed
|
||||
|
||||
# Run ._schedule() again, but make sure .generate_dependencies() is not
|
||||
# called with job in the argument list
|
||||
tm = TaskManager()
|
||||
tm.generate_dependencies = mock.MagicMock()
|
||||
tm._schedule()
|
||||
|
||||
# .call_args is tuple, (positional_args, kwargs), [0][0] then is
|
||||
# the first positional arg, i.e. the first argument of
|
||||
# .generate_dependencies()
|
||||
assert tm.generate_dependencies.call_args[0][0] == []
|
||||
|
||||
awx/main/tests/functional/test_credential_plugins.py (new file, 6 lines added)
@@ -0,0 +1,6 @@
|
||||
def test_imported_azure_cloud_sdk_vars():
|
||||
from awx.main.credential_plugins import azure_kv
|
||||
assert len(azure_kv.clouds) > 0
|
||||
assert all([hasattr(c, 'name') for c in azure_kv.clouds])
|
||||
assert all([hasattr(c, 'suffixes') for c in azure_kv.clouds])
|
||||
assert all([hasattr(c.suffixes, 'keyvault_dns') for c in azure_kv.clouds])
|
||||
@@ -60,7 +60,11 @@ INI_TEST_VARS = {
|
||||
'satellite6': {
|
||||
'satellite6_group_patterns': 'foo_group_patterns',
|
||||
'satellite6_group_prefix': 'foo_group_prefix',
|
||||
'satellite6_want_hostcollections': True
|
||||
'satellite6_want_hostcollections': True,
|
||||
'satellite6_want_ansible_ssh_host': True,
|
||||
'satellite6_rich_params': True,
|
||||
'satellite6_want_facts': True
|
||||
|
||||
},
|
||||
'cloudforms': {
|
||||
'version': '2.4',
|
||||
|
||||
@@ -57,7 +57,7 @@ def test_empty_in(empty_value):
|
||||
@pytest.mark.parametrize(u"valid_value", [u'foo', u'foo,'])
|
||||
def test_valid_in(valid_value):
|
||||
field_lookup = FieldLookupBackend()
|
||||
value, new_lookup = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value)
|
||||
value, new_lookup, _ = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value)
|
||||
assert 'foo' in value
|
||||
|
||||
|
||||
|
||||
@@ -171,6 +171,7 @@ class TestWorkflowJobCreate:
|
||||
with mocker.patch('awx.main.models.WorkflowJobNode.objects.create', mock_create):
|
||||
wfjt_node_no_prompts.create_workflow_job_node(workflow_job=workflow_job_unit)
|
||||
mock_create.assert_called_once_with(
|
||||
all_parents_must_converge=False,
|
||||
extra_data={},
|
||||
survey_passwords={},
|
||||
char_prompts=wfjt_node_no_prompts.char_prompts,
|
||||
@@ -185,6 +186,7 @@ class TestWorkflowJobCreate:
|
||||
workflow_job=workflow_job_unit
|
||||
)
|
||||
mock_create.assert_called_once_with(
|
||||
all_parents_must_converge=False,
|
||||
extra_data={},
|
||||
survey_passwords={},
|
||||
char_prompts=wfjt_node_with_prompts.char_prompts,
|
||||
|
||||
@@ -19,6 +19,7 @@ class WorkflowNode(object):
|
||||
self.job = job
|
||||
self.do_not_run = do_not_run
|
||||
self.unified_job_template = unified_job_template
|
||||
self.all_parents_must_converge = False
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -94,7 +95,7 @@ class TestDNR():
|
||||
(g, nodes) = workflow_dag_1
|
||||
|
||||
r'''
|
||||
S0
|
||||
0
|
||||
/\
|
||||
S / \
|
||||
/ \
|
||||
@@ -113,7 +114,7 @@ class TestDNR():
|
||||
assert 0 == len(do_not_run_nodes)
|
||||
|
||||
r'''
|
||||
S0
|
||||
0
|
||||
/\
|
||||
S / \
|
||||
/ \
|
||||
@@ -132,6 +133,259 @@ class TestDNR():
|
||||
assert 1 == len(do_not_run_nodes)
|
||||
assert nodes[3] == do_not_run_nodes[0]
|
||||
|
||||
class TestAllWorkflowNodes():
|
||||
# test workflow convergence is functioning as expected
|
||||
@pytest.fixture
|
||||
def simple_all_convergence(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(4)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
|
||||
r'''
|
||||
0
|
||||
/\
|
||||
S / \ S
|
||||
/ \
|
||||
1 2
|
||||
\ /
|
||||
F \ / S
|
||||
\/
|
||||
3
|
||||
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[1], "success_nodes")
|
||||
g.add_edge(nodes[0], nodes[2], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[3], "failure_nodes")
|
||||
g.add_edge(nodes[2], nodes[3], "success_nodes")
|
||||
nodes[3].all_parents_must_converge = True
|
||||
nodes[0].job = Job(status='successful')
|
||||
nodes[1].job = Job(status='failed')
|
||||
nodes[2].job = Job(status='successful')
|
||||
return (g, nodes)
|
||||
|
||||
def test_simple_all_convergence(self, simple_all_convergence):
|
||||
(g, nodes) = simple_all_convergence
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 0 == len(dnr_nodes), "no nodes should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 1 == len(nodes_to_run), "Node 3, and only node 3, should be chosen to run"
|
||||
assert nodes[3] == nodes_to_run[0], "Only node 3 should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_1(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(3)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0
|
||||
|\ F
|
||||
| \
|
||||
S| 1
|
||||
| /
|
||||
|/ A
|
||||
2
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[1], "failure_nodes")
|
||||
g.add_edge(nodes[0], nodes[2], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[2], "always_nodes")
|
||||
nodes[2].all_parents_must_converge = True
|
||||
nodes[0].job = Job(status='successful')
|
||||
return (g, nodes)
|
||||
|
||||
def test_all_converge_edge_case_1(self, workflow_all_converge_1):
|
||||
(g, nodes) = workflow_all_converge_1
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 2 == len(dnr_nodes), "node[1] and node[2] should be marked DNR"
|
||||
assert nodes[1] == dnr_nodes[0], "Node 1 should be marked DNR"
|
||||
assert nodes[2] == dnr_nodes[1], "Node 2 should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "No nodes should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_2(self, wf_node_generator):
|
||||
"""The ordering of _1 and this test, _2, is _slightly_ different.
|
||||
The hope is that topological sorting results in 2 being processed before 3
|
||||
and/or 3 before 2.
|
||||
"""
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(3)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0
|
||||
|\ S
|
||||
| \
|
||||
F| 1
|
||||
| /
|
||||
|/ A
|
||||
2
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[1], "success_nodes")
|
||||
g.add_edge(nodes[0], nodes[2], "failure_nodes")
|
||||
g.add_edge(nodes[1], nodes[2], "always_nodes")
|
||||
nodes[2].all_parents_must_converge = True
|
||||
nodes[0].job = Job(status='successful')
|
||||
return (g, nodes)
|
||||
|
||||
def test_all_converge_edge_case_2(self, workflow_all_converge_2):
|
||||
(g, nodes) = workflow_all_converge_2
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 1 == len(dnr_nodes), "1 and only 1 node should be marked DNR"
|
||||
assert nodes[2] == dnr_nodes[0], "Node 3 should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 1 == len(nodes_to_run), "Node 2, and only node 2, should be chosen to run"
|
||||
assert nodes[1] == nodes_to_run[0], "Only node 2 should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_will_run(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(4)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0 1 2
|
||||
S \ F | / S
|
||||
\ | /
|
||||
\ | /
|
||||
\|/
|
||||
|
|
||||
3
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[3], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[3], "failure_nodes")
|
||||
g.add_edge(nodes[2], nodes[3], "success_nodes")
|
||||
nodes[3].all_parents_must_converge = True
|
||||
|
||||
nodes[0].job = Job(status='successful')
|
||||
nodes[1].job = Job(status='failed')
|
||||
nodes[2].job = Job(status='running')
|
||||
return (g, nodes)
|
||||
|
||||
def test_workflow_all_converge_will_run(self, workflow_all_converge_will_run):
|
||||
(g, nodes) = workflow_all_converge_will_run
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 0 == len(dnr_nodes), "No nodes should get marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "No nodes should run yet"
|
||||
|
||||
nodes[2].job.status = 'successful'
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 1 == len(nodes_to_run), "1 and only 1 node should want to run"
|
||||
assert nodes[3] == nodes_to_run[0], "Convergence node should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_dnr(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(4)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0 1 2
|
||||
S \ F | / F
|
||||
\ | /
|
||||
\ | /
|
||||
\|/
|
||||
|
|
||||
3
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[3], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[3], "failure_nodes")
|
||||
g.add_edge(nodes[2], nodes[3], "failure_nodes")
|
||||
nodes[3].all_parents_must_converge = True
|
||||
|
||||
nodes[0].job = Job(status='successful')
|
||||
nodes[1].job = Job(status='running')
|
||||
nodes[2].job = Job(status='failed')
|
||||
return (g, nodes)
|
||||
|
||||
def test_workflow_all_converge_while_parent_runs(self, workflow_all_converge_dnr):
|
||||
(g, nodes) = workflow_all_converge_dnr
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 0 == len(dnr_nodes), "No nodes should get marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "No nodes should run yet"
|
||||
|
||||
def test_workflow_all_converge_with_incorrect_parent(self, workflow_all_converge_dnr):
|
||||
# Another tick of the scheduler
|
||||
(g, nodes) = workflow_all_converge_dnr
|
||||
nodes[1].job.status = 'successful'
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 1 == len(dnr_nodes), "1 and only 1 node should be marked DNR"
|
||||
assert nodes[3] == dnr_nodes[0], "Convergence node should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "Convergence node should NOT be chosen to run because it is DNR"
|
||||
|
||||
def test_workflow_all_converge_runs(self, workflow_all_converge_dnr):
|
||||
# Trick the scheduler again to make sure the convergence node actually runs
|
||||
(g, nodes) = workflow_all_converge_dnr
|
||||
nodes[1].job.status = 'failed'
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 0 == len(dnr_nodes), "No nodes should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 1 == len(nodes_to_run), "Convergence node should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_deep_dnr_tree(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(7)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0 1 2
|
||||
\ | /
|
||||
S \ S| / F
|
||||
\ | /
|
||||
\|/
|
||||
|
|
||||
3
|
||||
/\
|
||||
S / \ S
|
||||
/ \
|
||||
4| | 5
|
||||
\ /
|
||||
S \ / S
|
||||
\/
|
||||
6
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[3], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[3], "success_nodes")
|
||||
g.add_edge(nodes[2], nodes[3], "failure_nodes")
|
||||
g.add_edge(nodes[3], nodes[4], "success_nodes")
|
||||
g.add_edge(nodes[3], nodes[5], "success_nodes")
|
||||
g.add_edge(nodes[4], nodes[6], "success_nodes")
|
||||
g.add_edge(nodes[5], nodes[6], "success_nodes")
|
||||
nodes[3].all_parents_must_converge = True
|
||||
nodes[4].all_parents_must_converge = True
|
||||
nodes[5].all_parents_must_converge = True
|
||||
nodes[6].all_parents_must_converge = True
|
||||
|
||||
nodes[0].job = Job(status='successful')
|
||||
nodes[1].job = Job(status='successful')
|
||||
nodes[2].job = Job(status='successful')
|
||||
return (g, nodes)
|
||||
|
||||
def test_workflow_all_converge_deep_dnr_tree(self, workflow_all_converge_deep_dnr_tree):
|
||||
(g, nodes) = workflow_all_converge_deep_dnr_tree
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
|
||||
assert 4 == len(dnr_nodes), "All nodes w/ no jobs should be marked DNR"
|
||||
assert nodes[3] in dnr_nodes
|
||||
assert nodes[4] in dnr_nodes
|
||||
assert nodes[5] in dnr_nodes
|
||||
assert nodes[6] in dnr_nodes
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "All non-run nodes should be DNR and NOT candidates to run"
|
||||
|
||||
|
||||
class TestIsWorkflowDone():
|
||||
@pytest.fixture
|
||||
|
||||
@@ -197,36 +197,6 @@ def test_change_jt_sensitive_data(job_template_with_ids, mocker, user_unit):
|
||||
})
|
||||
|
||||
|
||||
def test_jt_add_scan_job_check(job_template_with_ids, user_unit):
|
||||
"Assure that permissions to add scan jobs work correctly"
|
||||
|
||||
access = JobTemplateAccess(user_unit)
|
||||
project = job_template_with_ids.project
|
||||
inventory = job_template_with_ids.inventory
|
||||
project.use_role = Role()
|
||||
inventory.use_role = Role()
|
||||
organization = Organization(name='test-org')
|
||||
inventory.organization = organization
|
||||
organization.admin_role = Role()
|
||||
|
||||
def mock_get_object(Class, **kwargs):
|
||||
if Class == Project:
|
||||
return project
|
||||
elif Class == Inventory:
|
||||
return inventory
|
||||
else:
|
||||
raise Exception('Item requested has not been mocked')
|
||||
|
||||
|
||||
with mock.patch('awx.main.models.rbac.Role.__contains__', return_value=True):
|
||||
with mock.patch('awx.main.access.get_object_or_400', mock_get_object):
|
||||
assert access.can_add({
|
||||
'project': project.pk,
|
||||
'inventory': inventory.pk,
|
||||
'job_type': 'scan'
|
||||
})
|
||||
|
||||
|
||||
def mock_raise_none(self, add_host=False, feature=None, check_expiration=True):
|
||||
return None
|
||||
|
||||
|
||||
@@ -2146,7 +2146,14 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
inventory_update.source_vars = '{"satellite6_group_patterns": "[a,b,c]", "satellite6_group_prefix": "hey_", "satellite6_want_hostcollections": True}'
|
||||
inventory_update.source_vars = {
|
||||
'satellite6_group_patterns': '[a,b,c]',
|
||||
'satellite6_group_prefix': 'hey_',
|
||||
'satellite6_want_hostcollections': True,
|
||||
'satellite6_want_ansible_ssh_host': True,
|
||||
'satellite6_rich_params': True,
|
||||
'satellite6_want_facts': False
|
||||
}
|
||||
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
@@ -2159,6 +2166,9 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
assert config.get('ansible', 'group_patterns') == '[a,b,c]'
|
||||
assert config.get('ansible', 'group_prefix') == 'hey_'
|
||||
assert config.get('ansible', 'want_hostcollections') == 'True'
|
||||
assert config.get('ansible', 'want_ansible_ssh_host') == 'True'
|
||||
assert config.get('ansible', 'rich_params') == 'True'
|
||||
assert config.get('ansible', 'want_facts') == 'False'
|
||||
|
||||
def test_cloudforms_source(self, inventory_update, private_data_dir, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
|
||||
@@ -79,8 +79,8 @@ class mockHost:
|
||||
@mock.patch('awx.main.utils.filters.get_model', return_value=mockHost())
|
||||
class TestSmartFilterQueryFromString():
|
||||
@mock.patch(
|
||||
'awx.api.filters.get_field_from_path',
|
||||
lambda model, path: (model, path) # disable field filtering, because a__b isn't a real Host field
|
||||
'awx.api.filters.get_fields_from_path',
|
||||
lambda model, path: ([model], path) # disable field filtering, because a__b isn't a real Host field
|
||||
)
|
||||
@pytest.mark.parametrize("filter_string,q_expected", [
|
||||
('facts__facts__blank=""', Q(**{u"facts__facts__blank": u""})),
|
||||
|
||||
@@ -107,6 +107,17 @@ class LogstashFormatterBase(logging.Formatter):
|
||||
|
||||
class LogstashFormatter(LogstashFormatterBase):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.cluster_host_id = settings.CLUSTER_HOST_ID
|
||||
self.tower_uuid = None
|
||||
uuid = (
|
||||
getattr(settings, 'LOG_AGGREGATOR_TOWER_UUID', None) or
|
||||
getattr(settings, 'INSTALL_UUID', None)
|
||||
)
|
||||
if uuid:
|
||||
self.tower_uuid = uuid
|
||||
super(LogstashFormatter, self).__init__(*args, **kwargs)
|
||||
|
||||
def reformat_data_for_log(self, raw_data, kind=None):
|
||||
'''
|
||||
Process dictionaries from various contexts (job events, activity stream
|
||||
@@ -128,37 +139,6 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
data = json.loads(data)
|
||||
data_for_log = {}
|
||||
|
||||
def index_by_name(alist):
|
||||
"""Takes a list of dictionaries with `name` as a key in each dict
|
||||
and returns a dictionary indexed by those names"""
|
||||
adict = {}
|
||||
for item in alist:
|
||||
subdict = copy(item)
|
||||
if 'name' in subdict:
|
||||
name = subdict.get('name', None)
|
||||
elif 'path' in subdict:
|
||||
name = subdict.get('path', None)
|
||||
if name:
|
||||
# Logstash v2 can not accept '.' in a name
|
||||
name = name.replace('.', '_')
|
||||
adict[name] = subdict
|
||||
return adict
|
||||
|
||||
def convert_to_type(t, val):
|
||||
if t is float:
|
||||
val = val[:-1] if val.endswith('s') else val
|
||||
try:
|
||||
return float(val)
|
||||
except ValueError:
|
||||
return val
|
||||
elif t is int:
|
||||
try:
|
||||
return int(val)
|
||||
except ValueError:
|
||||
return val
|
||||
elif t is str:
|
||||
return val
|
||||
|
||||
if kind == 'job_events':
|
||||
job_event = raw_data['python_objects']['job_event']
|
||||
for field_object in job_event._meta.fields:
|
||||
@@ -198,6 +178,21 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
data_for_log['host_name'] = raw_data['host_name']
|
||||
data_for_log['job_id'] = raw_data['job_id']
|
||||
elif kind == 'performance':
|
||||
def convert_to_type(t, val):
|
||||
if t is float:
|
||||
val = val[:-1] if val.endswith('s') else val
|
||||
try:
|
||||
return float(val)
|
||||
except ValueError:
|
||||
return val
|
||||
elif t is int:
|
||||
try:
|
||||
return int(val)
|
||||
except ValueError:
|
||||
return val
|
||||
elif t is str:
|
||||
return val
|
||||
|
||||
request = raw_data['python_objects']['request']
|
||||
response = raw_data['python_objects']['response']
|
||||
|
||||
@@ -231,21 +226,8 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
log_kind = record.name[len('awx.analytics.'):]
|
||||
fields = self.reformat_data_for_log(fields, kind=log_kind)
|
||||
# General AWX metadata
|
||||
for log_name, setting_name in [
|
||||
('type', 'LOG_AGGREGATOR_TYPE'),
|
||||
('cluster_host_id', 'CLUSTER_HOST_ID'),
|
||||
('tower_uuid', 'LOG_AGGREGATOR_TOWER_UUID')]:
|
||||
if hasattr(settings, setting_name):
|
||||
fields[log_name] = getattr(settings, setting_name, None)
|
||||
elif log_name == 'type':
|
||||
fields[log_name] = 'other'
|
||||
|
||||
uuid = (
|
||||
getattr(settings, 'LOG_AGGREGATOR_TOWER_UUID', None) or
|
||||
getattr(settings, 'INSTALL_UUID', None)
|
||||
)
|
||||
if uuid:
|
||||
fields['tower_uuid'] = uuid
|
||||
fields['cluster_host_id'] = self.cluster_host_id
|
||||
fields['tower_uuid'] = self.tower_uuid
|
||||
return fields
|
||||
|
||||
def format(self, record):
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
# Python
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
import requests
|
||||
import time
|
||||
import threading
|
||||
@@ -18,6 +19,7 @@ from django.conf import settings
|
||||
|
||||
# requests futures, a dependency used by these handlers
|
||||
from requests_futures.sessions import FuturesSession
|
||||
import cachetools
|
||||
|
||||
# AWX
|
||||
from awx.main.utils.formatters import LogstashFormatter
|
||||
@@ -273,6 +275,16 @@ HANDLER_MAPPING = {
|
||||
}
|
||||
|
||||
|
||||
TTLCache = cachetools.TTLCache
|
||||
|
||||
if 'py.test' in os.environ.get('_', ''):
|
||||
# don't cache settings in unit tests
|
||||
class TTLCache(TTLCache):
|
||||
|
||||
def __getitem__(self, item):
|
||||
raise KeyError()
|
||||
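Why raising KeyError in __getitem__ disables caching: cachetools.cached() consults the cache first and falls back to calling the wrapped function on a miss, so a cache that always "misses" forces re-evaluation on every call. A minimal standalone sketch (illustrative names only):

import time
import cachetools

class NeverCache(cachetools.TTLCache):
    def __getitem__(self, key):
        raise KeyError(key)   # behave as if nothing is ever cached

@cachetools.cached(cache=cachetools.TTLCache(maxsize=1, ttl=3), key=lambda *a, **kw: 'k')
def cached_now():
    return time.time()        # recomputed at most every 3 seconds

@cachetools.cached(cache=NeverCache(maxsize=1, ttl=3), key=lambda *a, **kw: 'k')
def uncached_now():
    return time.time()        # recomputed on every call, as in the py.test override above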
|
||||
|
||||
class AWXProxyHandler(logging.Handler):
|
||||
'''
|
||||
Handler specific to the AWX external logging feature
|
||||
@@ -316,6 +328,7 @@ class AWXProxyHandler(logging.Handler):
|
||||
def get_handler_class(self, protocol):
|
||||
return HANDLER_MAPPING.get(protocol, AWXNullHandler)
|
||||
|
||||
@cachetools.cached(cache=TTLCache(maxsize=1, ttl=3), key=lambda *args, **kw: 'get_handler')
|
||||
def get_handler(self, custom_settings=None, force_create=False):
|
||||
new_kwargs = {}
|
||||
use_settings = custom_settings or settings
|
||||
@@ -342,10 +355,14 @@ class AWXProxyHandler(logging.Handler):
|
||||
self._handler.setFormatter(self.formatter)
|
||||
return self._handler
|
||||
|
||||
@cachetools.cached(cache=TTLCache(maxsize=1, ttl=3), key=lambda *args, **kw: 'should_audit')
|
||||
def should_audit(self):
|
||||
return settings.LOG_AGGREGATOR_AUDIT
|
||||
|
||||
def emit(self, record):
|
||||
if AWXProxyHandler.thread_local.enabled:
|
||||
actual_handler = self.get_handler()
|
||||
if settings.LOG_AGGREGATOR_AUDIT:
|
||||
if self.should_audit():
|
||||
self.auditor.setLevel(settings.LOG_AGGREGATOR_LEVEL)
|
||||
self.auditor.emit(record)
|
||||
return actual_handler.emit(record)
|
||||
|
||||
@@ -366,6 +366,7 @@ class VMWareInventory(object):
|
||||
def _get_instances(self, inkwargs):
|
||||
''' Make API calls '''
|
||||
instances = []
|
||||
si = None
|
||||
try:
|
||||
si = SmartConnect(**inkwargs)
|
||||
except ssl.SSLError as connection_error:
|
||||
|
||||
@@ -5,7 +5,6 @@ import os
|
||||
import re # noqa
|
||||
import sys
|
||||
from datetime import timedelta
|
||||
from celery.schedules import crontab
|
||||
|
||||
# global settings
|
||||
from django.conf import global_settings
|
||||
@@ -435,13 +434,9 @@ CELERYBEAT_SCHEDULE = {
|
||||
'schedule': timedelta(seconds=60),
|
||||
'options': {'expires': 50,}
|
||||
},
|
||||
'purge_stdout_files': {
|
||||
'task': 'awx.main.tasks.purge_old_stdout_files',
|
||||
'schedule': timedelta(days=7)
|
||||
},
|
||||
'gather_analytics': {
|
||||
'task': 'awx.main.tasks.gather_analytics',
|
||||
'schedule': crontab(hour='*/6')
|
||||
'schedule': timedelta(minutes=5)
|
||||
},
|
||||
'task_manager': {
|
||||
'task': 'awx.main.scheduler.tasks.run_task_manager',
|
||||
@@ -455,7 +450,6 @@ CELERYBEAT_SCHEDULE = {
|
||||
},
|
||||
# 'isolated_heartbeat': set up at the end of production.py and development.py
|
||||
}
|
||||
AWX_INCONSISTENT_TASK_INTERVAL = 60 * 3
|
||||
|
||||
AWX_CELERY_QUEUES_STATIC = [
|
||||
CELERY_DEFAULT_QUEUE,
|
||||
@@ -665,6 +659,9 @@ PENDO_TRACKING_STATE = "off"
|
||||
# Note: This setting may be overridden by database settings.
|
||||
INSIGHTS_TRACKING_STATE = False
|
||||
|
||||
# Last gather date for Analytics
|
||||
AUTOMATION_ANALYTICS_LAST_GATHER = None
|
||||
AUTOMATION_ANALYTICS_INTERVAL = 14400
|
||||
|
||||
# Default list of modules allowed for ad hoc commands.
|
||||
# Note: This setting may be overridden by database settings.
|
||||
@@ -1142,8 +1139,7 @@ LOGGING = {
|
||||
'handlers': ['null']
|
||||
},
|
||||
'awx.main.commands.run_callback_receiver': {
|
||||
'handlers': ['callback_receiver'],
|
||||
'level': 'INFO' # in debug mode, includes full callback data
|
||||
'handlers': ['callback_receiver'], # level handled by dynamic_level_filter
|
||||
},
|
||||
'awx.main.dispatch': {
|
||||
'handlers': ['dispatcher'],
|
||||
@@ -1221,6 +1217,9 @@ AWX_REQUEST_PROFILE = False
|
||||
#
|
||||
AWX_REQUEST_PROFILE_WITH_DOT = False
|
||||
|
||||
# Allow profiling callback workers via SIGUSR1
|
||||
AWX_CALLBACK_PROFILE = False
|
||||
|
||||
# Delete temporary directories created to store playbook run-time
|
||||
AWX_CLEANUP_PATHS = True
|
||||
|
||||
|
||||
@@ -179,3 +179,4 @@ else:
|
||||
os.environ['SDB_NOTIFY_HOST'] = os.popen('ip route').read().split(' ')[2]
|
||||
|
||||
WEBSOCKET_ORIGIN_WHITELIST = ['https://localhost:8043', 'https://localhost:3000']
|
||||
AWX_CALLBACK_PROFILE = True
|
||||
|
||||
@@ -153,7 +153,10 @@ function TemplatesStrings (BaseString) {
|
||||
TIMED_OUT: t.s('APPROVAL TIMED OUT'),
|
||||
TIMEOUT: t.s('Timeout'),
|
||||
APPROVED: t.s('APPROVED'),
|
||||
DENIED: t.s('DENIED')
|
||||
DENIED: t.s('DENIED'),
|
||||
CONVERGENCE: t.s('Convergence'),
|
||||
ALL: t.s('All'),
|
||||
ANY: t.s('Any'),
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -598,6 +598,11 @@ table, tbody {
|
||||
}
|
||||
|
||||
.List-staticColumnLayout--groups {
|
||||
display: grid;
|
||||
grid-template-columns: @at-space @at-space-5x auto;
|
||||
}
|
||||
|
||||
.List-staticColumnLayout--hostNestedGroups {
|
||||
display: grid;
|
||||
grid-template-columns: @at-space @at-space-5x @at-space-5x auto;
|
||||
}
|
||||
|
||||
@@ -58,6 +58,10 @@ export default ['i18n', function(i18n) {
|
||||
type: 'text',
|
||||
reset: 'ANSIBLE_FACT_CACHE_TIMEOUT',
|
||||
},
|
||||
MAX_FORKS: {
|
||||
type: 'text',
|
||||
reset: 'MAX_FORKS',
|
||||
},
|
||||
PROJECT_UPDATE_VVV: {
|
||||
type: 'toggleSwitch',
|
||||
},
|
||||
|
||||
@@ -79,6 +79,12 @@ export default ['i18n', function(i18n) {
|
||||
AUTOMATION_ANALYTICS_URL: {
|
||||
type: 'text',
|
||||
reset: 'AUTOMATION_ANALYTICS_URL',
|
||||
},
|
||||
AUTOMATION_ANALYTICS_GATHER_INTERVAL: {
|
||||
type: 'number',
|
||||
integer: true,
|
||||
min: 1800,
|
||||
reset: 'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
|
||||
}
|
||||
},
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ export default
|
||||
label: i18n._("Hosts")
|
||||
},
|
||||
{
|
||||
url: "/#/hosts?host_search=has_active_failures:true",
|
||||
url: "/#/hosts?host_search=last_job_host_summary__failed:true",
|
||||
number: scope.data.hosts.failed,
|
||||
label: i18n._("Failed Hosts"),
|
||||
isFailureCount: true
|
||||
|
||||
@@ -5,11 +5,9 @@
|
||||
*************************************************/
|
||||
export default
|
||||
['$scope', '$rootScope', '$state', '$stateParams', 'HostsRelatedGroupsList', 'InventoryUpdate',
|
||||
'CancelSourceUpdate', 'rbacUiControlService', 'GetBasePath',
|
||||
'GetHostsStatusMsg', 'Dataset', 'Find', 'QuerySet', 'inventoryData', 'host', 'GroupsService',
|
||||
'CancelSourceUpdate', 'rbacUiControlService', 'GetBasePath', 'Dataset', 'Find', 'QuerySet', 'inventoryData', 'host', 'GroupsService',
|
||||
function($scope, $rootScope, $state, $stateParams, HostsRelatedGroupsList, InventoryUpdate,
|
||||
CancelSourceUpdate, rbacUiControlService, GetBasePath,
|
||||
GetHostsStatusMsg, Dataset, Find, qs, inventoryData, host, GroupsService){
|
||||
CancelSourceUpdate, rbacUiControlService, GetBasePath, Dataset, Find, qs, inventoryData, host, GroupsService){
|
||||
|
||||
let list = HostsRelatedGroupsList;
|
||||
|
||||
@@ -29,27 +27,6 @@
|
||||
$scope[`${list.iterator}_dataset`] = Dataset.data;
|
||||
$scope[list.name] = $scope[`${list.iterator}_dataset`].results;
|
||||
|
||||
$scope.$watchCollection(list.name, function(){
|
||||
_.forEach($scope[list.name], buildStatusIndicators);
|
||||
});
|
||||
}
|
||||
|
||||
function buildStatusIndicators(group){
|
||||
if (group === undefined || group === null) {
|
||||
group = {};
|
||||
}
|
||||
|
||||
let hosts_status;
|
||||
|
||||
hosts_status = GetHostsStatusMsg({
|
||||
active_failures: group.hosts_with_active_failures,
|
||||
total_hosts: group.total_hosts,
|
||||
inventory_id: $scope.inventory_id,
|
||||
group_id: group.id
|
||||
});
|
||||
_.assign(group,
|
||||
{hosts_status_tip: hosts_status.tooltip},
|
||||
{hosts_status_class: hosts_status.class});
|
||||
}
|
||||
|
||||
$scope.editGroup = function(id){
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
export default
|
||||
['i18n', function(i18n) {
|
||||
return function(params) {
|
||||
var active_failures = params.active_failures,
|
||||
total_hosts = params.total_hosts,
|
||||
tip, failures, html_class;
|
||||
|
||||
// Return values for use on host status indicator
|
||||
|
||||
if (active_failures > 0) {
|
||||
tip = total_hosts + ((total_hosts === 1) ? ' host' : ' hosts') + '. ' + active_failures + i18n._(' with failed jobs.');
|
||||
html_class = 'error';
|
||||
failures = true;
|
||||
} else {
|
||||
failures = false;
|
||||
if (total_hosts === 0) {
|
||||
// no hosts
|
||||
tip = i18n._("Contains 0 hosts.");
|
||||
html_class = 'none';
|
||||
} else {
|
||||
// many hosts with 0 failures
|
||||
tip = total_hosts + ((total_hosts === 1) ? ' host' : ' hosts') + '. ' + i18n._('No job failures');
|
||||
html_class = 'success';
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
tooltip: tip,
|
||||
failures: failures,
|
||||
'class': html_class
|
||||
};
|
||||
};
|
||||
}];
|
||||
@@ -18,22 +18,6 @@
|
||||
basePath: 'api/v2/inventories/{{$stateParams.inventory_id}}/groups/',
|
||||
layoutClass: 'List-staticColumnLayout--groups',
|
||||
actionHolderClass: 'List-actionHolder List-actionHolder--rootGroups',
|
||||
staticColumns: [
|
||||
{
|
||||
field: 'failed_hosts',
|
||||
content: {
|
||||
label: '',
|
||||
nosort: true,
|
||||
mode: 'all',
|
||||
iconOnly: true,
|
||||
awToolTip: "{{ group.hosts_status_tip }}",
|
||||
dataPlacement: "top",
|
||||
icon: "{{ 'fa icon-job-' + group.hosts_status_class }}",
|
||||
columnClass: 'status-column'
|
||||
}
|
||||
}
|
||||
],
|
||||
|
||||
fields: {
|
||||
name: {
|
||||
label: i18n._('Groups'),
|
||||
|
||||
@@ -5,13 +5,11 @@
|
||||
*************************************************/
|
||||
export default
|
||||
['$scope', '$state', '$stateParams', 'listDefinition', 'InventoryUpdate',
|
||||
'GroupsService', 'CancelSourceUpdate',
|
||||
'GetHostsStatusMsg', 'Dataset', 'inventoryData', 'canAdd',
|
||||
'InventoryHostsStrings', '$transitions',
|
||||
'GroupsService', 'CancelSourceUpdate', 'Dataset', 'inventoryData', 'canAdd',
|
||||
'InventoryHostsStrings', '$transitions', 'GetBasePath', 'Rest',
|
||||
function($scope, $state, $stateParams, listDefinition, InventoryUpdate,
|
||||
GroupsService, CancelSourceUpdate,
|
||||
GetHostsStatusMsg, Dataset, inventoryData, canAdd,
|
||||
InventoryHostsStrings, $transitions){
|
||||
GroupsService, CancelSourceUpdate, Dataset, inventoryData, canAdd,
|
||||
InventoryHostsStrings, $transitions, GetBasePath, Rest){
|
||||
|
||||
let list = listDefinition;
|
||||
|
||||
@@ -70,18 +68,6 @@
|
||||
group.isSelected = true;
|
||||
}
|
||||
});
|
||||
|
||||
let hosts_status;
|
||||
|
||||
hosts_status = GetHostsStatusMsg({
|
||||
active_failures: group.hosts_with_active_failures,
|
||||
total_hosts: group.total_hosts,
|
||||
inventory_id: $scope.inventory_id,
|
||||
group_id: group.id
|
||||
});
|
||||
_.assign(group,
|
||||
{hosts_status_tip: hosts_status.tooltip},
|
||||
{hosts_status_class: hosts_status.class});
|
||||
}
|
||||
|
||||
$scope.createGroup = function(){
|
||||
@@ -102,35 +88,51 @@
|
||||
$state.go('inventories.edit.groups.edit.nested_groups', {group_id: id});
|
||||
};
|
||||
$scope.deleteGroup = function(group){
|
||||
$scope.toDelete = {};
|
||||
$scope.strings.deleteModal = {};
|
||||
angular.extend($scope.toDelete, group);
|
||||
if($scope.toDelete.total_groups === 0 && $scope.toDelete.total_hosts === 0) {
|
||||
// This group doesn't have any child groups or hosts - the user is just trying to delete
|
||||
// the group
|
||||
$scope.deleteOption = "delete";
|
||||
}
|
||||
else {
|
||||
$scope.strings.deleteModal.group = InventoryHostsStrings.get('deletegroup.GROUP', $scope.toDelete.total_groups);
|
||||
$scope.strings.deleteModal.host = InventoryHostsStrings.get('deletegroup.HOST', $scope.toDelete.total_hosts);
|
||||
const promises = [];
|
||||
|
||||
Rest.setUrl(group.related.hosts);
|
||||
promises.push(Rest.get());
|
||||
|
||||
Rest.setUrl(group.related.children);
|
||||
promises.push(Rest.get());
|
||||
|
||||
Promise.all(promises)
|
||||
.then(([hostResponse, groupResponse]) => {
|
||||
$scope.toDelete = {};
|
||||
$scope.strings.deleteModal = {};
|
||||
$scope.toDelete.hostCount = _.get(hostResponse, ['data', 'count'], 0);
|
||||
$scope.toDelete.groupCount = _.get(groupResponse, ['data', 'count'], 0);
|
||||
angular.extend($scope.toDelete, group);
|
||||
|
||||
if($scope.toDelete.groupCount === 0 && $scope.toDelete.hostCount === 0) {
|
||||
// This group doesn't have any child groups or hosts - the user is just trying to delete
|
||||
// the group
|
||||
$scope.deleteOption = "delete";
|
||||
}
|
||||
else {
|
||||
$scope.strings.deleteModal.group = InventoryHostsStrings.get('deletegroup.GROUP', $scope.toDelete.groupCount);
|
||||
$scope.strings.deleteModal.host = InventoryHostsStrings.get('deletegroup.HOST', $scope.toDelete.hostCount);
|
||||
|
||||
if($scope.toDelete.groupCount === 0 || $scope.toDelete.hostCount === 0) {
|
||||
if($scope.toDelete.groupCount === 0) {
$scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_HOST', $scope.toDelete.hostCount);
$scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_HOST', $scope.toDelete.hostCount);
}
else if($scope.toDelete.hostCount === 0) {
$scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUP', $scope.toDelete.groupCount);
$scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUP', $scope.toDelete.groupCount);
}
}
else {
$scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.groupCount, hosts: $scope.toDelete.hostCount});
$scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.groupCount, hosts: $scope.toDelete.hostCount});
}
}

$('#group-delete-modal').modal('show');
});

if($scope.toDelete.total_groups === 0 || $scope.toDelete.total_hosts === 0) {
if($scope.toDelete.total_groups === 0) {
$scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_HOST', $scope.toDelete.total_hosts);
$scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_HOST', $scope.toDelete.total_hosts);
}
else if($scope.toDelete.total_hosts === 0) {
$scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUP', $scope.toDelete.total_groups);
$scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUP', $scope.toDelete.total_groups);
}
}
else {
$scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.total_groups, hosts: $scope.toDelete.total_hosts});
$scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.total_groups, hosts: $scope.toDelete.total_hosts});
}
}

$('#group-delete-modal').modal('show');
};
$scope.confirmDelete = function(){
let reloadListStateParams = null;

@@ -18,12 +18,10 @@
</div>
</div>
<div class="Modal-body">
<div ng-show="toDelete.total_groups > 0 || toDelete.total_hosts > 0">
<div ng-show="toDelete.groupCount > 0 || toDelete.hostCount > 0">
<div>
<p class="Prompt-bodyQuery">Deleting group <em>{{ toDelete.name }}</em>.
<span ng-show="toDelete.total_groups > 0 && toDelete.total_hosts > 0"> <translate>This group contains</translate> {{ toDelete.total_groups }} {{:: strings.deleteModal.group }} <translate>and</translate> {{ toDelete.total_hosts }} {{:: strings.deleteModal.host }}. </span>
<span ng-show="toDelete.total_groups == 0 && toDelete.total_hosts > 0"> <translate>This group contains</translate> {{ toDelete.total_hosts }} {{:: strings.deleteModal.host }}. </span>
<span ng-show="toDelete.total_groups > 0 && toDelete.total_hosts == 0"> <translate>This group contains</translate> {{ toDelete.total_groups }} {{:: strings.deleteModal.group }}. </span>
<span> <translate>This group contains at least one group or host</translate>.</span>
<translate>Delete or promote the group's children?</translate></p>
<div style="margin: 15px auto;">

@@ -43,13 +41,13 @@
</div>

</div>
<div ng-show="toDelete.total_groups == 0 && toDelete.total_hosts == 0">
<div ng-show="toDelete.groupCount == 0 && toDelete.hostCount == 0">
<div class="Prompt-bodyQuery" translate>Are you sure you want to permanently delete the group below from the inventory?</div>
<div class="Prompt-bodyTarget">{{ toDelete.name }}</div>
</div>
<div class="Modal-footer">
<a href="#" data-target="#group-delete-modal" data-dismiss="modal" id="prompt_cancel_btn_groups_list" class="btn Modal-defaultButton Modal-footerButton" translate>CANCEL</a>
<a href="" ng-class="promptActionBtnClass" ng-click="confirmDelete()" id="prompt_action_btn_groups_list" ng-disabled="!deleteOption && (toDelete.total_groups > 0 || toDelete.total_hosts > 0)" class="btn Modal-footerButton Modal-errorButton" translate>DELETE</a>
<a href="" ng-class="promptActionBtnClass" ng-click="confirmDelete()" id="prompt_action_btn_groups_list" ng-disabled="!deleteOption && (toDelete.groupCount > 0 || toDelete.hostCount > 0)" class="btn Modal-footerButton Modal-errorButton" translate>DELETE</a>
</div>
</div>
</div>

@@ -9,7 +9,6 @@ import groupAdd from './add/main';
import groupEdit from './edit/main';
import groupFormDefinition from './groups.form';
import groupListDefinition from './groups.list';
import GetHostsStatusMsg from './factories/get-hosts-status-msg.factory';
import nestedGroups from './related/nested-groups/main';
import nestedHosts from './related/nested-hosts/main';

@@ -22,5 +21,4 @@ export default
nestedHosts.name
])
.factory('GroupForm', groupFormDefinition)
.factory('GroupList', groupListDefinition)
.factory('GetHostsStatusMsg', GetHostsStatusMsg);
.factory('GroupList', groupListDefinition);

@@ -6,11 +6,11 @@
export default
['$scope', '$rootScope', '$state', '$stateParams', 'NestedGroupListDefinition', 'InventoryUpdate',
'GroupsService', 'CancelSourceUpdate', 'rbacUiControlService', 'GetBasePath',
'GetHostsStatusMsg', 'Dataset', 'Find', 'QuerySet', 'inventoryData', 'canAdd', 'groupData', 'ProcessErrors',
'Dataset', 'Find', 'QuerySet', 'inventoryData', 'canAdd', 'groupData', 'ProcessErrors',
'$transitions',
function($scope, $rootScope, $state, $stateParams, NestedGroupListDefinition, InventoryUpdate,
GroupsService, CancelSourceUpdate, rbacUiControlService, GetBasePath,
GetHostsStatusMsg, Dataset, Find, qs, inventoryData, canAdd, groupData, ProcessErrors,
Dataset, Find, qs, inventoryData, canAdd, groupData, ProcessErrors,
$transitions){

let list = NestedGroupListDefinition;
@@ -35,10 +35,6 @@

$scope.inventory_id = $stateParams.inventory_id;

$scope.$watchCollection(list.name, function(){
_.forEach($scope[list.name], processRow);
});

$scope.$on('selectedOrDeselected', function(e, value) {
let item = value.value;

@@ -57,30 +53,6 @@

}

function processRow(group){
if (group === undefined || group === null) {
group = {};
}

angular.forEach($scope.groupsSelected, function(selectedGroup){
if(selectedGroup.id === group.id) {
group.isSelected = true;
}
});

let hosts_status;

hosts_status = GetHostsStatusMsg({
active_failures: group.hosts_with_active_failures,
total_hosts: group.total_hosts,
inventory_id: $scope.inventory_id,
group_id: group.id
});
_.assign(group,
{hosts_status_tip: hosts_status.tooltip},
{hosts_status_class: hosts_status.class});
}

$scope.disassociateGroup = function(group){
$scope.toDisassociate = {};
angular.extend($scope.toDisassociate, group);

@@ -17,22 +17,6 @@
trackBy: 'nested_group.id',
basePath: 'api/v2/groups/{{$stateParams.group_id}}/children/',
layoutClass: 'List-staticColumnLayout--groups',
staticColumns: [
{
field: 'failed_hosts',
content: {
label: '',
nosort: true,
mode: 'all',
iconOnly: true,
awToolTip: "{{ nested_group.hosts_status_tip }}",
dataPlacement: "top",
icon: "{{ 'fa icon-job-' + nested_group.hosts_status_class }}",
columnClass: 'status-column'
}
}
],

fields: {
name: {
label: i18n._('Groups'),

@@ -6,10 +6,10 @@
export default
['$scope', '$rootScope', '$state', '$stateParams', 'HostNestedGroupListDefinition', 'InventoryUpdate',
'GroupsService', 'CancelSourceUpdate', 'rbacUiControlService', 'GetBasePath',
'GetHostsStatusMsg', 'Dataset', 'Find', 'QuerySet', 'inventoryData', 'canAdd', 'ProcessErrors', 'host',
'Dataset', 'Find', 'QuerySet', 'inventoryData', 'canAdd', 'ProcessErrors', 'host',
function($scope, $rootScope, $state, $stateParams, HostNestedGroupListDefinition, InventoryUpdate,
GroupsService, CancelSourceUpdate, rbacUiControlService, GetBasePath,
GetHostsStatusMsg, Dataset, Find, qs, inventoryData, canAdd, ProcessErrors, host){
Dataset, Find, qs, inventoryData, canAdd, ProcessErrors, host){

let list = HostNestedGroupListDefinition;

@@ -26,10 +26,6 @@
$scope[`${list.iterator}_dataset`] = Dataset.data;
$scope[list.name] = $scope[`${list.iterator}_dataset`].results;

$scope.$watchCollection(list.name, function(){
_.forEach($scope[list.name], buildStatusIndicators);
});

$scope.$on('selectedOrDeselected', function(e, value) {
let item = value.value;

@@ -48,24 +44,6 @@

}

function buildStatusIndicators(group){
if (group === undefined || group === null) {
group = {};
}

let hosts_status;

hosts_status = GetHostsStatusMsg({
active_failures: group.hosts_with_active_failures,
total_hosts: group.total_hosts,
inventory_id: $scope.inventory_id,
group_id: group.id
});
_.assign(group,
{hosts_status_tip: hosts_status.tooltip},
{hosts_status_class: hosts_status.class});
}

$scope.associateGroup = function() {
$state.go('.associate');
};

@@ -16,7 +16,7 @@
multiSelect: true,
trackBy: 'nested_group.id',
basePath: 'api/v2/hosts/{{$stateParams.host_id}}/all_groups/',
layoutClass: 'List-staticColumnLayout--groups',
layoutClass: 'List-staticColumnLayout--hostNestedGroups',
staticColumns: [
{
field: 'failed_hosts',

@@ -18,57 +18,6 @@
</div>
</div>
<div class="Modal-body">
<div ng-show="toDelete.total_groups > 0 || toDelete.total_hosts > 0">
<div>
<p class="Prompt-bodyQuery"><translate>Deleting group</translate> <em>{{ toDelete.name }}</em>.
<span ng-show="toDelete.total_groups > 0 && toDelete.total_hosts > 0"> <translate>This group contains</translate> {{ toDelete.total_groups }} <translate>groups and</translate> {{ toDelete.total_hosts }} <translate>hosts</translate>. </span>
<span ng-show="toDelete.total_groups == 0 && toDelete.total_hosts > 0"> <translate>This group contains</translate> {{ toDelete.total_hosts }} <translate>hosts</translate>. </span>
<span ng-show="toDelete.total_groups > 0 && toDelete.total_hosts == 0"> <translate>This group contains</translate> {{ toDelete.total_groups }} <translate>groups</translate>. </span>
<translate>Delete or promote the group's children?</translate></p>
<div style="margin: 15px auto;">

<div class="radio" ng-show="toDelete.total_groups > 0 && toDelete.total_hosts > 0">
<label>
<input type="radio" ng-model="deleteOption" value="promote"> <translate>Promote groups and hosts</translate>
</label>
</div>
<div class="radio" ng-show="toDelete.total_groups > 0 && toDelete.total_hosts > 0">
<label>
<input type="radio" ng-model="deleteOption" value="delete"> <translate>Delete groups and hosts</translate>
</label>
</div>

<div class="radio" ng-show="toDelete.total_groups > 0 && toDelete.total_hosts == 0">
<label>
<input type="radio" ng-model="deleteOption" value="promote"> <translate>Promote groups</translate>
</label>
</div>
<div class="radio" ng-show="toDelete.total_groups > 0 && toDelete.total_hosts == 0">
<label>
<input type="radio" ng-model="deleteOption" value="delete"> <translate>Delete groups</translate>
</label>
</div>

<div class="radio" ng-show="toDelete.total_groups == 0 && toDelete.total_hosts > 0">
<label>
<input type="radio" ng-model="deleteOption" value="promote"> <translate>Promote hosts</translate>
</label>
</div>
<div class="radio" ng-show="toDelete.total_groups == 0 && toDelete.total_hosts > 0">
<label>
<input type="radio" ng-model="deleteOption" value="delete"> <translate>Delete hosts</translate>
</label>
</div>

</div>

</div>

</div>
<div ng-show="toDelete.total_groups == 0 && toDelete.total_hosts == 0">
<div class="Prompt-bodyQuery" translate>Are you sure you want to permanently delete the inventory source below from the inventory?</div>
<div class="Prompt-bodyTarget">{{ toDelete.name }}</div>
</div>
<div class="Modal-footer">
<a href="#" data-target="#group-delete-modal" data-dismiss="modal" id="prompt_cancel_btn_groups_list" class="btn Modal-defaultButton Modal-footerButton" translate>CANCEL</a>
<a href="" ng-class="promptActionBtnClass" ng-click="confirmDelete()" id="prompt_action_btn_groups_list" class="btn Modal-footerButton Modal-errorButton" translate>DELETE</a>

@@ -233,7 +233,7 @@ angular.module('Utilities', ['RestServices', 'Utilities'])
addApiErrors(form.fields[field], field);
}
}
if (defaultMsg) {
if (!fieldErrors && defaultMsg) {
Alert(defaultMsg.hdr, defaultMsg.msg);
}
} else if (typeof data === 'object' && data !== null) {

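The one-line change in this hunk makes the generic alert a fallback: it fires only when no field-level API error could be attached to the form. A simplified, hypothetical sketch of that control flow is below; reportErrors, apiData, and the apiError property are illustrative stand-ins, not the actual ProcessErrors internals.

// Hypothetical sketch of the guard shown above, assuming `apiData` maps
// field names to server-side error messages.
function reportErrors(form, apiData, defaultMsg, Alert) {
    let fieldErrors = false;
    Object.keys(apiData).forEach((field) => {
        if (form.fields && form.fields[field]) {
            // attach the server-side message to the matching form field
            form.fields[field].apiError = [].concat(apiData[field]).join(' ');
            fieldErrors = true;
        }
    });
    // After the change, the generic modal is shown only when nothing could be
    // mapped onto a specific field.
    if (!fieldErrors && defaultMsg) {
        Alert(defaultMsg.hdr, defaultMsg.msg);
    }
}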
@@ -115,11 +115,20 @@
fill: @default-icon;
}

.WorkflowChart-convergenceTypeRectangle {
fill: @default-icon;
}

.WorkflowChart-nodeTypeLetter {
fill: @default-bg;
font-size: 10px;
}

.WorkflowChart-convergenceTypeLetter {
fill: @default-bg;
font-size: 10px;
}

.WorkflowChart-nodeStatus--running {
fill: @default-icon;
}

@@ -34,6 +34,7 @@ export default ['moment', '$timeout', '$window', '$filter', 'TemplatesStrings',
nodeH = 60,
rootW = startNodeTextWidth + 25,
rootH = 40,
strokeW = 2, // px
startNodeOffsetY = scope.mode === 'details' ? 17 : 10,
maxNodeTextLength = 27,
windowHeight,
@@ -118,6 +119,14 @@ export default ['moment', '$timeout', '$window', '$filter', 'TemplatesStrings',
};

const rounded_rect = (x, y, w, h, r, tl, tr, bl, br) => {
// x, y - position coordinates
// w - width
// h - height
// r - border radius
// tl - round the top-left corner (bool)
// tr - round the top-right corner (bool)
// bl - round the bottom-left corner (bool)
// br - round the bottom-right corner (bool)
let retval;
retval = "M" + (x + r) + "," + y;
retval += "h" + (w - 2*r);
@@ -855,6 +864,9 @@ export default ['moment', '$timeout', '$window', '$filter', 'TemplatesStrings',
.attr("fill", (d) => { return scope.graphState.addLinkSource === d.id ? "#337AB7" : "#D7D7D7"; })
.style("display", (d) => { return scope.graphState.isLinkMode && !d.isInvalidLinkTarget ? null : "none"; });

baseSvg.selectAll(".WorkflowChart-convergenceTypeRectangle")
.style("display", (d) => d.all_parents_must_converge ? null : "none");

// Add new nodes
const nodeEnter = nodes
.enter()
@@ -924,7 +936,7 @@ export default ['moment', '$timeout', '$window', '$filter', 'TemplatesStrings',
return "#D7D7D7";
}
})
.attr('stroke-width', "2px")
.attr('stroke-width', `${strokeW}px`)
.attr("class", (d) => {
let classString = d.id === scope.graphState.nodeBeingAdded ? "WorkflowChart-rect WorkflowChart-isNodeBeingAdded" : "WorkflowChart-rect";
classString += !_.get(d, 'unifiedJobTemplate.name') ? " WorkflowChart-dashedNode" : "";
@@ -980,6 +992,34 @@ export default ['moment', '$timeout', '$window', '$filter', 'TemplatesStrings',
.html(`<span>${TemplatesStrings.get('workflow_maker.APPROVED')}</span>`)
.style("display", (d) => { return d.job && d.job.type === "workflow_approval" && d.job.status === "successful" && !d.job.timed_out ? null : "none"; });

// Build the 'ALL' symbol for all-convergence nodes
const convergenceTypeHeight = nodeH / 5;
const convergenceTypeWidth = nodeW / 5;
const convergenceTypeXCoord = nodeW / 2 - convergenceTypeWidth / 2;
const convergenceTypeYCoord = -convergenceTypeHeight + (strokeW / 2);
const convergenceTypeBorderRadius = 3;

const convergenceRectangle = rounded_rect(
convergenceTypeXCoord,
convergenceTypeYCoord,
convergenceTypeWidth,
convergenceTypeHeight,
convergenceTypeBorderRadius,
true, // round top-left
true, // round top-right
false, // round bottom-left
false // round bottom-right
);
thisNode.append("path")
.attr("d", convergenceRectangle)
.attr("class", "WorkflowChart-convergenceTypeRectangle")
.style("display", (d) => d.all_parents_must_converge ? null : "none");
thisNode.append("text")
.attr("y", ((convergenceTypeYCoord + convergenceTypeHeight) / 2) - Math.min(strokeW, 2))
.attr("x", convergenceTypeXCoord + (convergenceTypeWidth / 4))
.attr("class", "WorkflowChart-convergenceTypeLetter")
.text("ALL");

thisNode.append("circle")
.attr("cy", nodeH)
.attr("r", 10)

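The hunk above truncates the body of rounded_rect after the first two path commands. For reference, a generic SVG path builder with the same signature would look roughly like the sketch below; this is an illustrative reconstruction, and the exact arc commands in the AWX source may differ.

// Hedged reconstruction of a selective-corner rounded-rectangle path builder;
// not the literal AWX implementation.
const roundedRectPath = (x, y, w, h, r, tl, tr, bl, br) => {
    let d = "M" + (x + r) + "," + y;
    d += "h" + (w - 2 * r);
    // top-right corner
    d += tr ? "a" + r + "," + r + " 0 0 1 " + r + "," + r : "h" + r + "v" + r;
    d += "v" + (h - 2 * r);
    // bottom-right corner
    d += br ? "a" + r + "," + r + " 0 0 1 " + -r + "," + r : "v" + r + "h" + -r;
    d += "h" + (2 * r - w);
    // bottom-left corner
    d += bl ? "a" + r + "," + r + " 0 0 1 " + -r + "," + -r : "h" + -r + "v" + -r;
    d += "v" + (2 * r - h);
    // top-left corner
    d += tl ? "a" + r + "," + r + " 0 0 1 " + r + "," + -r : "v" + -r + "h" + r;
    return d + "z";
};

// Used the way the 'ALL' badge above uses it: round only the top corners.
// roundedRectPath(convergenceTypeXCoord, convergenceTypeYCoord,
//                 convergenceTypeWidth, convergenceTypeHeight, 3,
//                 true, true, false, false);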
@@ -28,7 +28,8 @@ export default [function(){

const nodeObj = {
index: nodeIdCounter-1,
id: nodeIdCounter
id: nodeIdCounter,
all_parents_must_converge: node.all_parents_must_converge,
};

if(node.summary_fields.job) {

@@ -106,6 +106,10 @@ export default ['$scope', 'TemplatesService', 'JobTemplateModel', 'PromptService
element: '#workflow_node_edge',
multiple: false
});
CreateSelect2({
element: '#workflow_node_convergence',
multiple: false
});
};

const formatPopOverDetails = (model) => {
@@ -500,6 +504,22 @@ export default ['$scope', 'TemplatesService', 'JobTemplateModel', 'PromptService
type: 'workflow_job_template,job_template'
};

const all_parents_must_converge = _.get(
$scope, ['nodeConfig', 'node', 'all_parents_must_converge'],
_.get($scope, ['nodeConfig', 'node', 'originalNodeObject', 'all_parents_must_converge'], false)
);
$scope.convergenceOptions = [
{
label: $scope.strings.get('workflow_maker.ALL'),
value: true,
},
{
label: $scope.strings.get('workflow_maker.ANY'),
value: false,
},
];
$scope.convergenceChoice = $scope.convergenceOptions.find(({ value }) => value === all_parents_must_converge);

$scope.wf_maker_templates = [];
$scope.wf_maker_template_dataset = {};

@@ -617,7 +637,8 @@ export default ['$scope', 'TemplatesService', 'JobTemplateModel', 'PromptService

$scope.confirmNodeForm = () => {
const nodeFormData = {
edgeType: $scope.edgeType
edgeType: $scope.edgeType,
all_parents_must_converge: $scope.convergenceChoice.value,
};

if ($scope.activeTab === "approval") {

@@ -183,6 +183,24 @@
</select>
</div>
</div>
<div id="workflow_node_checkbox_group" class="form-group Form-formGroup Form-formGroup--singleColumn" >
<label for="edgeType" class="Form-inputLabelContainer">
<span class="Form-requiredAsterisk">*</span>
<span class="Form-inputLabel">{{:: strings.get('workflow_maker.CONVERGENCE') }}</span>
</label>
<div>
<select
id="workflow_node_convergence"
ng-options="v as v.label for v in convergenceOptions track by v.value"
ng-model="convergenceChoice"
class="form-control Form-dropDown"
name="convergenceChoice"
tabindex="-1"
ng-disabled="readOnly"
aria-hidden="true">
</select>
</div>
</div>
<div ng-show="readOnly">
<div
class="WorkflowMaker-readOnlyPromptText"

@@ -92,7 +92,8 @@ export default ['$scope', 'TemplatesService',
limit: null,
diff_mode: null,
verbosity: null,
credential: null
credential: null,
all_parents_must_converge: _.get(node, 'all_parents_must_converge', false)
};

if (_.has(node, 'fullUnifiedJobTemplateObject')) {
@@ -637,9 +638,11 @@ export default ['$scope', 'TemplatesService',
});
}
}
nodeRef[$scope.nodeConfig.nodeId].all_parents_must_converge = nodeFormData.all_parents_must_converge;

$scope.graphState.arrayOfNodesForChart.map( (node) => {
if (node.id === nodeId) {
node.all_parents_must_converge = nodeFormData.all_parents_must_converge;
if (isPauseNode) {
node.unifiedJobTemplate = {
unified_job_type: 'workflow_approval',
@@ -650,7 +653,6 @@ export default ['$scope', 'TemplatesService',
} else {
node.unifiedJobTemplate = selectedTemplate;
}

}
});

4892 awx/ui_next/package-lock.json (generated)
File diff suppressed because it is too large
@@ -18,10 +18,10 @@
"author": "",
"license": "Apache",
"devDependencies": {
"@babel/core": "^7.2.0",
"@babel/core": "^7.8.3",
"@babel/plugin-proposal-class-properties": "^7.1.0",
"@babel/polyfill": "^7.0.0",
"@babel/preset-env": "^7.1.0",
"@babel/preset-env": "^7.8.3",
"@babel/preset-react": "^7.0.0",
"@lingui/cli": "^2.7.4",
"@lingui/macro": "^2.7.2",
@@ -29,7 +29,7 @@
"babel-core": "^7.0.0-bridge.0",
"babel-eslint": "^10.0.1",
"babel-jest": "^24.7.1",
"babel-loader": "^8.0.4",
"babel-loader": "^8.0.6",
"babel-plugin-macros": "^2.4.2",
"babel-plugin-styled-components": "^1.10.0",
"css-loader": "^1.0.0",
@@ -47,7 +47,7 @@
"file-loader": "^2.0.0",
"history": "^4.9.0",
"jest": "^24.7.1",
"node-sass": "^4.12.0",
"node-sass": "^4.13.1",
"prettier": "^1.18.2",
"react-hot-loader": "^4.3.3",
"sass-loader": "^7.1.0",
@@ -58,16 +58,16 @@
},
"dependencies": {
"@lingui/react": "^2.7.2",
"@patternfly/patternfly": "^2.46.1",
"@patternfly/react-core": "^3.129.3",
"@patternfly/react-icons": "^3.14.28",
"@patternfly/react-tokens": "^2.7.14",
"@patternfly/patternfly": "^2.56.3",
"@patternfly/react-core": "^3.135.0",
"@patternfly/react-icons": "^3.14.39",
"@patternfly/react-tokens": "^2.7.25",
"ansi-to-html": "^0.6.11",
"axios": "^0.18.1",
"codemirror": "^5.47.0",
"d3": "^5.12.0",
"dagre": "^0.8.4",
"formik": "^1.5.1",
"formik": "^2.1.2",
"has-ansi": "^3.0.0",
"html-entities": "^1.2.1",
"js-yaml": "^3.13.1",

@@ -13,7 +13,6 @@ class JobTemplates extends InstanceGroupsMixin(NotificationsMixin(Base)) {
this.disassociateLabel = this.disassociateLabel.bind(this);
this.readCredentials = this.readCredentials.bind(this);
this.readAccessList = this.readAccessList.bind(this);
this.generateLabel = this.generateLabel.bind(this);
}

launch(id, data) {
@@ -24,8 +23,11 @@ class JobTemplates extends InstanceGroupsMixin(NotificationsMixin(Base)) {
return this.http.get(`${this.baseUrl}${id}/launch/`);
}

associateLabel(id, label) {
return this.http.post(`${this.baseUrl}${id}/labels/`, label);
associateLabel(id, label, orgId) {
return this.http.post(`${this.baseUrl}${id}/labels/`, {
name: label.name,
organization: orgId,
});
}

disassociateLabel(id, label) {
@@ -35,15 +37,10 @@ class JobTemplates extends InstanceGroupsMixin(NotificationsMixin(Base)) {
});
}

generateLabel(id, label, orgId) {
return this.http.post(`${this.baseUrl}${id}/labels/`, {
name: label.name,
organization: orgId,
});
}

readCredentials(id, params) {
return this.http.get(`${this.baseUrl}${id}/credentials/`, { params });
return this.http.get(`${this.baseUrl}${id}/credentials/`, {
params,
});
}

associateCredentials(id, credentialId) {
@@ -60,7 +57,9 @@ class JobTemplates extends InstanceGroupsMixin(NotificationsMixin(Base)) {
}

readAccessList(id, params) {
return this.http.get(`${this.baseUrl}${id}/access_list/`, { params });
return this.http.get(`${this.baseUrl}${id}/access_list/`, {
params,
});
}
}

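A hedged usage sketch of the reshaped label helper above: the template id, label name, and organization id are illustrative values, and the JobTemplatesAPI export from '@api' is assumed from the surrounding ui_next code.

// Illustrative only; ids and label values are made up.
import { JobTemplatesAPI } from '@api';

async function tagTemplate(templateId, labelName, organizationId) {
  // associateLabel now builds the payload itself and POSTs
  // { name, organization } to /api/v2/job_templates/<id>/labels/
  await JobTemplatesAPI.associateLabel(templateId, { name: labelName }, organizationId);

  // readAccessList/readCredentials keep passing query params straight through
  const { data } = await JobTemplatesAPI.readAccessList(templateId, { page_size: 5 });
  return data.results;
}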
@@ -1,5 +1,5 @@
import { describeNotificationMixin } from '@testUtils/apiReusable';
import Organizations from './Organizations';
import { describeNotificationMixin } from '../../../testUtils/apiReusable';

describe('OrganizationsAPI', () => {
const orgId = 1;

@@ -183,10 +183,6 @@
z-index: 20;
}

.at-u-textRight {
text-align: right;
}

//
// AlertModal styles
//

@@ -1,5 +1,5 @@
import React from 'react';
import { mountWithContexts } from '../../../testUtils/enzymeHelpers';
import { mountWithContexts } from '@testUtils/enzymeHelpers';
import About from './About';

describe('<About />', () => {

@@ -1,11 +1,11 @@
/* eslint-disable react/jsx-pascal-case */
import React from 'react';
import { shallow } from 'enzyme';
import { mountWithContexts } from '../../../testUtils/enzymeHelpers';
import { mountWithContexts } from '@testUtils/enzymeHelpers';
import AddResourceRole, { _AddResourceRole } from './AddResourceRole';
import { TeamsAPI, UsersAPI } from '../../api';
import { TeamsAPI, UsersAPI } from '@api';

jest.mock('../../api');
jest.mock('@api');

describe('<_AddResourceRole />', () => {
UsersAPI.read.mockResolvedValue({

@@ -116,6 +116,7 @@ class SelectResourceStep extends React.Component {
name={item[displayKey]}
label={item[displayKey]}
onSelect={() => onRowClick(item)}
onDeselect={() => onRowClick(item)}
/>
)}
renderToolbar={props => <DataListToolbar {...props} fillWidth />}

@@ -1,8 +1,8 @@
import React from 'react';
import { createMemoryHistory } from 'history';
import { shallow } from 'enzyme';
import { mountWithContexts } from '../../../testUtils/enzymeHelpers';
import { sleep } from '../../../testUtils/testUtils';
import { mountWithContexts } from '@testUtils/enzymeHelpers';
import { sleep } from '@testUtils/testUtils';
import SelectResourceStep from './SelectResourceStep';

describe('<SelectResourceStep />', () => {

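The '@api' and '@testUtils' specifiers used in these test hunks rely on module aliases rather than relative paths. A minimal sketch of the kind of Jest mapping involved is below; the exact alias targets in awx/ui_next are assumptions.

// jest.config.js (excerpt), illustrative only; the real alias paths may differ.
module.exports = {
  moduleNameMapper: {
    '^@api$': '<rootDir>/src/api',
    '^@api/(.*)$': '<rootDir>/src/api/$1',
    '^@testUtils/(.*)$': '<rootDir>/testUtils/$1',
  },
};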
Some files were not shown because too many files have changed in this diff.