Mirror of https://github.com/ansible/awx.git, synced 2026-02-06 20:14:44 -03:30
Compare commits
393 Commits
CHANGELOG.md (20 changes)
@@ -1,6 +1,24 @@
# Changelog

This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

# 19.2.1 (June 17, 2021)

- There are now 2 default Instance Groups: 'controlplane' and 'default' (https://github.com/ansible/awx/pull/10324)
- Removed deprecated modules: `tower_send`, `tower_receive`, `tower_workflow_template` (https://github.com/ansible/awx/pull/9980)
- Improved UI performance when a large amount of events are being emitted by jobs (https://github.com/ansible/awx/pull/10053)
- Settings UI Revert All button now issues a DELETE instead of PATCHing all fields (https://github.com/ansible/awx/pull/10376)
- Fixed a bug with the schedule date/time picker in Firefox (https://github.com/ansible/awx/pull/10291)
- UI now preselects the system default Galaxy credential when creating a new organization (https://github.com/ansible/awx/pull/10395)
- Added favicon (https://github.com/ansible/awx/pull/10388)
- Removed `not` option from smart inventory host filter search as it's not supported by the API (https://github.com/ansible/awx/pull/10380)
- Added button to allow user to refetch project revision after project sync has finished (https://github.com/ansible/awx/pull/10334)
- Fixed bug where extraneous CONFIG requests were made on logout (https://github.com/ansible/awx/pull/10379)
- Fixed bug where users were unable to cancel inventory syncs (https://github.com/ansible/awx/pull/10346)
- Added missing dashboard graph filters (https://github.com/ansible/awx/pull/10349)
- Added support for typing in to single select lookup form fields (https://github.com/ansible/awx/pull/10257)
- Fixed various bugs related to user sessions (https://github.com/ansible/awx/pull/9908)
- Fixed bug where sorting in modals would close the modal (https://github.com/ansible/awx/pull/10215)
- Added support for Red Hat Insights as an inventory source (https://github.com/ansible/awx/pull/8650)
- Fixed bugs when selecting items in a list then sorting/paginating (https://github.com/ansible/awx/pull/10329)

# 19.2.0 (June 1, 2021)
- Fixed race condition that would sometimes cause jobs to error out at the very end of an otherwise successful run (https://github.com/ansible/receptor/pull/328)
Makefile (18 changes)
@@ -173,7 +173,7 @@ init:
    . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    $(MANAGEMENT_COMMAND) provision_instance --hostname=$(COMPOSE_HOST); \
    $(MANAGEMENT_COMMAND) register_queue --queuename=tower --instance_percent=100;
    $(MANAGEMENT_COMMAND) register_queue --queuename=controlplane --instance_percent=100;

# Refresh development environment after pulling new code.
refresh: clean requirements_dev version_file develop migrate

@@ -288,6 +288,11 @@ swagger: reports

check: black

api-lint:
    BLACK_ARGS="--check" make black
    flake8 awx
    yamllint -s .

awx-link:
    [ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/setup.py egg_info_dev
    cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/python$(PYTHON_VERSION)/site-packages/awx.egg-link

@@ -315,7 +320,7 @@ test_collection:
    if [ "$(VENV_BASE)" ]; then \
    . $(VENV_BASE)/awx/bin/activate; \
    fi && \
    pip install ansible && \
    pip install ansible-core && \
    py.test $(COLLECTION_TEST_DIRS) -v
# The python path needs to be modified so that the tests can find Ansible within the container
# First we will use anything expility set as PYTHONPATH

@@ -551,10 +556,13 @@ awx-kube-dev-build: Dockerfile.kube-dev
# Translation TASKS
# --------------------------------------

# generate UI .pot
# generate UI .pot file, an empty template of strings yet to be translated
pot: $(UI_BUILD_FLAG_FILE)
    $(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings
    $(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template
    $(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template --clean

# generate UI .po files for each locale (will update translated strings for `en`)
po: $(UI_BUILD_FLAG_FILE)
    $(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings -- --clean

# generate API django .pot .po
LANG = "en-us"
README.md

@@ -1,5 +1,5 @@
[](https://ansible.softwarefactory-project.io/zuul/status) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
[](irc.libera.chat - #ansible-awx)
[](https://libera.chat)

<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />

@@ -37,5 +37,5 @@ Get Involved

We welcome your feedback and ideas. Here's how to reach us with feedback and questions:

- Join the `#ansible-awx` channel on webchat.freenode.net
- Join the `#ansible-awx` channel on irc.libera.chat
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
@@ -34,6 +34,7 @@ else:
    from django.db.backends.base import schema
    from django.db.models import indexes
    from django.db.backends.utils import names_digest
    from django.db import connection


if HAS_DJANGO is True:
@@ -149,6 +150,12 @@ def manage():
    from django.conf import settings
    from django.core.management import execute_from_command_line

    # enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
    if not MODE == 'development':
        if (connection.pg_version // 10000) < 12:
            sys.stderr.write("Postgres version 12 is required\n")
            sys.exit(1)

    if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'):  # pragma: no cover
        sys.stdout.write('%s\n' % __version__)
    # If running as a user without permission to read settings, display an
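For reference, `connection.pg_version` reports the server version as a single integer, so integer-dividing by 10000 yields the major version that the check above compares against 12. A minimal illustration of that arithmetic (the sample version numbers are illustrative, not taken from this changeset):

    # Illustrative only: how the integer form of the Postgres server version
    # maps to a major version number, mirroring the check added in manage().
    def postgres_major(pg_version: int) -> int:
        # e.g. 120007 (12.7) -> 12, 100017 (10.17) -> 10
        return pg_version // 10000

    assert postgres_major(120007) == 12
    assert postgres_major(100017) == 10  # would fail the "at least 12" requirement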
@@ -133,7 +133,7 @@ class FieldLookupBackend(BaseFilterBackend):
    Filter using field lookups provided via query string parameters.
    """

    RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate')
    RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate', 'limit')

    SUPPORTED_LOOKUPS = (
        'exact',
@@ -39,6 +39,7 @@ from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credenti
from awx.main.access import access_registry
from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
from awx.main.utils.db import get_all_field_names
from awx.main.utils.licensing import server_product_name
from awx.main.views import ApiErrorView
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
from awx.api.versioning import URLPathVersioning
@@ -184,9 +185,6 @@ class APIView(views.APIView):
    """
    Log warning for 400 requests. Add header with elapsed time.
    """
    from awx.main.utils import get_licenser
    from awx.main.utils.licensing import OpenLicense

    #
    # If the URL was rewritten, and we get a 404, we should entirely
    # replace the view in the request context with an ApiErrorView()
@@ -226,7 +224,7 @@ class APIView(views.APIView):
    response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
    time_started = getattr(self, 'time_started', None)
    response['X-API-Product-Version'] = get_awx_version()
    response['X-API-Product-Name'] = 'AWX' if isinstance(get_licenser(), OpenLicense) else 'Red Hat Ansible Tower'
    response['X-API-Product-Name'] = server_product_name()

    response['X-API-Node'] = settings.CLUSTER_HOST_ID
    if time_started:
@@ -24,7 +24,7 @@ from rest_framework.request import clone_request
from awx.api.fields import ChoiceNullField
from awx.main.fields import JSONField, ImplicitRoleField
from awx.main.models import NotificationTemplate
from awx.main.tasks import AWXReceptorJob
from awx.main.utils.execution_environments import get_default_pod_spec

# Polymorphic
from polymorphic.models import PolymorphicModel
@@ -211,7 +211,7 @@ class Metadata(metadata.SimpleMetadata):
    continue

    if field == "pod_spec_override":
        meta['default'] = AWXReceptorJob().pod_definition
        meta['default'] = get_default_pod_spec()

    # Add type choices if available from the serializer.
    if field == 'type' and hasattr(serializer, 'get_type_choices'):
@@ -1,12 +1,16 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

from collections import OrderedDict

# Django REST Framework
from django.conf import settings
from django.core.paginator import Paginator as DjangoPaginator
from rest_framework import pagination
from rest_framework.response import Response
from rest_framework.utils.urls import replace_query_param
from rest_framework.settings import api_settings
from django.utils.translation import gettext_lazy as _


class DisabledPaginator(DjangoPaginator):
@@ -65,3 +69,65 @@ class Pagination(pagination.PageNumberPagination):
        if self.count_disabled:
            return Response({'results': data})
        return super(Pagination, self).get_paginated_response(data)


class LimitPagination(pagination.BasePagination):
    default_limit = api_settings.PAGE_SIZE
    limit_query_param = 'limit'
    limit_query_description = _('Number of results to return per page.')
    max_page_size = settings.MAX_PAGE_SIZE

    def paginate_queryset(self, queryset, request, view=None):
        self.limit = self.get_limit(request)
        self.request = request

        return list(queryset[0 : self.limit])

    def get_paginated_response(self, data):
        return Response(OrderedDict([('results', data)]))

    def get_paginated_response_schema(self, schema):
        return {
            'type': 'object',
            'properties': {
                'results': schema,
            },
        }

    def get_limit(self, request):
        try:
            return pagination._positive_int(request.query_params[self.limit_query_param], strict=True)
        except (KeyError, ValueError):
            pass

        return self.default_limit


class UnifiedJobEventPagination(Pagination):
    """
    By default, use Pagination for all operations.
    If `limit` query parameter specified use LimitPagination
    """

    def __init__(self, *args, **kwargs):
        self.use_limit_paginator = False
        self.limit_pagination = LimitPagination()
        return super().__init__(*args, **kwargs)

    def paginate_queryset(self, queryset, request, view=None):
        if 'limit' in request.query_params:
            self.use_limit_paginator = True

        if self.use_limit_paginator:
            return self.limit_pagination.paginate_queryset(queryset, request, view=view)
        return super().paginate_queryset(queryset, request, view=view)

    def get_paginated_response(self, data):
        if self.use_limit_paginator:
            return self.limit_pagination.get_paginated_response(data)
        return super().get_paginated_response(data)

    def get_paginated_response_schema(self, schema):
        if self.use_limit_paginator:
            return self.limit_pagination.get_paginated_response_schema(schema)
        return super().get_paginated_response_schema(schema)
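A minimal sketch of how a DRF view opts in to this behavior; the view name below is hypothetical, while the actual event list views wired up to `UnifiedJobEventPagination` appear later in this changeset:

    # Hypothetical example view: an event list opts in by setting pagination_class.
    from rest_framework.generics import ListAPIView

    from awx.api.pagination import UnifiedJobEventPagination

    class ExampleEventList(ListAPIView):
        # queryset and serializer_class omitted for brevity
        pagination_class = UnifiedJobEventPagination
        # Without query parameters, normal page-number pagination applies;
        # requests that pass ?limit=N get the trimmed LimitPagination response
        # containing only a "results" key.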
@@ -4,6 +4,8 @@
# Python
import logging

from django.conf import settings

# Django REST Framework
from rest_framework.exceptions import MethodNotAllowed, PermissionDenied
from rest_framework import permissions
@@ -245,7 +247,7 @@ class IsSuperUser(permissions.BasePermission):

class InstanceGroupTowerPermission(ModelAccessPermission):
    def has_object_permission(self, request, view, obj):
        if request.method == 'DELETE' and obj.name == "tower":
        if request.method == 'DELETE' and obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
            return False
        return super(InstanceGroupTowerPermission, self).has_object_permission(request, view, obj)
@@ -724,6 +724,20 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
|
||||
else:
|
||||
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
summary_fields = super().get_summary_fields(obj)
|
||||
|
||||
if self.is_detail_view:
|
||||
resolved_ee = obj.resolve_execution_environment()
|
||||
if resolved_ee is not None:
|
||||
summary_fields['resolved_environment'] = {
|
||||
field: getattr(resolved_ee, field, None)
|
||||
for field in SUMMARIZABLE_FK_FIELDS['execution_environment']
|
||||
if getattr(resolved_ee, field, None) is not None
|
||||
}
|
||||
|
||||
return summary_fields
|
||||
|
||||
|
||||
class UnifiedJobSerializer(BaseSerializer):
|
||||
show_capabilities = ['start', 'delete']
|
||||
@@ -1740,10 +1754,9 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
'has_inventory_sources',
|
||||
'last_job',
|
||||
'last_job_host_summary',
|
||||
'insights_system_id',
|
||||
'ansible_facts_modified',
|
||||
)
|
||||
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified')
|
||||
read_only_fields = ('last_job', 'last_job_host_summary', 'ansible_facts_modified')
|
||||
|
||||
def build_relational_field(self, field_name, relation_info):
|
||||
field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
|
||||
@@ -1767,7 +1780,6 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
smart_inventories=self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),
|
||||
ad_hoc_commands=self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
|
||||
ad_hoc_command_events=self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
|
||||
insights=self.reverse('api:host_insights', kwargs={'pk': obj.pk}),
|
||||
ansible_facts=self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
|
||||
)
|
||||
)
|
||||
@@ -3031,7 +3043,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
|
||||
res = super(JobSerializer, self).get_related(obj)
|
||||
res.update(
|
||||
dict(
|
||||
job_events=self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),
|
||||
job_events=self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}), # TODO: consider adding job_created
|
||||
job_host_summaries=self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),
|
||||
activity_stream=self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),
|
||||
notifications=self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
|
||||
@@ -3098,8 +3110,8 @@ class JobDetailSerializer(JobSerializer):
|
||||
fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')
|
||||
|
||||
def get_playbook_counts(self, obj):
|
||||
task_count = obj.job_events.filter(event='playbook_on_task_start').count()
|
||||
play_count = obj.job_events.filter(event='playbook_on_play_start').count()
|
||||
task_count = obj.get_event_queryset().filter(event='playbook_on_task_start').count()
|
||||
play_count = obj.get_event_queryset().filter(event='playbook_on_play_start').count()
|
||||
|
||||
data = {'play_count': play_count, 'task_count': task_count}
|
||||
|
||||
@@ -3107,7 +3119,7 @@ class JobDetailSerializer(JobSerializer):
|
||||
|
||||
def get_host_status_counts(self, obj):
|
||||
try:
|
||||
counts = obj.job_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
|
||||
counts = obj.get_event_queryset().only('event_data').get(event='playbook_on_stats').get_host_status_counts()
|
||||
except JobEvent.DoesNotExist:
|
||||
counts = {}
|
||||
|
||||
@@ -3414,6 +3426,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
||||
'ask_limit_on_launch',
|
||||
'webhook_service',
|
||||
'webhook_credential',
|
||||
'-execution_environment',
|
||||
)
|
||||
|
||||
def get_related(self, obj):
|
||||
@@ -3440,6 +3453,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
||||
survey_spec=self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),
|
||||
copy=self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk}),
|
||||
)
|
||||
res.pop('execution_environment', None) # EEs aren't meaningful for workflows
|
||||
if obj.organization:
|
||||
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
|
||||
if obj.webhook_credential_id:
|
||||
@@ -3491,6 +3505,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
||||
'allow_simultaneous',
|
||||
'job_template',
|
||||
'is_sliced_job',
|
||||
'-execution_environment',
|
||||
'-execution_node',
|
||||
'-event_processing_finished',
|
||||
'-controller_node',
|
||||
@@ -3504,6 +3519,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(WorkflowJobSerializer, self).get_related(obj)
|
||||
res.pop('execution_environment', None) # EEs aren't meaningful for workflows
|
||||
if obj.workflow_job_template:
|
||||
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
|
||||
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
|
||||
@@ -3528,7 +3544,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
||||
|
||||
class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
|
||||
class Meta:
|
||||
fields = ('*', '-execution_node', '-controller_node')
|
||||
fields = ('*', '-execution_environment', '-execution_node', '-controller_node')
|
||||
|
||||
|
||||
class WorkflowJobCancelSerializer(WorkflowJobSerializer):
|
||||
@@ -4905,8 +4921,12 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
return value
|
||||
|
||||
def validate_name(self, value):
|
||||
if self.instance and self.instance.name == 'tower' and value != 'tower':
|
||||
raise serializers.ValidationError(_('tower instance group name may not be changed.'))
|
||||
if self.instance and self.instance.name == settings.DEFAULT_EXECUTION_QUEUE_NAME and value != settings.DEFAULT_EXECUTION_QUEUE_NAME:
|
||||
raise serializers.ValidationError(_('%s instance group name may not be changed.' % settings.DEFAULT_EXECUTION_QUEUE_NAME))
|
||||
|
||||
if self.instance and self.instance.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME and value != settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
|
||||
raise serializers.ValidationError(_('%s instance group name may not be changed.' % settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME))
|
||||
|
||||
return value
|
||||
|
||||
def validate_credential(self, value):
|
||||
|
||||
@@ -0,0 +1 @@
{% include "api/job_job_events_list.md" %}

awx/api/templates/api/inventory_update_events_list.md (1 change, new file)
@@ -0,0 +1 @@
{% include "api/job_job_events_list.md" %}

awx/api/templates/api/job_job_events_list.md (21 changes, new file)
@@ -0,0 +1,21 @@
{% include "api/sub_list_api_view.md" %}
{% ifmeth GET %}
## Special limit feature for event list views

Use the `limit` query string parameter to opt out of the pagination keys.
Doing this can improve response times for jobs that produce a large volume
of outputs.

    ?limit=25

This will set the page size to 25 and the `previous` and `next` keys will be
omitted from the response data. The data structure will look like this.

    {
        "results": [
            ...
        ]
    }

{% endifmeth %}
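As a usage illustration of the limit feature documented above, a request against a job's event list might look like the sketch below; the host, token, and job id are placeholders, not values taken from this changeset:

    # Placeholder host, token, and job id; shows the ?limit= behavior described above.
    import requests

    resp = requests.get(
        "https://awx.example.com/api/v2/jobs/42/job_events/",
        params={"limit": 25},
        headers={"Authorization": "Bearer TOKEN"},
    )
    data = resp.json()
    # With ?limit=25 the body contains only a "results" list; the usual
    # pagination keys such as "previous" and "next" are omitted.
    events = data["results"]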
@@ -1,25 +0,0 @@
|
||||
Make a GET request to retrieve the list of aggregated play data associated with a job
|
||||
|
||||
## Filtering
|
||||
|
||||
This endpoints supports a limited filtering subset:
|
||||
|
||||
?event_id__in=1,2,3
|
||||
|
||||
Will show only the given ids.
|
||||
|
||||
?event_id__gt=1
|
||||
|
||||
Will show ids greater than the given one.
|
||||
|
||||
?event_id__lt=3
|
||||
|
||||
Will show ids less than the given one.
|
||||
|
||||
?failed=true
|
||||
|
||||
Will show only failed plays. Alternatively `false` may be used.
|
||||
|
||||
?play__icontains=test
|
||||
|
||||
Will filter plays matching the substring `test`
|
||||
@@ -1,27 +0,0 @@
|
||||
Make a GET request to retrieve the list of aggregated task data associated with the play given by event_id.
|
||||
|
||||
`event_id` is a required query parameter and must match the job event id of the parent play in order to receive the list of tasks associated with the play
|
||||
|
||||
## Filtering
|
||||
|
||||
This endpoints supports a limited filtering subset:
|
||||
|
||||
?event_id__in=1,2,3
|
||||
|
||||
Will show only the given task ids under the play given by `event_id`.
|
||||
|
||||
?event_id__gt=1
|
||||
|
||||
Will show ids greater than the given one.
|
||||
|
||||
?event_id__lt=3
|
||||
|
||||
Will show ids less than the given one.
|
||||
|
||||
?failed=true
|
||||
|
||||
Will show only failed plays. Alternatively `false` may be used.
|
||||
|
||||
?task__icontains=test
|
||||
|
||||
Will filter tasks matching the substring `test`
|
||||
awx/api/templates/api/project_update_events_list.md (1 change, new file)
@@ -0,0 +1 @@
{% include "api/job_job_events_list.md" %}

awx/api/templates/api/system_job_events_list.md (1 change, new file)
@@ -0,0 +1 @@
{% include "api/job_job_events_list.md" %}
@@ -16,7 +16,6 @@ from awx.api.views import (
    HostSmartInventoriesList,
    HostAdHocCommandsList,
    HostAdHocCommandEventsList,
    HostInsights,
)


@@ -33,7 +32,6 @@ urls = [
    url(r'^(?P<pk>[0-9]+)/smart_inventories/$', HostSmartInventoriesList.as_view(), name='host_smart_inventories_list'),
    url(r'^(?P<pk>[0-9]+)/ad_hoc_commands/$', HostAdHocCommandsList.as_view(), name='host_ad_hoc_commands_list'),
    url(r'^(?P<pk>[0-9]+)/ad_hoc_command_events/$', HostAdHocCommandEventsList.as_view(), name='host_ad_hoc_command_events_list'),
    url(r'^(?P<pk>[0-9]+)/insights/$', HostInsights.as_view(), name='host_insights'),
]

__all__ = ['urls']

@@ -3,14 +3,11 @@

from django.conf.urls import url

from awx.api.views import JobEventList, JobEventDetail, JobEventChildrenList, JobEventHostsList

from awx.api.views import JobEventDetail, JobEventChildrenList

urls = [
    url(r'^$', JobEventList.as_view(), name='job_event_list'),
    url(r'^(?P<pk>[0-9]+)/$', JobEventDetail.as_view(), name='job_event_detail'),
    url(r'^(?P<pk>[0-9]+)/children/$', JobEventChildrenList.as_view(), name='job_event_children_list'),
    url(r'^(?P<pk>[0-9]+)/hosts/$', JobEventHostsList.as_view(), name='job_event_hosts_list'),
]

__all__ = ['urls']
@@ -21,7 +21,7 @@ from urllib3.exceptions import ConnectTimeoutError
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import FieldError, ObjectDoesNotExist
|
||||
from django.db.models import Q, Sum
|
||||
from django.db import IntegrityError, transaction, connection
|
||||
from django.db import IntegrityError, ProgrammingError, transaction, connection
|
||||
from django.shortcuts import get_object_or_404
|
||||
from django.utils.safestring import mark_safe
|
||||
from django.utils.timezone import now
|
||||
@@ -90,17 +90,14 @@ from awx.main import models
|
||||
from awx.main.utils import (
|
||||
camelcase_to_underscore,
|
||||
extract_ansible_vars,
|
||||
get_awx_http_client_headers,
|
||||
get_object_or_400,
|
||||
getattrd,
|
||||
get_pk_from_dict,
|
||||
schedule_task_manager,
|
||||
ignore_inventory_computed_fields,
|
||||
set_environ,
|
||||
)
|
||||
from awx.main.utils.encryption import encrypt_value
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.insights import filter_insights_api_response
|
||||
from awx.main.redact import UriCleaner
|
||||
from awx.api.permissions import (
|
||||
JobTemplateCallbackPermission,
|
||||
@@ -172,11 +169,21 @@ from awx.api.views.root import ( # noqa
|
||||
ApiV2AttachView,
|
||||
)
|
||||
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver # noqa
|
||||
from awx.api.pagination import UnifiedJobEventPagination
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.api.views')
|
||||
|
||||
|
||||
def unpartitioned_event_horizon(cls):
|
||||
with connection.cursor() as cursor:
|
||||
try:
|
||||
cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
|
||||
return cursor.fetchone()[0] or -1
|
||||
except ProgrammingError:
|
||||
return 0
|
||||
|
||||
|
||||
def api_exception_handler(exc, context):
|
||||
"""
|
||||
Override default API exception handler to catch IntegrityError exceptions.
|
||||
@@ -685,6 +692,7 @@ class TeamAccessList(ResourceAccessList):
|
||||
|
||||
class ExecutionEnvironmentList(ListCreateAPIView):
|
||||
|
||||
always_allow_superuser = False
|
||||
model = models.ExecutionEnvironment
|
||||
serializer_class = serializers.ExecutionEnvironmentSerializer
|
||||
swagger_topic = "Execution Environments"
|
||||
@@ -692,10 +700,22 @@ class ExecutionEnvironmentList(ListCreateAPIView):
|
||||
|
||||
class ExecutionEnvironmentDetail(RetrieveUpdateDestroyAPIView):
|
||||
|
||||
always_allow_superuser = False
|
||||
model = models.ExecutionEnvironment
|
||||
serializer_class = serializers.ExecutionEnvironmentSerializer
|
||||
swagger_topic = "Execution Environments"
|
||||
|
||||
def update(self, request, *args, **kwargs):
|
||||
instance = self.get_object()
|
||||
fields_to_check = ['name', 'description', 'organization', 'image', 'credential']
|
||||
if instance.managed_by_tower and request.user.can_access(models.ExecutionEnvironment, 'change', instance):
|
||||
for field in fields_to_check:
|
||||
left = getattr(instance, field, None)
|
||||
right = request.data.get(field, None)
|
||||
if left != right:
|
||||
raise PermissionDenied(_("Only the 'pull' field can be edited for managed execution environments."))
|
||||
return super().update(request, *args, **kwargs)
|
||||
|
||||
|
||||
class ExecutionEnvironmentJobTemplateList(SubListAPIView):
|
||||
|
||||
@@ -878,11 +898,17 @@ class ProjectUpdateEventsList(SubListAPIView):
|
||||
relationship = 'project_update_events'
|
||||
name = _('Project Update Events List')
|
||||
search_fields = ('stdout',)
|
||||
pagination_class = UnifiedJobEventPagination
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
|
||||
return super(ProjectUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)
|
||||
|
||||
def get_queryset(self):
|
||||
pu = self.get_parent_object()
|
||||
self.check_parent_access(pu)
|
||||
return pu.get_event_queryset()
|
||||
|
||||
|
||||
class SystemJobEventsList(SubListAPIView):
|
||||
|
||||
@@ -892,11 +918,17 @@ class SystemJobEventsList(SubListAPIView):
|
||||
relationship = 'system_job_events'
|
||||
name = _('System Job Events List')
|
||||
search_fields = ('stdout',)
|
||||
pagination_class = UnifiedJobEventPagination
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
|
||||
return super(SystemJobEventsList, self).finalize_response(request, response, *args, **kwargs)
|
||||
|
||||
def get_queryset(self):
|
||||
job = self.get_parent_object()
|
||||
self.check_parent_access(job)
|
||||
return job.get_event_queryset()
|
||||
|
||||
|
||||
class ProjectUpdateCancel(RetrieveAPIView):
|
||||
|
||||
@@ -1665,106 +1697,6 @@ class GatewayTimeout(APIException):
|
||||
default_code = 'gateway_timeout'
|
||||
|
||||
|
||||
class HostInsights(GenericAPIView):
|
||||
|
||||
model = models.Host
|
||||
serializer_class = serializers.EmptySerializer
|
||||
|
||||
def _call_insights_api(self, url, session, headers):
|
||||
try:
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
res = session.get(url, headers=headers, timeout=120)
|
||||
except requests.exceptions.SSLError:
|
||||
raise BadGateway(_('SSLError while trying to connect to {}').format(url))
|
||||
except requests.exceptions.Timeout:
|
||||
raise GatewayTimeout(_('Request to {} timed out.').format(url))
|
||||
except requests.exceptions.RequestException as e:
|
||||
raise BadGateway(_('Unknown exception {} while trying to GET {}').format(e, url))
|
||||
|
||||
if res.status_code == 401:
|
||||
raise BadGateway(_('Unauthorized access. Please check your Insights Credential username and password.'))
|
||||
elif res.status_code != 200:
|
||||
raise BadGateway(
|
||||
_('Failed to access the Insights API at URL {}.' ' Server responded with {} status code and message {}').format(
|
||||
url, res.status_code, res.content
|
||||
)
|
||||
)
|
||||
|
||||
try:
|
||||
return res.json()
|
||||
except ValueError:
|
||||
raise BadGateway(_('Expected JSON response from Insights at URL {}' ' but instead got {}').format(url, res.content))
|
||||
|
||||
def _get_session(self, username, password):
|
||||
session = requests.Session()
|
||||
session.auth = requests.auth.HTTPBasicAuth(username, password)
|
||||
|
||||
return session
|
||||
|
||||
def _get_platform_info(self, host, session, headers):
|
||||
url = '{}/api/inventory/v1/hosts?insights_id={}'.format(settings.INSIGHTS_URL_BASE, host.insights_system_id)
|
||||
res = self._call_insights_api(url, session, headers)
|
||||
try:
|
||||
res['results'][0]['id']
|
||||
except (IndexError, KeyError):
|
||||
raise NotFound(_('Could not translate Insights system ID {}' ' into an Insights platform ID.').format(host.insights_system_id))
|
||||
|
||||
return res['results'][0]
|
||||
|
||||
def _get_reports(self, platform_id, session, headers):
|
||||
url = '{}/api/insights/v1/system/{}/reports/'.format(settings.INSIGHTS_URL_BASE, platform_id)
|
||||
|
||||
return self._call_insights_api(url, session, headers)
|
||||
|
||||
def _get_remediations(self, platform_id, session, headers):
|
||||
url = '{}/api/remediations/v1/remediations?system={}'.format(settings.INSIGHTS_URL_BASE, platform_id)
|
||||
|
||||
remediations = []
|
||||
|
||||
# Iterate over all of the pages of content.
|
||||
while url:
|
||||
data = self._call_insights_api(url, session, headers)
|
||||
remediations.extend(data['data'])
|
||||
|
||||
url = data['links']['next'] # Will be `None` if this is the last page.
|
||||
|
||||
return remediations
|
||||
|
||||
def _get_insights(self, host, session, headers):
|
||||
platform_info = self._get_platform_info(host, session, headers)
|
||||
platform_id = platform_info['id']
|
||||
reports = self._get_reports(platform_id, session, headers)
|
||||
remediations = self._get_remediations(platform_id, session, headers)
|
||||
|
||||
return {'insights_content': filter_insights_api_response(platform_info, reports, remediations)}
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
host = self.get_object()
|
||||
cred = None
|
||||
|
||||
if host.insights_system_id is None:
|
||||
return Response(dict(error=_('This host is not recognized as an Insights host.')), status=status.HTTP_404_NOT_FOUND)
|
||||
|
||||
if host.inventory and host.inventory.insights_credential:
|
||||
cred = host.inventory.insights_credential
|
||||
else:
|
||||
return Response(dict(error=_('The Insights Credential for "{}" was not found.').format(host.inventory.name)), status=status.HTTP_404_NOT_FOUND)
|
||||
|
||||
username = cred.get_input('username', default='')
|
||||
password = cred.get_input('password', default='')
|
||||
session = self._get_session(username, password)
|
||||
headers = get_awx_http_client_headers()
|
||||
|
||||
data = self._get_insights(host, session, headers)
|
||||
return Response(data, status=status.HTTP_200_OK)
|
||||
|
||||
def handle_exception(self, exc):
|
||||
# Continue supporting the slightly different way we have handled error responses on this view.
|
||||
response = super().handle_exception(exc)
|
||||
response.data['error'] = response.data.pop('detail')
|
||||
return response
|
||||
|
||||
|
||||
class GroupList(ListCreateAPIView):
|
||||
|
||||
model = models.Group
|
||||
@@ -3602,7 +3534,7 @@ class JobRelaunch(RetrieveAPIView):
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
host_qs = obj.retry_qs(retry_hosts)
|
||||
if not obj.job_events.filter(event='playbook_on_stats').exists():
|
||||
if not obj.get_event_queryset().filter(event='playbook_on_stats').exists():
|
||||
return Response(
|
||||
{'hosts': _('Cannot retry on {status_value} hosts, playbook stats not available.').format(status_value=retry_hosts)},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
@@ -3729,18 +3661,22 @@ class JobHostSummaryDetail(RetrieveAPIView):
|
||||
serializer_class = serializers.JobHostSummarySerializer
|
||||
|
||||
|
||||
class JobEventList(NoTruncateMixin, ListAPIView):
|
||||
|
||||
model = models.JobEvent
|
||||
serializer_class = serializers.JobEventSerializer
|
||||
search_fields = ('stdout',)
|
||||
|
||||
|
||||
class JobEventDetail(RetrieveAPIView):
|
||||
|
||||
model = models.JobEvent
|
||||
serializer_class = serializers.JobEventSerializer
|
||||
|
||||
@property
|
||||
def is_partitioned(self):
|
||||
if 'pk' not in self.kwargs:
|
||||
return True
|
||||
return int(self.kwargs['pk']) > unpartitioned_event_horizon(models.JobEvent)
|
||||
|
||||
@property
|
||||
def model(self):
|
||||
if self.is_partitioned:
|
||||
return models.JobEvent
|
||||
return models.UnpartitionedJobEvent
|
||||
|
||||
def get_serializer_context(self):
|
||||
context = super().get_serializer_context()
|
||||
context.update(no_truncate=True)
|
||||
@@ -3749,33 +3685,31 @@ class JobEventDetail(RetrieveAPIView):
|
||||
|
||||
class JobEventChildrenList(NoTruncateMixin, SubListAPIView):
|
||||
|
||||
model = models.JobEvent
|
||||
serializer_class = serializers.JobEventSerializer
|
||||
parent_model = models.JobEvent
|
||||
relationship = 'children'
|
||||
name = _('Job Event Children List')
|
||||
search_fields = ('stdout',)
|
||||
|
||||
def get_queryset(self):
|
||||
parent_event = self.get_parent_object()
|
||||
self.check_parent_access(parent_event)
|
||||
qs = self.request.user.get_queryset(self.model).filter(parent_uuid=parent_event.uuid)
|
||||
return qs
|
||||
@property
|
||||
def is_partitioned(self):
|
||||
if 'pk' not in self.kwargs:
|
||||
return True
|
||||
return int(self.kwargs['pk']) > unpartitioned_event_horizon(models.JobEvent)
|
||||
|
||||
@property
|
||||
def model(self):
|
||||
if self.is_partitioned:
|
||||
return models.JobEvent
|
||||
return models.UnpartitionedJobEvent
|
||||
|
||||
class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
|
||||
|
||||
model = models.Host
|
||||
serializer_class = serializers.HostSerializer
|
||||
parent_model = models.JobEvent
|
||||
relationship = 'hosts'
|
||||
name = _('Job Event Hosts List')
|
||||
@property
|
||||
def parent_model(self):
|
||||
return self.model
|
||||
|
||||
def get_queryset(self):
|
||||
parent_event = self.get_parent_object()
|
||||
self.check_parent_access(parent_event)
|
||||
qs = self.request.user.get_queryset(self.model).filter(job_events_as_primary_host=parent_event)
|
||||
return qs
|
||||
return parent_event.job.get_event_queryset().filter(parent_uuid=parent_event.uuid)
|
||||
|
||||
|
||||
class BaseJobEventsList(NoTruncateMixin, SubListAPIView):
|
||||
@@ -3811,12 +3745,12 @@ class GroupJobEventsList(BaseJobEventsList):
|
||||
class JobJobEventsList(BaseJobEventsList):
|
||||
|
||||
parent_model = models.Job
|
||||
pagination_class = UnifiedJobEventPagination
|
||||
|
||||
def get_queryset(self):
|
||||
job = self.get_parent_object()
|
||||
self.check_parent_access(job)
|
||||
qs = job.job_events.select_related('host').order_by('start_line')
|
||||
return qs.all()
|
||||
return job.get_event_queryset().select_related('host').order_by('start_line')
|
||||
|
||||
|
||||
class AdHocCommandList(ListCreateAPIView):
|
||||
@@ -3974,6 +3908,11 @@ class AdHocCommandEventList(NoTruncateMixin, ListAPIView):
|
||||
serializer_class = serializers.AdHocCommandEventSerializer
|
||||
search_fields = ('stdout',)
|
||||
|
||||
def get_queryset(self):
|
||||
adhoc = self.get_parent_object()
|
||||
self.check_parent_access(adhoc)
|
||||
return adhoc.get_event_queryset()
|
||||
|
||||
|
||||
class AdHocCommandEventDetail(RetrieveAPIView):
|
||||
|
||||
@@ -3994,12 +3933,21 @@ class BaseAdHocCommandEventsList(NoTruncateMixin, SubListAPIView):
|
||||
relationship = 'ad_hoc_command_events'
|
||||
name = _('Ad Hoc Command Events List')
|
||||
search_fields = ('stdout',)
|
||||
pagination_class = UnifiedJobEventPagination
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return parent.get_event_queryset()
|
||||
|
||||
|
||||
class HostAdHocCommandEventsList(BaseAdHocCommandEventsList):
|
||||
|
||||
parent_model = models.Host
|
||||
|
||||
def get_queryset(self):
|
||||
return super(BaseAdHocCommandEventsList, self).get_queryset()
|
||||
|
||||
|
||||
# class GroupJobEventsList(BaseJobEventsList):
|
||||
# parent_model = Group
|
||||
|
||||
@@ -38,6 +38,9 @@ from awx.api.serializers import (
|
||||
)
|
||||
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, ControlledByScmMixin
|
||||
|
||||
from awx.api.pagination import UnifiedJobEventPagination
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.api.views.organization')
|
||||
|
||||
|
||||
@@ -49,6 +52,12 @@ class InventoryUpdateEventsList(SubListAPIView):
|
||||
relationship = 'inventory_update_events'
|
||||
name = _('Inventory Update Events List')
|
||||
search_fields = ('stdout',)
|
||||
pagination_class = UnifiedJobEventPagination
|
||||
|
||||
def get_queryset(self):
|
||||
iu = self.get_parent_object()
|
||||
self.check_parent_access(iu)
|
||||
return iu.get_event_queryset()
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
|
||||
|
||||
@@ -52,6 +52,11 @@ class UnifiedJobDeletionMixin(object):
|
||||
else:
|
||||
# if it has been > 1 minute, events are probably lost
|
||||
logger.warning('Allowing deletion of {} through the API without all events ' 'processed.'.format(obj.log_format))
|
||||
|
||||
# Manually cascade delete events if unpartitioned job
|
||||
if obj.has_unpartitioned_events:
|
||||
obj.get_event_queryset().delete()
|
||||
|
||||
obj.delete()
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
@@ -30,6 +30,7 @@ from awx.api.versioning import reverse, drf_reverse
|
||||
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
|
||||
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
|
||||
from awx.main.utils import set_environ
|
||||
from awx.main.utils.licensing import get_licenser
|
||||
|
||||
logger = logging.getLogger('awx.api.views.root')
|
||||
|
||||
@@ -106,7 +107,6 @@ class ApiVersionRootView(APIView):
|
||||
data['hosts'] = reverse('api:host_list', request=request)
|
||||
data['job_templates'] = reverse('api:job_template_list', request=request)
|
||||
data['jobs'] = reverse('api:job_list', request=request)
|
||||
data['job_events'] = reverse('api:job_event_list', request=request)
|
||||
data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
|
||||
data['system_job_templates'] = reverse('api:system_job_template_list', request=request)
|
||||
data['system_jobs'] = reverse('api:system_job_list', request=request)
|
||||
@@ -174,8 +174,6 @@ class ApiV2SubscriptionView(APIView):
|
||||
self.permission_denied(request) # Raises PermissionDenied exception.
|
||||
|
||||
def post(self, request):
|
||||
from awx.main.utils.common import get_licenser
|
||||
|
||||
data = request.data.copy()
|
||||
if data.get('subscriptions_password') == '$encrypted$':
|
||||
data['subscriptions_password'] = settings.SUBSCRIPTIONS_PASSWORD
|
||||
@@ -223,7 +221,6 @@ class ApiV2AttachView(APIView):
|
||||
user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
|
||||
pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
|
||||
if pool_id and user and pw:
|
||||
from awx.main.utils.common import get_licenser
|
||||
|
||||
data = request.data.copy()
|
||||
try:
|
||||
@@ -265,8 +262,6 @@ class ApiV2ConfigView(APIView):
|
||||
def get(self, request, format=None):
|
||||
'''Return various sitewide configuration settings'''
|
||||
|
||||
from awx.main.utils.common import get_licenser
|
||||
|
||||
license_data = get_licenser().validate()
|
||||
|
||||
if not license_data.get('valid_key', False):
|
||||
@@ -302,7 +297,9 @@ class ApiV2ConfigView(APIView):
|
||||
):
|
||||
data.update(
|
||||
dict(
|
||||
project_base_dir=settings.PROJECTS_ROOT, project_local_paths=Project.get_local_path_choices(), custom_virtualenvs=get_custom_venv_choices()
|
||||
project_base_dir=settings.PROJECTS_ROOT,
|
||||
project_local_paths=Project.get_local_path_choices(),
|
||||
custom_virtualenvs=get_custom_venv_choices(),
|
||||
)
|
||||
)
|
||||
elif JobTemplate.accessible_objects(request.user, 'admin_role').exists():
|
||||
@@ -319,8 +316,6 @@ class ApiV2ConfigView(APIView):
|
||||
logger.info(smart_text(u"Invalid JSON submitted for license."), extra=dict(actor=request.user.username))
|
||||
return Response({"error": _("Invalid JSON")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
from awx.main.utils.common import get_licenser
|
||||
|
||||
license_data = json.loads(data_actual)
|
||||
if 'license_key' in license_data:
|
||||
return Response({"error": _('Legacy license submitted. A subscription manifest is now required.')}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
@@ -23,6 +23,7 @@ import cachetools
|
||||
# AWX
|
||||
from awx.main.utils import encrypt_field, decrypt_field
|
||||
from awx.conf import settings_registry
|
||||
from awx.conf.fields import PrimaryKeyRelatedField
|
||||
from awx.conf.models import Setting
|
||||
from awx.conf.migrations._reencrypt import decrypt_field as old_decrypt_field
|
||||
|
||||
@@ -420,9 +421,9 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
raise ImproperlyConfigured('Setting "{}" is read only.'.format(name))
|
||||
|
||||
try:
|
||||
data = field.to_representation(value)
|
||||
data = None if value is None and isinstance(field, PrimaryKeyRelatedField) else field.to_representation(value)
|
||||
setting_value = field.run_validation(data)
|
||||
db_value = field.to_representation(setting_value)
|
||||
db_value = None if setting_value is None and isinstance(field, PrimaryKeyRelatedField) else field.to_representation(setting_value)
|
||||
except Exception as e:
|
||||
logger.exception('Unable to assign value "%r" to setting "%s".', value, name, exc_info=True)
|
||||
raise e
|
||||
|
||||
@@ -45,6 +45,7 @@ from awx.main.models import (
|
||||
InventoryUpdateEvent,
|
||||
Job,
|
||||
JobEvent,
|
||||
UnpartitionedJobEvent,
|
||||
JobHostSummary,
|
||||
JobLaunchConfig,
|
||||
JobTemplate,
|
||||
@@ -464,7 +465,7 @@ class BaseAccess(object):
|
||||
if display_method == 'schedule':
|
||||
user_capabilities['schedule'] = user_capabilities['start']
|
||||
continue
|
||||
elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CredentialInputSource)):
|
||||
elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CredentialInputSource, ExecutionEnvironment)):
|
||||
user_capabilities['delete'] = user_capabilities['edit']
|
||||
continue
|
||||
elif display_method == 'copy' and isinstance(obj, (Group, Host)):
|
||||
@@ -1369,6 +1370,8 @@ class ExecutionEnvironmentAccess(BaseAccess):
|
||||
return self.check_related('organization', Organization, data, obj=obj, mandatory=True, role_field='execution_environment_admin_role')
|
||||
|
||||
def can_delete(self, obj):
|
||||
if obj.managed_by_tower:
|
||||
raise PermissionDenied
|
||||
return self.can_change(obj, None)
|
||||
|
||||
|
||||
@@ -2352,6 +2355,11 @@ class JobEventAccess(BaseAccess):
|
||||
return False
|
||||
|
||||
|
||||
class UnpartitionedJobEventAccess(JobEventAccess):
|
||||
|
||||
model = UnpartitionedJobEvent
|
||||
|
||||
|
||||
class ProjectUpdateEventAccess(BaseAccess):
|
||||
"""
|
||||
I can see project update event records whenever I can access the project update
|
||||
@@ -2895,3 +2903,4 @@ class WorkflowApprovalTemplateAccess(BaseAccess):
|
||||
|
||||
for cls in BaseAccess.__subclasses__():
|
||||
access_registry[cls.model] = cls
|
||||
access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess
|
||||
|
||||
@@ -6,7 +6,7 @@ import platform
|
||||
import distro
|
||||
|
||||
from django.db import connection
|
||||
from django.db.models import Count, Max, Min
|
||||
from django.db.models import Count
|
||||
from django.conf import settings
|
||||
from django.contrib.sessions.models import Session
|
||||
from django.utils.timezone import now, timedelta
|
||||
@@ -15,7 +15,7 @@ from django.utils.translation import ugettext_lazy as _
|
||||
from psycopg2.errors import UntranslatableCharacter
|
||||
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.utils import get_awx_version, get_custom_venv_choices, camelcase_to_underscore, datetime_hook
|
||||
from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
|
||||
from awx.main import models
|
||||
from awx.main.analytics import register
|
||||
|
||||
@@ -58,7 +58,10 @@ def four_hour_slicing(key, since, until, last_gather):
|
||||
horizon = until - timedelta(weeks=4)
|
||||
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
|
||||
last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
|
||||
last_entry = max(last_entries.get(key) or last_gather, horizon)
|
||||
try:
|
||||
last_entry = max(last_entries.get(key) or last_gather, horizon)
|
||||
except TypeError: # last_entries has a stale non-datetime entry for this collector
|
||||
last_entry = max(last_gather, horizon)
|
||||
|
||||
start, end = last_entry, None
|
||||
while start < until:
|
||||
@@ -67,7 +70,7 @@ def four_hour_slicing(key, since, until, last_gather):
|
||||
start = end
|
||||
|
||||
|
||||
def events_slicing(key, since, until, last_gather):
|
||||
def _identify_lower(key, since, until, last_gather):
|
||||
from awx.conf.models import Setting
|
||||
|
||||
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
|
||||
@@ -77,16 +80,8 @@ def events_slicing(key, since, until, last_gather):
|
||||
lower = since or last_gather
|
||||
if not since and last_entries.get(key):
|
||||
lower = horizon
|
||||
pk_values = models.JobEvent.objects.filter(created__gte=lower, created__lte=until).aggregate(Min('pk'), Max('pk'))
|
||||
|
||||
previous_pk = pk_values['pk__min'] - 1 if pk_values['pk__min'] is not None else 0
|
||||
if not since and last_entries.get(key):
|
||||
previous_pk = max(last_entries[key], previous_pk)
|
||||
final_pk = pk_values['pk__max'] or 0
|
||||
|
||||
step = 100000
|
||||
for start in range(previous_pk, final_pk + 1, step):
|
||||
yield (start, min(start + step, final_pk))
|
||||
return lower, last_entries
|
||||
|
||||
|
||||
@register('config', '1.3', description=_('General platform configuration.'))
|
||||
@@ -120,7 +115,7 @@ def config(since, **kwargs):
|
||||
}
|
||||
|
||||
|
||||
@register('counts', '1.0', description=_('Counts of objects such as organizations, inventories, and projects'))
|
||||
@register('counts', '1.1', description=_('Counts of objects such as organizations, inventories, and projects'))
|
||||
def counts(since, **kwargs):
|
||||
counts = {}
|
||||
for cls in (
|
||||
@@ -138,9 +133,6 @@ def counts(since, **kwargs):
|
||||
):
|
||||
counts[camelcase_to_underscore(cls.__name__)] = cls.objects.count()
|
||||
|
||||
venvs = get_custom_venv_choices()
|
||||
counts['custom_virtualenvs'] = len([v for v in venvs if os.path.basename(v.rstrip('/')) != 'ansible'])
|
||||
|
||||
inv_counts = dict(models.Inventory.objects.order_by().values_list('kind').annotate(Count('kind')))
|
||||
inv_counts['normal'] = inv_counts.get('', 0)
|
||||
inv_counts.pop('', None)
|
||||
@@ -335,39 +327,49 @@ def _copy_table(table, query, path):
|
||||
return file.file_list()
|
||||
|
||||
|
||||
@register('events_table', '1.2', format='csv', description=_('Automation task records'), expensive=events_slicing)
|
||||
def events_table(since, full_path, until, **kwargs):
|
||||
def _events_table(since, full_path, until, tbl, where_column, project_job_created=False, **kwargs):
|
||||
def query(event_data):
|
||||
return f'''COPY (SELECT main_jobevent.id,
|
||||
main_jobevent.created,
|
||||
main_jobevent.modified,
|
||||
main_jobevent.uuid,
|
||||
main_jobevent.parent_uuid,
|
||||
main_jobevent.event,
|
||||
{event_data}->'task_action' AS task_action,
|
||||
(CASE WHEN event = 'playbook_on_stats' THEN event_data END) as playbook_on_stats,
|
||||
main_jobevent.failed,
|
||||
main_jobevent.changed,
|
||||
main_jobevent.playbook,
|
||||
main_jobevent.play,
|
||||
main_jobevent.task,
|
||||
main_jobevent.role,
|
||||
main_jobevent.job_id,
|
||||
main_jobevent.host_id,
|
||||
main_jobevent.host_name,
|
||||
CAST({event_data}->>'start' AS TIMESTAMP WITH TIME ZONE) AS start,
|
||||
CAST({event_data}->>'end' AS TIMESTAMP WITH TIME ZONE) AS end,
|
||||
{event_data}->'duration' AS duration,
|
||||
{event_data}->'res'->'warnings' AS warnings,
|
||||
{event_data}->'res'->'deprecations' AS deprecations
|
||||
FROM main_jobevent
|
||||
WHERE (main_jobevent.id > {since} AND main_jobevent.id <= {until})
|
||||
ORDER BY main_jobevent.id ASC) TO STDOUT WITH CSV HEADER'''
|
||||
query = f'''COPY (SELECT {tbl}.id,
|
||||
{tbl}.created,
|
||||
{tbl}.modified,
|
||||
{tbl + '.job_created' if project_job_created else 'NULL'} as job_created,
|
||||
{tbl}.uuid,
|
||||
{tbl}.parent_uuid,
|
||||
{tbl}.event,
|
||||
task_action,
|
||||
(CASE WHEN event = 'playbook_on_stats' THEN event_data END) as playbook_on_stats,
|
||||
{tbl}.failed,
|
||||
{tbl}.changed,
|
||||
{tbl}.playbook,
|
||||
{tbl}.play,
|
||||
{tbl}.task,
|
||||
{tbl}.role,
|
||||
{tbl}.job_id,
|
||||
{tbl}.host_id,
|
||||
{tbl}.host_name,
|
||||
CAST(x.start AS TIMESTAMP WITH TIME ZONE) AS start,
|
||||
CAST(x.end AS TIMESTAMP WITH TIME ZONE) AS end,
|
||||
x.duration AS duration,
|
||||
x.res->'warnings' AS warnings,
|
||||
x.res->'deprecations' AS deprecations
|
||||
FROM {tbl}, json_to_record({event_data}) AS x("res" json, "duration" text, "task_action" text, "start" text, "end" text)
|
||||
WHERE ({tbl}.{where_column} > '{since.isoformat()}' AND {tbl}.{where_column} <= '{until.isoformat()}')) TO STDOUT WITH CSV HEADER'''
|
||||
return query
|
||||
|
||||
try:
|
||||
return _copy_table(table='events', query=query("main_jobevent.event_data::json"), path=full_path)
|
||||
return _copy_table(table='events', query=query(f"{tbl}.event_data::json"), path=full_path)
|
||||
except UntranslatableCharacter:
|
||||
return _copy_table(table='events', query=query("replace(main_jobevent.event_data::text, '\\u0000', '')::json"), path=full_path)
|
||||
return _copy_table(table='events', query=query(f"replace({tbl}.event_data::text, '\\u0000', '')::json"), path=full_path)
|
||||
|
||||
|
||||
@register('events_table', '1.3', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
|
||||
def events_table_unpartitioned(since, full_path, until, **kwargs):
|
||||
return _events_table(since, full_path, until, '_unpartitioned_main_jobevent', 'created', **kwargs)
|
||||
|
||||
|
||||
@register('events_table', '1.3', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
|
||||
def events_table_partitioned_modified(since, full_path, until, **kwargs):
|
||||
return _events_table(since, full_path, until, 'main_jobevent', 'modified', project_job_created=True, **kwargs)
|
||||
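For reference, the rewritten query above uses PostgreSQL's json_to_record() to pull per-event keys out of event_data; the function requires an explicit column definition list, and keys missing from a given row simply come back NULL. A trimmed illustration of the pattern (not the full collector query), kept as a Python string:

example_sql = '''
SELECT main_jobevent.id,
       x.task_action,
       CAST(x.start AS TIMESTAMP WITH TIME ZONE) AS start,
       x.res->'warnings' AS warnings
FROM main_jobevent,
     json_to_record(main_jobevent.event_data::json)
       AS x("res" json, "duration" text, "task_action" text, "start" text, "end" text)
LIMIT 10
'''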
|
||||
|
||||
@register('unified_jobs_table', '1.2', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)
|
||||
|
||||
@@ -270,7 +270,8 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
if not files:
|
||||
if collection_type != 'dry-run':
|
||||
with disable_activity_stream():
|
||||
last_entries[key] = max(last_entries[key], end) if last_entries.get(key) else end
|
||||
entry = last_entries.get(key)
|
||||
last_entries[key] = max(entry, end) if entry and type(entry) == type(end) else end
|
||||
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
||||
continue
|
||||
|
||||
@@ -293,7 +294,8 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
|
||||
if slice_succeeded and collection_type != 'dry-run':
|
||||
with disable_activity_stream():
|
||||
last_entries[key] = max(last_entries[key], end) if last_entries.get(key) else end
|
||||
entry = last_entries.get(key)
|
||||
last_entries[key] = max(entry, end) if entry and type(entry) == type(end) else end
|
||||
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
||||
except Exception:
|
||||
succeeded = False
|
||||
|
||||
@@ -39,7 +39,6 @@ def metrics():
|
||||
],
|
||||
registry=REGISTRY,
|
||||
)
|
||||
CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs', registry=REGISTRY)
|
||||
RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the system', registry=REGISTRY)
|
||||
PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the system', registry=REGISTRY)
|
||||
STATUS = Gauge(
|
||||
@@ -159,7 +158,6 @@ def metrics():
|
||||
HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])
|
||||
|
||||
SCHEDULE_COUNT.set(current_counts['schedule'])
|
||||
CUSTOM_VENVS.set(current_counts['custom_virtualenvs'])
|
||||
|
||||
USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
|
||||
USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
|
||||
|
||||
@@ -177,6 +177,24 @@ register(
|
||||
read_only=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'DEFAULT_CONTROL_PLANE_QUEUE_NAME',
|
||||
field_class=fields.CharField,
|
||||
label=_('The instance group where control plane tasks run'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
read_only=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'DEFAULT_EXECUTION_QUEUE_NAME',
|
||||
field_class=fields.CharField,
|
||||
label=_('The instance group where user jobs run (currently only on non-VM installs)'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
read_only=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'DEFAULT_EXECUTION_ENVIRONMENT',
|
||||
field_class=fields.PrimaryKeyRelatedField,
|
||||
@@ -344,6 +362,17 @@ register(
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'MAX_WEBSOCKET_EVENT_RATE',
|
||||
field_class=fields.IntegerField,
|
||||
min_value=0,
|
||||
default=30,
|
||||
label=_('Job Event Maximum Websocket Messages Per Second'),
|
||||
help_text=_('Maximum number of messages per second used to update the UI live job output. A value of 0 means no limit.'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'SCHEDULE_MAX_JOBS',
|
||||
field_class=fields.IntegerField,
|
||||
|
||||
@@ -14,7 +14,7 @@ __all__ = [
|
||||
'STANDARD_INVENTORY_UPDATE_ENV',
|
||||
]
|
||||
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'tower')
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'tower', 'insights')
|
||||
PRIVILEGE_ESCALATION_METHODS = [
|
||||
('sudo', _('Sudo')),
|
||||
('su', _('Su')),
|
||||
@@ -41,6 +41,7 @@ STANDARD_INVENTORY_UPDATE_ENV = {
|
||||
}
|
||||
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
|
||||
ACTIVE_STATES = CAN_CANCEL
|
||||
MINIMAL_EVENTS = set(['playbook_on_play_start', 'playbook_on_task_start', 'playbook_on_stats', 'EOF'])
|
||||
CENSOR_VALUE = '************'
|
||||
ENV_BLOCKLIST = frozenset(
|
||||
(
|
||||
|
||||
awx/main/credential_plugins/dsv.py (new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
from .plugin import CredentialPlugin
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from thycotic.secrets.vault import SecretsVault
|
||||
|
||||
|
||||
dsv_inputs = {
|
||||
'fields': [
|
||||
{
|
||||
'id': 'tenant',
|
||||
'label': _('Tenant'),
|
||||
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'tld',
|
||||
'label': _('Top-level Domain (TLD)'),
|
||||
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
|
||||
'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
|
||||
'default': 'com',
|
||||
},
|
||||
{'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
|
||||
{
|
||||
'id': 'client_secret',
|
||||
'label': _('Client Secret'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
},
|
||||
],
|
||||
'metadata': [
|
||||
{
|
||||
'id': 'path',
|
||||
'label': _('Secret Path'),
|
||||
'type': 'string',
|
||||
'help_text': _('The secret path e.g. /test/secret1'),
|
||||
},
|
||||
],
|
||||
'required': ['tenant', 'client_id', 'client_secret', 'path'],
|
||||
}
|
||||
|
||||
if settings.DEBUG:
|
||||
dsv_inputs['fields'].append(
|
||||
{
|
||||
'id': 'url_template',
|
||||
'label': _('URL template'),
|
||||
'type': 'string',
|
||||
'default': 'https://{}.secretsvaultcloud.{}/v1',
|
||||
}
|
||||
)
|
||||
|
||||
dsv_plugin = CredentialPlugin(
|
||||
'Thycotic DevOps Secrets Vault',
|
||||
dsv_inputs,
|
||||
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path']),
|
||||
)
|
||||
@@ -142,7 +142,8 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
logger.exception('Database Error Saving Job Event')
|
||||
duration_to_save = time.perf_counter() - duration_to_save
|
||||
for e in events:
|
||||
emit_event_detail(e)
|
||||
if not getattr(e, '_skip_websocket_message', False):
|
||||
emit_event_detail(e)
|
||||
self.buff = {}
|
||||
self.last_flush = time.time()
|
||||
# only update metrics if we saved events
|
||||
@@ -207,7 +208,13 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
GuidMiddleware.set_guid('')
|
||||
return
|
||||
|
||||
skip_websocket_message = body.pop('skip_websocket_message', False)
|
||||
|
||||
event = cls.create_from_data(**body)
|
||||
|
||||
if skip_websocket_message:
|
||||
event._skip_websocket_message = True
|
||||
|
||||
self.buff.setdefault(cls, []).append(event)
|
||||
|
||||
retries = 0
|
||||
|
||||
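A hedged illustration of the producer side of this flag: whatever enqueues the event dictionary can set skip_websocket_message so the worker above still saves the event but never broadcasts it to the live UI. The payload below is hypothetical, not an actual AWX message:

body = {
    'event': 'runner_on_ok',
    'job_id': 42,
    'counter': 7,
    'uuid': '3c9d...',                 # truncated, illustrative only
    'skip_websocket_message': True,    # popped by the worker before create_from_data
}
# The worker pops the flag, sets event._skip_websocket_message = True, and the
# flush loop then skips emit_event_detail() for this event.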
@@ -4,11 +4,13 @@
|
||||
# Python
|
||||
import datetime
|
||||
import logging
|
||||
import pytz
|
||||
import re
|
||||
|
||||
|
||||
# Django
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction
|
||||
from django.db import transaction, connection
|
||||
from django.utils.timezone import now
|
||||
|
||||
# AWX
|
||||
@@ -18,6 +20,132 @@ from awx.main.signals import disable_activity_stream, disable_computed_fields
|
||||
from awx.main.utils.deletion import AWXCollector, pre_delete
|
||||
|
||||
|
||||
def unified_job_class_to_event_table_name(job_class):
|
||||
return f'main_{job_class().event_class.__name__.lower()}'
|
||||
|
||||
|
||||
def partition_table_name(job_class, dt):
|
||||
suffix = dt.replace(microsecond=0, second=0, minute=0).strftime('%Y%m%d_%H')
|
||||
|
||||
event_tbl_name = unified_job_class_to_event_table_name(job_class)
|
||||
event_tbl_name += f'_{suffix}'
|
||||
return event_tbl_name
|
||||
|
||||
|
||||
def partition_name_dt(part_name):
|
||||
"""
|
||||
part_name examples:
|
||||
main_jobevent_20210318_09
|
||||
main_projectupdateevent_20210318_11
|
||||
main_inventoryupdateevent_20210318_03
|
||||
"""
|
||||
if '_unpartitioned' in part_name:
|
||||
return None
|
||||
p = re.compile('([a-z]+)_([a-z]+)_([0-9]+)_([0-9][0-9])')
|
||||
m = p.match(part_name)
|
||||
if not m:
|
||||
return m
|
||||
dt_str = f"{m.group(3)}_{m.group(4)}"
|
||||
dt = datetime.datetime.strptime(dt_str, '%Y%m%d_%H').replace(tzinfo=pytz.UTC)
|
||||
return dt
|
||||
|
||||
|
||||
def dt_to_partition_name(tbl_name, dt):
|
||||
return f"{tbl_name}_{dt.strftime('%Y%m%d_%H')}"
|
||||
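A worked example of the naming helpers above, assuming Job().event_class is JobEvent (so the table prefix is main_jobevent):

import datetime, pytz

dt = datetime.datetime(2021, 3, 18, 9, 42, 17, tzinfo=pytz.UTC)
partition_table_name(Job, dt)                    # 'main_jobevent_20210318_09'
partition_name_dt('main_jobevent_20210318_09')   # 2021-03-18 09:00:00+00:00
dt_to_partition_name('main_jobevent', dt)        # 'main_jobevent_20210318_09'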
|
||||
|
||||
class DeleteMeta:
|
||||
def __init__(self, logger, job_class, cutoff, dry_run):
|
||||
self.logger = logger
|
||||
self.job_class = job_class
|
||||
self.cutoff = cutoff
|
||||
self.dry_run = dry_run
|
||||
|
||||
self.jobs_qs = None  # Set in find_jobs_to_delete()
|
||||
|
||||
self.parts_no_drop = set() # Set in identify_excluded_partitions()
|
||||
self.parts_to_drop = set() # Set in find_partitions_to_drop()
|
||||
self.jobs_pk_list = [] # Set in find_jobs_to_delete()
|
||||
self.jobs_to_delete_count = 0 # Set in find_jobs_to_delete()
|
||||
self.jobs_no_delete_count = 0 # Set in find_jobs_to_delete()
|
||||
|
||||
def find_jobs_to_delete(self):
|
||||
self.jobs_qs = self.job_class.objects.filter(created__lt=self.cutoff).values_list('pk', 'status', 'created')
|
||||
for pk, status, created in self.jobs_qs:
|
||||
if status not in ['pending', 'waiting', 'running']:
|
||||
self.jobs_to_delete_count += 1
|
||||
self.jobs_pk_list.append(pk)
|
||||
self.jobs_no_delete_count = (
|
||||
self.job_class.objects.filter(created__gte=self.cutoff) | self.job_class.objects.filter(status__in=['pending', 'waiting', 'running'])
|
||||
).count()
|
||||
|
||||
def identify_excluded_partitions(self):
|
||||
|
||||
part_drop = {}
|
||||
|
||||
for pk, status, created in self.jobs_qs:
|
||||
|
||||
part_key = partition_table_name(self.job_class, created)
|
||||
if status in ['pending', 'waiting', 'running']:
|
||||
part_drop[part_key] = False
|
||||
else:
|
||||
part_drop.setdefault(part_key, True)
|
||||
|
||||
# Note that parts_no_drop _may_ contain the names of partitions that don't exist
|
||||
# This can happen when the cleanup of _unpartitioned_* logic leaves behind jobs with status pending, waiting, running. The find_jobs_to_delete() will
|
||||
# pick these jobs up.
|
||||
self.parts_no_drop = set([k for k, v in part_drop.items() if v is False])
|
||||
|
||||
def delete_jobs(self):
|
||||
if not self.dry_run:
|
||||
self.job_class.objects.filter(pk__in=self.jobs_pk_list).delete()
|
||||
|
||||
def find_partitions_to_drop(self):
|
||||
tbl_name = unified_job_class_to_event_table_name(self.job_class)
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
query = "SELECT inhrelid::regclass::text AS child FROM pg_catalog.pg_inherits"
|
||||
query += f" WHERE inhparent = 'public.{tbl_name}'::regclass"
|
||||
query += f" AND TO_TIMESTAMP(LTRIM(inhrelid::regclass::text, '{tbl_name}_'), 'YYYYMMDD_HH24') < '{self.cutoff}'"
|
||||
query += " ORDER BY inhrelid::regclass::text"
|
||||
|
||||
cursor.execute(query)
|
||||
partitions_from_db = [r[0] for r in cursor.fetchall()]
|
||||
|
||||
partitions_dt = [partition_name_dt(p) for p in partitions_from_db]
partitions_dt = [p for p in partitions_dt if p is not None]
|
||||
|
||||
# convert datetime partition back to string partition
|
||||
partitions_maybe_drop = set([dt_to_partition_name(tbl_name, dt) for dt in partitions_dt])
|
||||
|
||||
# Do not drop partition if there is a job that will not be deleted pointing at it
|
||||
self.parts_to_drop = partitions_maybe_drop - self.parts_no_drop
|
||||
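For a concrete feel of the pg_inherits lookup above, with tbl_name='main_jobevent' and a cutoff of 2021-05-01 00:00 UTC the assembled statement is (cutoff interpolated via its default str() form):

assembled = (
    "SELECT inhrelid::regclass::text AS child FROM pg_catalog.pg_inherits"
    " WHERE inhparent = 'public.main_jobevent'::regclass"
    " AND TO_TIMESTAMP(LTRIM(inhrelid::regclass::text, 'main_jobevent_'), 'YYYYMMDD_HH24')"
    " < '2021-05-01 00:00:00+00:00'"
    " ORDER BY inhrelid::regclass::text"
)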
|
||||
def drop_partitions(self):
|
||||
if len(self.parts_to_drop) > 0:
|
||||
parts_to_drop = list(self.parts_to_drop)
|
||||
parts_to_drop.sort() # sort it to make reading it easier for humans
|
||||
parts_to_drop_str = ','.join(parts_to_drop)
|
||||
if self.dry_run:
|
||||
self.logger.debug(f"Would drop event partition(s) {parts_to_drop_str}")
|
||||
else:
|
||||
self.logger.debug(f"Dropping event partition(s) {parts_to_drop_str}")
|
||||
|
||||
if not self.dry_run:
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"DROP TABLE {parts_to_drop_str}")
|
||||
else:
|
||||
self.logger.debug("No event partitions to drop")
|
||||
|
||||
def delete(self):
|
||||
self.find_jobs_to_delete()
|
||||
self.identify_excluded_partitions()
|
||||
self.find_partitions_to_drop()
|
||||
self.drop_partitions()
|
||||
self.delete_jobs()
|
||||
return (self.jobs_no_delete_count, self.jobs_to_delete_count)
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Management command to cleanup old jobs and project updates.
|
||||
@@ -36,6 +164,43 @@ class Command(BaseCommand):
|
||||
parser.add_argument('--notifications', dest='only_notifications', action='store_true', default=False, help='Remove notifications')
|
||||
parser.add_argument('--workflow-jobs', default=False, action='store_true', dest='only_workflow_jobs', help='Remove workflow jobs')
|
||||
|
||||
def cleanup(self, job_class):
|
||||
delete_meta = DeleteMeta(self.logger, job_class, self.cutoff, self.dry_run)
|
||||
skipped, deleted = delete_meta.delete()
|
||||
|
||||
return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)
|
||||
|
||||
def cleanup_jobs_partition(self):
|
||||
return self.cleanup(Job)
|
||||
|
||||
def cleanup_ad_hoc_commands_partition(self):
|
||||
return self.cleanup(AdHocCommand)
|
||||
|
||||
def cleanup_project_updates_partition(self):
|
||||
return self.cleanup(ProjectUpdate)
|
||||
|
||||
def cleanup_inventory_updates_partition(self):
|
||||
return self.cleanup(InventoryUpdate)
|
||||
|
||||
def cleanup_management_jobs_partition(self):
|
||||
return self.cleanup(SystemJob)
|
||||
|
||||
def cleanup_workflow_jobs_partition(self):
|
||||
delete_meta = DeleteMeta(self.logger, WorkflowJob, self.cutoff, self.dry_run)
|
||||
|
||||
delete_meta.find_jobs_to_delete()
|
||||
delete_meta.delete_jobs()
|
||||
return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)
|
||||
|
||||
def _cascade_delete_job_events(self, model, pk_list):
|
||||
if len(pk_list) > 0:
|
||||
with connection.cursor() as cursor:
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
|
||||
pk_list_csv = ','.join(map(str, pk_list))
|
||||
rel_name = model().event_parent_key
|
||||
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
|
||||
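An illustration of the statement built above for model=Job and three job ids, assuming Job().event_parent_key resolves to the job_id column (an assumption; the real value comes from the model):

pk_list = [101, 102, 103]
tblname = 'main_jobevent'    # unified_job_class_to_event_table_name(Job)
rel_name = 'job_id'          # assumed value of Job().event_parent_key
sql = f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({','.join(map(str, pk_list))})"
# DELETE FROM _unpartitioned_main_jobevent WHERE job_id IN (101,102,103)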
|
||||
def cleanup_jobs(self):
|
||||
skipped, deleted = 0, 0
|
||||
|
||||
@@ -45,12 +210,14 @@ class Command(BaseCommand):
|
||||
# get queryset for available jobs to remove
|
||||
qs = Job.objects.filter(created__lt=self.cutoff).exclude(status__in=['pending', 'waiting', 'running'])
|
||||
# get pk list for the first N (batch_size) objects
|
||||
pk_list = qs[0:batch_size].values_list('pk')
|
||||
pk_list = qs[0:batch_size].values_list('pk', flat=True)
|
||||
# You cannot delete queries with sql LIMIT set, so we must
|
||||
# create a new query from this pk_list
|
||||
qs_batch = Job.objects.filter(pk__in=pk_list)
|
||||
just_deleted = 0
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(Job, pk_list)
|
||||
|
||||
del_query = pre_delete(qs_batch)
|
||||
collector = AWXCollector(del_query.db)
|
||||
collector.collect(del_query)
|
||||
@@ -71,6 +238,7 @@ class Command(BaseCommand):
|
||||
def cleanup_ad_hoc_commands(self):
|
||||
skipped, deleted = 0, 0
|
||||
ad_hoc_commands = AdHocCommand.objects.filter(created__lt=self.cutoff)
|
||||
pk_list = []
|
||||
for ad_hoc_command in ad_hoc_commands.iterator():
|
||||
ad_hoc_command_display = '"%s" (%d events)' % (str(ad_hoc_command), ad_hoc_command.ad_hoc_command_events.count())
|
||||
if ad_hoc_command.status in ('pending', 'waiting', 'running'):
|
||||
@@ -81,15 +249,20 @@ class Command(BaseCommand):
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, ad_hoc_command_display)
|
||||
if not self.dry_run:
|
||||
pk_list.append(ad_hoc_command.pk)
|
||||
ad_hoc_command.delete()
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(AdHocCommand, pk_list)
|
||||
|
||||
skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
def cleanup_project_updates(self):
|
||||
skipped, deleted = 0, 0
|
||||
project_updates = ProjectUpdate.objects.filter(created__lt=self.cutoff)
|
||||
pk_list = []
|
||||
for pu in project_updates.iterator():
|
||||
pu_display = '"%s" (type %s)' % (str(pu), str(pu.launch_type))
|
||||
if pu.status in ('pending', 'waiting', 'running'):
|
||||
@@ -104,15 +277,20 @@ class Command(BaseCommand):
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, pu_display)
|
||||
if not self.dry_run:
|
||||
pk_list.append(pu.pk)
|
||||
pu.delete()
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(ProjectUpdate, pk_list)
|
||||
|
||||
skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
def cleanup_inventory_updates(self):
|
||||
skipped, deleted = 0, 0
|
||||
inventory_updates = InventoryUpdate.objects.filter(created__lt=self.cutoff)
|
||||
pk_list = []
|
||||
for iu in inventory_updates.iterator():
|
||||
iu_display = '"%s" (source %s)' % (str(iu), str(iu.source))
|
||||
if iu.status in ('pending', 'waiting', 'running'):
|
||||
@@ -127,15 +305,20 @@ class Command(BaseCommand):
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, iu_display)
|
||||
if not self.dry_run:
|
||||
pk_list.append(iu.pk)
|
||||
iu.delete()
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(InventoryUpdate, pk_list)
|
||||
|
||||
skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
def cleanup_management_jobs(self):
|
||||
skipped, deleted = 0, 0
|
||||
system_jobs = SystemJob.objects.filter(created__lt=self.cutoff)
|
||||
pk_list = []
|
||||
for sj in system_jobs.iterator():
|
||||
sj_display = '"%s" (type %s)' % (str(sj), str(sj.job_type))
|
||||
if sj.status in ('pending', 'waiting', 'running'):
|
||||
@@ -146,9 +329,13 @@ class Command(BaseCommand):
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, sj_display)
|
||||
if not self.dry_run:
|
||||
pk_list.append(sj.pk)
|
||||
sj.delete()
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(SystemJob, pk_list)
|
||||
|
||||
skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
@@ -222,6 +409,13 @@ class Command(BaseCommand):
|
||||
for m in model_names:
|
||||
if m in models_to_cleanup:
|
||||
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
|
||||
|
||||
func = getattr(self, 'cleanup_%s_partition' % m, None)
|
||||
if func:
|
||||
skipped_partition, deleted_partition = func()
|
||||
skipped += skipped_partition
|
||||
deleted += deleted_partition
|
||||
|
||||
if self.dry_run:
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
|
||||
@@ -2,9 +2,8 @@
|
||||
# All Rights Reserved
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from crum import impersonate
|
||||
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate, ExecutionEnvironment
|
||||
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
|
||||
from awx.main.signals import disable_computed_fields
|
||||
|
||||
|
||||
@@ -68,13 +67,6 @@ class Command(BaseCommand):
|
||||
print('Demo Credential, Inventory, and Job Template added.')
|
||||
changed = True
|
||||
|
||||
for ee in reversed(settings.DEFAULT_EXECUTION_ENVIRONMENTS):
|
||||
_, created = ExecutionEnvironment.objects.update_or_create(name=ee['name'], defaults={'image': ee['image'], 'managed_by_tower': True})
|
||||
|
||||
if created:
|
||||
changed = True
|
||||
print('Default Execution Environment(s) registered.')
|
||||
|
||||
if changed:
|
||||
print('(changed: True)')
|
||||
else:
|
||||
|
||||
awx/main/management/commands/custom_venv_associations.py (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
# Copyright (c) 2021 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from awx.main.utils.common import get_custom_venv_choices
|
||||
from awx.main.models import Organization, InventorySource, JobTemplate, Project
|
||||
import yaml
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""Returns the pip freeze from the path passed in the argument"""
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
'path',
|
||||
type=str,
|
||||
nargs=1,
|
||||
default='',
|
||||
help='run this with a path to a virtual environment as an argument to see the associated Job Templates, Organizations, Projects, and Inventory Sources.',
|
||||
)
|
||||
parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
# look up organizations and unified job templates (which include JTs, workflows, and inventory updates)
|
||||
super(Command, self).__init__()
|
||||
results = {}
|
||||
path = options.get('path')
|
||||
if path:
|
||||
all_venvs = get_custom_venv_choices()
|
||||
if path[0] in all_venvs: # verify this is a valid path
|
||||
path = path[0]
|
||||
orgs = [{"name": org.name, "id": org.id} for org in Organization.objects.filter(custom_virtualenv=path)]
|
||||
jts = [{"name": jt.name, "id": jt.id} for jt in JobTemplate.objects.filter(custom_virtualenv=path)]
|
||||
proj = [{"name": proj.name, "id": proj.id} for proj in Project.objects.filter(custom_virtualenv=path)]
|
||||
invsrc = [{"name": inv.name, "id": inv.id} for inv in InventorySource.objects.filter(custom_virtualenv=path)]
|
||||
results["organizations"] = orgs
|
||||
results["job_templates"] = jts
|
||||
results["projects"] = proj
|
||||
results["inventory_sources"] = invsrc
|
||||
if not options.get('q'):
|
||||
msg = [
|
||||
'# Virtual Environments Associations:',
|
||||
yaml.dump(results),
|
||||
'- To list all (now deprecated) custom virtual environments run:',
|
||||
'awx-manage list_custom_venvs',
|
||||
'',
|
||||
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
|
||||
'awx-manage export_custom_venv /path/to/venv',
|
||||
'',
|
||||
'- Run these commands with `-q` to remove tool tips.',
|
||||
'',
|
||||
]
|
||||
print('\n'.join(msg))
|
||||
else:
|
||||
print(yaml.dump(results))
|
||||
|
||||
else:
|
||||
print('\n', '# Incorrect path, verify your path is from the following list:')
|
||||
print('\n'.join(all_venvs), '\n')
|
||||
awx/main/management/commands/export_custom_venv.py (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
# Copyright (c) 2021 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
|
||||
from awx.main.utils.common import get_custom_venv_pip_freeze, get_custom_venv_choices
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""Returns the pip freeze from the path passed in the argument"""
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
'path',
|
||||
type=str,
|
||||
nargs=1,
|
||||
default='',
|
||||
help='run this with a path to a virtual environment as an argument to see the pip freeze data',
|
||||
)
|
||||
parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
super(Command, self).__init__()
|
||||
if options.get('path'):
|
||||
path = options.get('path')
|
||||
all_venvs = get_custom_venv_choices()
|
||||
if path[0] in all_venvs:
|
||||
pip_data = get_custom_venv_pip_freeze(options.get('path')[0])
|
||||
if pip_data:
|
||||
if not options.get('q'):
|
||||
msg = [
|
||||
'# Virtual environment contents:',
|
||||
pip_data,
|
||||
'- To list all (now deprecated) custom virtual environments run:',
|
||||
'awx-manage list_custom_venvs',
|
||||
'',
|
||||
'- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',
|
||||
'awx-manage custom_venv_associations /path/to/venv',
|
||||
'',
|
||||
'- Run these commands with `-q` to remove tool tips.',
|
||||
'',
|
||||
]
|
||||
print('\n'.join(msg))
|
||||
else:
|
||||
print(pip_data)
|
||||
|
||||
else:
|
||||
print('\n', '# Incorrect path, verify your path is from the following list:')
|
||||
print('\n'.join(all_venvs))
|
||||
@@ -36,20 +36,20 @@ from awx.main.utils.pglock import advisory_lock
|
||||
logger = logging.getLogger('awx.main.commands.inventory_import')
|
||||
|
||||
LICENSE_EXPIRED_MESSAGE = '''\
|
||||
License expired.
|
||||
See http://www.ansible.com/renew for license extension information.'''
|
||||
Subscription expired.
|
||||
Contact us (https://www.redhat.com/contact) for subscription extension information.'''
|
||||
|
||||
LICENSE_NON_EXISTANT_MESSAGE = '''\
|
||||
No license.
|
||||
See http://www.ansible.com/renew for license information.'''
|
||||
No subscription.
|
||||
Contact us (https://www.redhat.com/contact) for subscription information.'''
|
||||
|
||||
LICENSE_MESSAGE = '''\
|
||||
Number of licensed instances exceeded, would bring available instances to %(new_count)d, system is licensed for %(instance_count)d.
|
||||
See http://www.ansible.com/renew for license extension information.'''
|
||||
%(new_count)d instances have been automated, system is subscribed for %(instance_count)d.
|
||||
Contact us (https://www.redhat.com/contact) for upgrade information.'''
|
||||
|
||||
DEMO_LICENSE_MESSAGE = '''\
|
||||
Demo mode free license count exceeded, would bring available instances to %(new_count)d, demo mode allows %(instance_count)d.
|
||||
See http://www.ansible.com/renew for licensing information.'''
|
||||
Demo mode free subscription count exceeded. Current automated instances are %(new_count)d, demo mode allows %(instance_count)d.
|
||||
Contact us (https://www.redhat.com/contact) for subscription information.'''
|
||||
|
||||
|
||||
def functioning_dir(path):
|
||||
@@ -66,13 +66,9 @@ class AnsibleInventoryLoader(object):
|
||||
/usr/bin/ansible/ansible-inventory -i hosts --list
|
||||
"""
|
||||
|
||||
def __init__(self, source, venv_path=None, verbosity=0):
|
||||
def __init__(self, source, verbosity=0):
|
||||
self.source = source
|
||||
self.verbosity = verbosity
|
||||
if venv_path:
|
||||
self.venv_path = venv_path
|
||||
else:
|
||||
self.venv_path = settings.ANSIBLE_VENV_PATH
|
||||
|
||||
def get_base_args(self):
|
||||
bargs = ['podman', 'run', '--user=root', '--quiet']
|
||||
@@ -131,7 +127,6 @@ class Command(BaseCommand):
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--inventory-name', dest='inventory_name', type=str, default=None, metavar='n', help='name of inventory to sync')
|
||||
parser.add_argument('--inventory-id', dest='inventory_id', type=int, default=None, metavar='i', help='id of inventory to sync')
|
||||
parser.add_argument('--venv', dest='venv', type=str, default=None, help='absolute path to the AWX custom virtualenv to use')
|
||||
parser.add_argument('--overwrite', dest='overwrite', action='store_true', default=False, help='overwrite the destination hosts and groups')
|
||||
parser.add_argument('--overwrite-vars', dest='overwrite_vars', action='store_true', default=False, help='overwrite (rather than merge) variables')
|
||||
parser.add_argument('--keep-vars', dest='keep_vars', action='store_true', default=False, help='DEPRECATED legacy option, has no effect')
|
||||
@@ -761,29 +756,22 @@ class Command(BaseCommand):
|
||||
instance_count = license_info.get('instance_count', 0)
|
||||
free_instances = license_info.get('free_instances', 0)
|
||||
time_remaining = license_info.get('time_remaining', 0)
|
||||
automated_count = license_info.get('automated_instances', 0)
|
||||
hard_error = license_info.get('trial', False) is True or license_info['instance_count'] == 10
|
||||
new_count = Host.objects.active_count()
|
||||
if time_remaining <= 0:
|
||||
if hard_error:
|
||||
logger.error(LICENSE_EXPIRED_MESSAGE)
|
||||
raise PermissionDenied("License has expired!")
|
||||
raise PermissionDenied("Subscription has expired!")
|
||||
else:
|
||||
logger.warning(LICENSE_EXPIRED_MESSAGE)
|
||||
# special check for tower-type inventory sources
|
||||
# but only if running the plugin
|
||||
TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']
|
||||
if self.inventory_source.source == 'tower' and any(f in self.inventory_source.source_path for f in TOWER_SOURCE_FILES):
|
||||
# only if this is the 2nd call to license check, we cannot compare before running plugin
|
||||
if hasattr(self, 'all_group'):
|
||||
self.remote_tower_license_compare(local_license_type)
|
||||
if free_instances < 0:
|
||||
d = {
|
||||
'new_count': new_count,
|
||||
'new_count': automated_count,
|
||||
'instance_count': instance_count,
|
||||
}
|
||||
if hard_error:
|
||||
logger.error(LICENSE_MESSAGE % d)
|
||||
raise PermissionDenied('License count exceeded!')
|
||||
raise PermissionDenied('Subscription count exceeded!')
|
||||
else:
|
||||
logger.warning(LICENSE_MESSAGE % d)
|
||||
|
||||
@@ -824,7 +812,6 @@ class Command(BaseCommand):
|
||||
raise CommandError('--source is required')
|
||||
verbosity = int(options.get('verbosity', 1))
|
||||
self.set_logging_level(verbosity)
|
||||
venv_path = options.get('venv', None)
|
||||
|
||||
# Load inventory object based on name or ID.
|
||||
if inventory_id:
|
||||
@@ -854,7 +841,7 @@ class Command(BaseCommand):
|
||||
_eager_fields=dict(job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
|
||||
)
|
||||
|
||||
data = AnsibleInventoryLoader(source=source, venv_path=venv_path, verbosity=verbosity).load()
|
||||
data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
|
||||
|
||||
logger.debug('Finished loading from source: %s', source)
|
||||
|
||||
|
||||
awx/main/management/commands/list_custom_venvs.py (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
# Copyright (c) 2021 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
import sys
|
||||
|
||||
from awx.main.utils.common import get_custom_venv_choices
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""Returns a list of custom venv paths from the path passed in the argument"""
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
super(Command, self).__init__()
|
||||
venvs = get_custom_venv_choices()
|
||||
if venvs:
|
||||
if not options.get('q'):
|
||||
msg = [
|
||||
'# Discovered Virtual Environments:',
|
||||
'\n'.join(venvs),
|
||||
'',
|
||||
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
|
||||
'awx-manage export_custom_venv /path/to/venv',
|
||||
'',
|
||||
'- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',
|
||||
'awx-manage custom_venv_associations /path/to/venv',
|
||||
'',
|
||||
'- Run these commands with `-q` to remove tool tips.',
|
||||
'',
|
||||
]
|
||||
print('\n'.join(msg))
|
||||
else:
|
||||
print('\n'.join(venvs), '\n')
|
||||
else:
|
||||
msg = ["No custom virtual environments detected in:", settings.BASE_VENV_PATH]
|
||||
|
||||
for path in settings.CUSTOM_VENV_PATHS:
|
||||
msg.append(path)
|
||||
|
||||
print('\n'.join(msg), file=sys.stderr)
|
||||
@@ -0,0 +1,135 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
import sys
|
||||
from distutils.util import strtobool
|
||||
from argparse import RawTextHelpFormatter
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from awx.main.models import CredentialType, Credential, ExecutionEnvironment
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""Create default execution environments, intended for new installs"""
|
||||
|
||||
help = """
|
||||
Creates or updates the execution environments set in settings.DEFAULT_EXECUTION_ENVIRONMENTS if they are not yet created.
|
||||
Optionally provide authentication details to create or update a container registry credential that will be set on all of these default execution environments.
|
||||
Note that settings.DEFAULT_EXECUTION_ENVIRONMENTS is an ordered list; the first entry in the list will be used for project updates.
|
||||
"""
|
||||
|
||||
# Preserves newlines in the help text
|
||||
def create_parser(self, *args, **kwargs):
|
||||
parser = super(Command, self).create_parser(*args, **kwargs)
|
||||
parser.formatter_class = RawTextHelpFormatter
|
||||
return parser
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
"--registry-url",
|
||||
type=str,
|
||||
default="",
|
||||
help="URL for the container registry",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--registry-username",
|
||||
type=str,
|
||||
default="",
|
||||
help="username for the container registry",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--registry-password",
|
||||
type=str,
|
||||
default="",
|
||||
help="Password or token for CLI authentication with the container registry",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--verify-ssl",
|
||||
type=lambda x: bool(strtobool(str(x))),
|
||||
default=True,
|
||||
help="Verify SSL when authenticating with the container registry",
|
||||
)
|
||||
|
||||
def handle(self, *args, **options):
|
||||
changed = False
|
||||
registry_cred = None
|
||||
|
||||
if options.get("registry_username"):
|
||||
if not options.get("registry_password"):
|
||||
sys.stderr.write("Registry password must be provided when providing registry username\n")
|
||||
sys.exit(1)
|
||||
|
||||
if not options.get("registry_url"):
|
||||
sys.stderr.write("Registry url must be provided when providing registry username\n")
|
||||
sys.exit(1)
|
||||
|
||||
registry_cred_type = CredentialType.objects.filter(kind="registry")
|
||||
if not registry_cred_type.exists():
|
||||
sys.stderr.write("No registry credential type found")
|
||||
sys.exit(1)
|
||||
|
||||
inputs = {
|
||||
"host": options.get("registry_url"),
|
||||
"password": options.get("registry_password"),
|
||||
"username": options.get("registry_username"),
|
||||
"verify_ssl": options.get("verify_ssl"),
|
||||
}
|
||||
registry_cred, cred_created = Credential.objects.get_or_create(
|
||||
name="Default Execution Environment Registry Credential",
|
||||
managed_by_tower=True,
|
||||
credential_type=registry_cred_type[0],
|
||||
defaults={'inputs': inputs},
|
||||
)
|
||||
|
||||
if cred_created:
|
||||
changed = True
|
||||
print("'Default Execution Environment Credential' registered.")
|
||||
|
||||
for key, value in inputs.items():
|
||||
if not registry_cred.inputs.get(key) or registry_cred.get_input(key) != value:
|
||||
registry_cred.inputs[key] = value
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
registry_cred.save()
|
||||
print("'Default Execution Environment Credential' updated.")
|
||||
|
||||
# Create default globally available Execution Environments
|
||||
for ee in reversed(settings.GLOBAL_JOB_EXECUTION_ENVIRONMENTS):
|
||||
_this_ee, ee_created = ExecutionEnvironment.objects.get_or_create(name=ee["name"], defaults={'image': ee["image"], 'credential': registry_cred})
|
||||
if ee_created:
|
||||
changed = True
|
||||
print(f"'{ee['name']}' Default Execution Environment registered.")
|
||||
else:
|
||||
if _this_ee.image != ee["image"]:
|
||||
_this_ee.image = ee["image"]
|
||||
changed = True
|
||||
if _this_ee.credential != registry_cred:
|
||||
_this_ee.credential = registry_cred
|
||||
changed = True
|
||||
if changed:
|
||||
_this_ee.save()
|
||||
print(f"'{ee['name']}' Default Execution Environment updated.")
|
||||
|
||||
# Create the control plane execution environment that is used for project updates and system jobs
|
||||
ee = settings.CONTROL_PLANE_EXECUTION_ENVIRONMENT
|
||||
_this_ee, cp_created = ExecutionEnvironment.objects.get_or_create(
|
||||
name="Control Plane Execution Environment", defaults={'image': ee, 'managed_by_tower': True, 'credential': registry_cred}
|
||||
)
|
||||
if cp_created:
|
||||
changed = True
|
||||
print("Control Plane Execution Environment registered.")
|
||||
else:
|
||||
if _this_ee.image != ee:
|
||||
_this_ee.image = ee
|
||||
changed = True
|
||||
if _this_ee.credential != registry_cred:
|
||||
_this_ee.credential = registry_cred
|
||||
changed = True
|
||||
if changed:
|
||||
_this_ee.save()
|
||||
|
||||
if changed:
|
||||
print("(changed: True)")
|
||||
else:
|
||||
print("(changed: False)")
|
||||
@@ -11,11 +11,16 @@ from django.conf import settings
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
|
||||
___all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager']
|
||||
___all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager', 'DeferJobCreatedManager']
|
||||
|
||||
logger = logging.getLogger('awx.main.managers')
|
||||
|
||||
|
||||
class DeferJobCreatedManager(models.Manager):
|
||||
def get_queryset(self):
|
||||
return super(DeferJobCreatedManager, self).get_queryset().defer('job_created')
|
||||
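A hypothetical illustration of how a model would opt in to this manager; Django's defer() keeps job_created out of the SELECT until the attribute is actually read:

class ExampleEvent(models.Model):          # illustration only, not an AWX model
    job_created = models.DateTimeField(null=True, editable=False)
    objects = DeferJobCreatedManager()

# ExampleEvent.objects.all() omits job_created from the query; touching
# instance.job_created later issues a follow-up query for just that column.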
|
||||
|
||||
class HostManager(models.Manager):
|
||||
"""Custom manager class for Hosts model."""
|
||||
|
||||
@@ -141,8 +146,8 @@ class InstanceManager(models.Manager):
|
||||
|
||||
pod_ip = os.environ.get('MY_POD_IP')
|
||||
registered = self.register(ip_address=pod_ip)
|
||||
is_container_group = settings.IS_K8S
|
||||
RegisterQueue('tower', 100, 0, [], is_container_group).register()
|
||||
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
|
||||
RegisterQueue(settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True).register()
|
||||
return registered
|
||||
else:
|
||||
return (False, self.me())
|
||||
@@ -151,10 +156,6 @@ class InstanceManager(models.Manager):
|
||||
"""Return count of active Tower nodes for licensing."""
|
||||
return self.all().count()
|
||||
|
||||
def my_role(self):
|
||||
# NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
|
||||
return "tower"
|
||||
|
||||
|
||||
class InstanceGroupManager(models.Manager):
|
||||
"""A custom manager class for the Instance model.
|
||||
|
||||
@@ -46,7 +46,7 @@ class TimingMiddleware(threading.local, MiddlewareMixin):
|
||||
response['X-API-Total-Time'] = '%0.3fs' % total_time
|
||||
if settings.AWX_REQUEST_PROFILE:
|
||||
response['X-API-Profile-File'] = self.prof.stop()
|
||||
perf_logger.info(
|
||||
perf_logger.debug(
|
||||
f'request: {request}, response_time: {response["X-API-Total-Time"]}',
|
||||
extra=dict(python_objects=dict(request=request, response=response, X_API_TOTAL_TIME=response["X-API-Total-Time"])),
|
||||
)
|
||||
|
||||
@@ -10,15 +10,6 @@ def migrate_event_data(apps, schema_editor):
|
||||
# that have a bigint primary key (because the old usage of an integer
|
||||
# numeric isn't enough, as its range is about 2.1B, see:
|
||||
# https://www.postgresql.org/docs/9.1/datatype-numeric.html)
|
||||
|
||||
# unfortunately, we can't do this with a simple ALTER TABLE, because
|
||||
# for tables with hundreds of millions or billions of rows, the ALTER TABLE
|
||||
# can take *hours* on modest hardware.
|
||||
#
|
||||
# the approach in this migration means that post-migration, event data will
|
||||
# *not* immediately show up, but will be repopulated over time progressively
|
||||
# the trade-off here is not having to wait hours for the full data migration
|
||||
# before you can start and run AWX again (including new playbook runs)
|
||||
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
|
||||
with connection.cursor() as cursor:
|
||||
# rename the current event table
|
||||
@@ -35,30 +26,7 @@ def migrate_event_data(apps, schema_editor):
|
||||
cursor.execute(f'CREATE SEQUENCE "{tblname}_id_seq";')
|
||||
cursor.execute(f'ALTER TABLE "{tblname}" ALTER COLUMN "id" ' f"SET DEFAULT nextval('{tblname}_id_seq');")
|
||||
cursor.execute(f"SELECT setval('{tblname}_id_seq', (SELECT MAX(id) FROM _old_{tblname}), true);")
|
||||
|
||||
# replace the BTREE index on main_jobevent.job_id with
|
||||
# a BRIN index to drastically improve per-UJ lookup performance
|
||||
# see: https://info.crunchydata.com/blog/postgresql-brin-indexes-big-data-performance-with-minimal-storage
|
||||
if tblname == 'main_jobevent':
|
||||
cursor.execute("SELECT indexname FROM pg_indexes WHERE tablename='main_jobevent' AND indexdef LIKE '%USING btree (job_id)';")
|
||||
old_index = cursor.fetchone()[0]
|
||||
cursor.execute(f'DROP INDEX {old_index}')
|
||||
cursor.execute('CREATE INDEX main_jobevent_job_id_brin_idx ON main_jobevent USING brin (job_id);')
|
||||
|
||||
# remove all of the indexes and constraints from the old table
|
||||
# (they just slow down the data migration)
|
||||
cursor.execute(f"SELECT indexname, indexdef FROM pg_indexes WHERE tablename='_old_{tblname}' AND indexname != '{tblname}_pkey';")
|
||||
indexes = cursor.fetchall()
|
||||
|
||||
cursor.execute(
|
||||
f"SELECT conname, contype, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r WHERE r.conrelid = '_old_{tblname}'::regclass AND conname != '{tblname}_pkey';"
|
||||
)
|
||||
constraints = cursor.fetchall()
|
||||
|
||||
for indexname, indexdef in indexes:
|
||||
cursor.execute(f'DROP INDEX IF EXISTS {indexname}')
|
||||
for conname, contype, condef in constraints:
|
||||
cursor.execute(f'ALTER TABLE _old_{tblname} DROP CONSTRAINT IF EXISTS {conname}')
|
||||
cursor.execute(f'DROP TABLE _old_{tblname};')
|
||||
|
||||
|
||||
class FakeAlterField(migrations.AlterField):
|
||||
|
||||
@@ -3,6 +3,16 @@
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def remove_iso_instances(apps, schema_editor):
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
Instance.objects.filter(rampart_groups__controller__isnull=False).delete()
|
||||
|
||||
|
||||
def remove_iso_groups(apps, schema_editor):
|
||||
InstanceGroup = apps.get_model('main', 'InstanceGroup')
|
||||
InstanceGroup.objects.filter(controller__isnull=False).delete()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
@@ -10,6 +20,8 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(remove_iso_instances),
|
||||
migrations.RunPython(remove_iso_groups),
|
||||
migrations.RemoveField(
|
||||
model_name='instance',
|
||||
name='last_isolated_check',
|
||||
|
||||
awx/main/migrations/0144_event_partitions.py (new file, 268 lines)
@@ -0,0 +1,268 @@
|
||||
from django.db import migrations, models, connection
|
||||
|
||||
|
||||
def migrate_event_data(apps, schema_editor):
|
||||
# see: https://github.com/ansible/awx/issues/9039
|
||||
#
|
||||
# the goal of this function is -- for each job event table -- to:
|
||||
# - create a parent partition table
|
||||
# - .. with a single partition
|
||||
# - .. that includes all existing job events
|
||||
#
|
||||
# the new main_jobevent_parent table should have a new
|
||||
# denormalized column, job_created, this is used as a
|
||||
# basis for partitioning job event rows
|
||||
#
|
||||
# The initial partition will be a unique case. After
|
||||
# the migration is completed, awx should create
|
||||
# new partitions on an hourly basis, as needed.
|
||||
# All events for a given job should be placed in
|
||||
# a partition based on the job's _created time_.
|
||||
|
||||
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
|
||||
with connection.cursor() as cursor:
|
||||
# mark existing table as _unpartitioned_*
|
||||
# we will drop this table after its data
|
||||
# has been moved over
|
||||
cursor.execute(f'ALTER TABLE {tblname} RENAME TO _unpartitioned_{tblname}')
|
||||
|
||||
# create a copy of the table that we will use as a reference for schema
|
||||
# otherwise, the schema changes we would make on the old jobevents table
|
||||
# (namely, dropping the primary key constraint) would cause the migration
|
||||
# to suffer a serious performance degradation
|
||||
cursor.execute(f'CREATE TABLE tmp_{tblname} ' f'(LIKE _unpartitioned_{tblname} INCLUDING ALL)')
|
||||
|
||||
# drop primary key constraint; in a partitioned table
|
||||
# constraints must include the partition key itself
|
||||
# TODO: do more generic search for pkey constraints
|
||||
# instead of hardcoding this one that applies to main_jobevent
|
||||
cursor.execute(f'ALTER TABLE tmp_{tblname} DROP CONSTRAINT tmp_{tblname}_pkey')
|
||||
|
||||
# create parent table
|
||||
cursor.execute(
|
||||
f'CREATE TABLE {tblname} '
|
||||
f'(LIKE tmp_{tblname} INCLUDING ALL, job_created TIMESTAMP WITH TIME ZONE NOT NULL) '
|
||||
f'PARTITION BY RANGE(job_created);'
|
||||
)
|
||||
|
||||
cursor.execute(f'DROP TABLE tmp_{tblname}')
|
||||
|
||||
# recreate primary key constraint
|
||||
cursor.execute(f'ALTER TABLE ONLY {tblname} ' f'ADD CONSTRAINT {tblname}_pkey_new PRIMARY KEY (id, job_created);')
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
"""
|
||||
Big int migration introduced the BRIN index main_jobevent_job_id_brin_idx. For upgrades, we drop the index; new installs do nothing.
I have seen the second index in my dev environment. I cannot find where in the code it was created. Drop it just in case.
|
||||
"""
|
||||
cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_brin_idx')
|
||||
cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_idx')
|
||||
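After this migration, new hourly partitions are expected to be attached under the parent tables as needed. A sketch (not part of the migration itself) of the DDL that would attach one hour's range partition, following the main_jobevent_YYYYMMDD_HH naming used by the cleanup command; the exact statement AWX issues at runtime may differ:

with connection.cursor() as cursor:
    # Attach a partition covering 09:00-10:00 UTC on 2021-03-18.
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS main_jobevent_20210318_09 "
        "PARTITION OF main_jobevent "
        "FOR VALUES FROM ('2021-03-18 09:00:00+00') TO ('2021-03-18 10:00:00+00')"
    )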
|
||||
|
||||
class FakeAddField(migrations.AddField):
|
||||
def database_forwards(self, *args):
|
||||
# this is intentionally left blank, because we're
|
||||
# going to accomplish the migration with some custom raw SQL
|
||||
pass
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0143_hostmetric'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migrate_event_data),
|
||||
FakeAddField(
|
||||
model_name='jobevent',
|
||||
name='job_created',
|
||||
field=models.DateTimeField(null=True, editable=False),
|
||||
),
|
||||
FakeAddField(
|
||||
model_name='inventoryupdateevent',
|
||||
name='job_created',
|
||||
field=models.DateTimeField(null=True, editable=False),
|
||||
),
|
||||
FakeAddField(
|
||||
model_name='projectupdateevent',
|
||||
name='job_created',
|
||||
field=models.DateTimeField(null=True, editable=False),
|
||||
),
|
||||
FakeAddField(
|
||||
model_name='adhoccommandevent',
|
||||
name='job_created',
|
||||
field=models.DateTimeField(null=True, editable=False),
|
||||
),
|
||||
FakeAddField(
|
||||
model_name='systemjobevent',
|
||||
name='job_created',
|
||||
field=models.DateTimeField(null=True, editable=False),
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='UnpartitionedAdHocCommandEvent',
|
||||
fields=[],
|
||||
options={
|
||||
'proxy': True,
|
||||
'indexes': [],
|
||||
'constraints': [],
|
||||
},
|
||||
bases=('main.adhoccommandevent',),
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='UnpartitionedInventoryUpdateEvent',
|
||||
fields=[],
|
||||
options={
|
||||
'proxy': True,
|
||||
'indexes': [],
|
||||
'constraints': [],
|
||||
},
|
||||
bases=('main.inventoryupdateevent',),
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='UnpartitionedJobEvent',
|
||||
fields=[],
|
||||
options={
|
||||
'proxy': True,
|
||||
'indexes': [],
|
||||
'constraints': [],
|
||||
},
|
||||
bases=('main.jobevent',),
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='UnpartitionedProjectUpdateEvent',
|
||||
fields=[],
|
||||
options={
|
||||
'proxy': True,
|
||||
'indexes': [],
|
||||
'constraints': [],
|
||||
},
|
||||
bases=('main.projectupdateevent',),
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='UnpartitionedSystemJobEvent',
|
||||
fields=[],
|
||||
options={
|
||||
'proxy': True,
|
||||
'indexes': [],
|
||||
'constraints': [],
|
||||
},
|
||||
bases=('main.systemjobevent',),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='adhoccommandevent',
|
||||
name='ad_hoc_command',
|
||||
field=models.ForeignKey(
|
||||
db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='ad_hoc_command_events', to='main.AdHocCommand'
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='adhoccommandevent',
|
||||
name='created',
|
||||
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='adhoccommandevent',
|
||||
name='modified',
|
||||
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdateevent',
|
||||
name='created',
|
||||
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdateevent',
|
||||
name='inventory_update',
|
||||
field=models.ForeignKey(
|
||||
db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='inventory_update_events', to='main.InventoryUpdate'
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdateevent',
|
||||
name='modified',
|
||||
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobevent',
|
||||
name='created',
|
||||
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobevent',
|
||||
name='job',
|
||||
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=models.deletion.DO_NOTHING, related_name='job_events', to='main.Job'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobevent',
|
||||
name='modified',
|
||||
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='projectupdateevent',
|
||||
name='created',
|
||||
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='projectupdateevent',
|
||||
name='modified',
|
||||
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='projectupdateevent',
|
||||
name='project_update',
|
||||
field=models.ForeignKey(
|
||||
db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='project_update_events', to='main.ProjectUpdate'
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='systemjobevent',
|
||||
name='created',
|
||||
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='systemjobevent',
|
||||
name='modified',
|
||||
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='systemjobevent',
|
||||
name='system_job',
|
||||
field=models.ForeignKey(
|
||||
db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='system_job_events', to='main.SystemJob'
|
||||
),
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='adhoccommandevent',
|
||||
index_together={
|
||||
('ad_hoc_command', 'job_created', 'event'),
|
||||
('ad_hoc_command', 'job_created', 'counter'),
|
||||
('ad_hoc_command', 'job_created', 'uuid'),
|
||||
},
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='inventoryupdateevent',
|
||||
index_together={('inventory_update', 'job_created', 'counter'), ('inventory_update', 'job_created', 'uuid')},
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='jobevent',
|
||||
index_together={
|
||||
('job', 'job_created', 'counter'),
|
||||
('job', 'job_created', 'uuid'),
|
||||
('job', 'job_created', 'event'),
|
||||
('job', 'job_created', 'parent_uuid'),
|
||||
},
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='projectupdateevent',
|
||||
index_together={
|
||||
('project_update', 'job_created', 'uuid'),
|
||||
('project_update', 'job_created', 'event'),
|
||||
('project_update', 'job_created', 'counter'),
|
||||
},
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='systemjobevent',
|
||||
index_together={('system_job', 'job_created', 'uuid'), ('system_job', 'job_created', 'counter')},
|
||||
),
|
||||
]
21
awx/main/migrations/0145_deregister_managed_ee_objs.py
Normal file
@@ -0,0 +1,21 @@
# Generated by Django 2.2.16 on 2021-06-07 19:36

from django.db import migrations


def forwards(apps, schema_editor):
    ExecutionEnvironment = apps.get_model('main', 'ExecutionEnvironment')
    for row in ExecutionEnvironment.objects.filter(managed_by_tower=True):
        row.managed_by_tower = False
        row.save(update_fields=['managed_by_tower'])


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0144_event_partitions'),
    ]

    operations = [
        migrations.RunPython(forwards),
    ]
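One note on the data migration just added: migrations.RunPython is given only the forward callable, so Django treats 0145 as irreversible. If rolling back past this point ever needed to be supported, a no-op reverse callable could be supplied; the variant below is illustrative only and is not part of this changeset.

    # Hypothetical variant of the operation above, shown only for illustration.
    operations = [
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]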
59
awx/main/migrations/0146_add_insights_inventory.py
Normal file
@@ -0,0 +1,59 @@
# Generated by Django 2.2.16 on 2021-06-08 18:59

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0145_deregister_managed_ee_objs'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='host',
            name='insights_system_id',
        ),
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('tower', 'Ansible Tower'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('tower', 'Ansible Tower'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
    ]
24
awx/main/migrations/0147_validate_ee_image_field.py
Normal file
@@ -0,0 +1,24 @@
# Generated by Django 2.2.16 on 2021-06-15 02:49

import awx.main.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0146_add_insights_inventory'),
    ]

    operations = [
        migrations.AlterField(
            model_name='executionenvironment',
            name='image',
            field=models.CharField(
                help_text='The full image location, including the container registry, image name, and version tag.',
                max_length=1024,
                validators=[awx.main.validators.validate_container_image_name],
                verbose_name='image location',
            ),
        ),
    ]
@@ -3,7 +3,6 @@
|
||||
|
||||
# Django
|
||||
from django.conf import settings # noqa
|
||||
from django.db import connection
|
||||
from django.db.models.signals import pre_delete # noqa
|
||||
|
||||
# AWX
|
||||
@@ -36,6 +35,11 @@ from awx.main.models.events import ( # noqa
|
||||
JobEvent,
|
||||
ProjectUpdateEvent,
|
||||
SystemJobEvent,
|
||||
UnpartitionedAdHocCommandEvent,
|
||||
UnpartitionedInventoryUpdateEvent,
|
||||
UnpartitionedJobEvent,
|
||||
UnpartitionedProjectUpdateEvent,
|
||||
UnpartitionedSystemJobEvent,
|
||||
)
|
||||
from awx.main.models.ad_hoc_commands import AdHocCommand # noqa
|
||||
from awx.main.models.schedules import Schedule # noqa
|
||||
@@ -92,27 +96,6 @@ User.add_to_class('can_access_with_errors', check_user_access_with_errors)
|
||||
User.add_to_class('accessible_objects', user_accessible_objects)
|
||||
|
||||
|
||||
def enforce_bigint_pk_migration():
|
||||
#
|
||||
# NOTE: this function is not actually in use anymore,
|
||||
# but has been intentionally kept for historical purposes,
|
||||
# and to serve as an illustration if we ever need to perform
|
||||
# bulk modification/migration of event data in the future.
|
||||
#
|
||||
# see: https://github.com/ansible/awx/issues/6010
|
||||
# look at all the event tables and verify that they have been fully migrated
|
||||
# from the *old* int primary key table to the replacement bigint table
|
||||
# if not, attempt to migrate them in the background
|
||||
#
|
||||
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute('SELECT 1 FROM information_schema.tables WHERE table_name=%s', (f'_old_{tblname}',))
|
||||
if bool(cursor.rowcount):
|
||||
from awx.main.tasks import migrate_legacy_event_data
|
||||
|
||||
migrate_legacy_event_data.apply_async([tblname])
|
||||
|
||||
|
||||
def cleanup_created_modified_by(sender, **kwargs):
|
||||
# work around a bug in django-polymorphic that doesn't properly
|
||||
# handle cascades for reverse foreign keys on the polymorphic base model
|
||||
|
||||
@@ -15,7 +15,7 @@ from django.core.exceptions import ValidationError
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.base import prevent_search, AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
|
||||
from awx.main.models.events import AdHocCommandEvent
|
||||
from awx.main.models.events import AdHocCommandEvent, UnpartitionedAdHocCommandEvent
|
||||
from awx.main.models.unified_jobs import UnifiedJob
|
||||
from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate
|
||||
|
||||
@@ -127,6 +127,8 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
if self.has_unpartitioned_events:
|
||||
return UnpartitionedAdHocCommandEvent
|
||||
return AdHocCommandEvent
|
||||
|
||||
@property
|
||||
@@ -215,9 +217,6 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
|
||||
self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)
|
||||
if 'name' not in update_fields:
|
||||
update_fields.append('name')
|
||||
if not self.execution_environment_id:
|
||||
self.execution_environment = self.resolve_execution_environment()
|
||||
update_fields.append('execution_environment')
|
||||
super(AdHocCommand, self).save(*args, **kwargs)
|
||||
|
||||
@property
|
||||
|
||||
@@ -954,6 +954,10 @@ ManagedCredentialType(
|
||||
"scm_username": "{{username}}",
|
||||
"scm_password": "{{password}}",
|
||||
},
|
||||
'env': {
|
||||
'INSIGHTS_USER': '{{username}}',
|
||||
'INSIGHTS_PASSWORD': '{{password}}',
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -15,7 +15,9 @@ from django.utils.encoding import force_text
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main import consumers
|
||||
from awx.main.managers import DeferJobCreatedManager
|
||||
from awx.main.fields import JSONField
|
||||
from awx.main.constants import MINIMAL_EVENTS
|
||||
from awx.main.models.base import CreatedModifiedModel
|
||||
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore
|
||||
|
||||
@@ -56,9 +58,6 @@ def create_host_status_counts(event_data):
|
||||
return dict(host_status_counts)
|
||||
|
||||
|
||||
MINIMAL_EVENTS = set(['playbook_on_play_start', 'playbook_on_task_start', 'playbook_on_stats', 'EOF'])
|
||||
|
||||
|
||||
def emit_event_detail(event):
|
||||
if settings.UI_LIVE_UPDATES_ENABLED is False and event.event not in MINIMAL_EVENTS:
|
||||
return
|
||||
@@ -271,6 +270,10 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
null=True,
|
||||
default=None,
|
||||
editable=False,
|
||||
)
|
||||
modified = models.DateTimeField(
|
||||
default=None,
|
||||
editable=False,
|
||||
db_index=True,
|
||||
)
|
||||
|
||||
@@ -365,14 +368,24 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
|
||||
# find parent links and propagate changed=True and failed=True
|
||||
changed = (
|
||||
job.job_events.filter(changed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()
|
||||
job.get_event_queryset()
|
||||
.filter(changed=True)
|
||||
.exclude(parent_uuid=None)
|
||||
.only('parent_uuid')
|
||||
.values_list('parent_uuid', flat=True)
|
||||
.distinct()
|
||||
) # noqa
|
||||
failed = (
|
||||
job.job_events.filter(failed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()
|
||||
job.get_event_queryset()
|
||||
.filter(failed=True)
|
||||
.exclude(parent_uuid=None)
|
||||
.only('parent_uuid')
|
||||
.values_list('parent_uuid', flat=True)
|
||||
.distinct()
|
||||
) # noqa
|
||||
|
||||
JobEvent.objects.filter(job_id=self.job_id, uuid__in=changed).update(changed=True)
|
||||
JobEvent.objects.filter(job_id=self.job_id, uuid__in=failed).update(failed=True)
|
||||
job.get_event_queryset().filter(uuid__in=changed).update(changed=True)
|
||||
job.get_event_queryset().filter(uuid__in=failed).update(failed=True)
|
||||
|
||||
# send success/failure notifications when we've finished handling the playbook_on_stats event
|
||||
from awx.main.tasks import handle_success_and_failure_notifications # circular import
|
||||
@@ -423,6 +436,16 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
# same as above, for job_created
|
||||
# TODO: if this approach, identical to above, works, can convert to for loop
|
||||
try:
|
||||
if not isinstance(kwargs['job_created'], datetime.datetime):
|
||||
kwargs['job_created'] = parse_datetime(kwargs['job_created'])
|
||||
if not kwargs['job_created'].tzinfo:
|
||||
kwargs['job_created'] = kwargs['job_created'].replace(tzinfo=utc)
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('job_created', None)
|
||||
|
||||
host_map = kwargs.pop('host_map', {})
|
||||
|
||||
sanitize_event_keys(kwargs, cls.VALID_KEYS)
|
||||
@@ -430,6 +453,11 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
event = cls(**kwargs)
|
||||
if workflow_job_id:
|
||||
setattr(event, 'workflow_job_id', workflow_job_id)
|
||||
# shouldn't job_created _always_ be present?
|
||||
# if it's not, how could we save the event to the db?
|
||||
job_created = kwargs.pop('job_created', None)
|
||||
if job_created:
|
||||
setattr(event, 'job_created', job_created)
|
||||
setattr(event, 'host_map', host_map)
|
||||
event._update_from_event_data()
|
||||
return event
|
||||
@@ -444,25 +472,28 @@ class JobEvent(BasePlaybookEvent):
|
||||
An event/message logged from the callback when running a job.
|
||||
"""
|
||||
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id']
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created']
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('pk',)
|
||||
index_together = [
|
||||
('job', 'event'),
|
||||
('job', 'uuid'),
|
||||
('job', 'start_line'),
|
||||
('job', 'end_line'),
|
||||
('job', 'parent_uuid'),
|
||||
('job', 'job_created', 'event'),
|
||||
('job', 'job_created', 'uuid'),
|
||||
('job', 'job_created', 'parent_uuid'),
|
||||
('job', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
job = models.ForeignKey(
|
||||
'Job',
|
||||
related_name='job_events',
|
||||
on_delete=models.CASCADE,
|
||||
null=True,
|
||||
on_delete=models.DO_NOTHING,
|
||||
editable=False,
|
||||
db_index=False,
|
||||
)
|
||||
host = models.ForeignKey(
|
||||
'Host',
|
||||
@@ -482,6 +513,7 @@ class JobEvent(BasePlaybookEvent):
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
job_created = models.DateTimeField(null=True, editable=False)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:job_event_detail', kwargs={'pk': self.pk}, request=request)
|
||||
@@ -518,7 +550,7 @@ class JobEvent(BasePlaybookEvent):
|
||||
summaries = dict()
|
||||
updated_hosts_list = list()
|
||||
for host in hostnames:
|
||||
updated_hosts_list.append(host)
|
||||
updated_hosts_list.append(host.lower())
|
||||
host_id = self.host_map.get(host, None)
|
||||
if host_id not in existing_host_ids:
|
||||
host_id = None
|
||||
@@ -561,33 +593,52 @@ class JobEvent(BasePlaybookEvent):
|
||||
return self.job.verbosity
|
||||
|
||||
|
||||
class UnpartitionedJobEvent(JobEvent):
|
||||
class Meta:
|
||||
proxy = True
|
||||
|
||||
|
||||
UnpartitionedJobEvent._meta.db_table = '_unpartitioned_' + JobEvent._meta.db_table # noqa
|
||||
|
||||
|
||||
class ProjectUpdateEvent(BasePlaybookEvent):
|
||||
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id', 'workflow_job_id']
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id', 'workflow_job_id', 'job_created']
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('pk',)
|
||||
index_together = [
|
||||
('project_update', 'event'),
|
||||
('project_update', 'uuid'),
|
||||
('project_update', 'start_line'),
|
||||
('project_update', 'end_line'),
|
||||
('project_update', 'job_created', 'event'),
|
||||
('project_update', 'job_created', 'uuid'),
|
||||
('project_update', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
project_update = models.ForeignKey(
|
||||
'ProjectUpdate',
|
||||
related_name='project_update_events',
|
||||
on_delete=models.CASCADE,
|
||||
on_delete=models.DO_NOTHING,
|
||||
editable=False,
|
||||
db_index=False,
|
||||
)
|
||||
job_created = models.DateTimeField(null=True, editable=False)
|
||||
|
||||
@property
|
||||
def host_name(self):
|
||||
return 'localhost'
|
||||
|
||||
|
||||
class UnpartitionedProjectUpdateEvent(ProjectUpdateEvent):
|
||||
class Meta:
|
||||
proxy = True
|
||||
|
||||
|
||||
UnpartitionedProjectUpdateEvent._meta.db_table = '_unpartitioned_' + ProjectUpdateEvent._meta.db_table # noqa
|
||||
|
||||
|
||||
class BaseCommandEvent(CreatedModifiedModel):
|
||||
"""
|
||||
An event/message logged from a command for each host.
|
||||
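The Unpartitioned* classes above all follow one pattern: each is a Django proxy model, so it reuses its parent model's fields and behavior, but its _meta.db_table is repointed at the `_unpartitioned_` copy of the old event table that the partition migration preserved. A minimal usage sketch (illustrative only; old_job_id and new_job_id are placeholders, not values from this diff):

    from awx.main.models.events import JobEvent, UnpartitionedJobEvent

    # New events land in the partitioned main_jobevent table ...
    recent = JobEvent.objects.filter(job_id=new_job_id).order_by('counter')
    # ... while events written before the partition cutover stay readable through
    # the proxy model, which queries _unpartitioned_main_jobevent instead.
    legacy = UnpartitionedJobEvent.objects.filter(job_id=old_job_id).order_by('counter')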
@@ -627,6 +678,16 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
created = models.DateTimeField(
|
||||
null=True,
|
||||
default=None,
|
||||
editable=False,
|
||||
)
|
||||
modified = models.DateTimeField(
|
||||
default=None,
|
||||
editable=False,
|
||||
db_index=True,
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())
|
||||
@@ -681,16 +742,17 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
|
||||
class AdHocCommandEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id']
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id', 'job_created']
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
index_together = [
|
||||
('ad_hoc_command', 'event'),
|
||||
('ad_hoc_command', 'uuid'),
|
||||
('ad_hoc_command', 'start_line'),
|
||||
('ad_hoc_command', 'end_line'),
|
||||
('ad_hoc_command', 'job_created', 'event'),
|
||||
('ad_hoc_command', 'job_created', 'uuid'),
|
||||
('ad_hoc_command', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
EVENT_TYPES = [
|
||||
@@ -737,8 +799,9 @@ class AdHocCommandEvent(BaseCommandEvent):
|
||||
ad_hoc_command = models.ForeignKey(
|
||||
'AdHocCommand',
|
||||
related_name='ad_hoc_command_events',
|
||||
on_delete=models.CASCADE,
|
||||
on_delete=models.DO_NOTHING,
|
||||
editable=False,
|
||||
db_index=False,
|
||||
)
|
||||
host = models.ForeignKey(
|
||||
'Host',
|
||||
@@ -753,6 +816,7 @@ class AdHocCommandEvent(BaseCommandEvent):
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
job_created = models.DateTimeField(null=True, editable=False)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:ad_hoc_command_event_detail', kwargs={'pk': self.pk}, request=request)
|
||||
@@ -768,26 +832,37 @@ class AdHocCommandEvent(BaseCommandEvent):
|
||||
analytics_logger.info('Event data saved.', extra=dict(python_objects=dict(job_event=self)))
|
||||
|
||||
|
||||
class UnpartitionedAdHocCommandEvent(AdHocCommandEvent):
|
||||
class Meta:
|
||||
proxy = True
|
||||
|
||||
|
||||
UnpartitionedAdHocCommandEvent._meta.db_table = '_unpartitioned_' + AdHocCommandEvent._meta.db_table # noqa
|
||||
|
||||
|
||||
class InventoryUpdateEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id', 'workflow_job_id']
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id', 'workflow_job_id', 'job_created']
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
index_together = [
|
||||
('inventory_update', 'uuid'),
|
||||
('inventory_update', 'start_line'),
|
||||
('inventory_update', 'end_line'),
|
||||
('inventory_update', 'job_created', 'uuid'),
|
||||
('inventory_update', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
inventory_update = models.ForeignKey(
|
||||
'InventoryUpdate',
|
||||
related_name='inventory_update_events',
|
||||
on_delete=models.CASCADE,
|
||||
on_delete=models.DO_NOTHING,
|
||||
editable=False,
|
||||
db_index=False,
|
||||
)
|
||||
job_created = models.DateTimeField(null=True, editable=False)
|
||||
|
||||
@property
|
||||
def event(self):
|
||||
@@ -802,26 +877,37 @@ class InventoryUpdateEvent(BaseCommandEvent):
|
||||
return False
|
||||
|
||||
|
||||
class UnpartitionedInventoryUpdateEvent(InventoryUpdateEvent):
|
||||
class Meta:
|
||||
proxy = True
|
||||
|
||||
|
||||
UnpartitionedInventoryUpdateEvent._meta.db_table = '_unpartitioned_' + InventoryUpdateEvent._meta.db_table # noqa
|
||||
|
||||
|
||||
class SystemJobEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id']
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id', 'job_created']
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
index_together = [
|
||||
('system_job', 'uuid'),
|
||||
('system_job', 'start_line'),
|
||||
('system_job', 'end_line'),
|
||||
('system_job', 'job_created', 'uuid'),
|
||||
('system_job', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
system_job = models.ForeignKey(
|
||||
'SystemJob',
|
||||
related_name='system_job_events',
|
||||
on_delete=models.CASCADE,
|
||||
on_delete=models.DO_NOTHING,
|
||||
editable=False,
|
||||
db_index=False,
|
||||
)
|
||||
job_created = models.DateTimeField(null=True, editable=False)
|
||||
|
||||
@property
|
||||
def event(self):
|
||||
@@ -834,3 +920,11 @@ class SystemJobEvent(BaseCommandEvent):
|
||||
@property
|
||||
def changed(self):
|
||||
return False
|
||||
|
||||
|
||||
class UnpartitionedSystemJobEvent(SystemJobEvent):
|
||||
class Meta:
|
||||
proxy = True
|
||||
|
||||
|
||||
UnpartitionedSystemJobEvent._meta.db_table = '_unpartitioned_' + SystemJobEvent._meta.db_table # noqa
|
||||
|
||||
@@ -3,6 +3,7 @@ from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.base import CommonModel
|
||||
from awx.main.validators import validate_container_image_name
|
||||
|
||||
|
||||
__all__ = ['ExecutionEnvironment']
|
||||
@@ -31,6 +32,7 @@ class ExecutionEnvironment(CommonModel):
|
||||
max_length=1024,
|
||||
verbose_name=_('image location'),
|
||||
help_text=_("The full image location, including the container registry, image name, and version tag."),
|
||||
validators=[validate_container_image_name],
|
||||
)
|
||||
managed_by_tower = models.BooleanField(default=False, editable=False)
|
||||
credential = models.ForeignKey(
|
||||
|
||||
@@ -130,12 +130,6 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
return self.modified < ref_time - timedelta(seconds=grace_period)
|
||||
|
||||
def refresh_capacity(self):
|
||||
if settings.IS_K8S:
|
||||
self.capacity = self.cpu = self.memory = self.cpu_capacity = self.mem_capacity = 0 # noqa
|
||||
self.version = awx_application_version
|
||||
self.save(update_fields=['capacity', 'version', 'modified', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity'])
|
||||
return
|
||||
|
||||
cpu = get_cpu_capacity()
|
||||
mem = get_mem_capacity()
|
||||
if self.enabled:
|
||||
|
||||
@@ -35,7 +35,7 @@ from awx.main.fields import (
|
||||
)
|
||||
from awx.main.managers import HostManager
|
||||
from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
|
||||
from awx.main.models.events import InventoryUpdateEvent
|
||||
from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
|
||||
from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
|
||||
from awx.main.models.mixins import (
|
||||
ResourceMixin,
|
||||
@@ -51,6 +51,7 @@ from awx.main.models.credential.injectors import _openstack_data
|
||||
from awx.main.utils import _inventory_updates
|
||||
from awx.main.utils.safe_yaml import sanitize_jinja
|
||||
from awx.main.utils.execution_environments import to_container_path
|
||||
from awx.main.utils.licensing import server_product_name
|
||||
|
||||
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership']
|
||||
@@ -503,13 +504,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
null=True,
|
||||
help_text=_('The date and time ansible_facts was last modified.'),
|
||||
)
|
||||
insights_system_id = models.TextField(
|
||||
blank=True,
|
||||
default=None,
|
||||
null=True,
|
||||
db_index=True,
|
||||
help_text=_('Red Hat Insights host unique identifier.'),
|
||||
)
|
||||
|
||||
objects = HostManager()
|
||||
|
||||
@@ -828,6 +822,7 @@ class InventorySourceOptions(BaseModel):
|
||||
('openstack', _('OpenStack')),
|
||||
('rhv', _('Red Hat Virtualization')),
|
||||
('tower', _('Ansible Tower')),
|
||||
('insights', _('Red Hat Insights')),
|
||||
]
|
||||
|
||||
# From the options of the Django management base command
|
||||
@@ -1230,6 +1225,10 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
def is_container_group_task(self):
|
||||
return bool(self.instance_group and self.instance_group.is_container_group)
|
||||
|
||||
@property
|
||||
def can_run_containerized(self):
|
||||
return True
|
||||
|
||||
def _get_parent_field_name(self):
|
||||
return 'inventory_source'
|
||||
|
||||
@@ -1265,6 +1264,8 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
if self.has_unpartitioned_events:
|
||||
return UnpartitionedInventoryUpdateEvent
|
||||
return InventoryUpdateEvent
|
||||
|
||||
@property
|
||||
@@ -1306,16 +1307,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
return self.global_instance_groups
|
||||
return selected_groups
|
||||
|
||||
@property
|
||||
def ansible_virtualenv_path(self):
|
||||
if self.inventory_source and self.inventory_source.custom_virtualenv:
|
||||
return self.inventory_source.custom_virtualenv
|
||||
if self.inventory_source and self.inventory_source.source_project:
|
||||
project = self.inventory_source.source_project
|
||||
if project and project.custom_virtualenv:
|
||||
return project.custom_virtualenv
|
||||
return settings.ANSIBLE_VENV_PATH
|
||||
|
||||
def cancel(self, job_explanation=None, is_chain=False):
|
||||
res = super(InventoryUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain)
|
||||
if res:
|
||||
@@ -1350,6 +1341,7 @@ class PluginFileInjector(object):
|
||||
namespace = None
|
||||
collection = None
|
||||
collection_migration = '2.9' # Starting with this version, we use collections
|
||||
use_fqcn = False # plugin: name versus plugin: namespace.collection.name
|
||||
|
||||
# TODO: delete this method and update unit tests
|
||||
@classmethod
|
||||
@@ -1376,7 +1368,12 @@ class PluginFileInjector(object):
|
||||
Note that a plugin value of '' should still be overridden.
|
||||
'''
|
||||
if self.plugin_name is not None:
|
||||
source_vars['plugin'] = self.plugin_name
|
||||
if hasattr(self, 'downstream_namespace') and server_product_name() != 'AWX':
|
||||
source_vars['plugin'] = f'{self.downstream_namespace}.{self.downstream_collection}.{self.plugin_name}'
|
||||
elif self.use_fqcn:
|
||||
source_vars['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
|
||||
else:
|
||||
source_vars['plugin'] = self.plugin_name
|
||||
return source_vars
|
||||
|
||||
def build_env(self, inventory_update, env, private_data_dir, private_data_files):
|
||||
@@ -1524,12 +1521,17 @@ class rhv(PluginFileInjector):
|
||||
initial_version = '2.9'
|
||||
namespace = 'ovirt'
|
||||
collection = 'ovirt'
|
||||
downstream_namespace = 'redhat'
|
||||
downstream_collection = 'rhv'
|
||||
|
||||
|
||||
class satellite6(PluginFileInjector):
|
||||
plugin_name = 'foreman'
|
||||
namespace = 'theforeman'
|
||||
collection = 'foreman'
|
||||
downstream_namespace = 'redhat'
|
||||
downstream_collection = 'satellite'
|
||||
use_fqcn = True
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
# this assumes that this is merged
|
||||
@@ -1542,18 +1544,30 @@ class satellite6(PluginFileInjector):
|
||||
ret['FOREMAN_PASSWORD'] = credential.get_input('password', default='')
|
||||
return ret
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(satellite6, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
# this inventory plugin requires the fully qualified inventory plugin name
|
||||
ret['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
|
||||
return ret
|
||||
|
||||
|
||||
class tower(PluginFileInjector):
|
||||
plugin_name = 'tower'
|
||||
base_injector = 'template'
|
||||
namespace = 'awx'
|
||||
collection = 'awx'
|
||||
downstream_namespace = 'ansible'
|
||||
downstream_collection = 'controller'
|
||||
|
||||
|
||||
class insights(PluginFileInjector):
|
||||
plugin_name = 'insights'
|
||||
base_injector = 'template'
|
||||
namespace = 'redhatinsights'
|
||||
collection = 'insights'
|
||||
downstream_namespace = 'redhat'
|
||||
downstream_collection = 'insights'
|
||||
use_fqcn = 'true'
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(insights, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
# this inventory plugin requires the fully qualified inventory plugin name
|
||||
ret['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
|
||||
return ret
|
||||
|
||||
|
||||
for cls in PluginFileInjector.__subclasses__():
|
||||
|
||||
@@ -37,7 +37,7 @@ from awx.main.models.base import (
|
||||
VERBOSITY_CHOICES,
|
||||
VarsDictProperty,
|
||||
)
|
||||
from awx.main.models.events import JobEvent, SystemJobEvent
|
||||
from awx.main.models.events import JobEvent, UnpartitionedJobEvent, UnpartitionedSystemJobEvent, SystemJobEvent
|
||||
from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob
|
||||
from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
@@ -600,20 +600,10 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
def get_ui_url(self):
|
||||
return urljoin(settings.TOWER_URL_BASE, "/#/jobs/playbook/{}".format(self.pk))
|
||||
|
||||
@property
|
||||
def ansible_virtualenv_path(self):
|
||||
# the order here enforces precedence (it matters)
|
||||
for virtualenv in (
|
||||
self.job_template.custom_virtualenv if self.job_template else None,
|
||||
self.project.custom_virtualenv,
|
||||
self.organization.custom_virtualenv if self.organization else None,
|
||||
):
|
||||
if virtualenv:
|
||||
return virtualenv
|
||||
return settings.ANSIBLE_VENV_PATH
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
if self.has_unpartitioned_events:
|
||||
return UnpartitionedJobEvent
|
||||
return JobEvent
|
||||
|
||||
def copy_unified_job(self, **new_prompts):
|
||||
@@ -855,23 +845,6 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
continue
|
||||
host.ansible_facts = ansible_facts
|
||||
host.ansible_facts_modified = now()
|
||||
ansible_local = ansible_facts.get('ansible_local', {}).get('insights', {})
|
||||
ansible_facts = ansible_facts.get('insights', {})
|
||||
ansible_local_system_id = ansible_local.get('system_id', None) if isinstance(ansible_local, dict) else None
|
||||
ansible_facts_system_id = ansible_facts.get('system_id', None) if isinstance(ansible_facts, dict) else None
|
||||
if ansible_local_system_id:
|
||||
print("Setting local {}".format(ansible_local_system_id))
|
||||
logger.debug(
|
||||
"Insights system_id {} found for host <{}, {}> in"
|
||||
" ansible local facts".format(ansible_local_system_id, host.inventory.id, host.name)
|
||||
)
|
||||
host.insights_system_id = ansible_local_system_id
|
||||
elif ansible_facts_system_id:
|
||||
logger.debug(
|
||||
"Insights system_id {} found for host <{}, {}> in"
|
||||
" insights facts".format(ansible_local_system_id, host.inventory.id, host.name)
|
||||
)
|
||||
host.insights_system_id = ansible_facts_system_id
|
||||
host.save()
|
||||
system_tracking_logger.info(
|
||||
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
|
||||
@@ -1259,12 +1232,16 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
if self.has_unpartitioned_events:
|
||||
return UnpartitionedSystemJobEvent
|
||||
return SystemJobEvent
|
||||
|
||||
@property
|
||||
def can_run_on_control_plane(self):
|
||||
return True
|
||||
|
||||
@property
|
||||
def task_impact(self):
|
||||
if settings.IS_K8S:
|
||||
return 0
|
||||
return 5
|
||||
|
||||
@property
|
||||
|
||||
@@ -464,15 +464,18 @@ class ExecutionEnvironmentMixin(models.Model):
|
||||
|
||||
def resolve_execution_environment(self):
|
||||
"""
|
||||
Return the execution environment that should be used when creating a new job.
|
||||
Return the execution environment that should be used when executing a job.
|
||||
"""
|
||||
if self.execution_environment is not None:
|
||||
return self.execution_environment
|
||||
template = getattr(self, 'unified_job_template', None)
|
||||
if template is not None and template.execution_environment is not None:
|
||||
return template.execution_environment
|
||||
if getattr(self, 'project_id', None) and self.project.default_environment is not None:
|
||||
return self.project.default_environment
|
||||
if getattr(self, 'organization', None) and self.organization.default_environment is not None:
|
||||
if getattr(self, 'organization_id', None) and self.organization.default_environment is not None:
|
||||
return self.organization.default_environment
|
||||
if getattr(self, 'inventory', None) and self.inventory.organization is not None:
|
||||
if getattr(self, 'inventory_id', None) and self.inventory.organization is not None:
|
||||
if self.inventory.organization.default_environment is not None:
|
||||
return self.inventory.organization.default_environment
|
||||
|
||||
|
||||
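The resolve_execution_environment change above swaps the getattr(self, 'organization', ...) style checks for the raw organization_id / inventory_id columns. Testing the *_id attribute answers "is this relation set?" from the row already in memory, whereas evaluating the related object can force Django to fetch that row just to decide truthiness; with this change the related table is only touched when a default_environment is actually read. A minimal sketch of the difference (illustrative, not AWX code):

    if job.organization_id:                          # local FK column, no extra query
        ee = job.organization.default_environment   # related row fetched only on this path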
@@ -19,7 +19,7 @@ from django.utils.timezone import now, make_aware, get_default_timezone
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.base import PROJECT_UPDATE_JOB_TYPE_CHOICES, PERM_INVENTORY_DEPLOY
|
||||
from awx.main.models.events import ProjectUpdateEvent
|
||||
from awx.main.models.events import ProjectUpdateEvent, UnpartitionedProjectUpdateEvent
|
||||
from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
JobNotificationMixin,
|
||||
@@ -32,7 +32,7 @@ from awx.main.models.jobs import Job
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin, CustomVirtualEnvMixin, RelatedJobsMixin
|
||||
from awx.main.utils import update_scm_url, polymorphic
|
||||
from awx.main.utils.ansible import skip_directory, could_be_inventory, could_be_playbook
|
||||
from awx.main.utils.execution_environments import get_default_execution_environment
|
||||
from awx.main.utils.execution_environments import get_control_plane_execution_environment
|
||||
from awx.main.fields import ImplicitRoleField
|
||||
from awx.main.models.rbac import (
|
||||
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
|
||||
@@ -185,11 +185,11 @@ class ProjectOptions(models.Model):
|
||||
|
||||
def resolve_execution_environment(self):
|
||||
"""
|
||||
Project updates, themselves, will use the default execution environment.
|
||||
Project updates, themselves, will use the control plane execution environment.
|
||||
Jobs using the project can use the default_environment, but the project updates
|
||||
are not flexible enough to allow customizing the image they use.
|
||||
"""
|
||||
return get_default_execution_environment()
|
||||
return get_control_plane_execution_environment()
|
||||
|
||||
def get_project_path(self, check_if_exists=True):
|
||||
local_path = os.path.basename(self.local_path)
|
||||
@@ -553,14 +553,18 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
|
||||
websocket_data.update(dict(project_id=self.project.id))
|
||||
return websocket_data
|
||||
|
||||
@property
|
||||
def can_run_on_control_plane(self):
|
||||
return True
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
if self.has_unpartitioned_events:
|
||||
return UnpartitionedProjectUpdateEvent
|
||||
return ProjectUpdateEvent
|
||||
|
||||
@property
|
||||
def task_impact(self):
|
||||
if settings.IS_K8S:
|
||||
return 0
|
||||
return 0 if self.job_type == 'run' else 1
|
||||
|
||||
@property
|
||||
@@ -621,6 +625,8 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
|
||||
organization_groups = []
|
||||
template_groups = [x for x in super(ProjectUpdate, self).preferred_instance_groups]
|
||||
selected_groups = template_groups + organization_groups
|
||||
if not any([not group.is_container_group for group in selected_groups]):
|
||||
selected_groups = selected_groups + list(self.control_plane_instance_group)
|
||||
if not selected_groups:
|
||||
return self.global_instance_groups
|
||||
return selected_groups
|
||||
|
||||
@@ -49,6 +49,7 @@ from awx.main.utils import (
|
||||
getattr_dne,
|
||||
polymorphic,
|
||||
schedule_task_manager,
|
||||
get_event_partition_epoch,
|
||||
)
|
||||
from awx.main.constants import ACTIVE_STATES, CAN_CANCEL
|
||||
from awx.main.redact import UriCleaner, REPLACE_STR
|
||||
@@ -366,8 +367,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
||||
for fd, val in eager_fields.items():
|
||||
setattr(unified_job, fd, val)
|
||||
|
||||
unified_job.execution_environment = self.resolve_execution_environment()
|
||||
|
||||
# NOTE: slice workflow jobs _get_parent_field_name method
|
||||
# is not correct until this is set
|
||||
if not parent_field_name:
|
||||
@@ -737,6 +736,13 @@ class UnifiedJob(
|
||||
def _get_task_class(cls):
|
||||
raise NotImplementedError # Implement in subclasses.
|
||||
|
||||
@property
|
||||
def can_run_on_control_plane(self):
|
||||
if settings.IS_K8S:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
@property
|
||||
def can_run_containerized(self):
|
||||
return False
|
||||
@@ -992,8 +998,18 @@ class UnifiedJob(
|
||||
'main_systemjob': 'system_job_id',
|
||||
}[tablename]
|
||||
|
||||
@property
|
||||
def has_unpartitioned_events(self):
|
||||
applied = get_event_partition_epoch()
|
||||
return applied and self.created and self.created < applied
|
||||
|
||||
def get_event_queryset(self):
|
||||
return self.event_class.objects.filter(**{self.event_parent_key: self.id})
|
||||
kwargs = {
|
||||
self.event_parent_key: self.id,
|
||||
}
|
||||
if not self.has_unpartitioned_events:
|
||||
kwargs['job_created'] = self.created
|
||||
return self.event_class.objects.filter(**kwargs)
|
||||
|
||||
@property
|
||||
def event_processing_finished(self):
|
||||
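Taken together, has_unpartitioned_events and get_event_queryset above decide which table a job's events are read from: jobs created before the partition epoch (the timestamp recorded for the 0144_event_partitions migration) keep the plain parent-key filter against the legacy table, while newer jobs also filter on job_created so the lookup is confined to the job's partition. Roughly, the resulting querysets look like this (illustrative, using a Job as the example):

    # job created after the partition epoch -> partitioned table, scoped by partition key
    JobEvent.objects.filter(job_id=job.id, job_created=job.created)
    # job created before the epoch -> proxy model over _unpartitioned_main_jobevent
    UnpartitionedJobEvent.objects.filter(job_id=job.id)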
@@ -1079,13 +1095,15 @@ class UnifiedJob(
|
||||
# .write() calls on the fly to maintain this interface
|
||||
_write = fd.write
|
||||
fd.write = lambda s: _write(smart_text(s))
|
||||
tbl = self._meta.db_table + 'event'
|
||||
created_by_cond = ''
|
||||
if self.has_unpartitioned_events:
|
||||
tbl = f'_unpartitioned_{tbl}'
|
||||
else:
|
||||
created_by_cond = f"job_created='{self.created.isoformat()}' AND "
|
||||
|
||||
cursor.copy_expert(
|
||||
"copy (select stdout from {} where {}={} and stdout != '' order by start_line) to stdout".format(
|
||||
self._meta.db_table + 'event', self.event_parent_key, self.id
|
||||
),
|
||||
fd,
|
||||
)
|
||||
sql = f"copy (select stdout from {tbl} where {created_by_cond}{self.event_parent_key}={self.id} and stdout != '' order by start_line) to stdout" # nosql
|
||||
cursor.copy_expert(sql, fd)
|
||||
|
||||
if hasattr(fd, 'name'):
|
||||
# If we're dealing with a physical file, use `sed` to clean
|
||||
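The stdout export above now builds its COPY statement in two shapes: for partitioned events it adds a job_created predicate so the scan stays on the job's partition, and for pre-partition jobs it targets the preserved _unpartitioned_ table instead. With example values filled in (the id and timestamp below are illustrative, not from this diff), the generated strings come out roughly as:

    # partitioned (job created after the epoch):
    #   "copy (select stdout from main_jobevent where job_created='2021-06-20T12:00:00+00:00' AND job_id=42 and stdout != '' order by start_line) to stdout"
    # legacy (job created before the epoch):
    #   "copy (select stdout from _unpartitioned_main_jobevent where job_id=42 and stdout != '' order by start_line) to stdout"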
@@ -1404,14 +1422,21 @@ class UnifiedJob(
|
||||
return []
|
||||
return list(self.unified_job_template.instance_groups.all())
|
||||
|
||||
@property
|
||||
def control_plane_instance_group(self):
|
||||
from awx.main.models.ha import InstanceGroup
|
||||
|
||||
control_plane_instance_group = InstanceGroup.objects.filter(name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)
|
||||
|
||||
return list(control_plane_instance_group)
|
||||
|
||||
@property
|
||||
def global_instance_groups(self):
|
||||
from awx.main.models.ha import InstanceGroup
|
||||
|
||||
default_instance_group = InstanceGroup.objects.filter(name='tower')
|
||||
if default_instance_group.exists():
|
||||
return [default_instance_group.first()]
|
||||
return []
|
||||
default_instance_groups = InstanceGroup.objects.filter(name__in=[settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME])
|
||||
|
||||
return list(default_instance_groups)
|
||||
|
||||
def awx_meta_vars(self):
|
||||
"""
|
||||
|
||||
@@ -258,6 +258,10 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
models.Index(fields=['identifier']),
|
||||
]
|
||||
|
||||
@property
|
||||
def event_processing_finished(self):
|
||||
return True
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
@@ -591,6 +595,9 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
|
||||
def _get_related_jobs(self):
|
||||
return WorkflowJob.objects.filter(workflow_job_template=self)
|
||||
|
||||
def resolve_execution_environment(self):
|
||||
return None # EEs are not meaningful for workflows
|
||||
|
||||
|
||||
class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificationMixin, WebhookMixin):
|
||||
class Meta:
|
||||
@@ -620,6 +627,10 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
def workflow_nodes(self):
|
||||
return self.workflow_job_nodes
|
||||
|
||||
@property
|
||||
def event_processing_finished(self):
|
||||
return True
|
||||
|
||||
def _get_parent_field_name(self):
|
||||
if self.job_template_id:
|
||||
# This is a workflow job which is a container for slice jobs
|
||||
|
||||
@@ -96,7 +96,7 @@ class PodManager(object):
|
||||
error_msg = _('Invalid openshift or k8s cluster credential')
|
||||
if e.status == 403:
|
||||
error_msg = _(
|
||||
'Failed to create secret for container group {} because the needed service account roles are needed. Add get, create and delete roles for secret resources for your cluster credential.'.format(
|
||||
'Failed to create secret for container group {} because additional service account role rules are needed. Add get, create and delete role rules for secret resources for your cluster credential.'.format(
|
||||
job.instance_group.name
|
||||
)
|
||||
)
|
||||
@@ -113,7 +113,7 @@ class PodManager(object):
|
||||
error_msg = _('Invalid openshift or k8s cluster credential')
|
||||
if e.status == 403:
|
||||
error_msg = _(
|
||||
'Failed to delete secret for container group {} because the needed service account roles are needed. Add create and delete roles for secret resources for your cluster credential.'.format(
|
||||
'Failed to delete secret for container group {} because additional service account role rules are needed. Add create and delete role rules for secret resources for your cluster credential.'.format(
|
||||
job.instance_group.name
|
||||
)
|
||||
)
|
||||
|
||||
@@ -35,6 +35,7 @@ from awx.main.models import (
|
||||
from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.utils import get_type_for_model, task_manager_bulk_reschedule, schedule_task_manager
|
||||
from awx.main.utils.common import create_partition
|
||||
from awx.main.signals import disable_activity_stream
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main.utils import decrypt_field
|
||||
@@ -301,6 +302,8 @@ class TaskManager:
|
||||
|
||||
def post_commit():
|
||||
if task.status != 'failed' and type(task) is not WorkflowJob:
|
||||
# Before task is dispatched, ensure that job_event partitions exist
|
||||
create_partition(task.event_class._meta.db_table, start=task.created)
|
||||
task_cls = task._get_task_class()
|
||||
task_cls.apply_async(
|
||||
[task.pk],
|
||||
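The post_commit hook above calls create_partition before the task is dispatched, so the partition that will receive this job's events already exists by the time the first callback event is written. The helper's body is not part of this diff; assuming the usual PostgreSQL declarative-partitioning approach keyed on job_created, the DDL it issues would be along these lines (partition name and hourly range are assumed for illustration):

    # Assumed shape of the statement run by create_partition (not shown in this diff).
    with connection.cursor() as cursor:
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS main_jobevent_20210620_12 "
            "PARTITION OF main_jobevent "
            "FOR VALUES FROM ('2021-06-20 12:00:00+00:00') TO ('2021-06-20 13:00:00+00:00');"
        )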
@@ -471,6 +474,7 @@ class TaskManager:
|
||||
tasks_to_update_job_explanation.append(task)
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
|
||||
found_acceptable_queue = False
|
||||
if isinstance(task, WorkflowJob):
|
||||
if task.unified_job_template_id in running_workflow_templates:
|
||||
@@ -481,6 +485,7 @@ class TaskManager:
|
||||
running_workflow_templates.add(task.unified_job_template_id)
|
||||
self.start_task(task, None, task.get_jobs_fail_chain(), None)
|
||||
continue
|
||||
|
||||
for rampart_group in preferred_instance_groups:
|
||||
if task.can_run_containerized and rampart_group.is_container_group:
|
||||
self.graph[rampart_group.name]['graph'].add_job(task)
|
||||
@@ -488,12 +493,12 @@ class TaskManager:
|
||||
found_acceptable_queue = True
|
||||
break
|
||||
|
||||
if not task.can_run_on_control_plane:
|
||||
logger.debug("Skipping group {}, task cannot run on control plane".format(rampart_group.name))
|
||||
continue
|
||||
|
||||
remaining_capacity = self.get_remaining_capacity(rampart_group.name)
|
||||
if (
|
||||
task.task_impact > 0
|
||||
and not rampart_group.is_container_group # project updates have a cost of zero
|
||||
and self.get_remaining_capacity(rampart_group.name) <= 0
|
||||
):
|
||||
if task.task_impact > 0 and self.get_remaining_capacity(rampart_group.name) <= 0:
|
||||
logger.debug("Skipping group {}, remaining_capacity {} <= 0".format(rampart_group.name, remaining_capacity))
|
||||
continue
|
||||
|
||||
|
||||
@@ -31,6 +31,7 @@ from crum.signals import current_user_getter
|
||||
# AWX
|
||||
from awx.main.models import (
|
||||
ActivityStream,
|
||||
ExecutionEnvironment,
|
||||
Group,
|
||||
Host,
|
||||
InstanceGroup,
|
||||
@@ -623,6 +624,12 @@ def deny_orphaned_approvals(sender, instance, **kwargs):
|
||||
approval.deny()
|
||||
|
||||
|
||||
@receiver(pre_delete, sender=ExecutionEnvironment)
|
||||
def remove_default_ee(sender, instance, **kwargs):
|
||||
if instance.id == getattr(settings.DEFAULT_EXECUTION_ENVIRONMENT, 'id', None):
|
||||
settings.DEFAULT_EXECUTION_ENVIRONMENT = None
|
||||
|
||||
|
||||
@receiver(post_save, sender=Session)
|
||||
def save_user_session_membership(sender, **kwargs):
|
||||
session = kwargs.get('instance', None)
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
from collections import OrderedDict, namedtuple
|
||||
from collections import OrderedDict, namedtuple, deque
|
||||
import errno
|
||||
import functools
|
||||
import importlib
|
||||
@@ -32,7 +32,7 @@ import sys
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
|
||||
from django.db import transaction, DatabaseError, IntegrityError
|
||||
from django.db.models.fields.related import ForeignKey
|
||||
from django.utils.timezone import now
|
||||
from django.utils.encoding import smart_str
|
||||
@@ -57,7 +57,7 @@ from receptorctl.socket_interface import ReceptorControl
|
||||
|
||||
# AWX
|
||||
from awx import __version__ as awx_application_version
|
||||
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
|
||||
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, MINIMAL_EVENTS
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.redact import UriCleaner
|
||||
from awx.main.models import (
|
||||
@@ -98,7 +98,7 @@ from awx.main.utils import (
|
||||
parse_yaml_or_json,
|
||||
cleanup_new_process,
|
||||
)
|
||||
from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec, CONTAINER_ROOT, to_container_path
|
||||
from awx.main.utils.execution_environments import get_default_pod_spec, CONTAINER_ROOT, to_container_path
|
||||
from awx.main.utils.ansible import read_ansible_config
|
||||
from awx.main.utils.external_logging import reconfigure_rsyslog
|
||||
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
|
||||
@@ -682,48 +682,6 @@ def update_host_smart_inventory_memberships():
|
||||
smart_inventory.update_computed_fields()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
def migrate_legacy_event_data(tblname):
|
||||
#
|
||||
# NOTE: this function is not actually in use anymore,
|
||||
# but has been intentionally kept for historical purposes,
|
||||
# and to serve as an illustration if we ever need to perform
|
||||
# bulk modification/migration of event data in the future.
|
||||
#
|
||||
if 'event' not in tblname:
|
||||
return
|
||||
with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
|
||||
if acquired is False:
|
||||
return
|
||||
chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE
|
||||
|
||||
def _remaining():
|
||||
try:
|
||||
cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
|
||||
return cursor.fetchone()[0]
|
||||
except ProgrammingError:
|
||||
# the table is gone (migration is unnecessary)
|
||||
return None
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
total_rows = _remaining()
|
||||
while total_rows:
|
||||
with transaction.atomic():
|
||||
cursor.execute(f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;')
|
||||
last_insert_pk = cursor.fetchone()
|
||||
if last_insert_pk is None:
|
||||
# this means that the SELECT from the old table was
|
||||
# empty, and there was nothing to insert (so we're done)
|
||||
break
|
||||
last_insert_pk = last_insert_pk[0]
|
||||
cursor.execute(f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});')
|
||||
logger.warn(f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)')
|
||||
|
||||
if _remaining() is None:
|
||||
cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
|
||||
logger.warn(f'{tblname} primary key migration to bigint has finished')
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
def delete_inventory(inventory_id, user_id, retries=5):
|
||||
# Delete inventory as user
|
||||
@@ -781,6 +739,8 @@ class BaseTask(object):
|
||||
self.parent_workflow_job_id = None
|
||||
self.host_map = {}
|
||||
self.guid = GuidMiddleware.get_guid()
|
||||
self.job_created = None
|
||||
self.recent_event_timings = deque(maxlen=settings.MAX_WEBSOCKET_EVENT_RATE)
|
||||
|
||||
def update_model(self, pk, _attempt=0, **updates):
|
||||
"""Reload the model instance from the database and update the
|
||||
@@ -1158,6 +1118,7 @@ class BaseTask(object):
|
||||
event_data.pop('parent_uuid', None)
|
||||
if self.parent_workflow_job_id:
|
||||
event_data['workflow_job_id'] = self.parent_workflow_job_id
|
||||
event_data['job_created'] = self.job_created
|
||||
if self.host_map:
|
||||
host = event_data.get('event_data', {}).get('host', '').strip()
|
||||
if host:
|
||||
@@ -1191,6 +1152,37 @@ class BaseTask(object):
|
||||
if 'event_data' in event_data:
|
||||
event_data['event_data']['guid'] = self.guid
|
||||
|
||||
# To prevent overwhelming the broadcast queue, skip some websocket messages
|
||||
if self.recent_event_timings:
|
||||
cpu_time = time.time()
|
||||
first_window_time = self.recent_event_timings[0]
|
||||
last_window_time = self.recent_event_timings[-1]
|
||||
|
||||
if event_data.get('event') in MINIMAL_EVENTS:
|
||||
should_emit = True # always send some types like playbook_on_stats
|
||||
elif event_data.get('stdout') == '' and event_data['start_line'] == event_data['end_line']:
|
||||
should_emit = False # exclude events with no output
|
||||
else:
|
||||
should_emit = any(
|
||||
[
|
||||
# if the oldest websocket message in the window was sent over 1 second ago
|
||||
cpu_time - first_window_time > 1.0,
|
||||
# if the very last websocket message came in over 1/30 seconds ago
|
||||
self.recent_event_timings.maxlen * (cpu_time - last_window_time) > 1.0,
|
||||
# if the queue is not yet full
|
||||
len(self.recent_event_timings) != self.recent_event_timings.maxlen,
|
||||
]
|
||||
)
|
||||
|
||||
if should_emit:
|
||||
self.recent_event_timings.append(cpu_time)
|
||||
else:
|
||||
event_data.setdefault('event_data', {})
|
||||
event_data['skip_websocket_message'] = True
|
||||
|
||||
elif self.recent_event_timings.maxlen:
|
||||
self.recent_event_timings.append(time.time())
|
||||
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
self.dispatcher.dispatch(event_data)
|
||||
self.event_ct += 1
|
||||
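In plain terms, the block above keeps a sliding window of recent websocket emit times and lets an event through when the window is not yet full, when the oldest timestamp in the window is more than a second old, or when the previous message went out more than 1/maxlen seconds ago; minimal events such as playbook_on_stats always pass, and zero-output events never do. A standalone sketch of the same rule (illustrative, not AWX code; 30 is an assumed value for settings.MAX_WEBSOCKET_EVENT_RATE):

    import time
    from collections import deque

    window = deque(maxlen=30)

    def should_emit_now():
        if len(window) < window.maxlen:
            return True                                   # window not yet full
        now = time.time()
        if now - window[0] > 1.0:
            return True                                   # oldest message in the window is over 1s old
        return window.maxlen * (now - window[-1]) > 1.0   # last message went out over 1/maxlen seconds ago

    def record_emit():
        window.append(time.time())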
@@ -1283,6 +1275,8 @@ class BaseTask(object):
|
||||
if self.instance.spawned_by_workflow:
|
||||
self.parent_workflow_job_id = self.instance.get_workflow_job().id
|
||||
|
||||
self.job_created = str(self.instance.created)
|
||||
|
||||
try:
|
||||
self.instance.send_notification_templates("running")
|
||||
private_data_dir = self.build_private_data_dir(self.instance)
|
||||
@@ -1300,10 +1294,6 @@ class BaseTask(object):
|
||||
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
|
||||
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
|
||||
|
||||
# store a record of the venv used at runtime
|
||||
if hasattr(self.instance, 'custom_virtualenv'):
|
||||
self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
|
||||
|
||||
# Fetch "cached" fact data from prior runs and put on the disk
|
||||
# where ansible expects to find it
|
||||
if getattr(self.instance, 'use_fact_cache', False):
|
||||
@@ -2391,6 +2381,12 @@ class RunInventoryUpdate(BaseTask):
|
||||
paths = [config_values[config_setting]] + paths
|
||||
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
|
||||
env[env_key] = os.pathsep.join(paths)
|
||||
if 'ANSIBLE_COLLECTIONS_PATHS' in env:
|
||||
paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
|
||||
else:
|
||||
paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
|
||||
paths.append('/usr/share/automation-controller/collections')
|
||||
env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)
|
||||
|
||||
return env
|
||||
|
||||
@@ -2883,7 +2879,7 @@ class TransmitterThread(threading.Thread):
|
||||
|
||||
|
||||
class AWXReceptorJob:
|
||||
def __init__(self, task=None, runner_params=None):
|
||||
def __init__(self, task, runner_params=None):
|
||||
self.task = task
|
||||
self.runner_params = runner_params
|
||||
self.unit_id = None
|
||||
@@ -3038,10 +3034,7 @@ class AWXReceptorJob:
|
||||
|
||||
@property
|
||||
def pod_definition(self):
|
||||
if self.task:
|
||||
ee = self.task.instance.resolve_execution_environment()
|
||||
else:
|
||||
ee = get_default_execution_environment()
|
||||
ee = self.task.instance.execution_environment
|
||||
|
||||
default_pod_spec = get_default_pod_spec()
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ import pytest
|
||||
from unittest import mock
|
||||
from contextlib import contextmanager
|
||||
|
||||
from awx.main.models import Credential
|
||||
from awx.main.models import Credential, UnifiedJob
|
||||
from awx.main.tests.factories import (
|
||||
create_organization,
|
||||
create_job_template,
|
||||
@@ -81,7 +81,7 @@ def instance_group_factory():
|
||||
|
||||
@pytest.fixture
|
||||
def default_instance_group(instance_factory, instance_group_factory):
|
||||
return create_instance_group("tower", instances=[create_instance("hostA")])
|
||||
return create_instance_group("default", instances=[create_instance("hostA")])
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -149,3 +149,29 @@ def mock_external_credential_input_sources():
|
||||
# test it explicitly.
|
||||
with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture:
|
||||
yield _fixture
|
||||
|
||||
|
||||
@pytest.fixture(scope='session', autouse=True)
|
||||
def mock_has_unpartitioned_events():
|
||||
# has_unpartitioned_events determines if there are any events still
|
||||
# left in the old, unpartitioned job events table. In order to work,
|
||||
# this method looks up when the partition migration occurred. When
|
||||
# Django's unit tests run, however, there will be no record of the migration.
|
||||
# We mock this out to circumvent the migration query.
|
||||
with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture:
|
||||
yield _fixture
|
||||


@pytest.fixture(scope='session', autouse=True)
def mock_get_event_queryset_no_job_created():
    """
    SQLite friendly since partitions aren't supported. Do not add the faked job_created field
    to the filter; if we do, it will result in an SQL query against the job_created field,
    which does not actually exist in a non-partitioned scenario.
    """

    def event_qs(self):
        kwargs = {self.event_parent_key: self.id}
        return self.event_class.objects.filter(**kwargs)

    with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture:
        yield _fixture
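For contrast, a sketch of what the partition-aware queryset presumably adds on PostgreSQL, where events live in partitions keyed by the parent job's creation time (the job_created kwarg is the assumption the docstring above is working around, not code quoted from awx):

    def partition_aware_event_qs(self):
        # job_created narrows the scan to the partition(s) holding this job's events;
        # SQLite has no such column, hence the fixture swaps in the plain filter above.
        kwargs = {self.event_parent_key: self.id, 'job_created': self.created}
        return self.event_class.objects.filter(**kwargs)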
File diff suppressed because one or more lines are too long
@@ -1,14 +0,0 @@
import json
import os


dir_path = os.path.dirname(os.path.realpath(__file__))

with open(os.path.join(dir_path, 'insights_hosts.json')) as data_file:
    TEST_INSIGHTS_HOSTS = json.load(data_file)

with open(os.path.join(dir_path, 'insights.json')) as data_file:
    TEST_INSIGHTS_PLANS = json.load(data_file)

with open(os.path.join(dir_path, 'insights_remediations.json')) as data_file:
    TEST_INSIGHTS_REMEDIATIONS = json.load(data_file)['data']
@@ -1,13 +0,0 @@
{
    "total": 1,
    "count": 1,
    "page": 1,
    "per_page": 50,
    "results": [
        {
            "id": "11111111-1111-1111-1111-111111111111",
            "insights_id": "22222222-2222-2222-2222-222222222222",
            "updated": "2019-03-19T21:59:09.213151-04:00"
        }
    ]
}
@@ -1,33 +0,0 @@
{
    "data": [
        {
            "id": "9197ba55-0abc-4028-9bbe-269e530f8bd5",
            "name": "Fix Critical CVEs",
            "created_by": {
                "username": "jharting@redhat.com",
                "first_name": "Jozef",
                "last_name": "Hartinger"
            },
            "created_at": "2018-12-05T08:19:36.641Z",
            "updated_by": {
                "username": "jharting@redhat.com",
                "first_name": "Jozef",
                "last_name": "Hartinger"
            },
            "updated_at": "2018-12-05T08:19:36.641Z",
            "issue_count": 0,
            "system_count": 0,
            "needs_reboot": true
        }
    ],
    "meta": {
        "count": 0,
        "total": 0
    },
    "links": {
        "first": null,
        "last": null,
        "next": null,
        "previous": null
    }
}
awx/main/tests/data/inventory/plugins/insights/env.json (new file, 5 lines)
@@ -0,0 +1,5 @@
{
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "INSIGHTS_USER": "fooo",
    "INSIGHTS_PASSWORD": "fooo"
}
@@ -16,6 +16,65 @@ def app_post_migration(sender, app_config, **kwargs):
    if 'result_stdout_text' not in cols:
        cur.execute('ALTER TABLE main_unifiedjob ADD COLUMN result_stdout_text TEXT')

    # we also need to make sure that the `_unpartitioned_<event>` tables are present.
    # these tables represent old job event tables that were renamed / preserved during a
    # migration which introduces partitioned event tables
    # https://github.com/ansible/awx/issues/9039
    for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
        table_entries = cur.execute(f'SELECT count(*) from sqlite_master WHERE tbl_name="_unpartitioned_{tblname}";').fetchone()[0]
        if table_entries > 0:
            continue
        if tblname == 'main_adhoccommandevent':
            unique_columns = """host_name character varying(1024) NOT NULL,
                event character varying(100) NOT NULL,
                failed boolean NOT NULL,
                changed boolean NOT NULL,
                host_id integer,
                ad_hoc_command_id integer NOT NULL
            """
        elif tblname == 'main_inventoryupdateevent':
            unique_columns = "inventory_update_id integer NOT NULL"
        elif tblname == 'main_jobevent':
            unique_columns = """event character varying(100) NOT NULL,
                failed boolean NOT NULL,
                changed boolean NOT NULL,
                host_name character varying(1024) NOT NULL,
                play character varying(1024) NOT NULL,
                role character varying(1024) NOT NULL,
                task character varying(1024) NOT NULL,
                host_id integer,
                job_id integer NOT NULL,
                playbook character varying(1024) NOT NULL
            """
        elif tblname == 'main_projectupdateevent':
            unique_columns = """event character varying(100) NOT NULL,
                failed boolean NOT NULL,
                changed boolean NOT NULL,
                playbook character varying(1024) NOT NULL,
                play character varying(1024) NOT NULL,
                role character varying(1024) NOT NULL,
                task character varying(1024) NOT NULL,
                project_update_id integer NOT NULL
            """
        elif tblname == 'main_systemjobevent':
            unique_columns = "system_job_id integer NOT NULL"

        cur.execute(
            f"""CREATE TABLE _unpartitioned_{tblname} (
                id bigint NOT NULL,
                created timestamp with time zone NOT NULL,
                modified timestamp with time zone NOT NULL,
                event_data text NOT NULL,
                counter integer NOT NULL,
                end_line integer NOT NULL,
                start_line integer NOT NULL,
                stdout text NOT NULL,
                uuid character varying(1024) NOT NULL,
                verbosity integer NOT NULL,
                {unique_columns});
            """
        )


if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
    post_migrate.connect(app_post_migration, sender=apps.get_app_config('main'))
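With those stand-in tables in place, a SQLite test database can answer the same question the real code asks on PostgreSQL: are there still rows left in the pre-partitioning event tables? A small probe using Django's connection, as an illustration only (not the production has_unpartitioned_events implementation):

    from django.db import connection

    def unpartitioned_rows_remaining(tblname='main_jobevent'):
        # the renamed legacy table keeps its original columns, so a bare count is enough
        with connection.cursor() as cur:
            cur.execute(f'SELECT count(*) FROM _unpartitioned_{tblname}')
            return cur.fetchone()[0]

    # e.g. unpartitioned_rows_remaining() == 0 once all legacy events have been migrated away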
@@ -12,7 +12,6 @@ def test_empty():
    "active_sessions": 0,
    "active_host_count": 0,
    "credential": 0,
    "custom_virtualenvs": 0,  # dev env ansible3
    "host": 0,
    "inventory": 0,
    "inventories": {"normal": 0, "smart": 0},

@@ -21,7 +21,6 @@ EXPECTED_VALUES = {
    'awx_sessions_total': 0.0,
    'awx_sessions_total': 0.0,
    'awx_sessions_total': 0.0,
    'awx_custom_virtualenvs_total': 0.0,
    'awx_running_jobs_total': 0.0,
    'awx_instance_capacity': 100.0,
    'awx_instance_consumed_capacity': 0.0,
@@ -652,6 +652,31 @@ def test_satellite6_create_ok(post, organization, admin):
|
||||
assert decrypt_field(cred, 'password') == 'some_password'
|
||||
|
||||
|
||||
#
|
||||
# RH Insights Credentials
|
||||
#
|
||||
@pytest.mark.django_db
|
||||
def test_insights_create_ok(post, organization, admin):
|
||||
params = {
|
||||
'credential_type': 1,
|
||||
'name': 'Best credential ever',
|
||||
'inputs': {
|
||||
'username': 'some_username',
|
||||
'password': 'some_password',
|
||||
},
|
||||
}
|
||||
sat6 = CredentialType.defaults['insights']()
|
||||
sat6.save()
|
||||
params['organization'] = organization.id
|
||||
response = post(reverse('api:credential_list'), params, admin)
|
||||
assert response.status_code == 201
|
||||
|
||||
assert Credential.objects.count() == 1
|
||||
cred = Credential.objects.all()[:1].get()
|
||||
assert cred.inputs['username'] == 'some_username'
|
||||
assert decrypt_field(cred, 'password') == 'some_password'
|
||||
|
||||
|
||||
#
|
||||
# AWS Credentials
|
||||
#
|
||||
|
||||
@@ -16,7 +16,7 @@ def test_job_events_sublist_truncation(get, organization_factory, job_template_f
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template
|
||||
job = jt.create_unified_job()
|
||||
JobEvent.create_from_data(job_id=job.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025).save()
|
||||
JobEvent.create_from_data(job_id=job.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025, job_created=job.created).save()
|
||||
|
||||
url = reverse('api:job_job_events_list', kwargs={'pk': job.pk})
|
||||
if not truncate:
|
||||
@@ -38,7 +38,7 @@ def test_ad_hoc_events_sublist_truncation(get, organization_factory, job_templat
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
adhoc = AdHocCommand()
|
||||
adhoc.save()
|
||||
AdHocCommandEvent.create_from_data(ad_hoc_command_id=adhoc.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025).save()
|
||||
AdHocCommandEvent.create_from_data(ad_hoc_command_id=adhoc.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025, job_created=adhoc.created).save()
|
||||
|
||||
url = reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': adhoc.pk})
|
||||
if not truncate:
|
||||
|
||||
@@ -1,145 +0,0 @@
|
||||
from collections import namedtuple
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestHostInsights:
|
||||
def test_insights_bad_host(self, get, hosts, user, mocker):
|
||||
mocker.patch.object(requests.Session, 'get')
|
||||
|
||||
host = hosts(host_count=1)[0]
|
||||
|
||||
url = reverse('api:host_insights', kwargs={'pk': host.pk})
|
||||
response = get(url, user('admin', True))
|
||||
|
||||
assert response.data['error'] == 'This host is not recognized as an Insights host.'
|
||||
assert response.status_code == 404
|
||||
|
||||
def test_insights_host_missing_from_insights(self, get, hosts, insights_credential, user, mocker):
|
||||
class Response:
|
||||
status_code = 200
|
||||
content = "{'results': []}"
|
||||
|
||||
def json(self):
|
||||
return {'results': []}
|
||||
|
||||
mocker.patch.object(requests.Session, 'get', return_value=Response())
|
||||
|
||||
host = hosts(host_count=1)[0]
|
||||
host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
|
||||
host.inventory.insights_credential = insights_credential
|
||||
host.inventory.save()
|
||||
host.save()
|
||||
|
||||
url = reverse('api:host_insights', kwargs={'pk': host.pk})
|
||||
response = get(url, user('admin', True))
|
||||
|
||||
assert response.data['error'] == ('Could not translate Insights system ID 123e4567-e89b-12d3-a456-426655440000' ' into an Insights platform ID.')
|
||||
assert response.status_code == 404
|
||||
|
||||
def test_insights_no_credential(self, get, hosts, user, mocker):
|
||||
mocker.patch.object(requests.Session, 'get')
|
||||
|
||||
host = hosts(host_count=1)[0]
|
||||
host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
|
||||
host.save()
|
||||
|
||||
url = reverse('api:host_insights', kwargs={'pk': host.pk})
|
||||
response = get(url, user('admin', True))
|
||||
|
||||
assert response.data['error'] == 'The Insights Credential for "test-inv" was not found.'
|
||||
assert response.status_code == 404
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"status_code, exception, error, message",
|
||||
[
|
||||
(
|
||||
502,
|
||||
requests.exceptions.SSLError,
|
||||
'SSLError while trying to connect to https://myexample.com/whocares/me/',
|
||||
None,
|
||||
),
|
||||
(
|
||||
504,
|
||||
requests.exceptions.Timeout,
|
||||
'Request to https://myexample.com/whocares/me/ timed out.',
|
||||
None,
|
||||
),
|
||||
(502, requests.exceptions.RequestException, 'booo!', 'Unknown exception booo! while trying to GET https://myexample.com/whocares/me/'),
|
||||
],
|
||||
)
|
||||
def test_insights_exception(self, get, hosts, insights_credential, user, mocker, status_code, exception, error, message):
|
||||
mocker.patch.object(requests.Session, 'get', side_effect=exception(error))
|
||||
|
||||
host = hosts(host_count=1)[0]
|
||||
host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
|
||||
host.inventory.insights_credential = insights_credential
|
||||
host.inventory.save()
|
||||
host.save()
|
||||
|
||||
url = reverse('api:host_insights', kwargs={'pk': host.pk})
|
||||
response = get(url, user('admin', True))
|
||||
|
||||
assert response.data['error'] == message or error
|
||||
assert response.status_code == status_code
|
||||
|
||||
def test_insights_unauthorized(self, get, hosts, insights_credential, user, mocker):
|
||||
Response = namedtuple('Response', 'status_code content')
|
||||
mocker.patch.object(requests.Session, 'get', return_value=Response(401, 'mock 401 err msg'))
|
||||
|
||||
host = hosts(host_count=1)[0]
|
||||
host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
|
||||
host.inventory.insights_credential = insights_credential
|
||||
host.inventory.save()
|
||||
host.save()
|
||||
|
||||
url = reverse('api:host_insights', kwargs={'pk': host.pk})
|
||||
response = get(url, user('admin', True))
|
||||
|
||||
assert response.data['error'] == ("Unauthorized access. Please check your Insights Credential username and password.")
|
||||
assert response.status_code == 502
|
||||
|
||||
def test_insights_bad_status(self, get, hosts, insights_credential, user, mocker):
|
||||
Response = namedtuple('Response', 'status_code content')
|
||||
mocker.patch.object(requests.Session, 'get', return_value=Response(500, 'mock 500 err msg'))
|
||||
|
||||
host = hosts(host_count=1)[0]
|
||||
host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
|
||||
host.inventory.insights_credential = insights_credential
|
||||
host.inventory.save()
|
||||
host.save()
|
||||
|
||||
url = reverse('api:host_insights', kwargs={'pk': host.pk})
|
||||
response = get(url, user('admin', True))
|
||||
|
||||
assert response.data['error'].startswith("Failed to access the Insights API at URL")
|
||||
assert "Server responded with 500 status code and message mock 500 err msg" in response.data['error']
|
||||
assert response.status_code == 502
|
||||
|
||||
def test_insights_bad_json(self, get, hosts, insights_credential, user, mocker):
|
||||
class Response:
|
||||
status_code = 200
|
||||
content = 'booo!'
|
||||
|
||||
def json(self):
|
||||
raise ValueError("we do not care what this is")
|
||||
|
||||
mocker.patch.object(requests.Session, 'get', return_value=Response())
|
||||
|
||||
host = hosts(host_count=1)[0]
|
||||
host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
|
||||
host.inventory.insights_credential = insights_credential
|
||||
host.inventory.save()
|
||||
host.save()
|
||||
|
||||
url = reverse('api:host_insights', kwargs={'pk': host.pk})
|
||||
response = get(url, user('admin', True))
|
||||
|
||||
assert response.data['error'].startswith("Expected JSON response from Insights at URL")
|
||||
assert 'insights_id=123e4567-e89b-12d3-a456-426655440000' in response.data['error']
|
||||
assert response.data['error'].endswith("but instead got booo!")
|
||||
assert response.status_code == 502
|
||||
@@ -13,7 +13,7 @@ from awx.main.utils import camelcase_to_underscore

@pytest.fixture
def tower_instance_group():
    ig = InstanceGroup(name='tower')
    ig = InstanceGroup(name='default')
    ig.save()
    return ig
@@ -105,7 +105,9 @@ def test_delete_instance_group_jobs_running(delete, instance_group_jobs_running,
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_delete_rename_tower_instance_group_prevented(delete, options, tower_instance_group, instance_group, user, patch, execution_environment):
|
||||
def test_delete_rename_tower_instance_group_prevented(
|
||||
delete, options, tower_instance_group, instance_group, user, patch, control_plane_execution_environment, default_job_execution_environment
|
||||
):
|
||||
url = reverse("api:instance_group_detail", kwargs={'pk': tower_instance_group.pk})
|
||||
super_user = user('bob', True)
|
||||
|
||||
@@ -117,8 +119,8 @@ def test_delete_rename_tower_instance_group_prevented(delete, options, tower_ins
|
||||
assert 'GET' in resp.data['actions']
|
||||
assert 'PUT' in resp.data['actions']
|
||||
|
||||
# Rename 'tower' instance group denied
|
||||
patch(url, {'name': 'tower_prime'}, super_user, expect=400)
|
||||
# Rename 'default' instance group denied
|
||||
patch(url, {'name': 'default_prime'}, super_user, expect=400)
|
||||
|
||||
# Rename, other instance group OK
|
||||
url = reverse("api:instance_group_detail", kwargs={'pk': instance_group.pk})
|
||||
|
||||
@@ -4,6 +4,7 @@ from unittest.mock import patch
|
||||
from urllib.parse import urlencode
|
||||
|
||||
from awx.main.models.inventory import Group, Host
|
||||
from awx.main.models.ad_hoc_commands import AdHocCommand
|
||||
from awx.api.pagination import Pagination
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
@@ -61,3 +62,46 @@ def test_pagination_cap_page_size(get, admin, inventory):
|
||||
|
||||
assert jdata['previous'] == host_list_url({'page': '1', 'page_size': '5'})
|
||||
assert jdata['next'] == host_list_url({'page': '3', 'page_size': '5'})
|
||||
|
||||
|
||||
class TestUnifiedJobEventPagination:
|
||||
@pytest.fixture
|
||||
def ad_hoc_command(self, ad_hoc_command_factory):
|
||||
return ad_hoc_command_factory()
|
||||
|
||||
def _test_unified_job(self, get, admin, template, job_attribute, list_endpoint):
|
||||
if isinstance(template, AdHocCommand):
|
||||
job = template
|
||||
else:
|
||||
job = template.create_unified_job()
|
||||
kwargs = {job_attribute: job.pk}
|
||||
for i in range(20):
|
||||
job.event_class.create_from_data(**kwargs).save()
|
||||
|
||||
url = reverse(f'api:{list_endpoint}', kwargs={'pk': job.pk}) + '?limit=7'
|
||||
resp = get(url, user=admin, expect=200)
|
||||
|
||||
assert 'count' not in resp.data
|
||||
assert 'next' not in resp.data
|
||||
assert 'previous' not in resp.data
|
||||
assert len(resp.data['results']) == 7
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job(self, get, admin, job_template):
|
||||
self._test_unified_job(get, admin, job_template, 'job_id', 'job_job_events_list')
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_project_update(self, get, admin, project):
|
||||
self._test_unified_job(get, admin, project, 'project_update_id', 'project_update_events_list')
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_update(self, get, admin, inventory_source):
|
||||
self._test_unified_job(get, admin, inventory_source, 'inventory_update_id', 'inventory_update_events_list')
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_system_job(self, get, admin, system_job_template):
|
||||
self._test_unified_job(get, admin, system_job_template, 'system_job_id', 'system_job_events_list')
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_adhoc_command(self, get, admin, ad_hoc_command):
|
||||
self._test_unified_job(get, admin, ad_hoc_command, 'ad_hoc_command_id', 'ad_hoc_command_ad_hoc_command_events_list')
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
import base64
|
||||
import json
|
||||
import re
|
||||
from datetime import datetime
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.encoding import smart_str
|
||||
@@ -26,16 +27,22 @@ from awx.main.models import (
|
||||
)
|
||||
|
||||
|
||||
def _mk_project_update():
|
||||
def _mk_project_update(created=None):
|
||||
kwargs = {}
|
||||
if created:
|
||||
kwargs['created'] = created
|
||||
project = Project()
|
||||
project.save()
|
||||
return ProjectUpdate(project=project)
|
||||
return ProjectUpdate(project=project, **kwargs)
|
||||
|
||||
|
||||
def _mk_inventory_update():
|
||||
def _mk_inventory_update(created=None):
|
||||
kwargs = {}
|
||||
if created:
|
||||
kwargs['created'] = created
|
||||
source = InventorySource(source='ec2')
|
||||
source.save()
|
||||
iu = InventoryUpdate(inventory_source=source, source='e2')
|
||||
iu = InventoryUpdate(inventory_source=source, source='e2', **kwargs)
|
||||
return iu
|
||||
|
||||
|
||||
@@ -139,10 +146,11 @@ def test_stdout_line_range(sqlite_copy_expert, Parent, Child, relation, view, ge
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_text_stdout_from_system_job_events(sqlite_copy_expert, get, admin):
|
||||
job = SystemJob()
|
||||
created = datetime.utcnow()
|
||||
job = SystemJob(created=created)
|
||||
job.save()
|
||||
for i in range(3):
|
||||
SystemJobEvent(system_job=job, stdout='Testing {}\n'.format(i), start_line=i).save()
|
||||
SystemJobEvent(system_job=job, stdout='Testing {}\n'.format(i), start_line=i, job_created=created).save()
|
||||
url = reverse('api:system_job_detail', kwargs={'pk': job.pk})
|
||||
response = get(url, user=admin, expect=200)
|
||||
assert smart_str(response.data['result_stdout']).splitlines() == ['Testing %d' % i for i in range(3)]
|
||||
@@ -150,11 +158,12 @@ def test_text_stdout_from_system_job_events(sqlite_copy_expert, get, admin):
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_text_stdout_with_max_stdout(sqlite_copy_expert, get, admin):
|
||||
job = SystemJob()
|
||||
created = datetime.utcnow()
|
||||
job = SystemJob(created=created)
|
||||
job.save()
|
||||
total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
|
||||
large_stdout = 'X' * total_bytes
|
||||
SystemJobEvent(system_job=job, stdout=large_stdout, start_line=0).save()
|
||||
SystemJobEvent(system_job=job, stdout=large_stdout, start_line=0, job_created=created).save()
|
||||
url = reverse('api:system_job_detail', kwargs={'pk': job.pk})
|
||||
response = get(url, user=admin, expect=200)
|
||||
assert response.data['result_stdout'] == (
|
||||
@@ -176,11 +185,12 @@ def test_text_stdout_with_max_stdout(sqlite_copy_expert, get, admin):
|
||||
@pytest.mark.parametrize('fmt', ['txt', 'ansi'])
|
||||
@mock.patch('awx.main.redact.UriCleaner.SENSITIVE_URI_PATTERN', mock.Mock(**{'search.return_value': None})) # really slow for large strings
|
||||
def test_max_bytes_display(sqlite_copy_expert, Parent, Child, relation, view, fmt, get, admin):
|
||||
job = Parent()
|
||||
created = datetime.utcnow()
|
||||
job = Parent(created=created)
|
||||
job.save()
|
||||
total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
|
||||
large_stdout = 'X' * total_bytes
|
||||
Child(**{relation: job, 'stdout': large_stdout, 'start_line': 0}).save()
|
||||
Child(**{relation: job, 'stdout': large_stdout, 'start_line': 0, 'job_created': created}).save()
|
||||
url = reverse(view, kwargs={'pk': job.pk})
|
||||
|
||||
response = get(url + '?format={}'.format(fmt), user=admin, expect=200)
|
||||
@@ -257,10 +267,11 @@ def test_text_with_unicode_stdout(sqlite_copy_expert, Parent, Child, relation, v
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_unicode_with_base64_ansi(sqlite_copy_expert, get, admin):
|
||||
job = Job()
|
||||
created = datetime.utcnow()
|
||||
job = Job(created=created)
|
||||
job.save()
|
||||
for i in range(3):
|
||||
JobEvent(job=job, stdout='オ{}\n'.format(i), start_line=i).save()
|
||||
JobEvent(job=job, stdout='オ{}\n'.format(i), start_line=i, job_created=created).save()
|
||||
url = reverse('api:job_stdout', kwargs={'pk': job.pk}) + '?format=json&content_encoding=base64'
|
||||
|
||||
response = get(url, user=admin, expect=200)
|
||||
|
||||
@@ -2,12 +2,14 @@ import pytest
|
||||
from datetime import datetime, timedelta
|
||||
from pytz import timezone
|
||||
from collections import OrderedDict
|
||||
from unittest import mock
|
||||
|
||||
from django.db.models.deletion import Collector, SET_NULL, CASCADE
|
||||
from django.core.management import call_command
|
||||
|
||||
from awx.main.management.commands import cleanup_jobs
|
||||
from awx.main.utils.deletion import AWXCollector
|
||||
from awx.main.models import JobTemplate, User, Job, JobEvent, Notification, WorkflowJobNode, JobHostSummary
|
||||
from awx.main.models import JobTemplate, User, Job, Notification, WorkflowJobNode, JobHostSummary
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -32,19 +34,20 @@ def setup_environment(inventory, project, machine_credential, host, notification
|
||||
notification.save()
|
||||
|
||||
for i in range(3):
|
||||
# create jobs with current time
|
||||
job1 = jt.create_job()
|
||||
job1.created = datetime.now(tz=timezone('UTC'))
|
||||
job1.save()
|
||||
# create jobs with current time
|
||||
JobEvent.create_from_data(job_id=job1.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025).save()
|
||||
# sqlite does not support partitioning so we cannot test partition-based jobevent cleanup
|
||||
# JobEvent.create_from_data(job_id=job1.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025).save()
|
||||
new_jobs.append(job1)
|
||||
|
||||
job2 = jt.create_job()
|
||||
# create jobs 10 days ago
|
||||
job2 = jt.create_job()
|
||||
job2.created = datetime.now(tz=timezone('UTC')) - timedelta(days=days)
|
||||
job2.save()
|
||||
job2.dependent_jobs.add(job1)
|
||||
JobEvent.create_from_data(job_id=job2.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025).save()
|
||||
# JobEvent.create_from_data(job_id=job2.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025).save()
|
||||
old_jobs.append(job2)
|
||||
|
||||
jt.last_job = job2
|
||||
@@ -62,7 +65,13 @@ def setup_environment(inventory, project, machine_credential, host, notification
|
||||
return (old_jobs, new_jobs, days_str)
|
||||
|
||||
|
||||
# sqlite does not support table partitioning so we mock out the methods responsible for pruning
# job event partitions during the job cleanup task
# https://github.com/ansible/awx/issues/9039
@pytest.mark.django_db
@mock.patch.object(cleanup_jobs.DeleteMeta, 'identify_excluded_partitions', mock.MagicMock())
@mock.patch.object(cleanup_jobs.DeleteMeta, 'find_partitions_to_drop', mock.MagicMock())
@mock.patch.object(cleanup_jobs.DeleteMeta, 'drop_partitions', mock.MagicMock())
def test_cleanup_jobs(setup_environment):
    (old_jobs, new_jobs, days_str) = setup_environment
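On PostgreSQL the three mocked-out DeleteMeta steps amount to date-based partition pruning: work out which event partitions are still referenced by jobs being kept, collect the rest, and drop them. A conceptual sketch only, with made-up names, not the real cleanup_jobs code:

    from django.db import connection

    def drop_event_partitions(candidate_partitions, excluded_partitions):
        # candidate/excluded are lists of partition table names, e.g. 'main_jobevent_20210301'
        to_drop = [p for p in candidate_partitions if p not in set(excluded_partitions)]
        with connection.cursor() as cur:
            for partition in to_drop:
                cur.execute(f'DROP TABLE IF EXISTS {partition}')
        return to_drop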
@@ -824,5 +824,10 @@ def slice_job_factory(slice_jt_factory):
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def execution_environment():
|
||||
return ExecutionEnvironment.objects.create(name="test-ee", description="test-ee", managed_by_tower=True)
|
||||
def control_plane_execution_environment():
|
||||
return ExecutionEnvironment.objects.create(name="Control Plane EE", managed_by_tower=True)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def default_job_execution_environment():
|
||||
return ExecutionEnvironment.objects.create(name="Default Job EE", managed_by_tower=False)
|
||||
|
||||
@@ -209,6 +209,7 @@ class TestInventorySourceInjectors:
|
||||
('vmware', 'community.vmware.vmware_vm_inventory'),
|
||||
('rhv', 'ovirt.ovirt.ovirt'),
|
||||
('satellite6', 'theforeman.foreman.foreman'),
|
||||
('insights', 'redhatinsights.insights.insights'),
|
||||
('tower', 'awx.awx.tower'),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -3,14 +3,6 @@ import pytest
|
||||
from awx.main.models import JobTemplate, Job, JobHostSummary, WorkflowJob, Inventory, Project, Organization
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_awx_virtualenv_from_settings(inventory, project, machine_credential):
|
||||
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
|
||||
jt.credentials.add(machine_credential)
|
||||
job = jt.create_unified_job()
|
||||
assert job.ansible_virtualenv_path == '/var/lib/awx/venv/ansible'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_prevent_slicing():
|
||||
jt = JobTemplate.objects.create(name='foo', job_slice_count=4)
|
||||
@@ -20,36 +12,6 @@ def test_prevent_slicing():
|
||||
assert isinstance(job, Job)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_awx_custom_virtualenv(inventory, project, machine_credential, organization):
|
||||
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml', organization=organization)
|
||||
jt.credentials.add(machine_credential)
|
||||
job = jt.create_unified_job()
|
||||
|
||||
job.organization.custom_virtualenv = '/var/lib/awx/venv/fancy-org'
|
||||
job.organization.save()
|
||||
assert job.ansible_virtualenv_path == '/var/lib/awx/venv/fancy-org'
|
||||
|
||||
job.project.custom_virtualenv = '/var/lib/awx/venv/fancy-proj'
|
||||
job.project.save()
|
||||
assert job.ansible_virtualenv_path == '/var/lib/awx/venv/fancy-proj'
|
||||
|
||||
job.job_template.custom_virtualenv = '/var/lib/awx/venv/fancy-jt'
|
||||
job.job_template.save()
|
||||
assert job.ansible_virtualenv_path == '/var/lib/awx/venv/fancy-jt'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_awx_custom_virtualenv_without_jt(project):
|
||||
project.custom_virtualenv = '/var/lib/awx/venv/fancy-proj'
|
||||
project.save()
|
||||
job = Job(project=project)
|
||||
job.save()
|
||||
|
||||
job = Job.objects.get(pk=job.id)
|
||||
assert job.ansible_virtualenv_path == '/var/lib/awx/venv/fancy-proj'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_host_summary_representation(host):
|
||||
job = Job.objects.create(name='foo')
|
||||
|
||||
@@ -10,21 +10,21 @@ class TestCapacityMapping(TransactionTestCase):
|
||||
def sample_cluster(self):
|
||||
ig_small = InstanceGroup.objects.create(name='ig_small')
|
||||
ig_large = InstanceGroup.objects.create(name='ig_large')
|
||||
tower = InstanceGroup.objects.create(name='tower')
|
||||
default = InstanceGroup.objects.create(name='default')
|
||||
i1 = Instance.objects.create(hostname='i1', capacity=200)
|
||||
i2 = Instance.objects.create(hostname='i2', capacity=200)
|
||||
i3 = Instance.objects.create(hostname='i3', capacity=200)
|
||||
ig_small.instances.add(i1)
|
||||
ig_large.instances.add(i2, i3)
|
||||
tower.instances.add(i2)
|
||||
return [tower, ig_large, ig_small]
|
||||
default.instances.add(i2)
|
||||
return [default, ig_large, ig_small]
|
||||
|
||||
def test_mapping(self):
|
||||
self.sample_cluster()
|
||||
with self.assertNumQueries(2):
|
||||
inst_map, ig_map = InstanceGroup.objects.capacity_mapping()
|
||||
assert inst_map['i1'] == set(['ig_small'])
|
||||
assert inst_map['i2'] == set(['ig_large', 'tower'])
|
||||
assert inst_map['i2'] == set(['ig_large', 'default'])
|
||||
assert ig_map['ig_small'] == set(['ig_small'])
|
||||
assert ig_map['ig_large'] == set(['ig_large', 'tower'])
|
||||
assert ig_map['tower'] == set(['ig_large', 'tower'])
|
||||
assert ig_map['ig_large'] == set(['ig_large', 'default'])
|
||||
assert ig_map['default'] == set(['ig_large', 'default'])
|
||||
|
||||
@@ -35,7 +35,8 @@ def test_containerized_job(containerized_job):
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_kubectl_ssl_verification(containerized_job, execution_environment):
|
||||
def test_kubectl_ssl_verification(containerized_job, default_job_execution_environment):
|
||||
containerized_job.execution_environment = default_job_execution_environment
|
||||
cred = containerized_job.instance_group.credential
|
||||
cred.inputs['verify_ssl'] = True
|
||||
key_material = subprocess.run('openssl genrsa 2> /dev/null', shell=True, check=True, stdout=subprocess.PIPE)
|
||||
|
||||
@@ -96,6 +96,7 @@ def test_default_cred_types():
|
||||
'satellite6',
|
||||
'scm',
|
||||
'ssh',
|
||||
'thycotic_dsv',
|
||||
'tower',
|
||||
'vault',
|
||||
'vmware',
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.models import ExecutionEnvironment
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_execution_environment_creation(execution_environment, organization):
|
||||
execution_env = ExecutionEnvironment.objects.create(
|
||||
name='Hello Environment', image='', organization=organization, managed_by_tower=False, credential=None, pull='missing'
|
||||
)
|
||||
assert type(execution_env) is type(execution_environment)
|
||||
assert execution_env.organization == organization
|
||||
assert execution_env.name == 'Hello Environment'
|
||||
assert execution_env.pull == 'missing'
|
||||
@@ -182,7 +182,8 @@ def create_reference_data(source_dir, env, content):
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('this_kind', CLOUD_PROVIDERS)
|
||||
def test_inventory_update_injected_content(this_kind, inventory, fake_credential_factory):
|
||||
ExecutionEnvironment.objects.create(name='test EE', managed_by_tower=True)
|
||||
ExecutionEnvironment.objects.create(name='Control Plane EE', managed_by_tower=True)
|
||||
ExecutionEnvironment.objects.create(name='Default Job EE', managed_by_tower=False)
|
||||
|
||||
injector = InventorySource.injectors[this_kind]
|
||||
if injector.plugin_name is None:
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
|
||||
from django.conf import settings
|
||||
@@ -62,6 +63,26 @@ def test_python_and_js_licenses():
|
||||
ret[name] = {'name': name, 'version': version}
|
||||
return ret
|
||||
|
||||
def read_ui_requirements(path):
|
||||
def json_deps(jsondata):
|
||||
ret = {}
|
||||
deps = jsondata.get('dependencies', {})
|
||||
for key in deps.keys():
|
||||
key = key.lower()
|
||||
devonly = deps[key].get('dev', False)
|
||||
if not devonly:
|
||||
if key not in ret.keys():
|
||||
depname = key.replace('/', '-')
|
||||
if depname[0] == '@':
|
||||
depname = depname[1:]
|
||||
ret[depname] = {'name': depname, 'version': deps[key]['version']}
|
||||
ret.update(json_deps(deps[key]))
|
||||
return ret
|
||||
|
||||
with open('%s/package-lock.json' % path) as f:
|
||||
jsondata = json.load(f)
|
||||
return json_deps(jsondata)
|
||||
|
||||
def remediate_licenses_and_requirements(licenses, requirements):
|
||||
errors = []
|
||||
items = list(licenses.keys())
|
||||
@@ -88,9 +109,12 @@ def test_python_and_js_licenses():
|
||||
|
||||
base_dir = settings.BASE_DIR
|
||||
api_licenses = index_licenses('%s/../docs/licenses' % base_dir)
|
||||
ui_licenses = index_licenses('%s/../docs/licenses/ui' % base_dir)
|
||||
api_requirements = read_api_requirements('%s/../requirements' % base_dir)
|
||||
ui_requirements = read_ui_requirements('%s/ui_next' % base_dir)
|
||||
|
||||
errors = []
|
||||
errors += remediate_licenses_and_requirements(ui_licenses, ui_requirements)
|
||||
errors += remediate_licenses_and_requirements(api_licenses, api_requirements)
|
||||
if errors:
|
||||
raise Exception('Included licenses not consistent with requirements:\n%s' % '\n'.join(errors))
|
||||
|
||||
@@ -0,0 +1,46 @@
import pytest

from django.conf import settings
from django.test.utils import override_settings

from awx.main.models.execution_environments import ExecutionEnvironment
from awx.main.utils.execution_environments import get_default_execution_environment
from awx.main.management.commands.register_default_execution_environments import Command


@pytest.fixture
def set_up_defaults():
    Command().handle()


@pytest.mark.django_db
def test_default_to_jobs_default(set_up_defaults, organization):
    """Under normal operation, the default EE should be from the list of global job EEs
    which are populated by the installer
    """
    # Fill in some other unrelated EEs
    ExecutionEnvironment.objects.create(name='Steves environment', image='quay.io/ansible/awx-ee')
    ExecutionEnvironment(name=settings.GLOBAL_JOB_EXECUTION_ENVIRONMENTS[0]['name'], image='quay.io/ansible/awx-ee', organization=organization)
    default_ee = get_default_execution_environment()
    assert default_ee.image == settings.GLOBAL_JOB_EXECUTION_ENVIRONMENTS[0]['image']
    assert default_ee.name == settings.GLOBAL_JOB_EXECUTION_ENVIRONMENTS[0]['name']


@pytest.mark.django_db
def test_default_to_control_plane(set_up_defaults):
    """If all of the job execution environments have gone missing,
    then it will refuse to use the control plane execution environment as the default
    """
    for ee in ExecutionEnvironment.objects.all():
        if ee.name == 'Control Plane Execution Environment':
            continue
        ee.delete()
    assert get_default_execution_environment() is None


@pytest.mark.django_db
def test_user_default(set_up_defaults):
    """If superuser has configured a default, then their preference should come first, of course"""
    ee = ExecutionEnvironment.objects.create(name='Steves environment', image='quay.io/ansible/awx-ee')
    with override_settings(DEFAULT_EXECUTION_ENVIRONMENT=ee):
        assert get_default_execution_environment() == ee
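Read together, these tests imply a lookup order for the default execution environment: an explicitly configured DEFAULT_EXECUTION_ENVIRONMENT wins, otherwise the first installer-registered global job EE that still exists, and the control-plane EE is never used as a fallback. A rough sketch of that order under those assumptions, not the actual awx implementation:

    def pick_default_ee(configured_default, global_job_ee_names, existing_ees_by_name):
        # 1. a superuser-configured default takes precedence
        if configured_default is not None:
            return configured_default
        # 2. otherwise, the first installer-registered global job EE that still exists
        for name in global_job_ee_names:
            if name in existing_ees_by_name:
                return existing_ees_by_name[name]
        # 3. never fall back to the control-plane EE
        return None

    assert pick_default_ee(None, ['Default Job EE'], {'Control Plane EE': object()}) is None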
@@ -134,7 +134,8 @@ class TestJobDetailSerializerGetHostStatusCountFields(object):
|
||||
)
|
||||
|
||||
mock_qs = namedtuple('mock_qs', ['get'])(mocker.MagicMock(return_value=mock_event))
|
||||
job.job_events.only = mocker.MagicMock(return_value=mock_qs)
|
||||
only = mocker.MagicMock(return_value=mock_qs)
|
||||
job.get_event_queryset = lambda *args, **kwargs: mocker.MagicMock(only=only)
|
||||
|
||||
serializer = JobDetailSerializer()
|
||||
host_status_counts = serializer.get_host_status_counts(job)
|
||||
@@ -142,7 +143,7 @@ class TestJobDetailSerializerGetHostStatusCountFields(object):
|
||||
assert host_status_counts == {'ok': 1, 'changed': 1, 'dark': 2}
|
||||
|
||||
def test_host_status_counts_is_empty_dict_without_stats_event(self, job):
|
||||
job.job_events = JobEvent.objects.none()
|
||||
job.get_event_queryset = lambda *args, **kwargs: JobEvent.objects.none()
|
||||
|
||||
serializer = JobDetailSerializer()
|
||||
host_status_counts = serializer.get_host_status_counts(job)
|
||||
|
||||
@@ -55,7 +55,7 @@ def test_list_views_use_list_serializers(all_views):
|
||||
"""
|
||||
list_serializers = tuple(getattr(serializers, '{}ListSerializer'.format(cls.__name__)) for cls in (UnifiedJob.__subclasses__() + [UnifiedJob]))
|
||||
for View in all_views:
|
||||
if hasattr(View, 'model') and issubclass(getattr(View, 'model'), UnifiedJob):
|
||||
if hasattr(View, 'model') and type(View.model) is not property and issubclass(getattr(View, 'model'), UnifiedJob):
|
||||
if issubclass(View, ListAPIView):
|
||||
assert issubclass(View.serializer_class, list_serializers), 'View {} serializer {} is not a list serializer'.format(View, View.serializer_class)
|
||||
else:
|
||||
|
||||
@@ -71,7 +71,7 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
|
||||
for h in hosts:
|
||||
h.save = mocker.Mock()
|
||||
|
||||
ansible_facts_new = {"foo": "bar", "insights": {"system_id": "updated_by_scan"}}
|
||||
ansible_facts_new = {"foo": "bar"}
|
||||
filepath = os.path.join(fact_cache, hosts[1].name)
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(json.dumps(ansible_facts_new))
|
||||
@@ -90,31 +90,9 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
|
||||
assert host.ansible_facts == {"a": 1, "b": 2}
|
||||
assert host.ansible_facts_modified is None
|
||||
assert hosts[1].ansible_facts == ansible_facts_new
|
||||
assert hosts[1].insights_system_id == "updated_by_scan"
|
||||
hosts[1].save.assert_called_once_with()
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_malformed_fact(job, hosts, inventory, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
modified_times = {}
|
||||
job.start_job_fact_cache(fact_cache, modified_times, 0)
|
||||
|
||||
for h in hosts:
|
||||
h.save = mocker.Mock()
|
||||
|
||||
for h in hosts:
|
||||
filepath = os.path.join(fact_cache, h.name)
|
||||
with open(filepath, 'w') as f:
|
||||
json.dump({'ansible_local': {'insights': 'this is an unexpected error from ansible'}}, f)
|
||||
new_modification_time = time.time() + 3600
|
||||
os.utime(filepath, (new_modification_time, new_modification_time))
|
||||
|
||||
job.finish_job_fact_cache(fact_cache, modified_times)
|
||||
|
||||
for h in hosts:
|
||||
assert h.insights_system_id is None
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
modified_times = {}
|
||||
|
||||
@@ -75,7 +75,6 @@ def job(mocker):
|
||||
'launch_type': 'manual',
|
||||
'verbosity': 1,
|
||||
'awx_meta_vars.return_value': {},
|
||||
'ansible_virtualenv_path': '',
|
||||
'inventory.get_script_data.return_value': {},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -43,34 +43,34 @@ def sample_cluster():
|
||||
|
||||
ig_small = InstanceGroup(name='ig_small')
|
||||
ig_large = InstanceGroup(name='ig_large')
|
||||
tower = InstanceGroup(name='tower')
|
||||
default = InstanceGroup(name='default')
|
||||
i1 = Instance(hostname='i1', capacity=200)
|
||||
i2 = Instance(hostname='i2', capacity=200)
|
||||
i3 = Instance(hostname='i3', capacity=200)
|
||||
ig_small.instances.add(i1)
|
||||
ig_large.instances.add(i2, i3)
|
||||
tower.instances.add(i2)
|
||||
return [tower, ig_large, ig_small]
|
||||
default.instances.add(i2)
|
||||
return [default, ig_large, ig_small]
|
||||
|
||||
return stand_up_cluster
|
||||
|
||||
|
||||
def test_committed_capacity(sample_cluster):
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
tasks = [Job(status='waiting', instance_group=tower), Job(status='waiting', instance_group=ig_large), Job(status='waiting', instance_group=ig_small)]
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[tower, ig_large, ig_small], tasks=tasks, breakdown=True)
|
||||
default, ig_large, ig_small = sample_cluster()
|
||||
tasks = [Job(status='waiting', instance_group=default), Job(status='waiting', instance_group=ig_large), Job(status='waiting', instance_group=ig_small)]
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks, breakdown=True)
|
||||
# Jobs submitted to either tower or ig_larg must count toward both
|
||||
assert capacities['tower']['committed_capacity'] == 43 * 2
|
||||
assert capacities['default']['committed_capacity'] == 43 * 2
|
||||
assert capacities['ig_large']['committed_capacity'] == 43 * 2
|
||||
assert capacities['ig_small']['committed_capacity'] == 43
|
||||
|
||||
|
||||
def test_running_capacity(sample_cluster):
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
default, ig_large, ig_small = sample_cluster()
|
||||
tasks = [Job(status='running', execution_node='i1'), Job(status='running', execution_node='i2'), Job(status='running', execution_node='i3')]
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[tower, ig_large, ig_small], tasks=tasks, breakdown=True)
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks, breakdown=True)
|
||||
# Tower is only given 1 instance
|
||||
assert capacities['tower']['running_capacity'] == 43
|
||||
assert capacities['default']['running_capacity'] == 43
|
||||
# Large IG has 2 instances
|
||||
assert capacities['ig_large']['running_capacity'] == 43 * 2
|
||||
assert capacities['ig_small']['running_capacity'] == 43
|
||||
@@ -81,10 +81,10 @@ def test_offline_node_running(sample_cluster):
|
||||
Assure that algorithm doesn't explode if a job is marked running
|
||||
in an offline node
|
||||
"""
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
default, ig_large, ig_small = sample_cluster()
|
||||
ig_small.instance_list[0].capacity = 0
|
||||
tasks = [Job(status='running', execution_node='i1', instance_group=ig_small)]
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[tower, ig_large, ig_small], tasks=tasks)
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks)
|
||||
assert capacities['ig_small']['consumed_capacity'] == 43
|
||||
|
||||
|
||||
@@ -92,10 +92,10 @@ def test_offline_node_waiting(sample_cluster):
|
||||
"""
|
||||
Same but for a waiting job
|
||||
"""
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
default, ig_large, ig_small = sample_cluster()
|
||||
ig_small.instance_list[0].capacity = 0
|
||||
tasks = [Job(status='waiting', instance_group=ig_small)]
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[tower, ig_large, ig_small], tasks=tasks)
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks)
|
||||
assert capacities['ig_small']['consumed_capacity'] == 43
|
||||
|
||||
|
||||
@@ -105,9 +105,9 @@ def test_RBAC_reduced_filter(sample_cluster):
|
||||
but user does not have permission to see those actual instance groups.
|
||||
Verify that this does not blow everything up.
|
||||
"""
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
tasks = [Job(status='waiting', instance_group=tower), Job(status='waiting', instance_group=ig_large), Job(status='waiting', instance_group=ig_small)]
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[tower], tasks=tasks, breakdown=True)
|
||||
default, ig_large, ig_small = sample_cluster()
|
||||
tasks = [Job(status='waiting', instance_group=default), Job(status='waiting', instance_group=ig_large), Job(status='waiting', instance_group=ig_small)]
|
||||
capacities = InstanceGroup.objects.capacity_values(qs=[default], tasks=tasks, breakdown=True)
|
||||
# Cross-links between groups not visible to current user,
|
||||
# so a naieve accounting of capacities is returned instead
|
||||
assert capacities['tower']['committed_capacity'] == 43
|
||||
assert capacities['default']['committed_capacity'] == 43
|
||||
|
||||
@@ -187,7 +187,6 @@ def test_openstack_client_config_generation(mocker, source, expected, private_da
|
||||
'source_vars_dict': {},
|
||||
'get_cloud_credential': mocker.Mock(return_value=credential),
|
||||
'get_extra_credentials': lambda x: [],
|
||||
'ansible_virtualenv_path': '/var/lib/awx/venv/foo',
|
||||
}
|
||||
)
|
||||
cloud_config = update.build_private_data(inventory_update, private_data_dir)
|
||||
@@ -229,7 +228,6 @@ def test_openstack_client_config_generation_with_project_domain_name(mocker, sou
|
||||
'source_vars_dict': {},
|
||||
'get_cloud_credential': mocker.Mock(return_value=credential),
|
||||
'get_extra_credentials': lambda x: [],
|
||||
'ansible_virtualenv_path': '/var/lib/awx/venv/foo',
|
||||
}
|
||||
)
|
||||
cloud_config = update.build_private_data(inventory_update, private_data_dir)
|
||||
@@ -273,7 +271,6 @@ def test_openstack_client_config_generation_with_region(mocker, source, expected
|
||||
'source_vars_dict': {},
|
||||
'get_cloud_credential': mocker.Mock(return_value=credential),
|
||||
'get_extra_credentials': lambda x: [],
|
||||
'ansible_virtualenv_path': '/venv/foo',
|
||||
}
|
||||
)
|
||||
cloud_config = update.build_private_data(inventory_update, private_data_dir)
|
||||
@@ -315,7 +312,6 @@ def test_openstack_client_config_generation_with_private_source_vars(mocker, sou
|
||||
'source_vars_dict': {'private': source},
|
||||
'get_cloud_credential': mocker.Mock(return_value=credential),
|
||||
'get_extra_credentials': lambda x: [],
|
||||
'ansible_virtualenv_path': '/var/lib/awx/venv/foo',
|
||||
}
|
||||
)
|
||||
cloud_config = update.build_private_data(inventory_update, private_data_dir)
|
||||
@@ -592,7 +588,8 @@ class TestGenericRun:
|
||||
@pytest.mark.django_db
|
||||
class TestAdhocRun(TestJobExecution):
|
||||
def test_options_jinja_usage(self, adhoc_job, adhoc_update_model_wrapper):
|
||||
ExecutionEnvironment.objects.create(name='test EE', managed_by_tower=True)
|
||||
ExecutionEnvironment.objects.create(name='Control Plane EE', managed_by_tower=True)
|
||||
ExecutionEnvironment.objects.create(name='Default Job EE', managed_by_tower=False)
|
||||
|
||||
adhoc_job.module_args = '{{ ansible_ssh_pass }}'
|
||||
adhoc_job.websocket_emit_status = mock.Mock()
|
||||
@@ -1749,6 +1746,34 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
assert env["FOREMAN_PASSWORD"] == "secret"
|
||||
assert safe_env["FOREMAN_PASSWORD"] == tasks.HIDDEN_PASSWORD
|
||||
|
||||
def test_insights_source(self, inventory_update, private_data_dir, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
task.instance = inventory_update
|
||||
insights = CredentialType.defaults['insights']()
|
||||
inventory_update.source = 'insights'
|
||||
|
||||
def get_cred():
|
||||
cred = Credential(
|
||||
pk=1,
|
||||
credential_type=insights,
|
||||
inputs={
|
||||
'username': 'bob',
|
||||
'password': 'secret',
|
||||
},
|
||||
)
|
||||
cred.inputs['password'] = encrypt_field(cred, 'password')
|
||||
return cred
|
||||
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
env = task.build_env(inventory_update, private_data_dir, False)
|
||||
safe_env = build_safe_env(env)
|
||||
|
||||
assert env["INSIGHTS_USER"] == "bob"
|
||||
assert env["INSIGHTS_PASSWORD"] == "secret"
|
||||
assert safe_env['INSIGHTS_PASSWORD'] == tasks.HIDDEN_PASSWORD
|
||||
|
||||
@pytest.mark.parametrize('verify', [True, False])
|
||||
def test_tower_source(self, verify, inventory_update, private_data_dir, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
|
||||
@@ -4,6 +4,7 @@ from awx.main.validators import (
|
||||
validate_certificate,
|
||||
validate_ssh_private_key,
|
||||
vars_validate_or_raise,
|
||||
validate_container_image_name,
|
||||
)
|
||||
from awx.main.tests.data.ssh import (
|
||||
TEST_SSH_RSA1_KEY_DATA,
|
||||
@@ -163,3 +164,39 @@ def test_valid_vars(var_str):
|
||||
def test_invalid_vars(var_str):
|
||||
with pytest.raises(RestValidationError):
|
||||
vars_validate_or_raise(var_str)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("image_name", "is_valid"),
|
||||
[
|
||||
("localhost", True),
|
||||
("short", True),
|
||||
("simple/name", True),
|
||||
("ab/ab/ab/ab", True),
|
||||
("foo.com/", False),
|
||||
("", False),
|
||||
("localhost/foo", True),
|
||||
("3asdasdf3", True),
|
||||
("xn--7o8h.com/myimage", True),
|
||||
("Asdf.com/foo/bar", True),
|
||||
("Foo/FarB", False),
|
||||
("registry.com:8080/myapp:tag", True),
|
||||
("registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", True),
|
||||
("registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", True),
|
||||
("registry.com:8080/myapp@sha256:badbadbadbad", False),
|
||||
("registry.com:8080/myapp:invalid~tag", False),
|
||||
("bad_hostname.com:8080/myapp:tag", False),
|
||||
("localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", True),
|
||||
("localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", True),
|
||||
("localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", False),
|
||||
("localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", True),
|
||||
("registry.com:8080/myapp@bad", False),
|
||||
("registry.com:8080/myapp@2bad", False),
|
||||
],
|
||||
)
|
||||
def test_valid_container_image_name(image_name, is_valid):
|
||||
if is_valid:
|
||||
validate_container_image_name(image_name)
|
||||
else:
|
||||
with pytest.raises(ValidationError):
|
||||
validate_container_image_name(image_name)
|
||||
|
||||
@@ -73,6 +73,8 @@ def test_global_creation_always_possible(all_views):
|
||||
views_by_model = {}
|
||||
for View in all_views:
|
||||
if not getattr(View, 'deprecated', False) and issubclass(View, ListAPIView) and hasattr(View, 'model'):
|
||||
if type(View.model) is property:
|
||||
continue # special case for JobEventChildrenList
|
||||
views_by_model.setdefault(View.model, []).append(View)
|
||||
for model, views in views_by_model.items():
|
||||
creatable = False
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
# Copyright (c) 2017 Ansible Tower by Red Hat
|
||||
# All Rights Reserved.
|
||||
|
||||
|
||||
from awx.main.utils.insights import filter_insights_api_response
|
||||
from awx.main.tests.data.insights import TEST_INSIGHTS_HOSTS, TEST_INSIGHTS_PLANS, TEST_INSIGHTS_REMEDIATIONS
|
||||
|
||||
|
||||
def test_filter_insights_api_response():
|
||||
actual = filter_insights_api_response(TEST_INSIGHTS_HOSTS['results'][0], TEST_INSIGHTS_PLANS, TEST_INSIGHTS_REMEDIATIONS)
|
||||
|
||||
assert actual['last_check_in'] == '2019-03-19T21:59:09.213151-04:00'
|
||||
assert len(actual['reports']) == 5
|
||||
assert len(actual['reports'][0]['maintenance_actions']) == 1
|
||||
assert actual['reports'][0]['maintenance_actions'][0]['name'] == "Fix Critical CVEs"
|
||||
rule = actual['reports'][0]['rule']
|
||||
|
||||
assert rule['severity'] == 'WARN'
|
||||
assert rule['description'] == ("Kernel vulnerable to side-channel attacks in modern microprocessors (CVE-2017-5715/Spectre)")
|
||||
assert rule['category'] == 'Security'
|
||||
assert rule['summary'] == (
|
||||
"A vulnerability was discovered in modern microprocessors supported by the kernel,"
|
||||
" whereby an unprivileged attacker can use this flaw to bypass restrictions to gain read"
|
||||
" access to privileged memory.\nThe issue was reported as [CVE-2017-5715 / Spectre]"
|
||||
"(https://access.redhat.com/security/cve/CVE-2017-5715).\n"
|
||||
)
|
||||
@@ -11,3 +11,4 @@ from awx.main.utils.encryption import ( # noqa
|
||||
decrypt_value,
|
||||
encrypt_dict,
|
||||
)
|
||||
from awx.main.utils.licensing import get_licenser # noqa
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.