Mirror of https://github.com/ansible/awx.git (synced 2026-02-04 19:18:13 -03:30)

Compare commits: 452 commits
CHANGELOG.md (new file, 147 lines)

@@ -0,0 +1,147 @@
# Changelog

This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

## 9.2.0 (Feb 12, 2020)

- Added the ability to configure the convergence behavior of workflow nodes (see the API sketch after this list) https://github.com/ansible/awx/issues/3054
- AWX now allows a configurable global limit on fork count (per job run). The default maximum is 200. https://github.com/ansible/awx/pull/5604
- Added the ability to specify AZURE_PUBLIC_CLOUD (e.g., for Azure Government KeyVault support) for the Azure credential plugin https://github.com/ansible/awx/issues/5138
- Added support for several additional parameters for Satellite dynamic inventory https://github.com/ansible/awx/pull/5598
- Added a new field to jobs for tracking the date/time a job is canceled https://github.com/ansible/awx/pull/5610
- Made a series of additional optimizations to the callback receiver to further improve stdout write speed for running playbooks https://github.com/ansible/awx/pull/5677 https://github.com/ansible/awx/pull/5739
- Updated AWX to be compatible with Helm 3.x (https://github.com/ansible/awx/pull/5776)
- Optimized AWX's job dependency/scheduling code to drastically improve processing time when many pending jobs are scheduled simultaneously https://github.com/ansible/awx/issues/5154
- Fixed a bug which could cause SCM authentication details (basic auth passwords) to be reported to external loggers in certain failure scenarios (e.g., when a git clone fails and Ansible itself prints an error message to stdout) https://github.com/ansible/awx/pull/5812
- Fixed a k8s installer bug that caused installs to fail in certain situations https://github.com/ansible/awx/issues/5574
- Fixed a number of issues that caused analytics gathering and reporting to run more often than necessary https://github.com/ansible/awx/pull/5721
- Fixed a bug in the AWX CLI that prevented JSON-type settings from saving properly https://github.com/ansible/awx/issues/5528
- Improved support for fetching custom virtualenv dependencies when AWX is installed behind a proxy https://github.com/ansible/awx/pull/5805
- Updated the bundled version of openstacksdk to address a known issue https://github.com/ansible/awx/issues/5821
- Updated the bundled vmware_inventory plugin to the latest version to address a bug https://github.com/ansible/awx/pull/5668
- Fixed a bug that could cause inventory updates to fail to properly save their output when run within a workflow https://github.com/ansible/awx/pull/5666
- Removed a number of pre-computed fields from the Host and Group models to improve AWX performance. As part of this change, inventory group UIs throughout the interface no longer display status icons https://github.com/ansible/awx/pull/5448
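The convergence setting in the first entry above is exposed on workflow nodes as the `all_parents_must_converge` field (visible in the serializer changes later on this page). A minimal sketch of flipping it over the REST API; the host, credentials, and node id are placeholders, not values taken from this changelog:

```python
# Minimal sketch: enable "all parents must converge" on one workflow node.
# Assumptions: an AWX 9.2.0+ server, a valid user, and an existing
# workflow job template node with id NODE_ID (all placeholders below).
import requests

AWX_HOST = "https://awx.example.com"  # placeholder
NODE_ID = 42                          # placeholder node id

resp = requests.patch(
    f"{AWX_HOST}/api/v2/workflow_job_template_nodes/{NODE_ID}/",
    auth=("admin", "password"),       # placeholder credentials
    json={"all_parents_must_converge": True},
)
resp.raise_for_status()
print(resp.json()["all_parents_must_converge"])  # -> True
```
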
## 9.1.1 (Jan 14, 2020)

- Fixed a bug that caused database migrations on Kubernetes installs to hang https://github.com/ansible/awx/pull/5579
- Upgraded Python-level app dependencies in the AWX virtual environment https://github.com/ansible/awx/pull/5407
- Running jobs no longer block associated inventory updates https://github.com/ansible/awx/pull/5519
- Fixed an invalid_response SAML error https://github.com/ansible/awx/pull/5577
- Optimized the callback receiver to drastically improve the write speed of stdout for parallel jobs (https://github.com/ansible/awx/pull/5618)

## 9.1.0 (Dec 17, 2019)

- Added a command to generate a new SECRET_KEY and rekey the secrets in the database
- Removed project update locking when jobs using it are running
- Fixed slow queries for /api/v2/instances and /api/v2/instance_groups when smart inventories are used
- Fixed a partial password disclosure when special characters existed in the RabbitMQ password (CVE-2019-19342)
- Fixed a hang in error handling for source control checkouts
- Fixed an error on subsequent job runs that override the branch of a project on an instance that did not have a prior project checkout
- Fixed an issue where jobs launched in isolated or container groups would incorrectly time out
- Fixed an incorrect link to the instance groups documentation in the user interface
- Fixed editing of inventory on Workflow templates
- Fixed multiple issues with OAuth2 token cleanup system jobs
- Fixed a bug that broke email notifications for workflow approval/deny https://github.com/ansible/awx/issues/5401
- Updated the SAML implementation to automatically log in if authorization already exists
- Updated AngularJS to 1.7.9 for CVE-2019-10768

## 9.0.1 (Nov 4, 2019)

- Fixed a bug in the installer that broke certain types of k8s installs https://github.com/ansible/awx/issues/5205

## 9.0.0 (Oct 31, 2019)

- Updated AWX images to use centos:8 as the parent image.
- Updated to ansible-runner 1.4.4 to address various bugs.
- Added oc and kubectl to the AWX images to support the new container-based execution introduced in 8.0.0.
- Added some optimizations to speed up the deletion of large Inventory Groups.
- Fixed a bug that broke webhook launches for Job Templates that define a survey (https://github.com/ansible/awx/issues/5062).
- Fixed a bug in the CLI which incorrectly parsed launch-time arguments for `awx job_templates launch` and `awx workflow_job_templates launch` (https://github.com/ansible/awx/issues/5093).
- Fixed a bug that caused inventory updates "sourced from a project" to stop working (https://github.com/ansible/awx/issues/4750).
- Fixed a bug that caused Slack notifications to sometimes show the wrong bot avatar (https://github.com/ansible/awx/pull/5125).
- Fixed a bug that prevented the use of digits in Tower's URL settings (https://github.com/ansible/awx/issues/5081).

## 8.0.0 (Oct 21, 2019)

- The Ansible Tower Ansible modules have been migrated to a new official Ansible AWX collection: https://galaxy.ansible.com/awx/AWX
  Please note that this functionality is only supported in Ansible 2.9+
- AWX now supports the ability to launch jobs from external webhooks (GitHub and GitLab integration are supported).
- AWX now supports Container Groups, a new feature that allows you to schedule and run playbooks on single-use Kubernetes pods on demand.
- AWX now supports sending notifications when Workflow steps are approved, denied, or time out.
- AWX now records the user who approved or denied Workflow steps.
- AWX now supports fetching Ansible Collections from private Galaxy servers.
- AWX now checks the user's ansible.cfg for paths where roles/collections may live when running project updates.
- AWX now uses PostgreSQL 10 by default.
- AWX now warns more loudly about underlying AMQP connectivity issues (https://github.com/ansible/awx/pull/4857).
- Added a few optimizations to drastically improve dashboard performance for larger AWX installs (installs with several hundred thousand jobs or more).
- Updated to the latest version of Ansible's VMware inventory script (which adds support for vmware_guest_facts).
- Deprecated /api/v2/inventory_scripts/ (this endpoint - and the Custom Inventory Script feature - will be removed in a future release of AWX).
- Fixed a bug which prevented Organization Admins from removing users from their own Organization (https://github.com/ansible/awx/issues/2979)
- Fixed a bug which sometimes caused cluster nodes to fail to re-join with a cryptic error, "No instance found with the current cluster host id" (https://github.com/ansible/awx/issues/4294)
- Fixed a bug that prevented the use of launch-time passphrases when using credential plugins (https://github.com/ansible/awx/pull/4807)
- Fixed a bug that caused notifications assigned at the Organization level not to take effect for Workflows in that Organization (https://github.com/ansible/awx/issues/4712)
- Fixed a bug which caused a notable amount of CPU overhead on RabbitMQ health checks (https://github.com/ansible/awx/pull/5009)
- Fixed a bug which sometimes caused the <return> key to stop functioning in <textarea> elements (https://github.com/ansible/awx/issues/4192)
- Fixed a bug which caused request contention when the same OAuth2.0 token was used in multiple simultaneous requests (https://github.com/ansible/awx/issues/4694)
- Fixed a bug related to parsing multiple-choice survey options (https://github.com/ansible/awx/issues/4452).
- Fixed a bug that caused single-sign-on icons on the login page to fail to render in certain Windows browsers (https://github.com/ansible/awx/issues/3924)
- Fixed a number of bugs that caused certain OAuth2 settings, such as REFRESH_TOKEN_EXPIRE_SECONDS, to not be properly respected.
- Fixed a number of bugs in the AWX CLI, including one which sometimes caused long lines of stdout output to be unexpectedly truncated.
- Fixed a number of bugs on the job details UI which sometimes caused auto-scrolling stdout to become stuck.
- Fixed a bug which caused LDAP authentication to fail if the TLD of the server URL contained digits (https://github.com/ansible/awx/issues/3646)
- Fixed a bug which broke HashiCorp Vault integration on older versions of HashiCorp Vault.

## 7.0.0 (Sept 4, 2019)

- AWX now detects and installs Ansible Collections defined in your project (note: this feature only works in Ansible 2.9+) (https://github.com/ansible/awx/issues/2534)
- AWX now includes an official command line client. Keep an eye out for a follow-up email on this mailing list for information on how to install it and try it out.
- Added the ability to provide a specific SCM branch on jobs (https://github.com/ansible/awx/issues/282)
- Added support for Workflow Approval Nodes, a new feature which allows you to add "pause and wait for approval" steps into your workflows (https://github.com/ansible/awx/issues/1206)
- Added the ability to specify a specific HTTP method for webhook notifications (POST vs PUT) (https://github.com/ansible/awx/pull/4124)
- Added the ability to specify a username and password for HTTP Basic Authorization for webhook notifications (https://github.com/ansible/awx/pull/4124)
- Added support for customizing the text content of notifications (https://github.com/ansible/awx/issues/79)
- Added the ability to enable and disable hosts in dynamic inventory (https://github.com/ansible/awx/pull/4420)
- Added the description (if any) to the Job Template list (https://github.com/ansible/awx/issues/4359)
- Added new metrics for instance hostnames and pending jobs to the /api/v2/metrics/ endpoint (https://github.com/ansible/awx/pull/4375)
- Changed AWX's on/off toggle buttons to a non-text-based style to simplify internationalization (https://github.com/ansible/awx/pull/4425)
- Events emitted by Ansible for ad hoc commands are now sent to the external log aggregator (https://github.com/ansible/awx/issues/4545)
- Fixed a bug which allowed a user to create an organization credential in another organization without permissions to that organization (https://github.com/ansible/awx/pull/4483)
- Fixed a bug that caused `extra_vars` on workflows to break when edited (https://github.com/ansible/awx/issues/4293)
- Fixed a slow SQL query that caused performance issues when large numbers of groups exist (https://github.com/ansible/awx/issues/4461)
- Fixed a few minor bugs in survey field validation (https://github.com/ansible/awx/pull/4509) (https://github.com/ansible/awx/pull/4479)
- Fixed a bug that sometimes resulted in orphaned `ansible_runner_pi` directories in `/tmp` after playbook execution (https://github.com/ansible/awx/pull/4409)
- Fixed a bug that caused the `is_system_auditor` flag in LDAP configuration to not work (https://github.com/ansible/awx/pull/4396)
- Fixed a bug which caused schedules to disappear from the UI when toggled off (https://github.com/ansible/awx/pull/4378)
- Fixed a bug that sometimes caused stdout content to contain extraneous blank lines in newer versions of Ansible (https://github.com/ansible/awx/pull/4391)
- Updated to the latest Django security release, 2.2.4 (https://github.com/ansible/awx/pull/4410) (https://www.djangoproject.com/weblog/2019/aug/01/security-releases/)
- Updated the default version of git to one that includes support for x509 certificates (https://github.com/ansible/awx/issues/4362)
- Removed the deprecated `credential` field from `/api/v2/workflow_job_templates/N/` (as part of the `/api/v1/` removal in prior AWX versions - https://github.com/ansible/awx/pull/4490).

## 6.1.0 (Jul 18, 2019)

- Updated AWX to use Django 2.2.2.
- Updated the provided openstacksdk version to support new functionality (such as Nova scheduler_hints)
- Added the ability to specify a custom cacert for the HashiCorp Vault credential plugin
- Fixed a number of bugs related to path lookups for the HashiCorp Vault credential plugin
- Fixed a bug which prevented signed SSH certificates from working, including the HashiCorp Vault Signed SSH backend
- Fixed a bug which prevented custom logos from displaying on the login page (a result of the new Content Security Policy in 6.0.0)
- Fixed a bug which broke websocket connectivity in Apple Safari (a result of the new Content Security Policy in 6.0.0)
- Fixed a bug on the job output page that occasionally caused the "up" and "down" buttons to not load additional output
- Fixed a bug on the job output page that caused quoted task names to display incorrectly

## 6.0.0 (Jul 1, 2019)

- Removed support for "Any" notification templates and their API endpoints, e.g., /api/v2/job_templates/N/notification_templates/any/ (https://github.com/ansible/awx/issues/4022)
- Fixed a bug which prevented credentials from properly being applied to inventory sources (https://github.com/ansible/awx/issues/4059)
- Fixed a bug which could cause the task dispatcher to hang indefinitely when external logging support (e.g., Splunk, Logstash) is enabled (https://github.com/ansible/awx/issues/4181)
- Fixed a bug which caused slow stdout display when running jobs against smart inventories (https://github.com/ansible/awx/issues/3106)
- Fixed a bug that caused SSL verification flags to fail to be respected for LDAP authentication in certain environments (https://github.com/ansible/awx/pull/4190)
- Added a simple Content Security Policy (https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP) to restrict access to third-party resources in the browser (https://github.com/ansible/awx/pull/4167)
- Updated ovirt4 library dependencies to work with newer versions of oVirt (https://github.com/ansible/awx/issues/4138)

## 5.0.0 (Jun 21, 2019)

- Bumped Django Rest Framework from 3.7.7 to 3.9.4
- Bumped setuptools/pip dependencies
- Fixed a bug where the Recent Notification list would not appear
- Added notifications on job start
- Default to Ansible 2.8
@@ -2,96 +2,8 @@

## Introduction

Upgrades using Django migrations are not expected to work in AWX. As a result, to upgrade to a new version, it is necessary to export resources from the old AWX node and import them into a freshly installed node running the new version. The recommended way to do this is the tower-cli send/receive feature.
Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.

This tool does __not__ support export/import of the following:
* Logs/history
* Credential passwords
* LDAP/AWX config

Users who wish to upgrade modern AWX installations should follow the instructions at:

### Install & Configure Tower-CLI

In a terminal, pip install tower-cli (if you do not have pip already, install it [here](https://pip.pypa.io/en/stable/installing/)):

```
$ pip install --upgrade ansible-tower-cli
```

The AWX host URL, user, and password must be set for the AWX instance to be exported:

```
$ tower-cli config host http://<old-awx-host.example.com>
$ tower-cli config username <user>
$ tower-cli config password <pass>
```

For more information on installing tower-cli, look [here](http://tower-cli.readthedocs.io/en/latest/quickstart.html).


### Export Resources

Export all objects:

```$ tower-cli receive --all > assets.json```


### Teardown Old AWX

Clean up remnants of the old AWX install:

```docker rm -f $(docker ps -aq)``` # remove all old awx containers

```make clean-ui``` # clean up ui artifacts


### Install New AWX version

If you are installing AWX as a dev container, pull down the latest code (or the version you want) from GitHub, build the image locally, then start the container:

```
git pull                  # retrieve latest AWX changes from repository
make docker-compose-build # build AWX image
make docker-compose       # run container
```

For other install methods, refer to [INSTALL.md](https://github.com/ansible/awx/blob/devel/INSTALL.md).


### Import Resources

Configure tower-cli for your new AWX host as shown earlier, then import from the JSON file named assets.json:

```
$ tower-cli config host http://<new-awx-host.example.com>
$ tower-cli config username <user>
$ tower-cli config password <pass>
$ tower-cli send assets.json
```

--------------------------------------------------------------------------------

## Additional Info

If you have two running AWX hosts, it is possible to copy all assets from one instance to another:

```$ tower-cli receive --tower-host old-awx-host.example.com --all | tower-cli send --tower-host new-awx-host.example.com```


#### More Granular Exports:

Export all credentials:

```$ tower-cli receive --credential all > credentials.json```
> Note: This exports the credentials with blank strings for passwords and secrets

Export a credential named "My Credential":

```$ tower-cli receive --credential "My Credential"```


#### More Granular Imports:

You can import anything except an organization defined in a JSON file named assets.json:

```$ tower-cli send --prevent organization assets.json```

https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions
Makefile (51 lines changed)

@@ -26,6 +26,9 @@ DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
# Python packages to install only from source (not from binary wheels)
# Comma separated list
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
# These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements
VENV_BOOTSTRAP ?= pip==19.3.1 setuptools==41.6.0

# Determine appropriate shasum command
UNAME_S := $(shell uname -s)

@@ -119,7 +122,7 @@ clean-api:
    rm -rf awx/projects

clean-awxkit:
    rm -rf awxkit/*.egg-info awxkit/.tox
    rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*

# convenience target to assert environment variables are defined
guard-%:

@@ -130,16 +133,16 @@ guard-%:

virtualenv: virtualenv_ansible virtualenv_awx

# virtualenv_* targets do not use --system-site-packages to prevent bugs installing packages
# but Ansible venvs are expected to have this, so that must be done after venv creation
virtualenv_ansible:
    if [ "$(VENV_BASE)" ]; then \
        if [ ! -d "$(VENV_BASE)" ]; then \
            mkdir $(VENV_BASE); \
        fi; \
        if [ ! -d "$(VENV_BASE)/ansible" ]; then \
            virtualenv -p python --system-site-packages $(VENV_BASE)/ansible && \
            $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed six packaging appdirs && \
            $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed setuptools==36.0.1 && \
            $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed pip==9.0.1; \
            virtualenv -p python $(VENV_BASE)/ansible && \
            $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP); \
        fi; \
    fi

@@ -149,36 +152,46 @@ virtualenv_ansible_py3:
            mkdir $(VENV_BASE); \
        fi; \
        if [ ! -d "$(VENV_BASE)/ansible" ]; then \
            $(PYTHON) -m venv --system-site-packages $(VENV_BASE)/ansible; \
            virtualenv -p $(PYTHON) $(VENV_BASE)/ansible; \
            $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP); \
        fi; \
    fi

# flit is needed for offline install of certain packages, specifically ptyprocess
# it is needed for setup, but not always recognized as a setup dependency
# similar to pip, setuptools, and wheel, these are all needed here as a bootstrapping issues
virtualenv_awx:
    if [ "$(VENV_BASE)" ]; then \
        if [ ! -d "$(VENV_BASE)" ]; then \
            mkdir $(VENV_BASE); \
        fi; \
        if [ ! -d "$(VENV_BASE)/awx" ]; then \
            $(PYTHON) -m venv --system-site-packages $(VENV_BASE)/awx; \
            $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --ignore-installed docutils==0.14; \
            virtualenv -p $(PYTHON) $(VENV_BASE)/awx; \
            $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP) && \
            $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) flit; \
        fi; \
    fi

# --ignore-install flag is not used because *.txt files should specify exact versions
requirements_ansible: virtualenv_ansible
    if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) -r /dev/stdin ; \
    else \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
    fi
    $(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt
    # Same effect as using --system-site-packages flag on venv creation
    rm $(shell ls -d $(VENV_BASE)/ansible/lib/python* | head -n 1)/no-global-site-packages.txt

requirements_ansible_py3: virtualenv_ansible_py3
    if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) -r /dev/stdin ; \
    else \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
    fi
    $(VENV_BASE)/ansible/bin/pip3 uninstall --yes -r requirements/requirements_ansible_uninstall.txt
    # Same effect as using --system-site-packages flag on venv creation
    rm $(shell ls -d $(VENV_BASE)/ansible/lib/python* | head -n 1)/no-global-site-packages.txt

requirements_ansible_dev:
    if [ "$(VENV_BASE)" ]; then \

@@ -186,13 +199,13 @@ requirements_ansible_dev:
    fi

# Install third-party requirements needed for AWX's environment.
# this does not use system site packages intentionally
requirements_awx: virtualenv_awx
    if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
        cat requirements/requirements.txt requirements/requirements_local.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \
        cat requirements/requirements.txt requirements/requirements_local.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) -r /dev/stdin ; \
    else \
        cat requirements/requirements.txt requirements/requirements_git.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
        cat requirements/requirements.txt requirements/requirements_git.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
    fi
    echo "include-system-site-packages = true" >> $(VENV_BASE)/awx/lib/python$(PYTHON_VERSION)/pyvenv.cfg
    $(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt

requirements_awx_dev:

@@ -395,7 +408,7 @@ test_collection:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    PYTHONPATH=$(COLLECTION_VENV):/awx_devel/awx_collection:$PYTHONPATH py.test $(COLLECTION_TEST_DIRS)
    PYTHONPATH=$(COLLECTION_VENV):/awx_devel/awx_collection:$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)

flake8_collection:
    flake8 awx_collection/  # Different settings, in main exclude list

@@ -411,7 +424,11 @@ test_collection_sanity:

build_collection:
    ansible-playbook -i localhost, awx_collection/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION)
    ansible-galaxy collection build awx_collection --output-path=awx_collection
    ansible-galaxy collection build awx_collection --force --output-path=awx_collection

install_collection: build_collection
    rm -rf ~/.ansible/collections/ansible_collections/awx/awx
    ansible-galaxy collection install awx_collection/awx-awx-$(VERSION).tar.gz

test_unit:
    @if [ "$(VENV_BASE)" ]; then \
@@ -24,31 +24,18 @@ except ImportError: # pragma: no cover
import hashlib

try:
    import django
    from django.db.backends.base import schema
    from django.db.backends.utils import names_digest
    import django  # noqa: F401
    HAS_DJANGO = True
except ImportError:
    HAS_DJANGO = False
else:
    from django.db.backends.base import schema
    from django.db.backends.utils import names_digest


if HAS_DJANGO is True:
    # This line exists to make sure we don't regress on FIPS support if we
    # upgrade Django; if you're upgrading Django and see this error,
    # update the version check below, and confirm that FIPS still works.
    # If operating in a FIPS environment, `hashlib.md5()` will raise a `ValueError`,
    # but will support the `usedforsecurity` keyword on RHEL and Centos systems.

    # Keep an eye on https://code.djangoproject.com/ticket/28401
    target_version = '2.2.4'
    if django.__version__ != target_version:
        raise RuntimeError(
            "Django version other than {target} detected: {current}. "
            "Overriding `names_digest` is known to work for Django {target} "
            "and may not work in other Django versions.".format(target=target_version,
                                                                current=django.__version__)
        )

    # See upgrade blocker note in requirements/README.md
    try:
        names_digest('foo', 'bar', 'baz', length=8)
    except ValueError:
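For context, here is a sketch of the FIPS-safe fallback the comments above describe: on FIPS-enabled hosts a bare `hashlib.md5()` raises `ValueError`, while RHEL/CentOS builds of Python accept `usedforsecurity=False` for non-cryptographic uses, so Django's `names_digest` helper can be swapped for a variant that passes that flag. This is a reconstruction from the comments in the hunk, not the verbatim patch body:

```python
# Sketch of the fallback described above (reconstructed, not verbatim).
import hashlib
from django.db.backends.base import schema
from django.db.backends.utils import names_digest

try:
    names_digest('foo', 'bar', 'baz', length=8)  # probe: fails under FIPS
except ValueError:
    def names_digest(*args, length):
        # usedforsecurity=False marks this as a non-cryptographic use,
        # which RHEL/CentOS Python builds permit even in FIPS mode.
        h = hashlib.md5(usedforsecurity=False)
        for arg in args:
            h.update(arg.encode())
        return h.hexdigest()[:length]
    schema.names_digest = names_digest  # Django's schema module uses this name
```
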
@@ -67,6 +67,7 @@ register(
    field_class=fields.CharField,
    allow_blank=True,
    required=False,
    default='',
    label=_('Login redirect override URL'),
    help_text=_('URL to which unauthorized users will be redirected to log in. '
                'If blank, users will be sent to the Tower login page.'),
@@ -9,7 +9,7 @@ from functools import reduce

# Django
from django.core.exceptions import FieldError, ValidationError
from django.db import models
from django.db.models import Q
from django.db.models import Q, CharField, IntegerField, BooleanField
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
from django.contrib.contenttypes.models import ContentType

@@ -63,19 +63,19 @@ class TypeFilterBackend(BaseFilterBackend):
            raise ParseError(*e.args)


def get_field_from_path(model, path):
def get_fields_from_path(model, path):
    '''
    Given a Django ORM lookup path (possibly over multiple models)
    Returns the last field in the line, and also the revised lookup path
    Returns the fields in the line, and also the revised lookup path
    ex., given
        model=Organization
        path='project__timeout'
    returns tuple of field at the end of the line as well as a corrected
    path, for special cases we do substitutions
        (<IntegerField for timeout>, 'project__timeout')
    returns tuple of fields traversed as well and a corrected path,
    for special cases we do substitutions
        ([<IntegerField for timeout>], 'project__timeout')
    '''
    # Store of all the fields used to detect repeats
    field_set = set([])
    field_list = []
    new_parts = []
    for name in path.split('__'):
        if model is None:

@@ -111,13 +111,24 @@ def get_field_from_path(model, path):
            raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
        elif getattr(field, '__prevent_search__', False):
            raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
        if field in field_set:
        if field in field_list:
            # Field traversed twice, could create infinite JOINs, DoSing Tower
            raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name))
        field_set.add(field)
        field_list.append(field)
        model = getattr(field, 'related_model', None)

    return field, '__'.join(new_parts)
    return field_list, '__'.join(new_parts)


def get_field_from_path(model, path):
    '''
    Given a Django ORM lookup path (possibly over multiple models)
    Returns the last field in the line, and the revised lookup path
    ex.
        (<IntegerField for timeout>, 'project__timeout')
    '''
    field_list, new_path = get_fields_from_path(model, path)
    return (field_list[-1], new_path)


class FieldLookupBackend(BaseFilterBackend):

@@ -133,7 +144,11 @@ class FieldLookupBackend(BaseFilterBackend):
        'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'in',
        'isnull', 'search')

    def get_field_from_lookup(self, model, lookup):
    # A list of fields that we know can be filtered on without the possiblity
    # of introducing duplicates
    NO_DUPLICATES_WHITELIST = (CharField, IntegerField, BooleanField)

    def get_fields_from_lookup(self, model, lookup):

        if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
            path, suffix = lookup.rsplit('__', 1)

@@ -147,11 +162,16 @@ class FieldLookupBackend(BaseFilterBackend):
            # FIXME: Could build up a list of models used across relationships, use
            # those lookups combined with request.user.get_queryset(Model) to make
            # sure user cannot query using objects he could not view.
            field, new_path = get_field_from_path(model, path)
            field_list, new_path = get_fields_from_path(model, path)

            new_lookup = new_path
            new_lookup = '__'.join([new_path, suffix])
        return field, new_lookup
        return field_list, new_lookup

    def get_field_from_lookup(self, model, lookup):
        '''Method to match return type of single field, if needed.'''
        field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
        return (field_list[-1], new_lookup)

    def to_python_related(self, value):
        value = force_text(value)

@@ -182,7 +202,10 @@ class FieldLookupBackend(BaseFilterBackend):
        except UnicodeEncodeError:
            raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup)

        field, new_lookup = self.get_field_from_lookup(model, lookup)
        field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
        field = field_list[-1]

        needs_distinct = (not all(isinstance(f, self.NO_DUPLICATES_WHITELIST) for f in field_list))

        # Type names are stored without underscores internally, but are presented and
        # and serialized over the API containing underscores so we remove `_`

@@ -211,10 +234,10 @@ class FieldLookupBackend(BaseFilterBackend):
            for rm_field in related_model._meta.fields:
                if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
                    new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
            return value, new_lookups
            return value, new_lookups, needs_distinct
        else:
            value = self.value_to_python_for_field(field, value)
        return value, new_lookup
        return value, new_lookup, needs_distinct

    def filter_queryset(self, request, queryset, view):
        try:

@@ -225,6 +248,7 @@ class FieldLookupBackend(BaseFilterBackend):
            chain_filters = []
            role_filters = []
            search_filters = {}
            needs_distinct = False
            # Can only have two values: 'AND', 'OR'
            # If 'AND' is used, an iterm must satisfy all condition to show up in the results.
            # If 'OR' is used, an item just need to satisfy one condition to appear in results.

@@ -256,9 +280,12 @@ class FieldLookupBackend(BaseFilterBackend):
                        search_filter_relation = 'AND'
                        values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
                    for value in values:
                        search_value, new_keys = self.value_to_python(queryset.model, key, force_text(value))
                        search_value, new_keys, _ = self.value_to_python(queryset.model, key, force_text(value))
                        assert isinstance(new_keys, list)
                        search_filters[search_value] = new_keys
                    # by definition, search *only* joins across relations,
                    # so it _always_ needs a .distinct()
                    needs_distinct = True
                    continue

                # Custom chain__ and or__ filters, mutually exclusive (both can

@@ -282,7 +309,9 @@ class FieldLookupBackend(BaseFilterBackend):
                for value in values:
                    if q_int:
                        value = int(value)
                    value, new_key = self.value_to_python(queryset.model, key, value)
                    value, new_key, distinct = self.value_to_python(queryset.model, key, value)
                    if distinct:
                        needs_distinct = True
                    if q_chain:
                        chain_filters.append((q_not, new_key, value))
                    elif q_or:

@@ -332,7 +361,9 @@ class FieldLookupBackend(BaseFilterBackend):
                else:
                    q = Q(**{k:v})
                queryset = queryset.filter(q)
            queryset = queryset.filter(*args).distinct()
            queryset = queryset.filter(*args)
            if needs_distinct:
                queryset = queryset.distinct()
            return queryset
        except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
            raise ParseError(e.args[0])
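The `needs_distinct` bookkeeping above exists because a filter that traverses a to-many relation becomes a SQL JOIN, and a JOIN can return the same parent row once per matching child; `.distinct()` collapses those duplicates but is costly, so it is now applied only when the traversed fields fall outside the no-duplicates whitelist. A self-contained illustration of the underlying SQL behavior (plain sqlite3, not AWX code; table and column names are invented for the example):

```python
# Why joining across a to-many relation duplicates rows, and why
# DISTINCT fixes it. Schema here is a toy stand-in, not AWX's.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
CREATE TABLE inventory (id INTEGER PRIMARY KEY, name TEXT);
CREATE TABLE host (id INTEGER PRIMARY KEY, inventory_id INTEGER, name TEXT);
INSERT INTO inventory VALUES (1, 'prod');
INSERT INTO host VALUES (1, 1, 'web1'), (2, 1, 'web2');
""")

# Filtering inventories "by host name" joins across the relation,
# so the single inventory row comes back once per matching host.
dup = conn.execute("""
    SELECT inventory.name FROM inventory
    JOIN host ON host.inventory_id = inventory.id
    WHERE host.name LIKE 'web%'
""").fetchall()
print(dup)   # [('prod',), ('prod',)]

# DISTINCT collapses the duplicates, which is what .distinct() adds.
uniq = conn.execute("""
    SELECT DISTINCT inventory.name FROM inventory
    JOIN host ON host.inventory_id = inventory.id
    WHERE host.name LIKE 'web%'
""").fetchall()
print(uniq)  # [('prod',)]
```
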
@@ -98,26 +98,19 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'total_hosts',
|
||||
'hosts_with_active_failures',
|
||||
'total_groups',
|
||||
'groups_with_active_failures',
|
||||
'has_inventory_sources',
|
||||
'total_inventory_sources',
|
||||
'inventory_sources_with_failures',
|
||||
'organization_id',
|
||||
'kind',
|
||||
'insights_credential_id',),
|
||||
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
|
||||
'has_inventory_sources'),
|
||||
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
|
||||
'total_hosts',
|
||||
'hosts_with_active_failures',
|
||||
'total_groups',
|
||||
'groups_with_active_failures',
|
||||
'has_inventory_sources'),
|
||||
'host': DEFAULT_SUMMARY_FIELDS,
|
||||
'group': DEFAULT_SUMMARY_FIELDS,
|
||||
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
|
||||
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
|
||||
'job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job': DEFAULT_SUMMARY_FIELDS,
|
||||
@@ -125,7 +118,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'workflow_approval': DEFAULT_SUMMARY_FIELDS + ('timeout',),
|
||||
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
|
||||
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
|
||||
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
|
||||
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error', 'canceled_on'),
|
||||
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
|
||||
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
|
||||
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
|
||||
@@ -139,8 +132,9 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'insights_credential': DEFAULT_SUMMARY_FIELDS,
|
||||
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'webhook_credential': DEFAULT_SUMMARY_FIELDS,
|
||||
'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'),
|
||||
'credential_type': DEFAULT_SUMMARY_FIELDS,
|
||||
}
|
||||
|
||||
|
||||
@@ -718,7 +712,7 @@ class UnifiedJobSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = UnifiedJob
|
||||
fields = ('*', 'unified_job_template', 'launch_type', 'status',
|
||||
'failed', 'started', 'finished', 'elapsed', 'job_args',
|
||||
'failed', 'started', 'finished', 'canceled_on', 'elapsed', 'job_args',
|
||||
'job_cwd', 'job_env', 'job_explanation',
|
||||
'execution_node', 'controller_node',
|
||||
'result_traceback', 'event_processing_finished')
|
||||
@@ -1548,20 +1542,15 @@ class InventorySerializer(BaseSerializerWithVariables):
|
||||
'admin', 'adhoc',
|
||||
{'copy': 'organization.inventory_admin'}
|
||||
]
|
||||
groups_with_active_failures = serializers.IntegerField(
|
||||
read_only=True,
|
||||
min_value=0,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release')
|
||||
)
|
||||
|
||||
|
||||
class Meta:
|
||||
model = Inventory
|
||||
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
|
||||
'total_hosts', 'hosts_with_active_failures', 'total_groups',
|
||||
'groups_with_active_failures', 'has_inventory_sources',
|
||||
'total_inventory_sources', 'inventory_sources_with_failures',
|
||||
'insights_credential', 'pending_deletion',)
|
||||
'has_inventory_sources', 'total_inventory_sources',
|
||||
'inventory_sources_with_failures', 'insights_credential',
|
||||
'pending_deletion',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(InventorySerializer, self).get_related(obj)
|
||||
@@ -1643,6 +1632,9 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
show_capabilities = ['edit', 'delete']
|
||||
capabilities_prefetch = ['inventory.admin']
|
||||
|
||||
has_active_failures = serializers.SerializerMethodField()
|
||||
has_inventory_sources = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = Host
|
||||
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
|
||||
@@ -1756,6 +1748,14 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
ret['last_job_host_summary'] = None
|
||||
return ret
|
||||
|
||||
def get_has_active_failures(self, obj):
|
||||
return bool(
|
||||
obj.last_job_host_summary and obj.last_job_host_summary.failed
|
||||
)
|
||||
|
||||
def get_has_inventory_sources(self, obj):
|
||||
return obj.inventory_sources.exists()
|
||||
|
||||
|
||||
class AnsibleFactsSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
@@ -1768,17 +1768,10 @@ class AnsibleFactsSerializer(BaseSerializer):
|
||||
class GroupSerializer(BaseSerializerWithVariables):
|
||||
show_capabilities = ['copy', 'edit', 'delete']
|
||||
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
|
||||
groups_with_active_failures = serializers.IntegerField(
|
||||
read_only=True,
|
||||
min_value=0,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release')
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Group
|
||||
fields = ('*', 'inventory', 'variables', 'has_active_failures',
|
||||
'total_hosts', 'hosts_with_active_failures', 'total_groups',
|
||||
'groups_with_active_failures', 'has_inventory_sources')
|
||||
fields = ('*', 'inventory', 'variables')
|
||||
|
||||
def build_relational_field(self, field_name, relation_info):
|
||||
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
|
||||
@@ -2822,7 +2815,7 @@ class JobTemplateMixin(object):
|
||||
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
|
||||
optimized_qs = uj_qs.non_polymorphic()
|
||||
return [{
|
||||
'id': x.id, 'status': x.status, 'finished': x.finished,
|
||||
'id': x.id, 'status': x.status, 'finished': x.finished, 'canceled_on': x.canceled_on,
|
||||
# Make type consistent with API top-level key, for instance workflow_job
|
||||
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
|
||||
} for x in optimized_qs[:10]]
|
||||
@@ -3684,7 +3677,7 @@ class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
|
||||
class Meta:
|
||||
model = WorkflowJobTemplateNode
|
||||
fields = ('*', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes', 'all_parents_must_converge',)
|
||||
|
||||
def get_related(self, obj):
|
||||
        res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)

@@ -3723,8 +3716,8 @@ class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
    class Meta:
        model = WorkflowJobNode
        fields = ('*', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
-                 'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
-                 'do_not_run',)
+                 'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
+                 'all_parents_must_converge', 'do_not_run',)

    def get_related(self, obj):
        res = super(WorkflowJobNodeSerializer, self).get_related(obj)

@@ -3832,7 +3825,7 @@ class JobEventSerializer(BaseSerializer):
        model = JobEvent
        fields = ('*', '-name', '-description', 'job', 'event', 'counter',
                  'event_display', 'event_data', 'event_level', 'failed',
-                 'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
+                 'changed', 'uuid', 'parent_uuid', 'host', 'host_name',
                  'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
                  'verbosity')

@@ -3841,13 +3834,9 @@ class JobEventSerializer(BaseSerializer):
        res.update(dict(
            job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
        ))
        if obj.parent_id:
            res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
        res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
        if obj.host_id:
            res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
        if obj.hosts.exists():
            res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
        return res

    def get_summary_fields(self, obj):

@@ -3873,26 +3862,6 @@ class JobEventSerializer(BaseSerializer):
        return data


-class JobEventWebSocketSerializer(JobEventSerializer):
-    created = serializers.SerializerMethodField()
-    modified = serializers.SerializerMethodField()
-    event_name = serializers.CharField(source='event')
-    group_name = serializers.SerializerMethodField()
-
-    class Meta:
-        model = JobEvent
-        fields = ('*', 'event_name', 'group_name',)
-
-    def get_created(self, obj):
-        return obj.created.isoformat()
-
-    def get_modified(self, obj):
-        return obj.modified.isoformat()
-
-    def get_group_name(self, obj):
-        return 'job_events'


class ProjectUpdateEventSerializer(JobEventSerializer):
    stdout = serializers.SerializerMethodField()
    event_data = serializers.SerializerMethodField()

@@ -3924,26 +3893,6 @@ class ProjectUpdateEventSerializer(JobEventSerializer):
        return {}


-class ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):
-    created = serializers.SerializerMethodField()
-    modified = serializers.SerializerMethodField()
-    event_name = serializers.CharField(source='event')
-    group_name = serializers.SerializerMethodField()
-
-    class Meta:
-        model = ProjectUpdateEvent
-        fields = ('*', 'event_name', 'group_name',)
-
-    def get_created(self, obj):
-        return obj.created.isoformat()
-
-    def get_modified(self, obj):
-        return obj.modified.isoformat()
-
-    def get_group_name(self, obj):
-        return 'project_update_events'


class AdHocCommandEventSerializer(BaseSerializer):

    event_display = serializers.CharField(source='get_event_display', read_only=True)

@@ -3975,26 +3924,6 @@ class AdHocCommandEventSerializer(BaseSerializer):
        return data


-class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
-    created = serializers.SerializerMethodField()
-    modified = serializers.SerializerMethodField()
-    event_name = serializers.CharField(source='event')
-    group_name = serializers.SerializerMethodField()
-
-    class Meta:
-        model = AdHocCommandEvent
-        fields = ('*', 'event_name', 'group_name',)
-
-    def get_created(self, obj):
-        return obj.created.isoformat()
-
-    def get_modified(self, obj):
-        return obj.modified.isoformat()
-
-    def get_group_name(self, obj):
-        return 'ad_hoc_command_events'


class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):

    class Meta:

@@ -4010,26 +3939,6 @@ class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
        return res


-class InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):
-    created = serializers.SerializerMethodField()
-    modified = serializers.SerializerMethodField()
-    event_name = serializers.CharField(source='event')
-    group_name = serializers.SerializerMethodField()
-
-    class Meta:
-        model = InventoryUpdateEvent
-        fields = ('*', 'event_name', 'group_name',)
-
-    def get_created(self, obj):
-        return obj.created.isoformat()
-
-    def get_modified(self, obj):
-        return obj.modified.isoformat()
-
-    def get_group_name(self, obj):
-        return 'inventory_update_events'


class SystemJobEventSerializer(AdHocCommandEventSerializer):

    class Meta:

@@ -4045,26 +3954,6 @@ class SystemJobEventSerializer(AdHocCommandEventSerializer):
        return res


-class SystemJobEventWebSocketSerializer(SystemJobEventSerializer):
-    created = serializers.SerializerMethodField()
-    modified = serializers.SerializerMethodField()
-    event_name = serializers.CharField(source='event')
-    group_name = serializers.SerializerMethodField()
-
-    class Meta:
-        model = SystemJobEvent
-        fields = ('*', 'event_name', 'group_name',)
-
-    def get_created(self, obj):
-        return obj.created.isoformat()
-
-    def get_modified(self, obj):
-        return obj.modified.isoformat()
-
-    def get_group_name(self, obj):
-        return 'system_job_events'
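Each of the removal hunks above (26 lines down to 6) deletes one of the five `*EventWebSocketSerializer` classes; the `replay_job_events` command further down in this diff stops importing them and calls `emit_event_detail` instead. A minimal sketch of the replacement calling convention, with the loop body assumed for illustration (`emit_event_detail` is imported from `awx.main.models.events` in a later hunk):

    from awx.main.models.events import emit_event_detail

    def replay(saved_events):
        # each element is a saved JobEvent / ProjectUpdateEvent / ... row;
        # emit_event_detail serializes it and pushes it to the matching
        # websocket group, which is what the deleted serializers used to do
        for event in saved_events:
            emit_event_detail(event)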

class JobLaunchSerializer(BaseSerializer):

    # Representational fields
@@ -81,7 +81,8 @@ from awx.main.utils import (
    getattrd,
    get_pk_from_dict,
    schedule_task_manager,
-   ignore_inventory_computed_fields
+   ignore_inventory_computed_fields,
+   set_environ
)
from awx.main.utils.encryption import encrypt_value
from awx.main.utils.filters import SmartFilter

@@ -204,20 +205,15 @@ class DashboardView(APIView):
                   'failed': ec2_inventory_failed.count()}

        user_groups = get_user_queryset(request.user, models.Group)
        groups_job_failed = (
            models.Group.objects.filter(hosts_with_active_failures__gt=0) | models.Group.objects.filter(groups_with_active_failures__gt=0)
        ).count()
        groups_inventory_failed = models.Group.objects.filter(inventory_sources__last_job_failed=True).count()
        data['groups'] = {'url': reverse('api:group_list', request=request),
                          'failures_url': reverse('api:group_list', request=request) + "?has_active_failures=True",
                          'total': user_groups.count(),
                          'job_failed': groups_job_failed,
                          'inventory_failed': groups_inventory_failed}

        user_hosts = get_user_queryset(request.user, models.Host)
-       user_hosts_failed = user_hosts.filter(has_active_failures=True)
+       user_hosts_failed = user_hosts.filter(last_job_host_summary__failed=True)
        data['hosts'] = {'url': reverse('api:host_list', request=request),
-                        'failures_url': reverse('api:host_list', request=request) + "?has_active_failures=True",
+                        'failures_url': reverse('api:host_list', request=request) + "?last_job_host_summary__failed=True",
                         'total': user_hosts.count(),
                         'failed': user_hosts_failed.count()}

@@ -1611,7 +1607,8 @@ class HostInsights(GenericAPIView):

    def _call_insights_api(self, url, session, headers):
        try:
-           res = session.get(url, headers=headers, timeout=120)
+           with set_environ(**settings.AWX_TASK_ENV):
+               res = session.get(url, headers=headers, timeout=120)
        except requests.exceptions.SSLError:
            raise BadGateway(_('SSLError while trying to connect to {}').format(url))
        except requests.exceptions.Timeout:
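The `set_environ` helper now wrapped around these outbound requests is imported from `awx.main.utils`, but its implementation is not shown in this diff. A hedged sketch of what such a context manager plausibly does (temporarily applying `AWX_TASK_ENV`, e.g. proxy variables, for the duration of the call); the real implementation may differ:

    import contextlib
    import os

    @contextlib.contextmanager
    def set_environ(**environ):
        # temporarily overlay the given variables onto os.environ and
        # restore the original environment on exit, even on error
        old = os.environ.copy()
        try:
            os.environ.update(environ)
            yield
        finally:
            os.environ.clear()
            os.environ.update(old)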
@@ -2150,7 +2147,7 @@ class InventorySourceHostsList(HostRelatedSearchMixin, SubListDestroyAPIView):
                host__inventory_sources=inv_source
            ).delete()
        r = super(InventorySourceHostsList, self).perform_list_destroy(instance_list)
-       update_inventory_computed_fields.delay(inv_source.inventory_id, True)
+       update_inventory_computed_fields.delay(inv_source.inventory_id)
        return r


@@ -2177,7 +2174,7 @@ class InventorySourceGroupsList(SubListDestroyAPIView):
                group__inventory_sources=inv_source
            ).delete()
        r = super(InventorySourceGroupsList, self).perform_list_destroy(instance_list)
-       update_inventory_computed_fields.delay(inv_source.inventory_id, True)
+       update_inventory_computed_fields.delay(inv_source.inventory_id)
        return r


@@ -2549,7 +2546,7 @@ class JobTemplateSurveySpec(GenericAPIView):
            if not isinstance(val, allow_types):
                return Response(dict(error=_("'{field_name}' in survey question {idx} expected to be {type_label}.").format(
                    field_name=field_name, type_label=type_label, **context
-               )))
+               )), status=status.HTTP_400_BAD_REQUEST)
            if survey_item['variable'] in variable_set:
                return Response(dict(error=_("'variable' '%(item)s' duplicated in survey question %(survey)s.") % {
                    'item': survey_item['variable'], 'survey': str(idx)}), status=status.HTTP_400_BAD_REQUEST)

@@ -2564,7 +2561,7 @@ class JobTemplateSurveySpec(GenericAPIView):
                    "'{survey_item[type]}' in survey question {idx} is not one of '{allowed_types}' allowed question types."
                ).format(
                    allowed_types=', '.join(JobTemplateSurveySpec.ALLOWED_TYPES.keys()), **context
-               )))
+               )), status=status.HTTP_400_BAD_REQUEST)
            if 'default' in survey_item and survey_item['default'] != '':
                if not isinstance(survey_item['default'], JobTemplateSurveySpec.ALLOWED_TYPES[qtype]):
                    type_label = 'string'

@@ -2582,7 +2579,7 @@ class JobTemplateSurveySpec(GenericAPIView):
                if survey_item[key] is not None and (not isinstance(survey_item[key], int)):
                    return Response(dict(error=_(
                        "The {min_or_max} limit in survey question {idx} expected to be integer."
-                   ).format(min_or_max=key, **context)))
+                   ).format(min_or_max=key, **context)), status=status.HTTP_400_BAD_REQUEST)
            # if it's a multiselect or multiple choice, it must have choices listed
            # choices and defaults must come in as strings separated by \n characters
            if qtype == 'multiselect' or qtype == 'multiplechoice':

@@ -2592,7 +2589,7 @@ class JobTemplateSurveySpec(GenericAPIView):
                else:
                    return Response(dict(error=_(
                        "Survey question {idx} of type {survey_item[type]} must specify choices.".format(**context)
-                   )))
+                   )), status=status.HTTP_400_BAD_REQUEST)
            # If there is a default string, split it out, removing extra \n characters.
            # Note: there can still be extra newline characters added in the API; these are sanitized out using .strip()
            if 'default' in survey_item:

@@ -2606,11 +2603,11 @@ class JobTemplateSurveySpec(GenericAPIView):
                    if len(list_of_defaults) > 1:
                        return Response(dict(error=_(
                            "Multiple Choice (Single Select) can only have one default value.".format(**context)
-                       )))
+                       )), status=status.HTTP_400_BAD_REQUEST)
                    if any(item not in survey_item['choices'] for item in list_of_defaults):
                        return Response(dict(error=_(
                            "Default choice must be answered from the choices listed.".format(**context)
-                       )))
+                       )), status=status.HTTP_400_BAD_REQUEST)

            # Process encryption substitution
            if ("default" in survey_item and isinstance(survey_item['default'], str) and
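The only change in this run of hunks is appending `status=status.HTTP_400_BAD_REQUEST` to each error response. A DRF `Response` created without an explicit status is returned as 200 OK, so these survey-validation failures previously reached clients as apparent successes:

    from rest_framework import status
    from rest_framework.response import Response

    def survey_error(msg):
        # without status=..., DRF defaults to HTTP 200 even for error payloads
        return Response({'error': msg}, status=status.HTTP_400_BAD_REQUEST)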
@@ -3268,7 +3265,7 @@ class WorkflowJobRelaunch(GenericAPIView):
            jt = obj.job_template
            if not jt:
                raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.'))
-           elif not jt.inventory or min(jt.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
+           elif not obj.inventory or min(obj.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
                raise ParseError(_('Cannot relaunch sliced workflow job after slice count has changed.'))
        new_workflow_job = obj.create_relaunch_workflow_job()
        new_workflow_job.signal_start()

@@ -3819,6 +3816,12 @@ class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
    relationship = 'hosts'
    name = _('Job Event Hosts List')

+   def get_queryset(self):
+       parent_event = self.get_parent_object()
+       self.check_parent_access(parent_event)
+       qs = self.request.user.get_queryset(self.model).filter(job_events_as_primary_host=parent_event)
+       return qs


class BaseJobEventsList(NoTruncateMixin, SubListAPIView):

@@ -3841,8 +3844,7 @@ class HostJobEventsList(BaseJobEventsList):
    def get_queryset(self):
        parent_obj = self.get_parent_object()
        self.check_parent_access(parent_obj)
-       qs = self.request.user.get_queryset(self.model).filter(
-           Q(host=parent_obj) | Q(hosts=parent_obj)).distinct()
+       qs = self.request.user.get_queryset(self.model).filter(host=parent_obj)
        return qs


@@ -3858,9 +3860,7 @@ class JobJobEventsList(BaseJobEventsList):
    def get_queryset(self):
        job = self.get_parent_object()
        self.check_parent_access(job)
-       qs = job.job_events
-       qs = qs.select_related('host')
-       qs = qs.prefetch_related('hosts', 'children')
+       qs = job.job_events.select_related('host').order_by('start_line')
        return qs.all()
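With the `hosts` and `children` relations gone, the job-events queryset collapses to one chain. An illustrative sketch of why `select_related('host')` is kept (names here are real JobEvent fields; the function itself is just for demonstration):

    def iter_job_events(job):
        # select_related('host') JOINs the host row into the same query, so
        # touching each event's host does not issue one extra query per row
        for event in job.job_events.select_related('host').order_by('start_line'):
            yield event.counter, event.host_id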
@@ -20,6 +20,7 @@ from rest_framework import status

+import requests

from awx.api.generics import APIView
from awx.conf.registry import settings_registry
from awx.main.ha import is_ha_environment
from awx.main.utils import (
    get_awx_version,

@@ -37,6 +38,7 @@ from awx.main.models import (
    InstanceGroup,
    JobTemplate,
)
+from awx.main.utils import set_environ

logger = logging.getLogger('awx.api.views.root')

@@ -190,7 +192,8 @@ class ApiV2SubscriptionView(APIView):
            data['rh_password'] = settings.REDHAT_PASSWORD
        try:
            user, pw = data.get('rh_username'), data.get('rh_password')
-           validated = get_licenser().validate_rh(user, pw)
+           with set_environ(**settings.AWX_TASK_ENV):
+               validated = get_licenser().validate_rh(user, pw)
            if user:
                settings.REDHAT_USERNAME = data['rh_username']
            if pw:

@@ -202,10 +205,15 @@ class ApiV2SubscriptionView(APIView):
                getattr(getattr(exc, 'response', None), 'status_code', None) == 401
            ):
                msg = _("The provided credentials are invalid (HTTP 401).")
-           if isinstance(exc, (ValueError, OSError)) and exc.args:
+           elif isinstance(exc, requests.exceptions.ProxyError):
+               msg = _("Unable to connect to proxy server.")
+           elif isinstance(exc, requests.exceptions.ConnectionError):
+               msg = _("Could not connect to subscription service.")
+           elif isinstance(exc, (ValueError, OSError)) and exc.args:
                msg = exc.args[0]
-           logger.exception(smart_text(u"Invalid license submitted."),
-                            extra=dict(actor=request.user.username))
+           else:
+               logger.exception(smart_text(u"Invalid license submitted."),
+                                extra=dict(actor=request.user.username))
            return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)

        return Response(validated)
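One detail worth noting in the new branch ordering: in `requests`, `ProxyError` is a subclass of `ConnectionError`, so the proxy check must come first or it would never be reached. A standalone sketch of the same mapping:

    import requests

    def friendly_error(exc):
        # ProxyError subclasses ConnectionError, so test it first
        if isinstance(exc, requests.exceptions.ProxyError):
            return "Unable to connect to proxy server."
        if isinstance(exc, requests.exceptions.ConnectionError):
            return "Could not connect to subscription service."
        if isinstance(exc, (ValueError, OSError)) and exc.args:
            return exc.args[0]
        return "Invalid license submitted."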
@@ -302,7 +310,8 @@ class ApiV2ConfigView(APIView):
            # If the license is valid, write it to the database.
            if license_data_validated['valid_key']:
                settings.LICENSE = license_data
-               settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
+               if not settings_registry.is_setting_read_only('TOWER_URL_BASE'):
+                   settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
                return Response(license_data_validated)

            logger.warning(smart_text(u"Invalid license submitted."),

@@ -11,7 +11,7 @@ from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.fields import ( # noqa
-   BooleanField, CharField, ChoiceField, DictField, EmailField,
+   BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField,
    IntegerField, ListField, NullBooleanField
)
@@ -1,14 +1,11 @@
# Python
-from collections import namedtuple
import contextlib
import logging
import re
import sys
import threading
import time
-import traceback
import urllib.parse
-from io import StringIO

# Django
from django.conf import LazySettings

@@ -89,42 +86,11 @@ def _ctit_db_wrapper(trans_safe=False):
            transaction.set_rollback(False)
        yield
    except DBError:
-       # We want the _full_ traceback with the context
-       # First we get the current call stack, which constitutes the "top",
-       # it has the context up to the point where the context manager is used
-       top_stack = StringIO()
-       traceback.print_stack(file=top_stack)
-       top_lines = top_stack.getvalue().strip('\n').split('\n')
-       top_stack.close()
-       # Get "bottom" stack from the local error that happened
-       # inside of the "with" block this wraps
-       exc_type, exc_value, exc_traceback = sys.exc_info()
-       bottom_stack = StringIO()
-       traceback.print_tb(exc_traceback, file=bottom_stack)
-       bottom_lines = bottom_stack.getvalue().strip('\n').split('\n')
-       # Glue together top and bottom where overlap is found
-       bottom_cutoff = 0
-       for i, line in enumerate(bottom_lines):
-           if line in top_lines:
-               # start of overlapping section, take overlap from bottom
-               top_lines = top_lines[:top_lines.index(line)]
-               bottom_cutoff = i
-               break
-       bottom_lines = bottom_lines[bottom_cutoff:]
-       tb_lines = top_lines + bottom_lines
-
-       tb_string = '\n'.join(
-           ['Traceback (most recent call last):'] +
-           tb_lines +
-           ['{}: {}'.format(exc_type.__name__, str(exc_value))]
-       )
-       bottom_stack.close()
-       # Log the combined stack
        if trans_safe:
-           if 'check_migrations' not in sys.argv:
-               logger.debug('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
+           if 'migrate' not in sys.argv and 'check_migrations' not in sys.argv:
+               logger.exception('Database settings are not available, using defaults.')
        else:
-           logger.debug('Error modifying something related to database settings.\n{}'.format(tb_string))
+           logger.exception('Error modifying something related to database settings.')
    finally:
        if trans_safe and is_atomic and rollback_set:
            transaction.set_rollback(rollback_set)
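The deleted block hand-assembled a traceback string from `sys.exc_info()`; the replacement leans on the standard library, since `logger.exception` automatically appends the active exception's traceback when called inside an `except` block:

    import logging

    logger = logging.getLogger(__name__)

    try:
        1 / 0
    except ZeroDivisionError:
        # logs at ERROR level and attaches the current traceback; no manual
        # print_stack / print_tb gluing required
        logger.exception('Database settings are not available, using defaults.')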
@@ -136,6 +102,15 @@ def filter_sensitive(registry, key, value):
    return value


+class TransientSetting(object):
+
+   __slots__ = ('pk', 'value')
+
+   def __init__(self, pk, value):
+       self.pk = pk
+       self.value = value


class EncryptedCacheProxy(object):

    def __init__(self, cache, registry, encrypter=None, decrypter=None):

@@ -163,7 +138,6 @@ class EncryptedCacheProxy(object):
    def get(self, key, **kwargs):
        value = self.cache.get(key, **kwargs)
        value = self._handle_encryption(self.decrypter, key, value)
-       logger.debug('cache get(%r, %r) -> %r', key, empty, filter_sensitive(self.registry, key, value))
        return value

    def set(self, key, value, log=True, **kwargs):

@@ -186,8 +160,6 @@ class EncryptedCacheProxy(object):
        self.set(key, value, log=False, **kwargs)

    def _handle_encryption(self, method, key, value):
-       TransientSetting = namedtuple('TransientSetting', ['pk', 'value'])
-
        if value is not empty and self.registry.is_setting_encrypted(key):
            # If the setting exists in the database, we'll use its primary key
            # as part of the AES key when encrypting/decrypting
@@ -307,7 +307,7 @@ class BaseAccess(object):

        return True  # User has access to both, permission check passed

-   def check_license(self, add_host_name=None, feature=None, check_expiration=True):
+   def check_license(self, add_host_name=None, feature=None, check_expiration=True, quiet=False):
        validation_info = get_licenser().validate()
        if validation_info.get('license_type', 'UNLICENSED') == 'open':
            return

@@ -317,8 +317,10 @@ class BaseAccess(object):
            validation_info['time_remaining'] = 99999999
            validation_info['grace_period_remaining'] = 99999999

-       report_violation = lambda message: logger.error(message)
+       if quiet:
+           report_violation = lambda message: None
+       else:
+           report_violation = lambda message: logger.warning(message)
        if (
            validation_info.get('trial', False) is True or
            validation_info['instance_count'] == 10  # basic 10 license

@@ -907,7 +909,7 @@ class HostAccess(BaseAccess):
    model = Host
    select_related = ('created_by', 'modified_by', 'inventory',
                      'last_job__job_template', 'last_job_host_summary__job',)
-   prefetch_related = ('groups',)
+   prefetch_related = ('groups', 'inventory_sources')

    def filtered_queryset(self):
        return self.model.objects.filter(inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role'))
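The new `quiet=True` keyword swaps the violation reporter for a no-op lambda, so callers that only care about the hard-failure behavior can skip the log noise. A hedged usage sketch (an access class is assumed to be constructed with a user, as elsewhere in `awx.main.access`):

    from awx.main.access import BaseAccess

    def quiet_license_check(user):
        # the same limit checks run, but violations are swallowed rather
        # than written to the log
        BaseAccess(user).check_license(quiet=True)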
@@ -2238,7 +2240,7 @@ class JobEventAccess(BaseAccess):
    '''

    model = JobEvent
-   prefetch_related = ('hosts', 'job__job_template', 'host',)
+   prefetch_related = ('job__job_template', 'host',)

    def filtered_queryset(self):
        return self.model.objects.filter(

@@ -52,7 +52,7 @@ def config(since):
        'tower_version': get_awx_version(),
        'ansible_version': get_ansible_version(),
        'license_type': license_info.get('license_type', 'UNLICENSED'),
-       'free_instances': license_info.get('free instances', 0),
+       'free_instances': license_info.get('free_instances', 0),
        'license_expiry': license_info.get('time_remaining', 0),
        'pendo_tracking': settings.PENDO_TRACKING_STATE,
        'authentication_backends': settings.AUTHENTICATION_BACKENDS,

@@ -15,7 +15,7 @@ from awx.conf.license import get_license
from awx.main.models import Job
from awx.main.access import access_registry
from awx.main.models.ha import TowerAnalyticsState
-from awx.main.utils import get_awx_http_client_headers
+from awx.main.utils import get_awx_http_client_headers, set_environ


__all__ = ['register', 'gather', 'ship', 'table_version']

@@ -169,12 +169,13 @@ def ship(path):
        s = requests.Session()
        s.headers = get_awx_http_client_headers()
        s.headers.pop('Content-Type')
-       response = s.post(url,
-                         files=files,
-                         verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
-                         auth=(rh_user, rh_password),
-                         headers=s.headers,
-                         timeout=(31, 31))
+       with set_environ(**settings.AWX_TASK_ENV):
+           response = s.post(url,
+                             files=files,
+                             verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
+                             auth=(rh_user, rh_password),
+                             headers=s.headers,
+                             timeout=(31, 31))
        if response.status_code != 202:
            return logger.exception('Upload failed with status {}, {}'.format(response.status_code,
                                                                               response.text))
@@ -1,17 +1,8 @@
from django.apps import AppConfig
-from django.db.models.signals import pre_migrate
from django.utils.translation import ugettext_lazy as _


-def raise_migration_flag(**kwargs):
-   from awx.main.tasks import set_migration_flag
-   set_migration_flag.delay()


class MainConfig(AppConfig):

    name = 'awx.main'
    verbose_name = _('Main')

-   def ready(self):
-       pre_migrate.connect(raise_migration_flag, sender=self)

@@ -616,6 +616,18 @@ register(
    category_slug='jobs',
)

+register(
+   'MAX_FORKS',
+   field_class=fields.IntegerField,
+   allow_null=False,
+   default=200,
+   label=_('Maximum number of forks per job.'),
+   help_text=_('Saving a Job Template with more than this number of forks will result in an error. '
+               'When set to 0, no limit is applied.'),
+   category=_('Jobs'),
+   category_slug='jobs',
+)
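Where the new `MAX_FORKS` setting is enforced is not part of this diff; a hypothetical sketch of the validation its registered help text implies (function name and error type are illustrative only):

    from django.conf import settings

    def validate_forks(forks):
        # per the help_text above, 0 means "no limit"
        limit = getattr(settings, 'MAX_FORKS', 200)
        if limit and forks > limit:
            raise ValueError(
                'Job Template forks ({}) exceeds MAX_FORKS ({}).'.format(forks, limit))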
register(
    'LOG_AGGREGATOR_HOST',
    field_class=fields.CharField,

@@ -787,6 +799,28 @@ register(
)


+register(
+   'AUTOMATION_ANALYTICS_LAST_GATHER',
+   field_class=fields.DateTimeField,
+   label=_('Last gather date for Automation Analytics.'),
+   allow_null=True,
+   category=_('System'),
+   category_slug='system'
+)
+
+
+register(
+   'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
+   field_class=fields.IntegerField,
+   label=_('Automation Analytics Gather Interval'),
+   help_text=_('Interval (in seconds) between data gathering.'),
+   default=14400,  # every 4 hours
+   min_value=1800,  # every 30 minutes
+   category=_('System'),
+   category_slug='system'
+)


def logging_validate(serializer, attrs):
    if not serializer.instance or \
            not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or \

@@ -811,10 +845,7 @@ def galaxy_validate(serializer, attrs):
    to save settings which obviously break all project updates.
    """
    prefix = 'PRIMARY_GALAXY_'

    from awx.main.constants import GALAXY_SERVER_FIELDS
    if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
        return attrs
    errors = {}

    def _new_value(setting_name):
        if setting_name in attrs:

@@ -823,10 +854,22 @@ def galaxy_validate(serializer, attrs):
            return ''
        return getattr(serializer.instance, setting_name, '')

    if not _new_value('PRIMARY_GALAXY_URL'):
        if _new_value('PUBLIC_GALAXY_ENABLED') is False:
            msg = _('A URL for Primary Galaxy must be defined before disabling public Galaxy.')
            # put error in both keys because UI has trouble with errors in toggles
            for key in ('PRIMARY_GALAXY_URL', 'PUBLIC_GALAXY_ENABLED'):
                errors.setdefault(key, [])
                errors[key].append(msg)
            raise serializers.ValidationError(errors)

    from awx.main.constants import GALAXY_SERVER_FIELDS
    if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
        return attrs

    galaxy_data = {}
    for subfield in GALAXY_SERVER_FIELDS:
        galaxy_data[subfield] = _new_value('{}{}'.format(prefix, subfield.upper()))
    errors = {}
    if not galaxy_data['url']:
        for k, v in galaxy_data.items():
            if v:
@@ -3,6 +3,16 @@ from .plugin import CredentialPlugin
from django.utils.translation import ugettext_lazy as _
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
from azure.common.credentials import ServicePrincipalCredentials
+from msrestazure import azure_cloud
+
+
+# https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py
+clouds = [
+   vars(azure_cloud)[n]
+   for n in dir(azure_cloud)
+   if n.startswith("AZURE_") and n.endswith("_CLOUD")
+]
+default_cloud = vars(azure_cloud)["AZURE_PUBLIC_CLOUD"]


azure_keyvault_inputs = {

@@ -24,6 +34,12 @@ azure_keyvault_inputs = {
        'id': 'tenant',
        'label': _('Tenant ID'),
        'type': 'string'
+   }, {
+       'id': 'cloud_name',
+       'label': _('Cloud Environment'),
+       'help_text': _('Specify which azure cloud environment to use.'),
+       'choices': list(set([default_cloud.name] + [c.name for c in clouds])),
+       'default': default_cloud.name
    }],
    'metadata': [{
        'id': 'secret_field',

@@ -42,6 +58,7 @@ azure_keyvault_inputs = {

def azure_keyvault_backend(**kwargs):
    url = kwargs['url']
+   [cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)]

    def auth_callback(server, resource, scope):
        credentials = ServicePrincipalCredentials(

@@ -49,7 +66,7 @@ def azure_keyvault_backend(**kwargs):
            client_id = kwargs['client'],
            secret = kwargs['secret'],
            tenant = kwargs['tenant'],
-           resource = "https://vault.azure.net",
+           resource = f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}",
        )
        token = credentials.token
        return token['token_type'], token['access_token']
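The rewritten `resource` line derives the old hard-coded value from the selected cloud's DNS suffix. Assuming msrestazure's public-cloud suffix (`.vault.azure.net`, leading dot included), the expression reduces to the previous constant:

    # assumed suffix value for AZURE_PUBLIC_CLOUD; sovereign clouds
    # substitute their own keyvault DNS suffix here
    keyvault_dns = '.vault.azure.net'
    resource = f"https://{keyvault_dns.split('.', 1).pop()}"
    assert resource == "https://vault.azure.net"  # the old hard-coded default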
awx/main/dispatch/periodic.py (new file, 52 lines)
@@ -0,0 +1,52 @@
import logging
import threading
import time

from django.conf import settings
from django.db import connections
from schedule import Scheduler

from awx.main.dispatch.worker import TaskWorker

logger = logging.getLogger('awx.main.dispatch.periodic')


class Scheduler(Scheduler):

    def run_continuously(self):
        cease_continuous_run = threading.Event()
        idle_seconds = max(
            1,
            min(self.jobs).period.total_seconds() / 2
        )

        class ScheduleThread(threading.Thread):
            @classmethod
            def run(cls):
                while not cease_continuous_run.is_set():
                    try:
                        for conn in connections.all():
                            # If the database connection has a hiccup, re-establish a new
                            # connection
                            conn.close_if_unusable_or_obsolete()
                        self.run_pending()
                    except Exception:
                        logger.exception(
                            'encountered an error while scheduling periodic tasks'
                        )
                    time.sleep(idle_seconds)
                logger.debug('periodic thread exiting...')

        thread = ScheduleThread()
        thread.daemon = True
        thread.start()
        return cease_continuous_run


def run_continuously():
    scheduler = Scheduler()
    for task in settings.CELERYBEAT_SCHEDULE.values():
        apply_async = TaskWorker.resolve_callable(task['task']).apply_async
        total_seconds = task['schedule'].total_seconds()
        scheduler.every(total_seconds).seconds.do(apply_async)
    return scheduler.run_continuously()
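`run_continuously()` iterates `settings.CELERYBEAT_SCHEDULE`, so each entry must provide a resolvable `'task'` path and a `'schedule'` value with `total_seconds()`. The actual AWX schedule is not shown in this diff; an assumed-shape example (the dotted task path here is illustrative only):

    from datetime import timedelta

    CELERYBEAT_SCHEDULE = {
        'task_manager': {
            'task': 'awx.main.scheduler.tasks.run_task_manager',  # illustrative path
            'schedule': timedelta(seconds=20),
        },
    }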
@@ -72,9 +72,6 @@ class PoolWorker(object):
        if not body.get('uuid'):
            body['uuid'] = str(uuid4())
        uuid = body['uuid']
-       logger.debug('delivered {} to worker[{}] qsize {}'.format(
-           uuid, self.pid, self.qsize
-       ))
        self.managed_tasks[uuid] = body
        self.queue.put(body, block=True, timeout=5)
        self.messages_sent += 1

@@ -132,7 +129,7 @@ class PoolWorker(object):
            # when this occurs, it's _fine_ to ignore this KeyError because
            # the purpose of self.managed_tasks is to just track internal
            # state of which events are *currently* being processed.
            pass
        logger.warn('Event UUID {} appears to have been duplicated.'.format(uuid))

    @property
    def current_task(self):

@@ -277,7 +274,7 @@ class WorkerPool(object):
                logger.warn("could not write to queue %s" % preferred_queue)
                logger.warn("detail: {}".format(tb))
            write_attempt_order.append(preferred_queue)
-       logger.warn("could not write payload to any queue, attempted order: {}".format(write_attempt_order))
+       logger.error("could not write payload to any queue, attempted order: {}".format(write_attempt_order))
        return None

    def stop(self, signum):

@@ -61,7 +61,7 @@ class AWXConsumer(ConsumerMixin):
        ])

    def control(self, body, message):
-       logger.warn(body)
+       logger.warn('Consumer received control message {}'.format(body))
        control = body.get('control')
        if control in ('status', 'running'):
            producer = Producer(

@@ -119,6 +119,9 @@ class AWXConsumer(ConsumerMixin):

class BaseWorker(object):

+   def read(self, queue):
+       return queue.get(block=True, timeout=1)

    def work_loop(self, queue, finished, idx, *args):
        ppid = os.getppid()
        signal_handler = WorkerSignalHandler()

@@ -128,7 +131,7 @@ class BaseWorker(object):
            if os.getppid() != ppid:
                break
            try:
-               body = queue.get(block=True, timeout=1)
+               body = self.read(queue)
                if body == 'QUIT':
                    break
            except QueueEmpty:

@@ -145,7 +148,6 @@ class BaseWorker(object):
        finally:
            if 'uuid' in body:
                uuid = body['uuid']
                logger.debug('task {} is finished'.format(uuid))
                finished.put(uuid)
            logger.warn('worker exiting gracefully pid:{}'.format(os.getpid()))
@@ -1,19 +1,31 @@
+import cProfile
import logging
import os
+import pstats
+import signal
+import tempfile
import time
import traceback
from queue import Empty as QueueEmpty

from django.conf import settings
+from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
-from django.db.utils import InterfaceError, InternalError
+from django.db.utils import InterfaceError, InternalError, IntegrityError

from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
                             InventoryUpdateEvent, SystemJobEvent, UnifiedJob)
+from awx.main.models.events import emit_event_detail

from .base import BaseWorker

logger = logging.getLogger('awx.main.commands.run_callback_receiver')

+# the number of seconds to buffer events in memory before flushing
+# using JobEvent.objects.bulk_create()
+BUFFER_SECONDS = .1


class CallbackBrokerWorker(BaseWorker):
    '''

@@ -25,90 +37,134 @@ class CallbackBrokerWorker(BaseWorker):
    '''

    MAX_RETRIES = 2
+   prof = None

+   def __init__(self):
+       self.buff = {}

+   def read(self, queue):
+       try:
+           return queue.get(block=True, timeout=BUFFER_SECONDS)
+       except QueueEmpty:
+           return {'event': 'FLUSH'}

+   def toggle_profiling(self, *args):
+       if self.prof:
+           self.prof.disable()
+           filename = f'callback-{os.getpid()}.pstats'
+           filepath = os.path.join(tempfile.gettempdir(), filename)
+           with open(filepath, 'w') as f:
+               pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
+           pstats.Stats(self.prof).dump_stats(filepath + '.raw')
+           self.prof = False
+           logger.error(f'profiling is disabled, wrote {filepath}')
+       else:
+           self.prof = cProfile.Profile()
+           self.prof.enable()
+           logger.error('profiling is enabled')

+   def work_loop(self, *args, **kw):
+       if settings.AWX_CALLBACK_PROFILE:
+           signal.signal(signal.SIGUSR1, self.toggle_profiling)
+       return super(CallbackBrokerWorker, self).work_loop(*args, **kw)

+   def flush(self, force=False):
+       now = tz_now()
+       if (
+           force or
+           any([len(events) >= 1000 for events in self.buff.values()])
+       ):
+           for cls, events in self.buff.items():
+               logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
+               for e in events:
+                   if not e.created:
+                       e.created = now
+                   e.modified = now
+               try:
+                   cls.objects.bulk_create(events)
+               except Exception as exc:
+                   # if an exception occurs, we should re-attempt to save the
+                   # events one-by-one, because something in the list is
+                   # broken/stale (e.g., an IntegrityError on a specific event)
+                   for e in events:
+                       try:
+                           if (
+                               isinstance(exc, IntegrityError) and
+                               getattr(e, 'host_id', '')
+                           ):
+                               # this is one potential IntegrityError we can
+                               # work around - if the host disappears before
+                               # the event can be processed
+                               e.host_id = None
+                           e.save()
+                       except Exception:
+                           logger.exception('Database Error Saving Job Event')
+               for e in events:
+                   emit_event_detail(e)
+           self.buff = {}

    def perform_work(self, body):
        try:
-           event_map = {
-               'job_id': JobEvent,
-               'ad_hoc_command_id': AdHocCommandEvent,
-               'project_update_id': ProjectUpdateEvent,
-               'inventory_update_id': InventoryUpdateEvent,
-               'system_job_id': SystemJobEvent,
-           }
+           flush = body.get('event') == 'FLUSH'
+           if not flush:
+               event_map = {
+                   'job_id': JobEvent,
+                   'ad_hoc_command_id': AdHocCommandEvent,
+                   'project_update_id': ProjectUpdateEvent,
+                   'inventory_update_id': InventoryUpdateEvent,
+                   'system_job_id': SystemJobEvent,
+               }

                if not any([key in body for key in event_map]):
                    raise Exception('Payload does not have a job identifier')

-           def _save_event_data():
-               job_identifier = 'unknown job'
-               for key, cls in event_map.items():
-                   if key in body:
-                       cls.create_from_data(**body)
-                       job_identifier = body[key]
-                       break
+               job_identifier = 'unknown job'
+               job_key = 'unknown'
+               for key in event_map.keys():
+                   if key in body:
+                       job_identifier = body[key]
+                       job_key = key
+                       break

                if settings.DEBUG:
                    from pygments import highlight
                    from pygments.lexers import PythonLexer
                    from pygments.formatters import Terminal256Formatter
                    from pprint import pformat
                    if body.get('event') == 'EOF':
                        event_thing = 'EOF event'
                    else:
                        event_thing = 'event {}'.format(body.get('counter', 'unknown'))
                    logger.info('Callback worker received {} for {} {}'.format(
                        event_thing, job_key[:-len('_id')], job_identifier
                    ))
                    logger.debug('Body: {}'.format(
                        highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
                    )[:1024 * 4])

-           try:
-               final_counter = body.get('final_counter', 0)
-               logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
-               # EOF events are sent when stdout for the running task is
-               # closed. don't actually persist them to the database; we
-               # just use them to report `summary` websocket events as an
-               # approximation for when a job is "done"
-               emit_channel_notification(
-                   'jobs-summary',
-                   dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
-               )
-               # Additionally, when we've processed all events, we should
-               # have all the data we need to send out success/failure
-               # notification templates
-               uj = UnifiedJob.objects.get(pk=job_identifier)
-               if hasattr(uj, 'send_notification_templates'):
-                   retries = 0
-                   while retries < 5:
-                       if uj.finished:
-                           uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
-                           break
-                       else:
-                           # wait a few seconds to avoid a race where the
-                           # events are persisted _before_ the UJ.status
-                           # changes from running -> successful
-                           retries += 1
-                           time.sleep(1)
-                           uj = UnifiedJob.objects.get(pk=job_identifier)
-           except Exception:
-               logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
-           return

+               if body.get('event') == 'EOF':
+                   try:
+                       final_counter = body.get('final_counter', 0)
+                       logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
+                       # EOF events are sent when stdout for the running task is
+                       # closed. don't actually persist them to the database; we
+                       # just use them to report `summary` websocket events as an
+                       # approximation for when a job is "done"
+                       emit_channel_notification(
+                           'jobs-summary',
+                           dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
+                       )
+                       # Additionally, when we've processed all events, we should
+                       # have all the data we need to send out success/failure
+                       # notification templates
+                       uj = UnifiedJob.objects.get(pk=job_identifier)
+                       if hasattr(uj, 'send_notification_templates'):
+                           retries = 0
+                           while retries < 5:
+                               if uj.finished:
+                                   uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
+                                   break
+                               else:
+                                   # wait a few seconds to avoid a race where the
+                                   # events are persisted _before_ the UJ.status
+                                   # changes from running -> successful
+                                   retries += 1
+                                   time.sleep(1)
+                                   uj = UnifiedJob.objects.get(pk=job_identifier)
+                   except Exception:
+                       logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
+                   return

+               event = cls.create_from_data(**body)
+               self.buff.setdefault(cls, []).append(event)

            retries = 0
            while retries <= self.MAX_RETRIES:
                try:
-                   _save_event_data()
+                   self.flush(force=flush)
                    break
                except (OperationalError, InterfaceError, InternalError):
                    if retries >= self.MAX_RETRIES:
-                       logger.exception('Worker could not re-establish database connectivity, giving up on event for Job {}'.format(job_identifier))
+                       logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
                        return
                    delay = 60 * retries
                    logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(

@@ -119,7 +175,7 @@ class CallbackBrokerWorker(BaseWorker):
                    time.sleep(delay)
                    retries += 1
                except DatabaseError:
-                   logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
+                   logger.exception('Database Error Saving Job Event')
                    break
                except Exception as exc:
                    tb = traceback.format_exc()
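The key idea in this rewrite is that a `read()` timeout doubles as a flush tick: when the queue is idle for `BUFFER_SECONDS`, a synthetic `FLUSH` sentinel drains the buffer, and a size cap drains it under load. A minimal standalone sketch of the same pattern (names and the `bulk_save` callable are illustrative; the real worker uses `cls.objects.bulk_create`):

    import queue

    BUFFER_SECONDS = .1
    MAX_BUFFERED = 1000

    def consume(q, bulk_save):
        # q is a queue.Queue of event dicts; bulk_save persists a batch
        buff = []
        while True:
            try:
                item = q.get(block=True, timeout=BUFFER_SECONDS)
            except queue.Empty:
                item = 'FLUSH'              # idle timeout doubles as a flush tick
            if item == 'FLUSH':
                if buff:
                    bulk_save(buff)
                    buff = []
                continue
            buff.append(item)
            if len(buff) >= MAX_BUFFERED:   # size-based flush, like the 1000-event cap
                bulk_save(buff)
                buff = []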

@@ -370,33 +370,32 @@ class IsolatedManager(object):
                private_data_dir
            )

-           if runner_obj.status == 'successful':
-               for instance in instance_qs:
-                   task_result = {}
-                   try:
-                       task_result = runner_obj.get_fact_cache(instance.hostname)
-                   except Exception:
-                       logger.exception('Failed to read status from isolated instances')
-                   if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
-                       task_result = {
-                           'cpu': task_result['awx_cpu'],
-                           'mem': task_result['awx_mem'],
-                           'capacity_cpu': task_result['awx_capacity_cpu'],
-                           'capacity_mem': task_result['awx_capacity_mem'],
-                           'version': task_result['awx_capacity_version']
-                       }
-                       IsolatedManager.update_capacity(instance, task_result)
-                       logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
-                   elif instance.capacity == 0:
-                       logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
-                           instance.hostname))
-                   else:
-                       logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
-                       if instance.is_lost(isolated=True):
-                           instance.capacity = 0
-                           instance.save(update_fields=['capacity'])
-                           logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
-                               instance.hostname, instance.modified))
+           for instance in instance_qs:
+               task_result = {}
+               try:
+                   task_result = runner_obj.get_fact_cache(instance.hostname)
+               except Exception:
+                   logger.exception('Failed to read status from isolated instances')
+               if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
+                   task_result = {
+                       'cpu': task_result['awx_cpu'],
+                       'mem': task_result['awx_mem'],
+                       'capacity_cpu': task_result['awx_capacity_cpu'],
+                       'capacity_mem': task_result['awx_capacity_mem'],
+                       'version': task_result['awx_capacity_version']
+                   }
+                   IsolatedManager.update_capacity(instance, task_result)
+                   logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
+               elif instance.capacity == 0:
+                   logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
+                       instance.hostname))
+               else:
+                   logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
+                   if instance.is_lost(isolated=True):
+                       instance.capacity = 0
+                       instance.save(update_fields=['capacity'])
+                       logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
+                           instance.hostname, instance.modified))
        finally:
            if os.path.exists(private_data_dir):
                shutil.rmtree(private_data_dir)
awx/main/management/commands/callback_stats.py (new file, 40 lines)
@@ -0,0 +1,40 @@
import time
import sys

from django.db import connection
from django.core.management.base import BaseCommand


class Command(BaseCommand):

    def handle(self, *args, **options):
        with connection.cursor() as cursor:
            start = {}
            for relation in (
                'main_jobevent', 'main_inventoryupdateevent',
                'main_projectupdateevent', 'main_adhoccommandevent'
            ):
                cursor.execute(f"SELECT MAX(id) FROM {relation};")
                start[relation] = cursor.fetchone()[0] or 0
            clear = False
            while True:
                lines = []
                for relation in (
                    'main_jobevent', 'main_inventoryupdateevent',
                    'main_projectupdateevent', 'main_adhoccommandevent'
                ):
                    lines.append(relation)
                    minimum = start[relation]
                    cursor.execute(
                        f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE id > {minimum} AND modified > now() - '1 minute'::interval;"
                    )
                    events = cursor.fetchone()[0] or 0
                    lines.append(f'↳ last minute {events}')
                    lines.append('')
                if clear:
                    for i in range(12):
                        # ANSI escapes: move the cursor up one line, then erase it
                        sys.stdout.write('\x1b[1A\x1b[2K')
                for l in lines:
                    print(l)
                clear = True
                time.sleep(.25)
@@ -16,13 +16,10 @@ from awx.main.models import (
    Job, AdHocCommand, ProjectUpdate, InventoryUpdate,
    SystemJob, WorkflowJob, Notification
)
-from awx.main.signals import ( # noqa
-   emit_update_inventory_on_created_or_deleted,
-   emit_update_inventory_computed_fields,
+from awx.main.signals import (
    disable_activity_stream,
    disable_computed_fields
)
from django.db.models.signals import post_save, post_delete, m2m_changed  # noqa


class Command(BaseCommand):

@@ -921,11 +921,14 @@ class Command(BaseCommand):
        available_instances = license_info.get('available_instances', 0)
        free_instances = license_info.get('free_instances', 0)
        time_remaining = license_info.get('time_remaining', 0)
+       hard_error = license_info.get('trial', False) is True or license_info['instance_count'] == 10
        new_count = Host.objects.active_count()
-       if time_remaining <= 0 and not license_info.get('demo', False):
-           logger.error(LICENSE_EXPIRED_MESSAGE)
-           if license_info.get('trial', False) is True:
+       if time_remaining <= 0:
+           if hard_error:
+               logger.error(LICENSE_EXPIRED_MESSAGE)
                raise CommandError("License has expired!")
+           else:
+               logger.warning(LICENSE_EXPIRED_MESSAGE)
        # special check for tower-type inventory sources
        # but only if running the plugin
        TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']

@@ -938,15 +941,11 @@ class Command(BaseCommand):
                'new_count': new_count,
                'available_instances': available_instances,
            }
-           if license_info.get('demo', False):
-               logger.error(DEMO_LICENSE_MESSAGE % d)
-           else:
+           if hard_error:
                logger.error(LICENSE_MESSAGE % d)
-               if (
-                   license_info.get('trial', False) is True or
-                   license_info['instance_count'] == 10  # basic 10 license
-               ):
-                   raise CommandError('License count exceeded!')
+               raise CommandError('License count exceeded!')
+           else:
+               logger.warning(LICENSE_MESSAGE % d)

    def check_org_host_limit(self):
        license_info = get_licenser().validate()
@@ -1,6 +1,8 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

+from uuid import uuid4

from awx.main.models import Instance
from django.conf import settings

@@ -22,6 +24,8 @@ class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument('--hostname', dest='hostname', type=str,
                            help='Hostname used during provisioning')
+       parser.add_argument('--is-isolated', dest='is_isolated', action='store_true',
+                           help='Specify whether the instance is isolated')

    def _register_hostname(self, hostname):
        if not hostname:

@@ -37,7 +41,10 @@ class Command(BaseCommand):
    def handle(self, **options):
        if not options.get('hostname'):
            raise CommandError("Specify `--hostname` to use this command.")
-       self.uuid = settings.SYSTEM_UUID
+       if options['is_isolated']:
+           self.uuid = str(uuid4())
+       else:
+           self.uuid = settings.SYSTEM_UUID
        self.changed = False
        self._register_hostname(options.get('hostname'))
        if self.changed:
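A hedged invocation example for the new flag (the shell form would be `awx-manage provision_instance --hostname=iso1 --is-isolated`); via Django's `call_command` each option maps to its `dest` name:

    from django.core.management import call_command

    # with is_isolated=True the instance registers with a fresh uuid4
    # rather than the shared settings.SYSTEM_UUID
    call_command('provision_instance', hostname='iso1', is_isolated=True)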

@@ -9,6 +9,7 @@ import random
from django.utils import timezone
from django.core.management.base import BaseCommand

+from awx.main.models.events import emit_event_detail
from awx.main.models import (
    UnifiedJob,
    Job,

@@ -17,14 +18,6 @@ from awx.main.models import (
    InventoryUpdate,
    SystemJob
)
-from awx.main.consumers import emit_channel_notification
-from awx.api.serializers import (
-   JobEventWebSocketSerializer,
-   AdHocCommandEventWebSocketSerializer,
-   ProjectUpdateEventWebSocketSerializer,
-   InventoryUpdateEventWebSocketSerializer,
-   SystemJobEventWebSocketSerializer
-)


class JobStatusLifeCycle():

@@ -96,21 +89,6 @@ class ReplayJobEvents(JobStatusLifeCycle):
            raise RuntimeError("No events for job id {}".format(job.id))
        return job_events, count

-   def get_serializer(self, job):
-       if type(job) is Job:
-           return JobEventWebSocketSerializer
-       elif type(job) is AdHocCommand:
-           return AdHocCommandEventWebSocketSerializer
-       elif type(job) is ProjectUpdate:
-           return ProjectUpdateEventWebSocketSerializer
-       elif type(job) is InventoryUpdate:
-           return InventoryUpdateEventWebSocketSerializer
-       elif type(job) is SystemJob:
-           return SystemJobEventWebSocketSerializer
-       else:
-           raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job)))
-           sys.exit(1)

    def run(self, job_id, speed=1.0, verbosity=0, skip_range=[], random_seed=0, final_status_delay=0, debug=False):
        stats = {
            'events_ontime': {

@@ -136,7 +114,6 @@ class ReplayJobEvents(JobStatusLifeCycle):
        try:
            job = self.get_job(job_id)
            job_events, job_event_count = self.get_job_events(job)
-           serializer = self.get_serializer(job)
        except RuntimeError as e:
            print("{}".format(e.message))
            sys.exit(1)

@@ -162,8 +139,7 @@ class ReplayJobEvents(JobStatusLifeCycle):
            stats['replay_start'] = self.replay_start
        je_previous = je_current

-       je_serialized = serializer(je_current).data
-       emit_channel_notification('{}-{}'.format(je_serialized['group_name'], job.id), je_serialized)
+       emit_event_detail(je_current)

        replay_offset = self.replay_offset(je_previous.created, speed)
        recording_diff = (je_current.created - je_previous.created).total_seconds() * (1.0 / speed)
@@ -16,6 +16,7 @@ from awx.main.dispatch.control import Control
from awx.main.dispatch.kombu import Connection
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
+from awx.main.dispatch import periodic

logger = logging.getLogger('awx.main.dispatch')

@@ -36,71 +37,6 @@ class Command(BaseCommand):
                help=('cause the dispatcher to recycle all of its worker processes;'
                      'running jobs will run to completion first'))

-   def beat(self):
-       from celery import Celery
-       from celery.beat import PersistentScheduler
-       from celery.apps import beat
-
-       class AWXScheduler(PersistentScheduler):
-
-           def __init__(self, *args, **kwargs):
-               self.ppid = os.getppid()
-               super(AWXScheduler, self).__init__(*args, **kwargs)
-
-           def setup_schedule(self):
-               super(AWXScheduler, self).setup_schedule()
-               self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
-
-           def tick(self, *args, **kwargs):
-               if os.getppid() != self.ppid:
-                   # if the parent PID changes, this process has been orphaned
-                   # via e.g., segfault or sigkill, we should exit too
-                   raise SystemExit()
-               return super(AWXScheduler, self).tick(*args, **kwargs)
-
-           def apply_async(self, entry, producer=None, advance=True, **kwargs):
-               for conn in connections.all():
-                   # If the database connection has a hiccup, re-establish a new
-                   # connection
-                   conn.close_if_unusable_or_obsolete()
-               task = TaskWorker.resolve_callable(entry.task)
-               result, queue = task.apply_async()
-
-               class TaskResult(object):
-                   id = result['uuid']
-
-               return TaskResult()
-
-       sched_file = '/var/lib/awx/beat.db'
-       app = Celery()
-       app.conf.BROKER_URL = settings.BROKER_URL
-       app.conf.CELERY_TASK_RESULT_EXPIRES = False
-
-       # celery in py3 seems to have a bug where the celerybeat schedule
-       # shelve can become corrupted; we've _only_ seen this in Ubuntu and py36
-       # it can be avoided by detecting and removing the corrupted file
-       # at some point, we'll just stop using celerybeat, because it's clearly
-       # buggy, too -_-
-       #
-       # https://github.com/celery/celery/issues/4777
-       sched = AWXScheduler(schedule_filename=sched_file, app=app)
-       try:
-           sched.setup_schedule()
-       except Exception:
-           logger.exception('{} is corrupted, removing.'.format(sched_file))
-           sched._remove_db()
-       finally:
-           try:
-               sched.close()
-           except Exception:
-               logger.exception('{} failed to sync/close'.format(sched_file))
-
-       beat.Beat(
-           30,
-           app,
-           schedule=sched_file, scheduler_cls=AWXScheduler
-       ).run()

    def handle(self, *arg, **options):
        if options.get('status'):
            print(Control('dispatcher').status())

@@ -116,9 +52,10 @@ class Command(BaseCommand):
        # for the DB and memcached connections (that way lies race conditions)
        django_connection.close()
        django_cache.close()
-       beat = Process(target=self.beat)
-       beat.daemon = True
-       beat.start()

+       # spawn a daemon thread to periodically enqueue scheduled tasks
+       # (like the node heartbeat)
+       cease_continuous_run = periodic.run_continuously()

        reaper.reap()
        consumer = None

@@ -152,6 +89,7 @@ class Command(BaseCommand):
            )
            consumer.run()
        except KeyboardInterrupt:
+           cease_continuous_run.set()
            logger.debug('Terminating Task Dispatcher')
            if consumer:
                consumer.stop()
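The celerybeat subprocess is replaced by the in-process daemon thread from `awx.main.dispatch.periodic`, so shutdown becomes a matter of signaling a `threading.Event` rather than killing a child process. A self-contained sketch of that shutdown contract:

    import threading
    import time

    def run_continuously(interval=1.0):
        # returns an Event; setting it lets the daemon thread exit its loop
        stop = threading.Event()

        def loop():
            while not stop.is_set():
                time.sleep(interval)   # stand-in for scheduler.run_pending()

        threading.Thread(target=loop, daemon=True).start()
        return stop

    cease = run_continuously()
    # ... run the consumer ...
    cease.set()  # what the KeyboardInterrupt handler above does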

@@ -13,7 +13,8 @@ import urllib.parse
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import post_save
-from django.db import IntegrityError
+from django.db.migrations.executor import MigrationExecutor
+from django.db import IntegrityError, connection
from django.utils.functional import curry
from django.shortcuts import get_object_or_404, redirect
from django.apps import apps

@@ -23,7 +24,6 @@ from django.urls import reverse, resolve

from awx.main.models import ActivityStream
from awx.main.utils.named_url_graph import generate_graph, GraphNode
-from awx.main.utils.db import migration_in_progress_check_or_relase
from awx.conf import fields, register

@@ -62,6 +62,17 @@ class TimingMiddleware(threading.local, MiddlewareMixin):
        with open(filepath, 'w') as f:
            f.write('%s %s\n' % (request.method, request.get_full_path()))
            pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()

+       if settings.AWX_REQUEST_PROFILE_WITH_DOT:
+           from gprof2dot import main as generate_dot
+           raw = os.path.join(self.dest, filename) + '.raw'
+           pstats.Stats(self.prof).dump_stats(raw)
+           generate_dot([
+               '-n', '2.5', '-f', 'pstats', '-o',
+               os.path.join(self.dest, filename).replace('.pstats', '.dot'),
+               raw
+           ])
+           os.remove(raw)
        return filepath

@@ -213,7 +224,8 @@ class URLModificationMiddleware(MiddlewareMixin):

class MigrationRanCheckMiddleware(MiddlewareMixin):

    def process_request(self, request):
-       if migration_in_progress_check_or_relase():
-           if getattr(resolve(request.path), 'url_name', '') == 'migrations_notran':
-               return
+       executor = MigrationExecutor(connection)
+       plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
+       if bool(plan) and \
+               getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
            return redirect(reverse("ui:migrations_notran"))
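Rather than relying on the removed `migration_in_progress_check_or_relase` helper, the middleware now asks Django's migration executor directly; an empty plan means every leaf migration has already been applied. The same check as a standalone function:

    from django.db import connection
    from django.db.migrations.executor import MigrationExecutor

    def has_pending_migrations():
        # a non-empty plan means at least one migration has not been applied
        executor = MigrationExecutor(connection)
        plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
        return bool(plan)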
@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
from uuid import uuid4

from django.db import migrations


def _generate_new_uuid_for_iso_nodes(apps, schema_editor):
    Instance = apps.get_model('main', 'Instance')
    for instance in Instance.objects.all():
        # The below code is a copy paste of instance.is_isolated()
        # We can't call is_isolated because we are using the "old" version
        # of the Instance definition.
        if instance.rampart_groups.filter(controller__isnull=False).exists():
            instance.uuid = str(uuid4())
            instance.save()


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0100_v370_projectupdate_job_tags'),
    ]

    operations = [
        migrations.RunPython(_generate_new_uuid_for_iso_nodes)
    ]
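As written, this data migration has no reverse function, so `migrate` cannot roll back past it. Django's `RunPython` accepts a second, reverse callable; a hedged variant using the built-in no-op would keep the migration formally reversible:

    from django.db import migrations

    # RunPython.noop lets the migration be unapplied without trying to undo
    # the UUID rewrite (which is not meaningfully reversible anyway)
    operations = [
        migrations.RunPython(_generate_new_uuid_for_iso_nodes, migrations.RunPython.noop),
    ]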
awx/main/migrations/0102_v370_unifiedjob_canceled.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 2.2.4 on 2019-11-25 20:53

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0101_v370_generate_new_uuids_for_iso_nodes'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='canceled_on',
            field=models.DateTimeField(db_index=True, default=None, editable=False, help_text='The date and time when the cancel request was sent.', null=True),
        ),
    ]
52
awx/main/migrations/0103_v370_remove_computed_fields.py
Normal file
52
awx/main/migrations/0103_v370_remove_computed_fields.py
Normal file
@@ -0,0 +1,52 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-21 17:35
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0102_v370_unifiedjob_canceled'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='group',
            name='groups_with_active_failures',
        ),
        migrations.RemoveField(
            model_name='group',
            name='has_active_failures',
        ),
        migrations.RemoveField(
            model_name='group',
            name='has_inventory_sources',
        ),
        migrations.RemoveField(
            model_name='group',
            name='hosts_with_active_failures',
        ),
        migrations.RemoveField(
            model_name='group',
            name='total_groups',
        ),
        migrations.RemoveField(
            model_name='group',
            name='total_hosts',
        ),
        migrations.RemoveField(
            model_name='host',
            name='has_active_failures',
        ),
        migrations.RemoveField(
            model_name='host',
            name='has_inventory_sources',
        ),
        migrations.AlterField(
            model_name='jobhostsummary',
            name='failed',
            field=models.BooleanField(db_index=True, default=False, editable=False),
        ),
    ]

awx/main/migrations/0104_v370_cleanup_old_scan_jts.py (new file, 24 lines)
@@ -0,0 +1,24 @@
# Generated by Django 2.2.8 on 2020-01-15 20:01

from django.db import migrations, models


def cleanup_scan_jts(apps, schema_editor):
    JobTemplate = apps.get_model('main', 'JobTemplate')
    JobTemplate.objects.filter(job_type='scan').update(job_type='run')


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0103_v370_remove_computed_fields'),
    ]

    operations = [
        migrations.RunPython(cleanup_scan_jts),
        migrations.AlterField(
            model_name='jobtemplate',
            name='job_type',
            field=models.CharField(choices=[('run', 'Run'), ('check', 'Check')], default='run', max_length=64),
        ),
    ]
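Both 0101 and 0104 above follow the standard RunPython data-migration pattern: fetch the historical model through apps.get_model, so the function keeps working even after later schema changes (which is exactly why 0101 cannot call Instance.is_isolated() directly). A generic sketch of the pattern; the model, field, and migration names are placeholders:

    # Sketch: the RunPython data-migration pattern used by 0101 and 0104.
    from django.db import migrations

    def forwards(apps, schema_editor):
        # Historical model: reflects the schema at this point in migration
        # history, not the current models.py.
        Widget = apps.get_model('main', 'Widget')  # placeholder model
        Widget.objects.filter(kind='legacy').update(kind='standard')

    class Migration(migrations.Migration):
        dependencies = [('main', '0100_some_previous_migration')]  # placeholder
        operations = [
            migrations.RunPython(forwards, migrations.RunPython.noop),
        ]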
@@ -0,0 +1,21 @@
# Generated by Django 2.2.8 on 2020-01-15 18:01

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0104_v370_cleanup_old_scan_jts'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='jobevent',
            name='parent',
        ),
        migrations.RemoveField(
            model_name='jobevent',
            name='hosts',
        ),
    ]
@@ -0,0 +1,17 @@
# Generated by Django 2.2.8 on 2020-01-27 12:39

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0105_v370_remove_jobevent_parent_and_hosts'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='inventory',
            name='groups_with_active_failures',
        ),
    ]
@@ -0,0 +1,23 @@
# Generated by Django 2.2.4 on 2020-01-08 22:11

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0106_v370_remove_inventory_groups_with_active_failures'),
    ]

    operations = [
        migrations.AddField(
            model_name='workflowjobnode',
            name='all_parents_must_converge',
            field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='all_parents_must_converge',
            field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
        ),
    ]
@@ -0,0 +1,18 @@
# Generated by Django 2.2.8 on 2020-02-06 16:43

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0107_v370_workflow_convergence_api_toggle'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='dependencies_processed',
            field=models.BooleanField(default=False, editable=False, help_text='If True, the task manager has already processed potential dependencies for this job.'),
        ),
    ]
@@ -1136,7 +1136,7 @@ ManagedCredentialType(
        'help_text': ugettext_noop('The OpenShift or Kubernetes API Endpoint to authenticate with.')
    },{
        'id': 'bearer_token',
        'label': ugettext_noop('API authentication bearer token.'),
        'label': ugettext_noop('API authentication bearer token'),
        'type': 'string',
        'secret': True,
    },{
@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-

import datetime
import logging
from collections import defaultdict

from django.conf import settings
from django.db import models, DatabaseError
from django.utils.dateparse import parse_datetime
from django.utils.text import Truncator
@@ -11,9 +12,10 @@ from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text

from awx.api.versioning import reverse
from awx.main import consumers
from awx.main.fields import JSONField
from awx.main.models.base import CreatedModifiedModel
from awx.main.utils import ignore_inventory_computed_fields
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore

analytics_logger = logging.getLogger('awx.analytics.job_events')
@@ -55,6 +57,51 @@ def create_host_status_counts(event_data):
    return dict(host_status_counts)


def emit_event_detail(event):
    cls = event.__class__
    relation = {
        JobEvent: 'job_id',
        AdHocCommandEvent: 'ad_hoc_command_id',
        ProjectUpdateEvent: 'project_update_id',
        InventoryUpdateEvent: 'inventory_update_id',
        SystemJobEvent: 'system_job_id',
    }[cls]
    url = ''
    if isinstance(event, JobEvent):
        url = '/api/v2/job_events/{}'.format(event.id)
    if isinstance(event, AdHocCommandEvent):
        url = '/api/v2/ad_hoc_command_events/{}'.format(event.id)
    group = camelcase_to_underscore(cls.__name__) + 's'
    timestamp = event.created.isoformat()
    consumers.emit_channel_notification(
        '-'.join([group, str(getattr(event, relation))]),
        {
            'id': event.id,
            relation.replace('_id', ''): getattr(event, relation),
            'created': timestamp,
            'modified': timestamp,
            'group_name': group,
            'url': url,
            'stdout': event.stdout,
            'counter': event.counter,
            'uuid': event.uuid,
            'parent_uuid': getattr(event, 'parent_uuid', ''),
            'start_line': event.start_line,
            'end_line': event.end_line,
            'event': event.event,
            'event_data': getattr(event, 'event_data', {}),
            'failed': event.failed,
            'changed': event.changed,
            'event_level': getattr(event, 'event_level', ''),
            'play': getattr(event, 'play', ''),
            'role': getattr(event, 'role', ''),
            'task': getattr(event, 'task', ''),
        }
    )
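emit_event_detail derives the channel group name from the event class, e.g. JobEvent becomes 'job_events' and the channel 'job_events-<job_id>'. camelcase_to_underscore is an AWX helper imported above; the one-liner below is a re-implementation of the idea for illustration only, not the actual utility:

    # Sketch: how the channel group name is derived (illustrative re-implementation).
    import re

    def camelcase_to_underscore(s):
        return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()

    group = camelcase_to_underscore('JobEvent') + 's'  # -> 'job_events'
    channel = '-'.join([group, str(42)])               # -> 'job_events-42'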
class BasePlaybookEvent(CreatedModifiedModel):
    '''
    An event/message logged from a playbook callback for each host.
@@ -63,7 +110,7 @@ class BasePlaybookEvent(CreatedModifiedModel):
    VALID_KEYS = [
        'event', 'event_data', 'playbook', 'play', 'role', 'task', 'created',
        'counter', 'uuid', 'stdout', 'parent_uuid', 'start_line', 'end_line',
        'verbosity'
        'host_id', 'host_name', 'verbosity',
    ]

    class Meta:
@@ -271,37 +318,66 @@ class BasePlaybookEvent(CreatedModifiedModel):

    def _update_from_event_data(self):
        # Update event model fields from event data.
        updated_fields = set()
        event_data = self.event_data
        res = event_data.get('res', None)
        if self.event in self.FAILED_EVENTS and not event_data.get('ignore_errors', False):
            self.failed = True
            updated_fields.add('failed')
        if isinstance(res, dict):
            if res.get('changed', False):
                self.changed = True
                updated_fields.add('changed')
        if self.event == 'playbook_on_stats':
            try:
                failures_dict = event_data.get('failures', {})
                dark_dict = event_data.get('dark', {})
                self.failed = bool(sum(failures_dict.values()) +
                                   sum(dark_dict.values()))
                updated_fields.add('failed')
                changed_dict = event_data.get('changed', {})
                self.changed = bool(sum(changed_dict.values()))
                updated_fields.add('changed')
            except (AttributeError, TypeError):
                pass

            if isinstance(self, JobEvent):
                hostnames = self._hostnames()
                self._update_host_summary_from_stats(hostnames)
                if self.job.inventory:
                    try:
                        self.job.inventory.update_computed_fields()
                    except DatabaseError:
                        logger.exception('Computed fields database error saving event {}'.format(self.pk))

                # find parent links and propagate changed=T and failed=T
                changed = self.job.job_events.filter(changed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()  # noqa
                failed = self.job.job_events.filter(failed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()  # noqa

                JobEvent.objects.filter(
                    job_id=self.job_id, uuid__in=changed
                ).update(changed=True)
                JobEvent.objects.filter(
                    job_id=self.job_id, uuid__in=failed
                ).update(failed=True)

        for field in ('playbook', 'play', 'task', 'role'):
            value = force_text(event_data.get(field, '')).strip()
            if value != getattr(self, field):
                setattr(self, field, value)
                updated_fields.add(field)
        return updated_fields
        analytics_logger.info(
            'Event data saved.',
            extra=dict(python_objects=dict(job_event=self))
        )
    @classmethod
    def create_from_data(cls, **kwargs):
        #
        # ⚠️ D-D-D-DANGER ZONE ⚠️
        # This function is called by the callback receiver *once* for *every
        # event* emitted by Ansible as a playbook runs. That means that
        # changes to this function are _very_ susceptible to introducing
        # performance regressions (which the user will experience as "my
        # playbook stdout takes too long to show up"), *especially* code which
        # might invoke additional database queries per event.
        #
        # Proceed with caution!
        #
        pk = None
        for key in ('job_id', 'project_update_id'):
            if key in kwargs:
@@ -325,74 +401,16 @@ class BasePlaybookEvent(CreatedModifiedModel):

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
        workflow_job_id = kwargs.pop('workflow_job_id', None)
        job_event = cls.objects.create(**kwargs)
        event = cls(**kwargs)
        if workflow_job_id:
            setattr(job_event, 'workflow_job_id', workflow_job_id)
        analytics_logger.info('Event data saved.', extra=dict(python_objects=dict(job_event=job_event)))
        return job_event
            setattr(event, 'workflow_job_id', workflow_job_id)
        event._update_from_event_data()
        return event

    @property
    def job_verbosity(self):
        return 0

    def save(self, *args, **kwargs):
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
        # Update model fields and related objects unless we're only updating
        # failed/changed flags triggered from a child event.
        from_parent_update = kwargs.pop('from_parent_update', False)
        if not from_parent_update:
            # Update model fields from event data.
            updated_fields = self._update_from_event_data()
            for field in updated_fields:
                if field not in update_fields:
                    update_fields.append(field)

            # Update host related field from host_name.
            if hasattr(self, 'job') and not self.host_id and self.host_name:
                if self.job.inventory.kind == 'smart':
                    # optimization to avoid calling inventory.hosts, which
                    # can take a long time to run under some circumstances
                    from awx.main.models.inventory import SmartInventoryMembership
                    membership = SmartInventoryMembership.objects.filter(
                        inventory=self.job.inventory, host__name=self.host_name
                    ).first()
                    if membership:
                        host_id = membership.host_id
                    else:
                        host_id = None
                else:
                    host_qs = self.job.inventory.hosts.filter(name=self.host_name)
                    host_id = host_qs.only('id').values_list('id', flat=True).first()
                if host_id != self.host_id:
                    self.host_id = host_id
                    if 'host_id' not in update_fields:
                        update_fields.append('host_id')
        super(BasePlaybookEvent, self).save(*args, **kwargs)

        # Update related objects after this event is saved.
        if hasattr(self, 'job') and not from_parent_update:
            if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False):
                self._update_hosts()
            if self.parent_uuid:
                kwargs = {}
                if self.changed is True:
                    kwargs['changed'] = True
                if self.failed is True:
                    kwargs['failed'] = True
                if kwargs:
                    JobEvent.objects.filter(job_id=self.job_id, uuid=self.parent_uuid).update(**kwargs)

            if self.event == 'playbook_on_stats':
                hostnames = self._hostnames()
                self._update_host_summary_from_stats(hostnames)
                try:
                    self.job.inventory.update_computed_fields()
                except DatabaseError:
                    logger.exception('Computed fields database error saving event {}'.format(self.pk))
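The heart of this change: create_from_data used to call cls.objects.create(**kwargs), one INSERT per Ansible event; it now builds an unsaved instance and applies _update_from_event_data(), leaving the caller (the callback receiver) free to batch the writes. In isolation the difference looks like the sketch below, runnable inside any Django app; Event stands in for an arbitrary model:

    # Sketch: immediate insert vs. deferred instances for batching.
    # Before: one query per event.
    #     Event.objects.create(counter=i)
    # After: build unsaved instances, then write them in bulk.
    events = [Event(counter=i) for i in range(1000)]
    Event.objects.bulk_create(events, batch_size=500)  # ~2 queries instead of 1000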
class JobEvent(BasePlaybookEvent):
    '''
@@ -431,19 +449,6 @@ class JobEvent(BasePlaybookEvent):
        default='',
        editable=False,
    )
    hosts = models.ManyToManyField(
        'Host',
        related_name='job_events',
        editable=False,
    )
    parent = models.ForeignKey(
        'self',
        related_name='children',
        null=True,
        default=None,
        on_delete=models.SET_NULL,
        editable=False,
    )
    parent_uuid = models.CharField(
        max_length=1024,
        default='',
@@ -456,38 +461,6 @@ class JobEvent(BasePlaybookEvent):
    def __str__(self):
        return u'%s @ %s' % (self.get_event_display2(), self.created.isoformat())

    def _update_from_event_data(self):
        # Update job event hostname
        updated_fields = super(JobEvent, self)._update_from_event_data()
        value = force_text(self.event_data.get('host', '')).strip()
        if value != getattr(self, 'host_name'):
            setattr(self, 'host_name', value)
            updated_fields.add('host_name')
        return updated_fields

    def _update_hosts(self, extra_host_pks=None):
        # Update job event hosts m2m from host_name, propagate to parent events.
        extra_host_pks = set(extra_host_pks or [])
        hostnames = set()
        if self.host_name:
            hostnames.add(self.host_name)
        if self.event == 'playbook_on_stats':
            try:
                for v in self.event_data.values():
                    hostnames.update(v.keys())
            except AttributeError:  # In case event_data or v isn't a dict.
                pass
        qs = self.job.inventory.hosts.all()
        qs = qs.filter(models.Q(name__in=hostnames) | models.Q(pk__in=extra_host_pks))
        qs = qs.exclude(job_events__pk=self.id).only('id')
        for host in qs:
            self.hosts.add(host)
        if self.parent_uuid:
            parent = JobEvent.objects.filter(uuid=self.parent_uuid)
            if parent.exists():
                parent = parent[0]
                parent._update_hosts(qs.values_list('id', flat=True))

    def _hostnames(self):
        hostnames = set()
        try:
@@ -605,6 +578,17 @@ class BaseCommandEvent(CreatedModifiedModel):

    @classmethod
    def create_from_data(cls, **kwargs):
        #
        # ⚠️ D-D-D-DANGER ZONE ⚠️
        # This function is called by the callback receiver *once* for *every
        # event* emitted by Ansible as a playbook runs. That means that
        # changes to this function are _very_ susceptible to introducing
        # performance regressions (which the user will experience as "my
        # playbook stdout takes too long to show up"), *especially* code which
        # might invoke additional database queries per event.
        #
        # Proceed with caution!
        #
        # Convert the datetime for the event's creation
        # appropriately, and include a time zone for it.
        #
@@ -620,12 +604,8 @@ class BaseCommandEvent(CreatedModifiedModel):

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
        kwargs.pop('workflow_job_id', None)
        event = cls.objects.create(**kwargs)
        if isinstance(event, AdHocCommandEvent):
            analytics_logger.info(
                'Event data saved.',
                extra=dict(python_objects=dict(job_event=event))
            )
        event = cls(**kwargs)
        event._update_from_event_data()
        return event
    def get_event_display(self):
@@ -640,10 +620,15 @@ class BaseCommandEvent(CreatedModifiedModel):
    def get_host_status_counts(self):
        return create_host_status_counts(getattr(self, 'event_data', {}))

    def _update_from_event_data(self):
        pass


class AdHocCommandEvent(BaseCommandEvent):

    VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'workflow_job_id']
    VALID_KEYS = BaseCommandEvent.VALID_KEYS + [
        'ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id'
    ]

    class Meta:
        app_label = 'main'
@@ -719,34 +704,18 @@ class AdHocCommandEvent(BaseCommandEvent):
    def get_absolute_url(self, request=None):
        return reverse('api:ad_hoc_command_event_detail', kwargs={'pk': self.pk}, request=request)

    def save(self, *args, **kwargs):
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
    def _update_from_event_data(self):
        res = self.event_data.get('res', None)
        if self.event in self.FAILED_EVENTS:
            if not self.event_data.get('ignore_errors', False):
                self.failed = True
                if 'failed' not in update_fields:
                    update_fields.append('failed')
        if isinstance(res, dict) and res.get('changed', False):
            self.changed = True
            if 'changed' not in update_fields:
                update_fields.append('changed')
        self.host_name = self.event_data.get('host', '').strip()
        if 'host_name' not in update_fields:
            update_fields.append('host_name')
        if not self.host_id and self.host_name:
            host_qs = self.ad_hoc_command.inventory.hosts.filter(name=self.host_name)
            try:
                host_id = host_qs.only('id').values_list('id', flat=True)
                if host_id.exists():
                    self.host_id = host_id[0]
                    if 'host_id' not in update_fields:
                        update_fields.append('host_id')
            except (IndexError, AttributeError):
                pass
        super(AdHocCommandEvent, self).save(*args, **kwargs)

        analytics_logger.info(
            'Event data saved.',
            extra=dict(python_objects=dict(job_event=self))
        )


class InventoryUpdateEvent(BaseCommandEvent):
@@ -4,7 +4,6 @@
# Python
import datetime
import time
import itertools
import logging
import re
import copy
@@ -123,12 +122,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Total number of groups in this inventory.'),
    )
    groups_with_active_failures = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Number of groups in this inventory with active failures.'),
    )
    has_inventory_sources = models.BooleanField(
        default=False,
        editable=False,
@@ -339,139 +332,17 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):

        return data

    def update_host_computed_fields(self):
        '''
        Update computed fields for all hosts in this inventory.
        '''
        hosts_to_update = {}
        hosts_qs = self.hosts
        # Define queryset of all hosts with active failures.
        hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__failed=True).values_list('pk', flat=True)
        # Find all hosts that need the has_active_failures flag set.
        hosts_to_set = hosts_qs.filter(has_active_failures=False, pk__in=hosts_with_active_failures)
        for host_pk in hosts_to_set.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_active_failures'] = True
        # Find all hosts that need the has_active_failures flag cleared.
        hosts_to_clear = hosts_qs.filter(has_active_failures=True).exclude(pk__in=hosts_with_active_failures)
        for host_pk in hosts_to_clear.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_active_failures'] = False
        # Define queryset of all hosts with cloud inventory sources.
        hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)
        # Find all hosts that need the has_inventory_sources flag set.
        hosts_to_set = hosts_qs.filter(has_inventory_sources=False, pk__in=hosts_with_cloud_inventory)
        for host_pk in hosts_to_set.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_inventory_sources'] = True
        # Find all hosts that need the has_inventory_sources flag cleared.
        hosts_to_clear = hosts_qs.filter(has_inventory_sources=True).exclude(pk__in=hosts_with_cloud_inventory)
        for host_pk in hosts_to_clear.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_inventory_sources'] = False
        # Now apply updates to hosts where needed (in batches).
        all_update_pks = list(hosts_to_update.keys())

        def _chunk(items, chunk_size):
            for i, group in itertools.groupby(enumerate(items), lambda x: x[0] // chunk_size):
                yield (g[1] for g in group)

        for update_pks in _chunk(all_update_pks, 500):
            for host in hosts_qs.filter(pk__in=update_pks):
                host_updates = hosts_to_update[host.pk]
                for field, value in host_updates.items():
                    setattr(host, field, value)
                host.save(update_fields=host_updates.keys())

    def update_group_computed_fields(self):
        '''
        Update computed fields for all active groups in this inventory.
        '''
        group_children_map = self.get_group_children_map()
        group_hosts_map = self.get_group_hosts_map()
        active_host_pks = set(self.hosts.values_list('pk', flat=True))
        failed_host_pks = set(self.hosts.filter(last_job_host_summary__failed=True).values_list('pk', flat=True))
        # active_group_pks = set(self.groups.values_list('pk', flat=True))
        failed_group_pks = set()  # Update below as we check each group.
        groups_with_cloud_pks = set(self.groups.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True))
        groups_to_update = {}

        # Build list of group pks to check, starting with the groups at the
        # deepest level within the tree.
        root_group_pks = set(self.root_groups.values_list('pk', flat=True))
        group_depths = {}  # pk: max_depth

        def update_group_depths(group_pk, current_depth=0):
            max_depth = group_depths.get(group_pk, -1)
            # Arbitrarily limit depth to avoid hitting Python recursion limit (which defaults to 1000).
            if current_depth > 100:
                return
            if current_depth > max_depth:
                group_depths[group_pk] = current_depth
            for child_pk in group_children_map.get(group_pk, set()):
                update_group_depths(child_pk, current_depth + 1)
        for group_pk in root_group_pks:
            update_group_depths(group_pk)
        group_pks_to_check = [x[1] for x in sorted([(v, k) for k, v in group_depths.items()], reverse=True)]

        for group_pk in group_pks_to_check:
            # Get all children and host pks for this group.
            parent_pks_to_check = set([group_pk])
            parent_pks_checked = set()
            child_pks = set()
            host_pks = set()
            while parent_pks_to_check:
                for parent_pk in list(parent_pks_to_check):
                    c_ids = group_children_map.get(parent_pk, set())
                    child_pks.update(c_ids)
                    parent_pks_to_check.remove(parent_pk)
                    parent_pks_checked.add(parent_pk)
                    parent_pks_to_check.update(c_ids - parent_pks_checked)
                    h_ids = group_hosts_map.get(parent_pk, set())
                    host_pks.update(h_ids)
            # Define updates needed for this group.
            group_updates = groups_to_update.setdefault(group_pk, {})
            group_updates.update({
                'total_hosts': len(active_host_pks & host_pks),
                'has_active_failures': bool(failed_host_pks & host_pks),
                'hosts_with_active_failures': len(failed_host_pks & host_pks),
                'total_groups': len(child_pks),
                'groups_with_active_failures': len(failed_group_pks & child_pks),
                'has_inventory_sources': bool(group_pk in groups_with_cloud_pks),
            })
            if group_updates['has_active_failures']:
                failed_group_pks.add(group_pk)

        # Now apply updates to each group as needed (in batches).
        all_update_pks = list(groups_to_update.keys())
        for offset in range(0, len(all_update_pks), 500):
            update_pks = all_update_pks[offset:(offset + 500)]
            for group in self.groups.filter(pk__in=update_pks):
                group_updates = groups_to_update[group.pk]
                for field, value in list(group_updates.items()):
                    if getattr(group, field) != value:
                        setattr(group, field, value)
                    else:
                        group_updates.pop(field)
                if group_updates:
                    group.save(update_fields=group_updates.keys())

    def update_computed_fields(self, update_groups=True, update_hosts=True):
    def update_computed_fields(self):
        '''
        Update model fields that are computed from database relationships.
        '''
        logger.debug("Going to update inventory computed fields, pk={0}".format(self.pk))
        start_time = time.time()
        if update_hosts:
            self.update_host_computed_fields()
        if update_groups:
            self.update_group_computed_fields()
        active_hosts = self.hosts
        failed_hosts = active_hosts.filter(has_active_failures=True)
        failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
        active_groups = self.groups
        if self.kind == 'smart':
            active_groups = active_groups.none()
        failed_groups = active_groups.filter(has_active_failures=True)
        if self.kind == 'smart':
            active_inventory_sources = self.inventory_sources.none()
        else:
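The removed update_host_computed_fields batched its writes 500 hosts at a time with a small groupby-based chunker. The idiom is compact enough to be easy to misread, so here it is as a self-contained, runnable sketch:

    # Sketch: the itertools.groupby chunking idiom from the removed code.
    import itertools

    def chunk(items, chunk_size):
        # Group consecutive (index, item) pairs by index // chunk_size,
        # then strip the indexes off; yields one generator per chunk.
        for _, group in itertools.groupby(enumerate(items), lambda x: x[0] // chunk_size):
            yield (g[1] for g in group)

    print([list(c) for c in chunk(range(7), 3)])  # [[0, 1, 2], [3, 4, 5], [6]]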
@@ -482,7 +353,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
            'total_hosts': active_hosts.count(),
            'hosts_with_active_failures': failed_hosts.count(),
            'total_groups': active_groups.count(),
            'groups_with_active_failures': failed_groups.count(),
            'has_inventory_sources': bool(active_inventory_sources.count()),
            'total_inventory_sources': active_inventory_sources.count(),
            'inventory_sources_with_failures': failed_inventory_sources.count(),
@@ -545,7 +415,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        if (self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and
                connection.vendor != 'sqlite'):
            # Minimal update of host_count for smart inventory host filter changes
            self.update_computed_fields(update_groups=False, update_hosts=False)
            self.update_computed_fields()

    def delete(self, *args, **kwargs):
        self._update_host_smart_inventory_memeberships()
@@ -631,18 +501,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
        editable=False,
        on_delete=models.SET_NULL,
    )
    has_active_failures = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether the last job failed for this host.'),
    )
    has_inventory_sources = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether this host was created/updated from any external inventory sources.'),
    )
    inventory_sources = models.ManyToManyField(
        'InventorySource',
        related_name='hosts',
@@ -673,34 +531,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
    def get_absolute_url(self, request=None):
        return reverse('api:host_detail', kwargs={'pk': self.pk}, request=request)

    def update_computed_fields(self, update_inventory=True, update_groups=True):
        '''
        Update model fields that are computed from database relationships.
        '''
        has_active_failures = bool(self.last_job_host_summary and
                                   self.last_job_host_summary.failed)
        active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
        computed_fields = {
            'has_active_failures': has_active_failures,
            'has_inventory_sources': bool(active_inventory_sources.count()),
        }
        for field, value in computed_fields.items():
            if getattr(self, field) != value:
                setattr(self, field, value)
            else:
                computed_fields.pop(field)
        if computed_fields:
            self.save(update_fields=computed_fields.keys())
        # Groups and inventory may also need to be updated when host fields
        # change.
        # NOTE: I think this is no longer needed
        # if update_groups:
        #     for group in self.all_groups:
        #         group.update_computed_fields()
        # if update_inventory:
        #     self.inventory.update_computed_fields(update_groups=False,
        #                                           update_hosts=False)
    # Rebuild summary fields cache
    variables_dict = VarsDictProperty('variables')

    @property
@@ -815,42 +645,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
        blank=True,
        help_text=_('Hosts associated directly with this group.'),
    )
    total_hosts = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Total number of hosts directly or indirectly in this group.'),
    )
    has_active_failures = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether this group has any hosts with active failures.'),
    )
    hosts_with_active_failures = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Number of hosts in this group with active failures.'),
    )
    total_groups = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Total number of child groups contained within this group.'),
    )
    groups_with_active_failures = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Number of child groups within this group that have active failures.'),
    )
    has_inventory_sources = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether this group was created/updated from any external inventory sources.'),
    )
    inventory_sources = models.ManyToManyField(
        'InventorySource',
        related_name='groups',
@@ -925,32 +719,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
            mark_actual()
        activity_stream_delete(None, self)
    def update_computed_fields(self):
        '''
        Update model fields that are computed from database relationships.
        '''
        active_hosts = self.all_hosts
        failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
        active_groups = self.all_children
        # FIXME: May not be accurate unless we always update groups depth-first.
        failed_groups = active_groups.filter(has_active_failures=True)
        active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
        computed_fields = {
            'total_hosts': active_hosts.count(),
            'has_active_failures': bool(failed_hosts.count()),
            'hosts_with_active_failures': failed_hosts.count(),
            'total_groups': active_groups.count(),
            'groups_with_active_failures': failed_groups.count(),
            'has_inventory_sources': bool(active_inventory_sources.count()),
        }
        for field, value in computed_fields.items():
            if getattr(self, field) != value:
                setattr(self, field, value)
            else:
                computed_fields.pop(field)
        if computed_fields:
            self.save(update_fields=computed_fields.keys())

    variables_dict = VarsDictProperty('variables')

    def get_all_parents(self, except_pks=None):
@@ -1556,7 +1324,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
            self.update()
        if not getattr(_inventory_updates, 'is_updating', False):
            if self.inventory is not None:
                self.inventory.update_computed_fields(update_groups=False, update_hosts=False)
                self.inventory.update_computed_fields()

    def _get_current_status(self):
        if self.source:
@@ -2616,6 +2384,9 @@ class satellite6(PluginFileInjector):
        group_patterns = '[]'
        group_prefix = 'foreman_'
        want_hostcollections = 'False'
        want_ansible_ssh_host = 'False'
        rich_params = 'False'
        want_facts = 'True'
        foreman_opts = dict(inventory_update.source_vars_dict.items())
        foreman_opts.setdefault('ssl_verify', 'False')
        for k, v in foreman_opts.items():
@@ -2625,6 +2396,12 @@ class satellite6(PluginFileInjector):
                group_prefix = v
            elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
                want_hostcollections = v
            elif k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
                want_ansible_ssh_host = v
            elif k == 'satellite6_rich_params' and isinstance(v, bool):
                rich_params = v
            elif k == 'satellite6_want_facts' and isinstance(v, bool):
                want_facts = v
            else:
                cp.set(section, k, str(v))

@@ -2636,9 +2413,11 @@ class satellite6(PluginFileInjector):
        section = 'ansible'
        cp.add_section(section)
        cp.set(section, 'group_patterns', group_patterns)
        cp.set(section, 'want_facts', 'True')
        cp.set(section, 'want_facts', str(want_facts))
        cp.set(section, 'want_hostcollections', str(want_hostcollections))
        cp.set(section, 'group_prefix', group_prefix)
        cp.set(section, 'want_ansible_ssh_host', str(want_ansible_ssh_host))
        cp.set(section, 'rich_params', str(rich_params))

        section = 'cache'
        cp.add_section(section)
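The satellite6 injector assembles foreman.ini for the inventory script with ConfigParser, which is why every new toggle above is coerced through str(): ini values are untyped text. The mechanics, reduced to a runnable sketch (the option values and output path are illustrative):

    # Sketch: writing the [ansible] section the way the injector does.
    from configparser import ConfigParser

    cp = ConfigParser()
    section = 'ansible'
    cp.add_section(section)
    cp.set(section, 'group_patterns', '[]')
    cp.set(section, 'want_facts', str(True))             # booleans become 'True'/'False'
    cp.set(section, 'want_hostcollections', str(False))
    cp.set(section, 'rich_params', str(False))

    with open('foreman.ini', 'w') as f:                  # illustrative path
        cp.write(f)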
@@ -13,6 +13,7 @@ from urllib.parse import urljoin

# Django
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
#from django.core.cache import cache
from django.utils.encoding import smart_str
@@ -28,7 +29,7 @@ from awx.api.versioning import reverse
from awx.main.models.base import (
    BaseModel, CreatedModifiedModel,
    prevent_search, accepts_json,
    JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
    JOB_TYPE_CHOICES, NEW_JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
    VarsDictProperty
)
from awx.main.models.events import JobEvent, SystemJobEvent
@@ -204,6 +205,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
        app_label = 'main'
        ordering = ('name',)

    job_type = models.CharField(
        max_length=64,
        choices=NEW_JOB_TYPE_CHOICES,
        default='run',
    )
    host_config_key = prevent_search(models.CharField(
        max_length=1024,
        blank=True,
@@ -293,6 +299,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
    def resources_needed_to_start(self):
        return [fd for fd in ['project', 'inventory'] if not getattr(self, '{}_id'.format(fd))]

    def clean_forks(self):
        if settings.MAX_FORKS > 0 and self.forks > settings.MAX_FORKS:
            raise ValidationError(_(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.'))
        return self.forks
    def create_job(self, **kwargs):
        '''
        Create a new job based on this template.
@@ -1060,7 +1071,7 @@ class JobHostSummary(CreatedModifiedModel):
    processed = models.PositiveIntegerField(default=0, editable=False)
    rescued = models.PositiveIntegerField(default=0, editable=False)
    skipped = models.PositiveIntegerField(default=0, editable=False)
    failed = models.BooleanField(default=False, editable=False)
    failed = models.BooleanField(default=False, editable=False, db_index=True)

    def __str__(self):
        host = getattr_dne(self, 'host')
@@ -1095,7 +1106,6 @@ class JobHostSummary(CreatedModifiedModel):
            update_fields.append('last_job_host_summary_id')
        if update_fields:
            self.host.save(update_fields=update_fields)
        #self.host.update_computed_fields()


class SystemJobOptions(BaseModel):
@@ -274,7 +274,7 @@ class JobNotificationMixin(object):
                {'playbook_counts': ['play_count', 'task_count']},
                {'summary_fields': [{'inventory': ['id', 'name', 'description', 'has_active_failures',
                                                   'total_hosts', 'hosts_with_active_failures', 'total_groups',
                                                   'groups_with_active_failures', 'has_inventory_sources',
                                                   'has_inventory_sources',
                                                   'total_inventory_sources', 'inventory_sources_with_failures',
                                                   'organization_id', 'kind']},
                                    {'project': ['id', 'name', 'description', 'status', 'scm_type']},
@@ -327,7 +327,6 @@ class JobNotificationMixin(object):
                          'username': 'admin'},
             'instance_group': {'id': 1, 'name': 'tower'},
             'inventory': {'description': 'Sample inventory description',
                           'groups_with_active_failures': 0,
                           'has_active_failures': False,
                           'has_inventory_sources': False,
                           'hosts_with_active_failures': 0,
@@ -124,11 +124,6 @@ class OAuth2AccessToken(AbstractAccessToken):
    def is_valid(self, scopes=None):
        valid = super(OAuth2AccessToken, self).is_valid(scopes)
        if valid:
            try:
                self.validate_external_users()
            except oauth2.AccessDeniedError:
                logger.exception(f'Failed to authenticate {self.user.username}')
                return False
            self.last_used = now()

            def _update_last_used():
@@ -146,5 +141,6 @@ class OAuth2AccessToken(AbstractAccessToken):
            ).format(external_account))

    def save(self, *args, **kwargs):
        self.validate_external_users()
        if not self.pk:
            self.validate_external_users()
        super(OAuth2AccessToken, self).save(*args, **kwargs)
@@ -623,6 +623,11 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        editable=False,
        help_text=_("The date and time the job was queued for starting."),
    )
    dependencies_processed = models.BooleanField(
        default=False,
        editable=False,
        help_text=_("If True, the task manager has already processed potential dependencies for this job.")
    )
    finished = models.DateTimeField(
        null=True,
        default=None,
@@ -630,6 +635,13 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        help_text=_("The date and time the job finished execution."),
        db_index=True,
    )
    canceled_on = models.DateTimeField(
        null=True,
        default=None,
        editable=False,
        help_text=_("The date and time when the cancel request was sent."),
        db_index=True,
    )
    elapsed = models.DecimalField(
        max_digits=12,
        decimal_places=3,
@@ -833,7 +845,12 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
                self.unified_job_template = self._get_parent_instance()
                if 'unified_job_template' not in update_fields:
                    update_fields.append('unified_job_template')

        if self.cancel_flag and not self.canceled_on:
            # Record the 'canceled' time.
            self.canceled_on = now()
            if 'canceled_on' not in update_fields:
                update_fields.append('canceled_on')
        # Okay; we're done. Perform the actual save.
        result = super(UnifiedJob, self).save(*args, **kwargs)

@@ -997,6 +1014,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
                dir=settings.JOBOUTPUT_ROOT,
                encoding='utf-8'
            )
            from awx.main.tasks import purge_old_stdout_files  # circular import
            purge_old_stdout_files.apply_async()

        # Before the addition of event-based stdout, older versions of
        # awx stored stdout as raw text blobs in a certain database column
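The save() hunk above stamps canceled_on exactly once: the first save after cancel_flag goes up records the time, and update_fields is extended so that a partial save still persists the new column. The pattern in miniature, on a generic model rather than AWX's UnifiedJob:

    # Sketch: a one-shot timestamp recorded during save(), respecting update_fields.
    from django.db import models
    from django.utils.timezone import now

    class CancelableJob(models.Model):  # illustrative model
        cancel_flag = models.BooleanField(default=False)
        canceled_on = models.DateTimeField(null=True, default=None, editable=False)

        def save(self, *args, **kwargs):
            update_fields = kwargs.get('update_fields', [])
            if self.cancel_flag and not self.canceled_on:
                self.canceled_on = now()  # stamped once, on the first save after cancel
                if 'canceled_on' not in update_fields:
                    update_fields.append('canceled_on')
            return super().save(*args, **kwargs)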
@@ -79,6 +79,11 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
        symmetrical=False,
        related_name='%(class)ss_always',
    )
    all_parents_must_converge = models.BooleanField(
        default=False,
        help_text=_("If enabled then the node will only run if all of the parent nodes "
                    "have met the criteria to reach this node")
    )
    unified_job_template = models.ForeignKey(
        'UnifiedJobTemplate',
        related_name='%(class)ss',
@@ -102,7 +107,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
        '''
        return ['workflow_job', 'unified_job_template',
                'extra_data', 'survey_passwords',
                'inventory', 'credentials', 'char_prompts']
                'inventory', 'credentials', 'char_prompts', 'all_parents_must_converge']

    def create_workflow_job_node(self, **kwargs):
        '''
@@ -130,7 +135,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
    FIELDS_TO_PRESERVE_AT_COPY = [
        'unified_job_template', 'workflow_job_template', 'success_nodes', 'failure_nodes',
        'always_nodes', 'credentials', 'inventory', 'extra_data', 'survey_passwords',
        'char_prompts'
        'char_prompts', 'all_parents_must_converge'
    ]
    REENCRYPTION_BLACKLIST_AT_COPY = ['extra_data', 'survey_passwords']
@@ -89,8 +89,8 @@ class SimpleDAG(object):
                run_status(n['node_object']),
                color
            )
        for label, edges in self.node_from_edges_by_label.iteritems():
            for from_node, to_nodes in edges.iteritems():
        for label, edges in self.node_from_edges_by_label.items():
            for from_node, to_nodes in edges.items():
                for to_node in to_nodes:
                    doc += "%s -> %s [ label=\"%s\" ];\n" % (
                        run_status(self.nodes[from_node]['node_object']),
@@ -140,36 +140,36 @@ class SimpleDAG(object):
    def find_ord(self, obj):
        return self.node_obj_to_node_index.get(obj, None)

    def _get_dependencies_by_label(self, node_index, label):
    def _get_children_by_label(self, node_index, label):
        return [self.nodes[index] for index in
                self.node_from_edges_by_label.get(label, {})
                    .get(node_index, [])]

    def get_dependencies(self, obj, label=None):
    def get_children(self, obj, label=None):
        this_ord = self.find_ord(obj)
        nodes = []
        if label:
            return self._get_dependencies_by_label(this_ord, label)
            return self._get_children_by_label(this_ord, label)
        else:
            nodes = []
            for l in self.node_from_edges_by_label.keys():
                nodes.extend(self._get_dependencies_by_label(this_ord, l))
                nodes.extend(self._get_children_by_label(this_ord, l))
            return nodes

    def _get_dependents_by_label(self, node_index, label):
    def _get_parents_by_label(self, node_index, label):
        return [self.nodes[index] for index in
                self.node_to_edges_by_label.get(label, {})
                    .get(node_index, [])]

    def get_dependents(self, obj, label=None):
    def get_parents(self, obj, label=None):
        this_ord = self.find_ord(obj)
        nodes = []
        if label:
            return self._get_dependents_by_label(this_ord, label)
            return self._get_parents_by_label(this_ord, label)
        else:
            nodes = []
            for l in self.node_to_edges_by_label.keys():
                nodes.extend(self._get_dependents_by_label(this_ord, l))
                nodes.extend(self._get_parents_by_label(this_ord, l))
            return nodes

    def get_root_nodes(self):
@@ -188,7 +188,7 @@ class SimpleDAG(object):
        while stack:
            node_obj = stack.pop()

            children = [node['node_object'] for node in self.get_dependencies(node_obj)]
            children = [node['node_object'] for node in self.get_children(node_obj)]
            children_to_add = list(filter(lambda node_obj: node_obj not in node_objs_visited, children))

            if children_to_add:
@@ -212,7 +212,7 @@ class SimpleDAG(object):
            if obj.id in obj_ids_processed:
                return

            for child in self.get_dependencies(obj):
            for child in self.get_children(obj):
                visit(child)
            obj_ids_processed.add(obj.id)
            nodes_sorted.appendleft(node)
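The get_dependencies/get_dependents to get_children/get_parents rename is purely about readability: edges point from parent to child, so a node's 'children' are the nodes its labeled edges lead to, and 'parents' the reverse lookup. A toy adjacency-list version of the same two queries:

    # Sketch: child/parent lookups on a labeled adjacency list.
    children_by_label = {
        'success_nodes': {'A': ['B'], 'B': ['C']},
        'failure_nodes': {'A': ['C']},
    }

    def get_children(node, label):
        return children_by_label.get(label, {}).get(node, [])

    def get_parents(node, label):
        return [p for p, kids in children_by_label.get(label, {}).items() if node in kids]

    print(get_children('A', 'success_nodes'))  # ['B']
    print(get_parents('C', 'failure_nodes'))   # ['A']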
@@ -55,7 +55,7 @@ class WorkflowDAG(SimpleDAG):

    def _are_relevant_parents_finished(self, node):
        obj = node['node_object']
        parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
        parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
        for p in parent_nodes:
            if p.do_not_run is True:
                continue
@@ -69,33 +69,55 @@ class WorkflowDAG(SimpleDAG):
                return False
        return True

    def _all_parents_met_convergence_criteria(self, node):
        # This function takes any node and checks that all its parents have met their criteria to run the child.
        # It returns a boolean and is really only useful if the node is an ALL convergence node; it is
        # intended to be used in conjunction with the node property `all_parents_must_converge`.
        obj = node['node_object']
        parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
        for p in parent_nodes:
            # node has a status
            if p.job and p.job.status in ["successful", "failed"]:
                if p.job and p.job.status == "successful":
                    status = "success_nodes"
                elif p.job and p.job.status == "failed":
                    status = "failure_nodes"
                # check that the node's status matches either a pathway of the same status or an always path.
                if (p not in [node['node_object'] for node in self.get_parents(obj, status)]
                        and p not in [node['node_object'] for node in self.get_parents(obj, "always_nodes")]):
                    return False
        return True

    def bfs_nodes_to_run(self):
        nodes = self.get_root_nodes()
        nodes_found = []
        node_ids_visited = set()

        for index, n in enumerate(nodes):
            obj = n['node_object']
            if obj.id in node_ids_visited:
                continue
            node_ids_visited.add(obj.id)

            if obj.do_not_run is True:
                continue

            if obj.job:
            elif obj.job:
                if obj.job.status in ['failed', 'error', 'canceled']:
                    nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
                                 self.get_dependencies(obj, 'always_nodes'))
                    nodes.extend(self.get_children(obj, 'failure_nodes') +
                                 self.get_children(obj, 'always_nodes'))
                elif obj.job.status == 'successful':
                    nodes.extend(self.get_dependencies(obj, 'success_nodes') +
                                 self.get_dependencies(obj, 'always_nodes'))
                    nodes.extend(self.get_children(obj, 'success_nodes') +
                                 self.get_children(obj, 'always_nodes'))
            elif obj.unified_job_template is None:
                nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
                             self.get_dependencies(obj, 'always_nodes'))
                nodes.extend(self.get_children(obj, 'failure_nodes') +
                             self.get_children(obj, 'always_nodes'))
            else:
                if self._are_relevant_parents_finished(n):
                # This catches root nodes or ANY convergence nodes
                if not obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
                    nodes_found.append(n)
                # This catches ALL convergence nodes
                elif obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
                    if self._all_parents_met_convergence_criteria(n):
                        nodes_found.append(n)

        return [n['node_object'] for n in nodes_found]

    def cancel_node_jobs(self):
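Putting the new toggle in words: an ALL-convergence node runs only when every parent has finished and reached it along an edge that matches the parent's outcome, i.e. a successful parent via success_nodes or always_nodes, a failed parent via failure_nodes or always_nodes. A runnable toy check of that rule (statuses and edge labels mirror the code above; the data is made up):

    # Sketch: the ALL-convergence rule in isolation.
    # Maps parent name -> (job status, label of the edge leading to the child).
    parents = {
        'p1': ('successful', 'success_nodes'),
        'p2': ('failed', 'failure_nodes'),
        'p3': ('successful', 'always_nodes'),
    }

    def all_parents_met_convergence_criteria(parents):
        for status, edge in parents.values():
            expected = 'success_nodes' if status == 'successful' else 'failure_nodes'
            if edge not in (expected, 'always_nodes'):
                return False
        return True

    print(all_parents_met_convergence_criteria(parents))  # True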
@@ -135,8 +157,8 @@ class WorkflowDAG(SimpleDAG):

        for node in failed_nodes:
            obj = node['node_object']
            if (len(self.get_dependencies(obj, 'failure_nodes')) +
                    len(self.get_dependencies(obj, 'always_nodes'))) == 0:
            if (len(self.get_children(obj, 'failure_nodes')) +
                    len(self.get_children(obj, 'always_nodes'))) == 0:
                if obj.unified_job_template is None:
                    res = True
                    failed_unified_job_template_node_ids.append(str(obj.id))
@@ -190,35 +212,48 @@ class WorkflowDAG(SimpleDAG):
                pass
            elif p.job:
                if p.job.status == 'successful':
                    if node in (self.get_dependencies(p, 'success_nodes') +
                                self.get_dependencies(p, 'always_nodes')):
                    if node in (self.get_children(p, 'success_nodes') +
                                self.get_children(p, 'always_nodes')):
                        return False
                elif p.job.status in ['failed', 'error', 'canceled']:
                    if node in (self.get_dependencies(p, 'failure_nodes') +
                                self.get_dependencies(p, 'always_nodes')):
                    if node in (self.get_children(p, 'failure_nodes') +
                                self.get_children(p, 'always_nodes')):
                        return False
                else:
                    return False
            elif p.do_not_run is False and p.unified_job_template is None:
                if node in (self.get_dependencies(p, 'failure_nodes') +
                            self.get_dependencies(p, 'always_nodes')):
            elif not p.do_not_run and p.unified_job_template is None:
                if node in (self.get_children(p, 'failure_nodes') +
                            self.get_children(p, 'always_nodes')):
                    return False
            else:
                return False
        return True

    r'''
    determine if the current node is a convergence node by checking if all the
    parents are finished then checking to see if all parents meet the needed
    path criteria to run the convergence child.
    (i.e. parent must fail, parent must succeed, etc. to proceed)

    Return a list object
    '''
    def mark_dnr_nodes(self):
        root_nodes = self.get_root_nodes()
        nodes_marked_do_not_run = []

        for node in self.sort_nodes_topological():
            obj = node['node_object']

            if obj.do_not_run is False and not obj.job and node not in root_nodes:
                parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
                if self._are_all_nodes_dnr_decided(parent_nodes):
                    if self._should_mark_node_dnr(node, parent_nodes):
            parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
            if not obj.do_not_run and not obj.job and node not in root_nodes:
                if obj.all_parents_must_converge:
                    if any(p.do_not_run for p in parent_nodes) or not self._all_parents_met_convergence_criteria(node):
                        obj.do_not_run = True
                        nodes_marked_do_not_run.append(node)
                else:
                    if self._are_all_nodes_dnr_decided(parent_nodes):
                        if self._should_mark_node_dnr(node, parent_nodes):
                            obj.do_not_run = True
                            nodes_marked_do_not_run.append(node)

        return [n['node_object'] for n in nodes_marked_do_not_run]
@@ -15,7 +15,6 @@ class DependencyGraph(object):
    INVENTORY_UPDATES = 'inventory_updates'

    JOB_TEMPLATE_JOBS = 'job_template_jobs'
    JOB_INVENTORY_IDS = 'job_inventory_ids'

    SYSTEM_JOB = 'system_job'
    INVENTORY_SOURCE_UPDATES = 'inventory_source_updates'
@@ -40,8 +39,6 @@ class DependencyGraph(object):
        Track runnable job related project and inventory to ensure updates
        don't run while a job needing those resources is running.
        '''
        # inventory_id -> True / False
        self.data[self.JOB_INVENTORY_IDS] = {}

        # inventory_source_id -> True / False
        self.data[self.INVENTORY_SOURCE_UPDATES] = {}
@@ -77,7 +74,6 @@ class DependencyGraph(object):
        self.data[self.INVENTORY_SOURCE_UPDATES][inventory_source_id] = False

    def mark_job_template_job(self, job):
        self.data[self.JOB_INVENTORY_IDS][job.inventory_id] = False
        self.data[self.JOB_TEMPLATE_JOBS][job.job_template_id] = False

    def mark_workflow_job(self, job):
@@ -87,8 +83,7 @@ class DependencyGraph(object):
        return self.data[self.PROJECT_UPDATES].get(job.project_id, True)

    def can_inventory_update_run(self, job):
        return self.data[self.JOB_INVENTORY_IDS].get(job.inventory_source.inventory_id, True) and \
            self.data[self.INVENTORY_SOURCE_UPDATES].get(job.inventory_source_id, True)
        return self.data[self.INVENTORY_SOURCE_UPDATES].get(job.inventory_source_id, True)

    def can_job_run(self, job):
        if self.data[self.PROJECT_UPDATES].get(job.project_id, True) is True and \
@@ -23,6 +23,7 @@ from awx.main.models import (
    Project,
    ProjectUpdate,
    SystemJob,
    UnifiedJob,
    WorkflowApproval,
    WorkflowJob,
    WorkflowJobTemplate

@@ -74,21 +75,6 @@ class TaskManager():
                           key=lambda task: task.created)
        return all_tasks

    def get_latest_project_update_tasks(self, all_sorted_tasks):
        project_ids = set()
        for task in all_sorted_tasks:
            if isinstance(task, Job):
                project_ids.add(task.project_id)
        return ProjectUpdate.objects.filter(id__in=project_ids)

    def get_latest_inventory_update_tasks(self, all_sorted_tasks):
        inventory_ids = set()
        for task in all_sorted_tasks:
            if isinstance(task, Job):
                inventory_ids.add(task.inventory_id)
        return InventoryUpdate.objects.filter(id__in=inventory_ids)

    def get_running_workflow_jobs(self):
        graph_workflow_jobs = [wf for wf in
                               WorkflowJob.objects.filter(status='running')]

@@ -200,9 +186,6 @@ class TaskManager():
            schedule_task_manager()
        return result

    def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
        return [{'type': j.model_to_str(), 'id': j.id} for j in job_obj.dependent_jobs.all()]

    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        from awx.main.tasks import handle_work_error, handle_work_success

@@ -364,10 +347,6 @@ class TaskManager():
    def should_update_inventory_source(self, job, latest_inventory_update):
        now = tz_now()

        # Already processed dependencies for this job
        if job.dependent_jobs.all():
            return False

        if latest_inventory_update is None:
            return True
        '''

@@ -393,8 +372,6 @@ class TaskManager():

    def should_update_related_project(self, job, latest_project_update):
        now = tz_now()
        if job.dependent_jobs.all():
            return False

        if latest_project_update is None:
            return True

@@ -426,18 +403,21 @@ class TaskManager():
            return True
        return False

    def generate_dependencies(self, task):
        dependencies = []
        if type(task) is Job:
    def generate_dependencies(self, undeped_tasks):
        created_dependencies = []
        for task in undeped_tasks:
            dependencies = []
            if not type(task) is Job:
                continue
            # TODO: Can remove task.project None check after scan-job-default-playbook is removed
            if task.project is not None and task.project.scm_update_on_launch is True:
                latest_project_update = self.get_latest_project_update(task)
                if self.should_update_related_project(task, latest_project_update):
                    project_task = self.create_project_update(task)
                    created_dependencies.append(project_task)
                    dependencies.append(project_task)
                else:
                    if latest_project_update.status in ['waiting', 'pending', 'running']:
                        dependencies.append(latest_project_update)
                    dependencies.append(latest_project_update)

            # Inventory created 2 seconds behind job
            try:

@@ -452,56 +432,20 @@ class TaskManager():
                latest_inventory_update = self.get_latest_inventory_update(inventory_source)
                if self.should_update_inventory_source(task, latest_inventory_update):
                    inventory_task = self.create_inventory_update(task, inventory_source)
                    created_dependencies.append(inventory_task)
                    dependencies.append(inventory_task)
                else:
                    if latest_inventory_update.status in ['waiting', 'pending', 'running']:
                        dependencies.append(latest_inventory_update)
                    dependencies.append(latest_inventory_update)

            if len(dependencies) > 0:
                self.capture_chain_failure_dependencies(task, dependencies)
        return dependencies

    def process_dependencies(self, dependent_task, dependency_tasks):
        for task in dependency_tasks:
            if self.is_job_blocked(task):
                logger.debug("Dependent {} is blocked from running".format(task.log_format))
                continue
            preferred_instance_groups = task.preferred_instance_groups
            found_acceptable_queue = False
            idle_instance_that_fits = None
            for rampart_group in preferred_instance_groups:
                if idle_instance_that_fits is None:
                    idle_instance_that_fits = rampart_group.find_largest_idle_instance()
                if not rampart_group.is_containerized and self.get_remaining_capacity(rampart_group.name) <= 0:
                    logger.debug("Skipping group {} capacity <= 0".format(rampart_group.name))
                    continue

                execution_instance = rampart_group.fit_task_to_most_remaining_capacity_instance(task)
                if execution_instance:
                    logger.debug("Starting dependent {} in group {} instance {}".format(
                        task.log_format, rampart_group.name, execution_instance.hostname))
                elif not execution_instance and idle_instance_that_fits:
                    if not rampart_group.is_containerized:
                        execution_instance = idle_instance_that_fits
                        logger.debug("Starting dependent {} in group {} on idle instance {}".format(
                            task.log_format, rampart_group.name, execution_instance.hostname))
                if execution_instance or rampart_group.is_containerized:
                    self.graph[rampart_group.name]['graph'].add_job(task)
                    tasks_to_fail = [t for t in dependency_tasks if t != task]
                    tasks_to_fail += [dependent_task]
                    self.start_task(task, rampart_group, tasks_to_fail, execution_instance)
                    found_acceptable_queue = True
                    break
                else:
                    logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
                        rampart_group.name, task.log_format, task.task_impact))
            if not found_acceptable_queue:
                logger.debug("Dependent {} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
        UnifiedJob.objects.filter(pk__in=[task.pk for task in undeped_tasks]).update(dependencies_processed=True)
        return created_dependencies
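Shape of the change: generate_dependencies now takes the whole batch of pending tasks whose dependencies were never processed, returns only the newly created updates, and flips dependencies_processed for the batch in a single bulk UPDATE (the UnifiedJob queryset call at the end of the hunk). A hypothetical skeleton of that flow, with the per-task work reduced to a callable:

def generate_dependencies(undeped_tasks, make_updates_for):
    created = []
    for task in undeped_tasks:
        # project/inventory updates for this task, possibly none
        created.extend(make_updates_for(task))
    # one bulk write instead of a per-task save; mirrors:
    # UnifiedJob.objects.filter(pk__in=[t.pk for t in undeped_tasks]) \
    #                   .update(dependencies_processed=True)
    return created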
    def process_pending_tasks(self, pending_tasks):
        running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
        for task in pending_tasks:
            self.process_dependencies(task, self.generate_dependencies(task))
            if self.is_job_blocked(task):
                logger.debug("{} is blocked from running".format(task.log_format))
                continue

@@ -574,13 +518,6 @@ class TaskManager():
    def calculate_capacity_consumed(self, tasks):
        self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)

    def would_exceed_capacity(self, task, instance_group):
        current_capacity = self.graph[instance_group]['consumed_capacity']
        capacity_total = self.graph[instance_group]['capacity_total']
        if current_capacity == 0:
            return False
        return (task.task_impact + current_capacity > capacity_total)

    def consume_capacity(self, task, instance_group):
        logger.debug('{} consumed {} capacity units from {} with prior total of {}'.format(
            task.log_format, task.task_impact, instance_group,

@@ -598,6 +535,9 @@ class TaskManager():
        self.process_running_tasks(running_tasks)

        pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']
        undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
        dependencies = self.generate_dependencies(undeped_tasks)
        self.process_pending_tasks(dependencies)
        self.process_pending_tasks(pending_tasks)

    def _schedule(self):
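Read together, the per-cycle ordering the hunk establishes is: collect pending tasks, generate dependencies once for the never-processed ones, then offer the freshly created dependencies to the scheduler before the jobs that wait on them. An annotated restatement of those five lines:

pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']
undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
dependencies = self.generate_dependencies(undeped_tasks)  # newly created updates only
self.process_pending_tasks(dependencies)   # give dependencies first shot at capacity
self.process_pending_tasks(pending_tasks)  # then the jobs that wait on them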
@@ -5,15 +5,11 @@ import logging
# AWX
from awx.main.scheduler import TaskManager
from awx.main.dispatch.publish import task
from awx.main.utils.db import migration_in_progress_check_or_relase

logger = logging.getLogger('awx.main.scheduler')


@task()
def run_task_manager():
    if migration_in_progress_check_or_relase():
        logger.debug("Not running task manager because migration is in progress.")
        return
    logger.debug("Running Tower task manager.")
    TaskManager().schedule()
@@ -10,6 +10,7 @@ import pkg_resources
import sys

# Django
from django.db import connection
from django.conf import settings
from django.db.models.signals import (
    pre_save,

@@ -30,12 +31,11 @@ from crum.signals import current_user_getter

# AWX
from awx.main.models import (
    ActivityStream, AdHocCommandEvent, Group, Host, InstanceGroup, Inventory,
    InventorySource, InventoryUpdateEvent, Job, JobEvent, JobHostSummary,
    JobTemplate, OAuth2AccessToken, Organization, Project, ProjectUpdateEvent,
    Role, SystemJob, SystemJobEvent, SystemJobTemplate, UnifiedJob,
    UnifiedJobTemplate, User, UserSessionMembership, WorkflowJobTemplateNode,
    WorkflowApproval, WorkflowApprovalTemplate, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR
    ActivityStream, Group, Host, InstanceGroup, Inventory, InventorySource,
    Job, JobHostSummary, JobTemplate, OAuth2AccessToken, Organization, Project,
    Role, SystemJob, SystemJobTemplate, UnifiedJob, UnifiedJobTemplate, User,
    UserSessionMembership, WorkflowJobTemplateNode, WorkflowApproval,
    WorkflowApprovalTemplate, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR
)
from awx.main.constants import CENSOR_VALUE
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps

@@ -72,77 +72,6 @@ def get_current_user_or_none():
    return u


def emit_event_detail(serializer, relation, **kwargs):
    instance = kwargs['instance']
    created = kwargs['created']
    if created:
        event_serializer = serializer(instance)
        consumers.emit_channel_notification(
            '-'.join([event_serializer.get_group_name(instance), str(getattr(instance, relation))]),
            event_serializer.data
        )


def emit_job_event_detail(sender, **kwargs):
    from awx.api import serializers
    emit_event_detail(serializers.JobEventWebSocketSerializer, 'job_id', **kwargs)


def emit_ad_hoc_command_event_detail(sender, **kwargs):
    from awx.api import serializers
    emit_event_detail(serializers.AdHocCommandEventWebSocketSerializer, 'ad_hoc_command_id', **kwargs)


def emit_project_update_event_detail(sender, **kwargs):
    from awx.api import serializers
    emit_event_detail(serializers.ProjectUpdateEventWebSocketSerializer, 'project_update_id', **kwargs)


def emit_inventory_update_event_detail(sender, **kwargs):
    from awx.api import serializers
    emit_event_detail(serializers.InventoryUpdateEventWebSocketSerializer, 'inventory_update_id', **kwargs)


def emit_system_job_event_detail(sender, **kwargs):
    from awx.api import serializers
    emit_event_detail(serializers.SystemJobEventWebSocketSerializer, 'system_job_id', **kwargs)


def emit_update_inventory_computed_fields(sender, **kwargs):
    logger.debug("In update inventory computed fields")
    if getattr(_inventory_updates, 'is_updating', False):
        return
    instance = kwargs['instance']
    if sender == Group.hosts.through:
        sender_name = 'group.hosts'
    elif sender == Group.parents.through:
        sender_name = 'group.parents'
    elif sender == Host.inventory_sources.through:
        sender_name = 'host.inventory_sources'
    elif sender == Group.inventory_sources.through:
        sender_name = 'group.inventory_sources'
    else:
        sender_name = str(sender._meta.verbose_name)
    if kwargs['signal'] == post_save:
        if sender == Job:
            return
        sender_action = 'saved'
    elif kwargs['signal'] == post_delete:
        sender_action = 'deleted'
    elif kwargs['signal'] == m2m_changed and kwargs['action'] in ('post_add', 'post_remove', 'post_clear'):
        sender_action = 'changed'
    else:
        return
    logger.debug('%s %s, updating inventory computed fields: %r %r',
                 sender_name, sender_action, sender, kwargs)
    try:
        inventory = instance.inventory
    except Inventory.DoesNotExist:
        pass
    else:
        update_inventory_computed_fields.delay(inventory.id, True)


def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
    if getattr(_inventory_updates, 'is_updating', False):
        return

@@ -161,7 +90,9 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
        pass
    else:
        if inventory is not None:
            update_inventory_computed_fields.delay(inventory.id, True)
            connection.on_commit(
                lambda: update_inventory_computed_fields.delay(inventory.id)
            )
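Replacing the direct .delay(...) call with connection.on_commit(...) is the standard Django pattern for deferring asynchronous work until the surrounding transaction commits, so the worker never reads pre-commit rows. A generic sketch (not AWX-specific):

from django.db import transaction

def queue_after_commit(task_delay, *args):
    # task_delay would be e.g. update_inventory_computed_fields.delay;
    # the lambda runs only if (and when) the transaction commits
    transaction.on_commit(lambda: task_delay(*args))

If the transaction rolls back, the callback is simply discarded, which is exactly the behavior the later test helper immediate_on_commit has to fake in order to run these hooks synchronously.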
def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwargs):

@@ -244,10 +175,6 @@ def connect_computed_field_signals():
    post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
    post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
    post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
    m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
    m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.parents.through)
    m2m_changed.connect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
    m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
    post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
    post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
    post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job)

@@ -258,11 +185,6 @@ connect_computed_field_signals()

post_save.connect(save_related_job_templates, sender=Project)
post_save.connect(save_related_job_templates, sender=Inventory)
post_save.connect(emit_job_event_detail, sender=JobEvent)
post_save.connect(emit_ad_hoc_command_event_detail, sender=AdHocCommandEvent)
post_save.connect(emit_project_update_event_detail, sender=ProjectUpdateEvent)
post_save.connect(emit_inventory_update_event_detail, sender=InventoryUpdateEvent)
post_save.connect(emit_system_job_event_detail, sender=SystemJobEvent)
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
m2m_changed.connect(rbac_activity_stream, Role.members.through)
m2m_changed.connect(rbac_activity_stream, Role.parents.through)

@@ -389,10 +311,6 @@ def disable_computed_fields():
    post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Host)
    post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
    post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
    m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
    m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.parents.through)
    m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
    m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
    post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
    post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
    post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Job)
@@ -52,6 +52,7 @@ import ansible_runner
from awx import __version__ as awx_application_version
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
    Schedule, TowerScheduleState, Instance, InstanceGroup,
    UnifiedJob, Notification,

@@ -263,12 +264,6 @@ def apply_cluster_membership_policies():
    logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))


@task(queue='tower_broadcast_all', exchange_type='fanout')
def set_migration_flag():
    logger.debug('Received migration-in-progress signal, will serve redirect.')
    cache.set('migration_in_progress', True)


@task(queue='tower_broadcast_all', exchange_type='fanout')
def handle_setting_changes(setting_keys):
    orig_len = len(setting_keys)

@@ -343,17 +338,31 @@ def send_notifications(notification_list, job_id=None):

@task()
def gather_analytics():
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField
    if not settings.INSIGHTS_TRACKING_STATE:
        return
    try:
        tgz = analytics.gather()
        if not tgz:
            return
        logger.debug('gathered analytics: {}'.format(tgz))
        analytics.ship(tgz)
    finally:
        if os.path.exists(tgz):
            os.remove(tgz)
    last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
    if last_gather:
        last_time = DateTimeField().to_internal_value(last_gather.value)
    else:
        last_time = None
    gather_time = now()
    if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
        with advisory_lock('gather_analytics_lock', wait=False) as acquired:
            if acquired is False:
                logger.debug('Not gathering analytics, another task holds lock')
                return
            try:
                tgz = analytics.gather()
                if not tgz:
                    return
                logger.info('gathered analytics: {}'.format(tgz))
                analytics.ship(tgz)
                settings.AUTOMATION_ANALYTICS_LAST_GATHER = gather_time
            finally:
                if os.path.exists(tgz):
                    os.remove(tgz)
||||
@task(queue=get_local_queuename)
|
||||
@@ -505,7 +514,7 @@ def awx_periodic_scheduler():
|
||||
|
||||
invalid_license = False
|
||||
try:
|
||||
access_registry[Job](None).check_license()
|
||||
access_registry[Job](None).check_license(quiet=True)
|
||||
except PermissionDenied as e:
|
||||
invalid_license = e
|
||||
|
||||
@@ -594,7 +603,7 @@ def handle_work_error(task_id, *args, **kwargs):
|
||||
|
||||
|
||||
@task()
|
||||
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
|
||||
def update_inventory_computed_fields(inventory_id):
|
||||
'''
|
||||
Signal handler and wrapper around inventory.update_computed_fields to
|
||||
prevent unnecessary recursive calls.
|
||||
@@ -605,7 +614,7 @@ def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
|
||||
return
|
||||
i = i[0]
|
||||
try:
|
||||
i.update_computed_fields(update_hosts=should_update_hosts)
|
||||
i.update_computed_fields()
|
||||
except DatabaseError as e:
|
||||
if 'did not affect any rows' in str(e):
|
||||
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
|
||||
@@ -648,7 +657,7 @@ def update_host_smart_inventory_memberships():
|
||||
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
|
||||
# Update computed fields for changed inventories outside atomic action
|
||||
for smart_inventory in changed_inventories:
|
||||
smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
|
||||
smart_inventory.update_computed_fields()
|
||||
|
||||
|
||||
@task()
|
||||
@@ -709,6 +718,7 @@ class BaseTask(object):
|
||||
def __init__(self):
|
||||
self.cleanup_paths = []
|
||||
self.parent_workflow_job_id = None
|
||||
self.host_map = {}
|
||||
|
||||
def update_model(self, pk, _attempt=0, **updates):
|
||||
"""Reload the model instance from the database and update the
|
||||
@@ -1007,11 +1017,17 @@ class BaseTask(object):
|
||||
return False
|
||||
|
||||
def build_inventory(self, instance, private_data_dir):
|
||||
script_params = dict(hostvars=True)
|
||||
script_params = dict(hostvars=True, towervars=True)
|
||||
if hasattr(instance, 'job_slice_number'):
|
||||
script_params['slice_number'] = instance.job_slice_number
|
||||
script_params['slice_count'] = instance.job_slice_count
|
||||
script_data = instance.inventory.get_script_data(**script_params)
|
||||
# maintain a list of host_name --> host_id
|
||||
# so we can associate emitted events to Host objects
|
||||
self.host_map = {
|
||||
hostname: hv.pop('remote_tower_id', '')
|
||||
for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()
|
||||
}
|
||||
json_data = json.dumps(script_data)
|
||||
handle, path = tempfile.mkstemp(dir=private_data_dir)
|
||||
f = os.fdopen(handle, 'w')
|
||||
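What host_map ends up holding, demonstrated on a stand-in script_data payload: remote_tower_id is popped out of each host's hostvars (so it never reaches the playbook run) and kept as a host_name -> host_id lookup.

script_data = {
    '_meta': {'hostvars': {
        'web1.example.org': {'remote_tower_id': 42, 'ansible_port': 22},
        'db1.example.org': {'remote_tower_id': 43},
    }}
}
host_map = {
    hostname: hv.pop('remote_tower_id', '')
    for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()
}
assert host_map == {'web1.example.org': 42, 'db1.example.org': 43}
assert 'remote_tower_id' not in script_data['_meta']['hostvars']['web1.example.org']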
@@ -1120,6 +1136,32 @@ class BaseTask(object):
        event_data.pop('parent_uuid', None)
        if self.parent_workflow_job_id:
            event_data['workflow_job_id'] = self.parent_workflow_job_id
        if self.host_map:
            host = event_data.get('event_data', {}).get('host', '').strip()
            if host:
                event_data['host_name'] = host
                if host in self.host_map:
                    event_data['host_id'] = self.host_map[host]
            else:
                event_data['host_name'] = ''
                event_data['host_id'] = ''

        if isinstance(self, RunProjectUpdate):
            # it's common for Ansible's SCM modules to print
            # error messages on failure that contain the plaintext
            # basic auth credentials (username + password)
            # it's also common for the nested event data itself (['res']['...'])
            # to contain unredacted text on failure
            # this is a _little_ expensive to filter
            # with regex, but project updates don't have many events,
            # so it *should* have a negligible performance impact
            try:
                event_data_json = json.dumps(event_data)
                event_data_json = UriCleaner.remove_sensitive(event_data_json)
                event_data = json.loads(event_data_json)
            except json.JSONDecodeError:
                pass

        should_write_event = False
        event_data.setdefault(self.event_data_key, self.instance.id)
        self.dispatcher.dispatch(event_data)
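The dump-scrub-load round trip above, with a stand-in scrubber in place of awx.main.redact.UriCleaner.remove_sensitive (whose internals are not shown in this diff); the regex here is illustrative only:

import json
import re

def remove_sensitive(text):
    # stand-in: strip userinfo from URLs like https://user:pass@host/...
    return re.sub(r'(https?://)[^/@\s]+@', r'\1', text)

event_data = {'res': {'msg': 'fatal: https://alice:hunter2@git.example.com/repo.git failed'}}
cleaned = json.loads(remove_sensitive(json.dumps(event_data)))
assert 'hunter2' not in cleaned['res']['msg']

Serializing the whole event to JSON lets one pass scrub every nested string, at the cost the inline comments acknowledge; the except clause keeps a failed round trip from losing the event.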
@@ -1646,8 +1688,12 @@ class RunJob(BaseTask):
            args.append('--vault-id')
            args.append('{}@prompt'.format(vault_id))

        if job.forks:  # FIXME: Max limit?
            args.append('--forks=%d' % job.forks)
        if job.forks:
            if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
                logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
                args.append('--forks=%d' % settings.MAX_FORKS)
            else:
                args.append('--forks=%d' % job.forks)
        if job.force_handlers:
            args.append('--force-handlers')
        if job.limit:

@@ -1858,7 +1904,7 @@ class RunJob(BaseTask):
        except Inventory.DoesNotExist:
            pass
        else:
            update_inventory_computed_fields.delay(inventory.id, True)
            update_inventory_computed_fields.delay(inventory.id)


@task()
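The new fork cap in isolation; per the settings.MAX_FORKS > 0 guard, a non-positive value (presumably the default) disables the cap:

def forks_arg(requested, max_forks):
    # mirrors the branch above: clamp only when a positive cap is configured
    if requested and max_forks > 0 and requested > max_forks:
        return '--forks=%d' % max_forks
    return '--forks=%d' % requested if requested else None

assert forks_arg(11, 10) == '--forks=10'
assert forks_arg(5, 10) == '--forks=5'
assert forks_arg(50, 0) == '--forks=50'  # cap disabled

Note the runtime path clamps and warns rather than failing, while the API path (the test_create_with_forks_exceeding_maximum_xfail test later in this diff) rejects the template outright with a 400.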
@@ -1967,8 +2013,9 @@ class RunProjectUpdate(BaseTask):
                continue
            env_key = ('ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key)).upper()
            env[env_key] = server[key]
        # now set the precedence of galaxy servers
        env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
        if galaxy_servers:
            # now set the precedence of galaxy servers
            env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
        return env

    def _build_scm_url_extra_vars(self, project_update):

@@ -2841,4 +2888,4 @@ def deep_copy_model_obj(
    ), permission_check_func[2])
    permission_check_func(creater, copy_mapping.values())
    if isinstance(new_obj, Inventory):
        update_inventory_computed_fields.delay(new_obj.id, True)
        update_inventory_computed_fields.delay(new_obj.id)
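What the now-guarded block produces for two configured servers; the galaxy_servers structure is inferred from the loop above, and the real loop walks GALAXY_SERVER_FIELDS rather than the hard-coded tuple used here:

galaxy_servers = [
    {'id': 'primary', 'url': 'https://galaxy.example.com/'},
    {'id': 'fallback', 'url': 'https://old-galaxy.example.com/'},
]
env = {}
for server in galaxy_servers:
    for key in ('url',):  # stand-in for GALAXY_SERVER_FIELDS
        env_key = ('ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key)).upper()
        env[env_key] = server[key]
if galaxy_servers:  # the fix: only set the precedence list when servers exist
    env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(s.get('id', 'unnamed') for s in galaxy_servers)

assert env['ANSIBLE_GALAXY_SERVER_LIST'] == 'primary,fallback'
assert env['ANSIBLE_GALAXY_SERVER_PRIMARY_URL'] == 'https://galaxy.example.com/'

Without the guard, an empty server list would export ANSIBLE_GALAXY_SERVER_LIST='', which ansible-galaxy treats differently from the variable being unset.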
@@ -10,6 +10,8 @@ group_patterns = foo_group_patterns
want_facts = True
want_hostcollections = True
group_prefix = foo_group_prefix
want_ansible_ssh_host = True
rich_params = True

[cache]
path = /tmp
@@ -2,6 +2,9 @@ from django.db import connection
from django.db.models.signals import post_migrate
from django.apps import apps
from django.conf import settings
from unittest import mock

import contextlib


def app_post_migration(sender, app_config, **kwargs):

@@ -23,3 +26,13 @@ if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':


@contextlib.contextmanager
def immediate_on_commit():
    """
    Context manager executing transaction.on_commit() hooks immediately as
    if the connection was in auto-commit mode.
    """
    def on_commit(func):
        func()
    with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch:
        yield patch
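Typical use of the new shared helper in a test, mirroring the TestComputedFields change further down in this diff (the test name and fixtures here are placeholders):

from awx.main.tasks import update_inventory_computed_fields
from awx.main.tests.functional import immediate_on_commit

def test_delete_reaches_computed_fields(mocker, inventory, job):  # hypothetical test
    with immediate_on_commit():
        with mocker.patch.object(update_inventory_computed_fields, 'delay'):
            job.delete()
        update_inventory_computed_fields.delay.assert_called_once_with(inventory.id)

Because the production code now queues work via connection.on_commit, tests that never commit (pytest-django wraps each test in a rolled-back transaction) would otherwise never see the hook fire; this helper runs the hooks inline instead.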
@@ -15,7 +15,7 @@ def test_job_events_sublist_truncation(get, organization_factory, job_template_f
                                 inventory='test_inv', project='test_proj').job_template
    job = jt.create_unified_job()
    JobEvent.create_from_data(job_id=job.pk, uuid='abc123', event='runner_on_start',
                              stdout='a' * 1025)
                              stdout='a' * 1025).save()

    url = reverse('api:job_job_events_list', kwargs={'pk': job.pk})
    if not truncate:

@@ -35,7 +35,7 @@ def test_ad_hoc_events_sublist_truncation(get, organization_factory, job_templat
    adhoc = AdHocCommand()
    adhoc.save()
    AdHocCommandEvent.create_from_data(ad_hoc_command_id=adhoc.pk, uuid='abc123', event='runner_on_start',
                                       stdout='a' * 1025)
                                       stdout='a' * 1025).save()

    url = reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': adhoc.pk})
    if not truncate:

@@ -153,7 +153,8 @@ def test_summary_fields_recent_jobs(job_template, admin_user, get):
        'id': job.id,
        'status': 'failed',
        'finished': job.finished,
        'type': 'job'
        'canceled_on': None,
        'type': 'job'
    } for job in jobs[-10:][::-1]]

@@ -264,18 +264,6 @@ def test_job_launch_fails_without_credential_access(job_template_prompts, runtim
                    dict(credentials=runtime_data['credentials']), rando, expect=403)


@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_block_scan_job_type_change(job_template_prompts, post, admin_user):
    job_template = job_template_prompts(True)

    # Assure that changing the type of a scan job blocks the launch
    response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
                    dict(job_type='scan'), admin_user, expect=400)

    assert 'job_type' in response.data


@pytest.mark.django_db
def test_job_launch_JT_with_validation(machine_credential, credential, deploy_jobtemplate):
    deploy_jobtemplate.extra_vars = '{"job_template_var": 3}'

@@ -118,6 +118,22 @@ def test_extra_credential_unique_type_xfail(get, post, organization_factory, job
    assert response.data.get('count') == 1


@pytest.mark.django_db
def test_create_with_forks_exceeding_maximum_xfail(alice, post, project, inventory, settings):
    project.use_role.members.add(alice)
    inventory.use_role.members.add(alice)
    settings.MAX_FORKS = 10
    response = post(reverse('api:job_template_list'), {
        'name': 'Some name',
        'project': project.id,
        'inventory': inventory.id,
        'playbook': 'helloworld.yml',
        'forks': 11,
    }, alice)
    assert response.status_code == 400
    assert 'Maximum number of forks (10) exceeded' in str(response.data)


@pytest.mark.django_db
def test_attach_extra_credential(get, post, organization_factory, job_template_factory, credential):
    objs = organization_factory("org", superusers=['admin'])
@@ -1,8 +1,6 @@
import pytest
import base64
import contextlib
import json
from unittest import mock

from django.db import connection
from django.test.utils import override_settings

@@ -12,22 +10,11 @@ from awx.main.utils.encryption import decrypt_value, get_encryption_key
from awx.api.versioning import reverse, drf_reverse
from awx.main.models.oauth import (OAuth2Application as Application,
                                   OAuth2AccessToken as AccessToken)
from awx.main.tests.functional import immediate_on_commit
from awx.sso.models import UserEnterpriseAuth
from oauth2_provider.models import RefreshToken


@contextlib.contextmanager
def immediate_on_commit():
    """
    Context manager executing transaction.on_commit() hooks immediately as
    if the connection was in auto-commit mode.
    """
    def on_commit(func):
        func()
    with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch:
        yield patch


@pytest.mark.django_db
def test_personal_access_token_creation(oauth_application, post, alice):
    url = drf_reverse('api:oauth_authorization_root_view') + 'token/'

@@ -69,7 +56,7 @@ def test_token_creation_disabled_for_external_accounts(oauth_application, post,


@pytest.mark.django_db
def test_existing_token_disabled_for_external_accounts(oauth_application, get, post, admin):
def test_existing_token_enabled_for_external_accounts(oauth_application, get, post, admin):
    UserEnterpriseAuth(user=admin, provider='radius').save()
    url = drf_reverse('api:oauth_authorization_root_view') + 'token/'
    with override_settings(RADIUS_SERVER='example.org', ALLOW_OAUTH2_FOR_EXTERNAL_USERS=True):

@@ -98,9 +85,9 @@ def test_existing_token_disabled_for_external_accounts(oauth_application, get, p
    resp = get(
        drf_reverse('api:user_me_list', kwargs={'version': 'v2'}),
        HTTP_AUTHORIZATION='Bearer ' + token,
        status=401
        status=200
    )
    assert b'To establish a login session' in resp.content
    assert json.loads(resp.content)['results'][0]['username'] == 'admin'


@pytest.mark.django_db
|
||||
# Python
|
||||
import pytest
|
||||
import os
|
||||
import time
|
||||
|
||||
from django.conf import settings
|
||||
from kombu.utils.url import parse_url
|
||||
@@ -276,6 +277,7 @@ def test_logging_aggregrator_connection_test_valid(mocker, get, post, admin):
|
||||
def test_logging_aggregrator_connection_test_with_masked_password(mocker, patch, post, admin):
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'logging'})
|
||||
patch(url, user=admin, data={'LOG_AGGREGATOR_PASSWORD': 'password123'}, expect=200)
|
||||
time.sleep(1) # log settings are cached slightly
|
||||
|
||||
with mock.patch.object(AWXProxyHandler, 'perform_test') as perform_test:
|
||||
url = reverse('api:setting_logging_test')
|
||||
|
||||
@@ -219,6 +219,30 @@ def test_survey_spec_passwords_with_default_required(job_template_factory, post,
    assert launch_value not in json.loads(job.extra_vars).values()


@pytest.mark.django_db
def test_survey_spec_default_not_allowed(job_template, post, admin_user):
    survey_input_data = {
        'description': 'A survey',
        'spec': [{
            'question_name': 'You must choose wisely',
            'variable': 'your_choice',
            'default': 'blue',
            'required': False,
            'type': 'multiplechoice',
            "choices": ["red", "green", "purple"]
        }],
        'name': 'my survey'
    }
    r = post(
        url=reverse(
            'api:job_template_survey_spec',
            kwargs={'pk': job_template.id}
        ),
        data=survey_input_data, user=admin_user, expect=400
    )
    assert r.data['error'] == 'Default choice must be answered from the choices listed.'


@pytest.mark.django_db
@pytest.mark.parametrize('default, status', [
    ('SUPERSECRET', 200),
@@ -37,26 +37,26 @@ class TestKeyRegeneration:

    def test_encrypted_setting_values(self):
        # test basic decryption
        settings.LOG_AGGREGATOR_PASSWORD = 'sensitive'
        s = Setting.objects.filter(key='LOG_AGGREGATOR_PASSWORD').first()
        settings.REDHAT_PASSWORD = 'sensitive'
        s = Setting.objects.filter(key='REDHAT_PASSWORD').first()
        assert s.value.startswith(PREFIX)
        assert settings.LOG_AGGREGATOR_PASSWORD == 'sensitive'
        assert settings.REDHAT_PASSWORD == 'sensitive'

        # re-key the setting value
        new_key = regenerate_secret_key.Command().handle()
        new_setting = Setting.objects.filter(key='LOG_AGGREGATOR_PASSWORD').first()
        new_setting = Setting.objects.filter(key='REDHAT_PASSWORD').first()
        assert s.value != new_setting.value

        # wipe out the local cache so the value is pulled from the DB again
        settings.cache.delete('LOG_AGGREGATOR_PASSWORD')
        settings.cache.delete('REDHAT_PASSWORD')

        # verify that the old SECRET_KEY doesn't work
        with pytest.raises(InvalidToken):
            settings.LOG_AGGREGATOR_PASSWORD
            settings.REDHAT_PASSWORD

        # verify that the new SECRET_KEY *does* work
        with override_settings(SECRET_KEY=new_key):
            assert settings.LOG_AGGREGATOR_PASSWORD == 'sensitive'
            assert settings.REDHAT_PASSWORD == 'sensitive'

    def test_encrypted_notification_secrets(self, notification_template_with_encrypt):
        # test basic decryption
@@ -125,9 +125,9 @@ def project_playbooks():
@pytest.fixture
def run_computed_fields_right_away(request):

    def run_me(inventory_id, should_update_hosts=True):
    def run_me(inventory_id):
        i = Inventory.objects.get(id=inventory_id)
        i.update_computed_fields(update_hosts=should_update_hosts)
        i.update_computed_fields()

    mocked = mock.patch(
        'awx.main.signals.update_inventory_computed_fields.delay',
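The mechanism this fixture relies on, reduced to the bare pattern: mock.patch forwards extra keyword arguments to the Mock it constructs, so side_effect can route the task's .delay straight into a synchronous function for the duration of a test.

from unittest import mock

def run_me(inventory_id):
    print('would recompute fields for inventory', inventory_id)

mocked = mock.patch(
    'awx.main.signals.update_inventory_computed_fields.delay',
    side_effect=run_me)
# mocked.start() before the code under test and mocked.stop() after,
# or let a fixture finalizer handle the stop, as conftest.py does here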
@@ -296,3 +296,15 @@ def test_cluster_node_long_node_name(inventory, project):
    # node name is very long, we just want to make sure it does not error
    entry = ActivityStream.objects.filter(job=job).first()
    assert entry.action_node.startswith('ffffff')


@pytest.mark.django_db
def test_credential_defaults_idempotency():
    CredentialType.setup_tower_managed_defaults()
    old_inputs = CredentialType.objects.get(name='Ansible Tower', kind='cloud').inputs
    prior_count = ActivityStream.objects.count()
    # this is commonly re-run in migrations, and no changes should be shown
    # because inputs and injectors are not actually tracked in the database
    CredentialType.setup_tower_managed_defaults()
    assert CredentialType.objects.get(name='Ansible Tower', kind='cloud').inputs == old_inputs
    assert ActivityStream.objects.count() == prior_count
@@ -11,6 +11,7 @@ from awx.main.signals import (
# AWX models
from awx.main.models.organization import Organization
from awx.main.models import ActivityStream, Job
from awx.main.tests.functional import immediate_on_commit


@pytest.mark.django_db

@@ -34,9 +35,10 @@ class TestComputedFields:

    def test_computed_fields_normal_use(self, mocker, inventory):
        job = Job.objects.create(name='fake-job', inventory=inventory)
        with mocker.patch.object(update_inventory_computed_fields, 'delay'):
            job.delete()
        update_inventory_computed_fields.delay.assert_called_once_with(inventory.id, True)
        with immediate_on_commit():
            with mocker.patch.object(update_inventory_computed_fields, 'delay'):
                job.delete()
            update_inventory_computed_fields.delay.assert_called_once_with(inventory.id)

    def test_disable_computed_fields(self, mocker, inventory):
        job = Job.objects.create(name='fake-job', inventory=inventory)
@@ -1,18 +1,15 @@
from unittest import mock
import pytest

from awx.main.models import (Job, JobEvent, ProjectUpdate, ProjectUpdateEvent,
                             AdHocCommand, AdHocCommandEvent, InventoryUpdate,
                             InventorySource, InventoryUpdateEvent, SystemJob,
                             SystemJobEvent)
from awx.main.models import Job, JobEvent


@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
@mock.patch('awx.main.models.events.emit_event_detail')
def test_parent_changed(emit):
    j = Job()
    j.save()
    JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start')
    JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
    assert JobEvent.objects.count() == 1
    for e in JobEvent.objects.all():
        assert e.changed is False

@@ -24,19 +21,26 @@ def test_parent_changed(emit):
        event_data={
            'res': {'changed': ['localhost']}
        }
    )
    assert JobEvent.objects.count() == 2
    for e in JobEvent.objects.all():
    ).save()
    # the `playbook_on_stats` event is where we update the parent changed linkage
    JobEvent.create_from_data(
        job_id=j.pk,
        parent_uuid='abc123',
        event='playbook_on_stats'
    ).save()
    events = JobEvent.objects.filter(event__in=['playbook_on_task_start', 'runner_on_ok'])
    assert events.count() == 2
    for e in events.all():
        assert e.changed is True


@pytest.mark.django_db
@pytest.mark.parametrize('event', JobEvent.FAILED_EVENTS)
@mock.patch('awx.main.consumers.emit_channel_notification')
@mock.patch('awx.main.models.events.emit_event_detail')
def test_parent_failed(emit, event):
    j = Job()
    j.save()
    JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start')
    JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
    assert JobEvent.objects.count() == 1
    for e in JobEvent.objects.all():
        assert e.failed is False

@@ -45,69 +49,15 @@ def test_parent_failed(emit, event):
        job_id=j.pk,
        parent_uuid='abc123',
        event=event
    )
    assert JobEvent.objects.count() == 2
    for e in JobEvent.objects.all():
    ).save()

    # the `playbook_on_stats` event is where we update the parent failed linkage
    JobEvent.create_from_data(
        job_id=j.pk,
        parent_uuid='abc123',
        event='playbook_on_stats'
    ).save()
    events = JobEvent.objects.filter(event__in=['playbook_on_task_start', event])
    assert events.count() == 2
    for e in events.all():
        assert e.failed is True


@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_job_event_websocket_notifications(emit):
    j = Job(id=123)
    j.save()
    JobEvent.create_from_data(job_id=j.pk)
    assert len(emit.call_args_list) == 1
    topic, payload = emit.call_args_list[0][0]
    assert topic == 'job_events-123'
    assert payload['job'] == 123


@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_ad_hoc_event_websocket_notifications(emit):
    ahc = AdHocCommand(id=123)
    ahc.save()
    AdHocCommandEvent.create_from_data(ad_hoc_command_id=ahc.pk)
    assert len(emit.call_args_list) == 1
    topic, payload = emit.call_args_list[0][0]
    assert topic == 'ad_hoc_command_events-123'
    assert payload['ad_hoc_command'] == 123


@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_project_update_event_websocket_notifications(emit, project):
    pu = ProjectUpdate(id=123, project=project)
    pu.save()
    ProjectUpdateEvent.create_from_data(project_update_id=pu.pk)
    assert len(emit.call_args_list) == 1
    topic, payload = emit.call_args_list[0][0]
    assert topic == 'project_update_events-123'
    assert payload['project_update'] == 123


@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_inventory_update_event_websocket_notifications(emit, inventory):
    source = InventorySource()
    source.save()
    iu = InventoryUpdate(id=123, inventory_source=source)
    iu.save()
    InventoryUpdateEvent.create_from_data(inventory_update_id=iu.pk)
    assert len(emit.call_args_list) == 1
    topic, payload = emit.call_args_list[0][0]
    assert topic == 'inventory_update_events-123'
    assert payload['inventory_update'] == 123


@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_system_job_event_websocket_notifications(emit, inventory):
    j = SystemJob(id=123)
    j.save()
    SystemJobEvent.create_from_data(system_job_id=j.pk)
    assert len(emit.call_args_list) == 1
    topic, payload = emit.call_args_list[0][0]
    assert topic == 'system_job_events-123'
    assert payload['system_job'] == 123
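The calling-convention change these tests encode, in two lines: create_from_data() now returns an unsaved event object, so persistence is an explicit, separate step (illustrative snippet, names as in the tests above):

event = JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='runner_on_start')
event.save()  # nothing is written to the database until here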
@@ -48,7 +48,6 @@ class TestJobNotificationMixin(object):
                     'username': str},
        'instance_group': {'id': int, 'name': str},
        'inventory': {'description': str,
                      'groups_with_active_failures': int,
                      'has_active_failures': bool,
                      'has_inventory_sources': bool,
                      'hosts_with_active_failures': int,

@@ -283,13 +283,13 @@ class TestTaskImpact:

    def test_limit_task_impact(self, job_host_limit, run_computed_fields_right_away):
        job = job_host_limit(5, 2)
        job.inventory.refresh_from_db()  # FIXME: computed fields operates on reloaded inventory
        job.inventory.update_computed_fields()
        assert job.inventory.total_hosts == 5
        assert job.task_impact == 2 + 1  # forks becomes constraint

    def test_host_task_impact(self, job_host_limit, run_computed_fields_right_away):
        job = job_host_limit(3, 5)
        job.inventory.refresh_from_db()  # FIXME: computed fields operates on reloaded inventory
        job.inventory.update_computed_fields()
        assert job.task_impact == 3 + 1  # hosts becomes constraint

    def test_shard_task_impact(self, slice_job_factory, run_computed_fields_right_away):

@@ -304,6 +304,7 @@ class TestTaskImpact:
            len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
            for i in range(3)
        ] == [1, 1, 1]
        jobs[0].inventory.update_computed_fields()
        assert [job.task_impact for job in jobs] == [2, 2, 2]  # plus one base task impact
        # Uneven distribution - first job takes the extra host
        jobs[0].inventory.hosts.create(name='remainder_foo')

@@ -311,5 +312,5 @@ class TestTaskImpact:
            len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
            for i in range(3)
        ] == [2, 1, 1]
        jobs[0].inventory.refresh_from_db()  # FIXME: computed fields operates on reloaded inventory
        jobs[0].inventory.update_computed_fields()
        assert [job.task_impact for job in jobs] == [3, 2, 2]
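The arithmetic the TestTaskImpact assertions pin down, stated as a function (an inference from the test data above, not a quote of the model code): impact is the smaller of forks and matched hosts, plus one base unit.

def task_impact(hosts, forks):
    # inferred from the assertions: min(hosts, forks) plus one base unit
    return min(hosts, forks) + 1

assert task_impact(5, 2) == 3  # forks constrain
assert task_impact(3, 5) == 4  # hosts constrain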
@@ -67,7 +67,7 @@ def test_multi_group_with_shared_dependency(instance_factory, default_instance_g
    pu = p.project_updates.first()
    TaskManager.start_task.assert_called_once_with(pu,
                                                   default_instance_group,
                                                   [j1],
                                                   [j1, j2],
                                                   default_instance_group.instances.all()[0])
    pu.finished = pu.created + timedelta(seconds=1)
    pu.status = "successful"

@@ -193,7 +193,7 @@ def test_instance_group_basic_policies(instance_factory, instance_group_factory)
    ig2 = InstanceGroup.objects.get(id=ig2.id)
    ig3 = InstanceGroup.objects.get(id=ig3.id)
    assert len(ig0.instances.all()) == 1
    assert i0 in ig0.instances.all()
    assert i0 in ig0.instances.all()
    assert len(InstanceGroup.objects.get(id=ig1.id).instances.all()) == 2
    assert i1 in ig1.instances.all()
    assert i2 in ig1.instances.all()
@@ -6,7 +6,7 @@ from datetime import timedelta
from awx.main.scheduler import TaskManager
from awx.main.scheduler.dependency_graph import DependencyGraph
from awx.main.utils import encrypt_field
from awx.main.models import WorkflowJobTemplate, JobTemplate
from awx.main.models import WorkflowJobTemplate, JobTemplate, Job


@pytest.mark.django_db

@@ -307,8 +307,8 @@ def test_shared_dependencies_launch(default_instance_group, job_template_factory
        TaskManager().schedule()
        pu = p.project_updates.first()
        iu = ii.inventory_updates.first()
        TaskManager.start_task.assert_has_calls([mock.call(pu, default_instance_group, [iu, j1], instance),
                                                 mock.call(iu, default_instance_group, [pu, j1], instance)])
        TaskManager.start_task.assert_has_calls([mock.call(iu, default_instance_group, [j1, j2, pu], instance),
                                                 mock.call(pu, default_instance_group, [j1, j2, iu], instance)])
    pu.status = "successful"
    pu.finished = pu.created + timedelta(seconds=1)
    pu.save()

@@ -353,3 +353,65 @@ def test_job_not_blocking_project_update(default_instance_group, job_template_fa
    dependency_graph = DependencyGraph(None)
    dependency_graph.add_job(job)
    assert not dependency_graph.is_job_blocked(project_update)


@pytest.mark.django_db
def test_job_not_blocking_inventory_update(default_instance_group, job_template_factory, inventory_source_factory):
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred',
                                   jobs=["job"])
    job = objects.jobs["job"]
    job.instance_group = default_instance_group
    job.status = "running"
    job.save()

    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        task_manager = TaskManager()
        task_manager._schedule()

        inv = objects.inventory
        inv_source = inventory_source_factory("ec2")
        inv_source.source = "ec2"
        inv.inventory_sources.add(inv_source)
        inventory_update = inv_source.create_inventory_update()
        inventory_update.instance_group = default_instance_group
        inventory_update.status = "pending"
        inventory_update.save()

        assert not task_manager.is_job_blocked(inventory_update)

        dependency_graph = DependencyGraph(None)
        dependency_graph.add_job(job)
        assert not dependency_graph.is_job_blocked(inventory_update)


@pytest.mark.django_db
def test_generate_dependencies_only_once(job_template_factory):
    objects = job_template_factory('jt', organization='org1')

    job = objects.job_template.create_job()
    job.status = "pending"
    job.name = "job_gen_dep"
    job.save()

    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        # job starts with dependencies_processed as False
        assert not job.dependencies_processed
        # run one cycle of ._schedule() to generate dependencies
        TaskManager()._schedule()

        # make sure dependencies_processed is now True
        job = Job.objects.filter(name="job_gen_dep")[0]
        assert job.dependencies_processed

        # Run ._schedule() again, but make sure .generate_dependencies() is not
        # called with job in the argument list
        tm = TaskManager()
        tm.generate_dependencies = mock.MagicMock()
        tm._schedule()

        # .call_args is a tuple, (positional_args, kwargs), so [0][0] is
        # the first positional arg, i.e. the first argument passed to
        # .generate_dependencies()
        assert tm.generate_dependencies.call_args[0][0] == []
awx/main/tests/functional/test_credential_plugins.py (new file, 6 lines)
@@ -0,0 +1,6 @@
def test_imported_azure_cloud_sdk_vars():
    from awx.main.credential_plugins import azure_kv
    assert len(azure_kv.clouds) > 0
    assert all([hasattr(c, 'name') for c in azure_kv.clouds])
    assert all([hasattr(c, 'suffixes') for c in azure_kv.clouds])
    assert all([hasattr(c.suffixes, 'keyvault_dns') for c in azure_kv.clouds])
@@ -60,7 +60,11 @@ INI_TEST_VARS = {
    'satellite6': {
        'satellite6_group_patterns': 'foo_group_patterns',
        'satellite6_group_prefix': 'foo_group_prefix',
        'satellite6_want_hostcollections': True
        'satellite6_want_hostcollections': True,
        'satellite6_want_ansible_ssh_host': True,
        'satellite6_rich_params': True,
        'satellite6_want_facts': True

    },
    'cloudforms': {
        'version': '2.4',
@@ -57,7 +57,7 @@ def test_empty_in(empty_value):
@pytest.mark.parametrize(u"valid_value", [u'foo', u'foo,'])
def test_valid_in(valid_value):
    field_lookup = FieldLookupBackend()
    value, new_lookup = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value)
    value, new_lookup, _ = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value)
    assert 'foo' in value
@@ -60,7 +60,7 @@ class TestReplayJobEvents():
        r.emit_job_status = lambda job, status: True
        return r

    @mock.patch('awx.main.management.commands.replay_job_events.emit_channel_notification', lambda *a, **kw: None)
    @mock.patch('awx.main.management.commands.replay_job_events.emit_event_detail', lambda *a, **kw: None)
    def test_sleep(self, mocker, replayer):
        replayer.run(3, 1)

@@ -74,7 +74,7 @@ class TestReplayJobEvents():
            mock.call(0.000001),
        ])

    @mock.patch('awx.main.management.commands.replay_job_events.emit_channel_notification', lambda *a, **kw: None)
    @mock.patch('awx.main.management.commands.replay_job_events.emit_event_detail', lambda *a, **kw: None)
    def test_speed(self, mocker, replayer):
        replayer.run(3, 2)
@@ -1,6 +1,5 @@
from datetime import datetime
from django.utils.timezone import utc
from unittest import mock
import pytest

from awx.main.models import (JobEvent, ProjectUpdateEvent, AdHocCommandEvent,

@@ -18,16 +17,11 @@ from awx.main.models import (JobEvent, ProjectUpdateEvent, AdHocCommandEvent,
    datetime(2018, 1, 1).isoformat(), datetime(2018, 1, 1)
])
def test_event_parse_created(job_identifier, cls, created):
    with mock.patch.object(cls, 'objects') as manager:
        cls.create_from_data(**{
            job_identifier: 123,
            'created': created
        })
        expected_created = datetime(2018, 1, 1).replace(tzinfo=utc)
        manager.create.assert_called_with(**{
            job_identifier: 123,
            'created': expected_created
        })
    event = cls.create_from_data(**{
        job_identifier: 123,
        'created': created
    })
    assert event.created == datetime(2018, 1, 1).replace(tzinfo=utc)


@pytest.mark.parametrize('job_identifier, cls', [

@@ -38,24 +32,20 @@ def test_event_parse_created(job_identifier, cls, created):
    ['system_job_id', SystemJobEvent],
])
def test_playbook_event_strip_invalid_keys(job_identifier, cls):
    with mock.patch.object(cls, 'objects') as manager:
        cls.create_from_data(**{
            job_identifier: 123,
            'extra_key': 'extra_value'
        })
        manager.create.assert_called_with(**{job_identifier: 123})
    event = cls.create_from_data(**{
        job_identifier: 123,
        'extra_key': 'extra_value'
    })
    assert getattr(event, job_identifier) == 123
    assert not hasattr(event, 'extra_key')


@pytest.mark.parametrize('field', [
    'play', 'role', 'task', 'playbook'
])
def test_really_long_event_fields(field):
    with mock.patch.object(JobEvent, 'objects') as manager:
        JobEvent.create_from_data(**{
            'job_id': 123,
            'event_data': {field: 'X' * 4096}
        })
        manager.create.assert_called_with(**{
            'job_id': 123,
            'event_data': {field: 'X' * 1023 + '…'}
        })
    event = JobEvent.create_from_data(**{
        'job_id': 123,
        'event_data': {field: 'X' * 4096}
    })
    assert event.event_data[field] == 'X' * 1023 + '…'
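The truncation rule test_really_long_event_fields pins down, as a standalone sketch (the limit constant and helper name are assumed): over-long event_data fields collapse to 1023 characters plus a literal ellipsis.

def truncate_field(value, limit=1024):
    # keep values at or under the limit intact; otherwise cut and mark
    return value if len(value) <= limit else value[:limit - 1] + '…'

assert truncate_field('X' * 4096) == 'X' * 1023 + '…'
assert truncate_field('short') == 'short'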
@@ -171,6 +171,7 @@ class TestWorkflowJobCreate:
        with mocker.patch('awx.main.models.WorkflowJobNode.objects.create', mock_create):
            wfjt_node_no_prompts.create_workflow_job_node(workflow_job=workflow_job_unit)
            mock_create.assert_called_once_with(
                all_parents_must_converge=False,
                extra_data={},
                survey_passwords={},
                char_prompts=wfjt_node_no_prompts.char_prompts,

@@ -185,6 +186,7 @@ class TestWorkflowJobCreate:
                workflow_job=workflow_job_unit
            )
            mock_create.assert_called_once_with(
                all_parents_must_converge=False,
                extra_data={},
                survey_passwords={},
                char_prompts=wfjt_node_with_prompts.char_prompts,
@@ -19,6 +19,7 @@ class WorkflowNode(object):
        self.job = job
        self.do_not_run = do_not_run
        self.unified_job_template = unified_job_template
        self.all_parents_must_converge = False


@pytest.fixture

@@ -94,7 +95,7 @@ class TestDNR():
        (g, nodes) = workflow_dag_1

        r'''
               S0
                0
               /\
             S /  \
              /    \

@@ -113,7 +114,7 @@ class TestDNR():
        assert 0 == len(do_not_run_nodes)

        r'''
               S0
                0
               /\
             S /  \
              /    \

@@ -132,6 +133,259 @@ class TestDNR():
        assert 1 == len(do_not_run_nodes)
        assert nodes[3] == do_not_run_nodes[0]
class TestAllWorkflowNodes():
    # test workflow convergence is functioning as expected
    @pytest.fixture
    def simple_all_convergence(self, wf_node_generator):
        g = WorkflowDAG()
        nodes = [wf_node_generator() for i in range(4)]
        for n in nodes:
            g.add_node(n)

        r'''
                0
               /\
            S /  \ S
             /    \
            1      2
             \    /
            F \  / S
               \/
               3
        '''
        g.add_edge(nodes[0], nodes[1], "success_nodes")
        g.add_edge(nodes[0], nodes[2], "success_nodes")
        g.add_edge(nodes[1], nodes[3], "failure_nodes")
        g.add_edge(nodes[2], nodes[3], "success_nodes")
        nodes[3].all_parents_must_converge = True
        nodes[0].job = Job(status='successful')
        nodes[1].job = Job(status='failed')
        nodes[2].job = Job(status='successful')
        return (g, nodes)

    def test_simple_all_convergence(self, simple_all_convergence):
        (g, nodes) = simple_all_convergence
        dnr_nodes = g.mark_dnr_nodes()
        assert 0 == len(dnr_nodes), "no nodes should be marked DNR"

        nodes_to_run = g.bfs_nodes_to_run()
        assert 1 == len(nodes_to_run), "Node 3, and only node 3, should be chosen to run"
        assert nodes[3] == nodes_to_run[0], "Only node 3 should be chosen to run"
    @pytest.fixture
    def workflow_all_converge_1(self, wf_node_generator):
        g = WorkflowDAG()
        nodes = [wf_node_generator() for i in range(3)]
        for n in nodes:
            g.add_node(n)
        r'''
            0
            |\ F
            | \
           S|  1
            | /
            |/ A
            2
        '''
        g.add_edge(nodes[0], nodes[1], "failure_nodes")
        g.add_edge(nodes[0], nodes[2], "success_nodes")
        g.add_edge(nodes[1], nodes[2], "always_nodes")
        nodes[2].all_parents_must_converge = True
        nodes[0].job = Job(status='successful')
        return (g, nodes)

    def test_all_converge_edge_case_1(self, workflow_all_converge_1):
        (g, nodes) = workflow_all_converge_1
        dnr_nodes = g.mark_dnr_nodes()
        assert 2 == len(dnr_nodes), "node[1] and node[2] should be marked DNR"
        assert nodes[1] == dnr_nodes[0], "Node 1 should be marked DNR"
        assert nodes[2] == dnr_nodes[1], "Node 2 should be marked DNR"

        nodes_to_run = g.bfs_nodes_to_run()
        assert 0 == len(nodes_to_run), "No nodes should be chosen to run"

    @pytest.fixture
    def workflow_all_converge_2(self, wf_node_generator):
        """The ordering of _1 and this test, _2, is _slightly_ different.
        The hope is that topological sorting results in 2 being processed before 3
        and/or 3 before 2.
        """
        g = WorkflowDAG()
        nodes = [wf_node_generator() for i in range(3)]
        for n in nodes:
            g.add_node(n)
        r'''
            0
            |\ S
            | \
           F|  1
            | /
            |/ A
            2
        '''
        g.add_edge(nodes[0], nodes[1], "success_nodes")
        g.add_edge(nodes[0], nodes[2], "failure_nodes")
        g.add_edge(nodes[1], nodes[2], "always_nodes")
        nodes[2].all_parents_must_converge = True
        nodes[0].job = Job(status='successful')
        return (g, nodes)

    def test_all_converge_edge_case_2(self, workflow_all_converge_2):
        (g, nodes) = workflow_all_converge_2
        dnr_nodes = g.mark_dnr_nodes()
        assert 1 == len(dnr_nodes), "1 and only 1 node should be marked DNR"
        assert nodes[2] == dnr_nodes[0], "Node 3 should be marked DNR"

        nodes_to_run = g.bfs_nodes_to_run()
        assert 1 == len(nodes_to_run), "Node 2, and only node 2, should be chosen to run"
        assert nodes[1] == nodes_to_run[0], "Only node 2 should be chosen to run"

    @pytest.fixture
    def workflow_all_converge_will_run(self, wf_node_generator):
        g = WorkflowDAG()
        nodes = [wf_node_generator() for i in range(4)]
        for n in nodes:
            g.add_node(n)
        r'''
            0   1   2
          S \  F|  / S
             \  |  /
              \ | /
               \|/
                |
                3
        '''
        g.add_edge(nodes[0], nodes[3], "success_nodes")
        g.add_edge(nodes[1], nodes[3], "failure_nodes")
        g.add_edge(nodes[2], nodes[3], "success_nodes")
        nodes[3].all_parents_must_converge = True

        nodes[0].job = Job(status='successful')
        nodes[1].job = Job(status='failed')
        nodes[2].job = Job(status='running')
        return (g, nodes)

    def test_workflow_all_converge_will_run(self, workflow_all_converge_will_run):
        (g, nodes) = workflow_all_converge_will_run
        dnr_nodes = g.mark_dnr_nodes()
        assert 0 == len(dnr_nodes), "No nodes should get marked DNR"

        nodes_to_run = g.bfs_nodes_to_run()
        assert 0 == len(nodes_to_run), "No nodes should run yet"

        nodes[2].job.status = 'successful'
        nodes_to_run = g.bfs_nodes_to_run()
        assert 1 == len(nodes_to_run), "1 and only 1 node should want to run"
        assert nodes[3] == nodes_to_run[0], "Convergence node should be chosen to run"

    @pytest.fixture
    def workflow_all_converge_dnr(self, wf_node_generator):
        g = WorkflowDAG()
        nodes = [wf_node_generator() for i in range(4)]
        for n in nodes:
            g.add_node(n)
        r'''
            0   1   2
          S \  F|  / F
             \  |  /
              \ | /
               \|/
                |
                3
        '''
        g.add_edge(nodes[0], nodes[3], "success_nodes")
        g.add_edge(nodes[1], nodes[3], "failure_nodes")
        g.add_edge(nodes[2], nodes[3], "failure_nodes")
        nodes[3].all_parents_must_converge = True

        nodes[0].job = Job(status='successful')
        nodes[1].job = Job(status='running')
        nodes[2].job = Job(status='failed')
        return (g, nodes)

    def test_workflow_all_converge_while_parent_runs(self, workflow_all_converge_dnr):
|
||||
(g, nodes) = workflow_all_converge_dnr
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 0 == len(dnr_nodes), "No nodes should get marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "No nodes should run yet"
|
||||
|
||||
def test_workflow_all_converge_with_incorrect_parent(self, workflow_all_converge_dnr):
|
||||
# Another tick of the scheduler
|
||||
(g, nodes) = workflow_all_converge_dnr
|
||||
nodes[1].job.status = 'successful'
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 1 == len(dnr_nodes), "1 and only 1 node should be marked DNR"
|
||||
assert nodes[3] == dnr_nodes[0], "Convergence node should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "Convergence node should NOT be chosen to run because it is DNR"
|
||||
|
||||
def test_workflow_all_converge_runs(self, workflow_all_converge_dnr):
|
||||
        # Trick the scheduler again to make sure the convergence node actually runs
        (g, nodes) = workflow_all_converge_dnr
        nodes[1].job.status = 'failed'
        dnr_nodes = g.mark_dnr_nodes()
        assert 0 == len(dnr_nodes), "No nodes should be marked DNR"

        nodes_to_run = g.bfs_nodes_to_run()
        assert 1 == len(nodes_to_run), "Convergence node should be chosen to run"

    @pytest.fixture
    def workflow_all_converge_deep_dnr_tree(self, wf_node_generator):
        g = WorkflowDAG()
        nodes = [wf_node_generator() for i in range(7)]
        for n in nodes:
            g.add_node(n)
        r'''
             0  1  2
              \ | /
           S \ S| / F
              \ | /
               \|/
                |
                3
                /\
             S /  \ S
              /    \
             4|    | 5
               \  /
              S \ / S
                \/
                6
        '''
        g.add_edge(nodes[0], nodes[3], "success_nodes")
        g.add_edge(nodes[1], nodes[3], "success_nodes")
        g.add_edge(nodes[2], nodes[3], "failure_nodes")
        g.add_edge(nodes[3], nodes[4], "success_nodes")
        g.add_edge(nodes[3], nodes[5], "success_nodes")
        g.add_edge(nodes[4], nodes[6], "success_nodes")
        g.add_edge(nodes[5], nodes[6], "success_nodes")
        nodes[3].all_parents_must_converge = True
        nodes[4].all_parents_must_converge = True
        nodes[5].all_parents_must_converge = True
        nodes[6].all_parents_must_converge = True

        nodes[0].job = Job(status='successful')
        nodes[1].job = Job(status='successful')
        nodes[2].job = Job(status='successful')
        return (g, nodes)

    def test_workflow_all_converge_deep_dnr_tree(self, workflow_all_converge_deep_dnr_tree):
        (g, nodes) = workflow_all_converge_deep_dnr_tree
        dnr_nodes = g.mark_dnr_nodes()

        assert 4 == len(dnr_nodes), "All nodes w/ no jobs should be marked DNR"
        assert nodes[3] in dnr_nodes
        assert nodes[4] in dnr_nodes
        assert nodes[5] in dnr_nodes
        assert nodes[6] in dnr_nodes

        nodes_to_run = g.bfs_nodes_to_run()
        assert 0 == len(nodes_to_run), "All non-run nodes should be DNR and NOT candidates to run"
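
The DNR ("do not run") marking these tests rely on follows from the same edge rule, inferred here from the assertions rather than from the implementation: a convergence node is ruled out as soon as any parent can no longer satisfy its edge, whereas a default node is ruled out only when no inbound edge can still fire. A rough sketch with hypothetical stand-ins (Parent, should_mark_dnr), not the actual mark_dnr_nodes code:

# Hypothetical stand-ins; illustrative only.
class Parent:
    def __init__(self, status=None, is_dnr=False):
        self.status = status    # None means no job has been spawned yet
        self.is_dnr = is_dnr


def edge_satisfied(parent_status, edge_type):
    if edge_type == 'always_nodes':
        return parent_status in ('successful', 'failed')
    if edge_type == 'success_nodes':
        return parent_status == 'successful'
    return parent_status == 'failed'    # failure_nodes


def should_mark_dnr(all_parents_must_converge, parents):
    # parents: list of (Parent, edge_type) pairs
    def edge_dead(p, e):
        # this edge can never fire: parent is DNR, or finished with the wrong status
        finished = p.status in ('successful', 'failed')
        return p.is_dnr or (finished and not edge_satisfied(p.status, e))
    if all_parents_must_converge:
        # one unsatisfiable parent rules the convergence node out
        return any(edge_dead(p, e) for p, e in parents)
    return all(edge_dead(p, e) for p, e in parents)


# Matches test_workflow_all_converge_with_incorrect_parent: a parent that
# *succeeded* on a failure edge makes the convergence node DNR.
assert should_mark_dnr(True, [(Parent('successful'), 'failure_nodes')])
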


class TestIsWorkflowDone():
    @pytest.fixture

@@ -197,36 +197,6 @@ def test_change_jt_sensitive_data(job_template_with_ids, mocker, user_unit):
    })


def test_jt_add_scan_job_check(job_template_with_ids, user_unit):
    "Assure that permissions to add scan jobs work correctly"

    access = JobTemplateAccess(user_unit)
    project = job_template_with_ids.project
    inventory = job_template_with_ids.inventory
    project.use_role = Role()
    inventory.use_role = Role()
    organization = Organization(name='test-org')
    inventory.organization = organization
    organization.admin_role = Role()

    def mock_get_object(Class, **kwargs):
        if Class == Project:
            return project
        elif Class == Inventory:
            return inventory
        else:
            raise Exception('Item requested has not been mocked')

    with mock.patch('awx.main.models.rbac.Role.__contains__', return_value=True):
        with mock.patch('awx.main.access.get_object_or_400', mock_get_object):
            assert access.can_add({
                'project': project.pk,
                'inventory': inventory.pk,
                'job_type': 'scan'
            })


def mock_raise_none(self, add_host=False, feature=None, check_expiration=True):
    return None


@@ -2146,7 +2146,14 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        inventory_update.get_cloud_credential = get_cred
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])

        inventory_update.source_vars = '{"satellite6_group_patterns": "[a,b,c]", "satellite6_group_prefix": "hey_", "satellite6_want_hostcollections": True}'
        inventory_update.source_vars = {
            'satellite6_group_patterns': '[a,b,c]',
            'satellite6_group_prefix': 'hey_',
            'satellite6_want_hostcollections': True,
            'satellite6_want_ansible_ssh_host': True,
            'satellite6_rich_params': True,
            'satellite6_want_facts': False
        }

        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)

@@ -2159,6 +2166,9 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        assert config.get('ansible', 'group_patterns') == '[a,b,c]'
        assert config.get('ansible', 'group_prefix') == 'hey_'
        assert config.get('ansible', 'want_hostcollections') == 'True'
        assert config.get('ansible', 'want_ansible_ssh_host') == 'True'
        assert config.get('ansible', 'rich_params') == 'True'
        assert config.get('ansible', 'want_facts') == 'False'

    def test_cloudforms_source(self, inventory_update, private_data_dir, mocker):
        task = tasks.RunInventoryUpdate()
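
Why the assertions above compare against the strings 'True' and 'False': when source_vars are written into an ini-style file, ConfigParser stores every value as text, so Python booleans round-trip as their str() form. A minimal, self-contained reproduction with the standard library (generic configparser usage, not the actual AWX injector code):

import configparser
import io

source_vars = {
    'satellite6_group_patterns': '[a,b,c]',
    'satellite6_want_facts': False,
}

cp = configparser.ConfigParser()
cp.add_section('ansible')
for key, value in source_vars.items():
    # strip the 'satellite6_' prefix, the way the assertions above imply
    cp.set('ansible', key.replace('satellite6_', ''), str(value))

buf = io.StringIO()
cp.write(buf)

cp2 = configparser.ConfigParser()
cp2.read_string(buf.getvalue())
assert cp2.get('ansible', 'want_facts') == 'False'  # booleans come back as strings
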

@@ -79,8 +79,8 @@ class mockHost:
@mock.patch('awx.main.utils.filters.get_model', return_value=mockHost())
class TestSmartFilterQueryFromString():
    @mock.patch(
        'awx.api.filters.get_field_from_path',
        lambda model, path: (model, path)  # disable field filtering, because a__b isn't a real Host field
        'awx.api.filters.get_fields_from_path',
        lambda model, path: ([model], path)  # disable field filtering, because a__b isn't a real Host field
    )
    @pytest.mark.parametrize("filter_string,q_expected", [
        ('facts__facts__blank=""', Q(**{u"facts__facts__blank": u""})),
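
For context on the parametrized case above: a smart-filter string is parsed down to a Django Q object keyed by the double-underscore field path. A deliberately naive illustration of the target shape (this one-line parser is hypothetical; the real SmartFilter grammar also handles and/or, grouping, and typed values):

from django.db.models import Q


def simple_filter_to_q(filter_string):
    # split on the first '=' and strip surrounding quotes; nothing more
    field, _, value = filter_string.partition('=')
    return Q(**{field: value.strip('"')})


q = simple_filter_to_q('facts__facts__blank=""')
print(q)    # (AND: ('facts__facts__blank', ''))
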

@@ -379,7 +379,12 @@ def get_allowed_fields(obj, serializer_mapping):
        'oauth2accesstoken': ['last_used'],
        'oauth2application': ['client_secret']
    }
    field_blacklist = ACTIVITY_STREAM_FIELD_EXCLUSIONS.get(obj._meta.model_name, [])
    model_name = obj._meta.model_name
    field_blacklist = ACTIVITY_STREAM_FIELD_EXCLUSIONS.get(model_name, [])
    # see definition of from_db for CredentialType
    # injection logic of any managed types is incompatible with activity stream
    if model_name == 'credentialtype' and obj.managed_by_tower and obj.namespace:
        field_blacklist.extend(['inputs', 'injectors'])
    if field_blacklist:
        allowed_fields = [f for f in allowed_fields if f not in field_blacklist]
    return allowed_fields

@@ -1,16 +1,8 @@
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.

import logging
from itertools import chain

from django.core.cache import cache
from django.db.migrations.executor import MigrationExecutor
from django.db import connection


logger = logging.getLogger('awx.main.utils.db')


def get_all_field_names(model):
    # Implements compatibility with _meta.get_all_field_names

@@ -22,21 +14,3 @@ def get_all_field_names(model):
        # GenericForeignKey from the results.
        if not (field.many_to_one and field.related_model is None)
    )))


def migration_in_progress_check_or_relase():
    '''A memcache flag is raised (set to True) to inform cluster
    that a migration is ongoing see main.apps.MainConfig.ready
    if the flag is True then the flag is removed on this instance if
    models-db consistency is observed
    effective value of migration flag is returned
    '''
    migration_in_progress = cache.get('migration_in_progress', False)
    if migration_in_progress:
        executor = MigrationExecutor(connection)
        plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
        if not bool(plan):
            logger.info('Detected that migration finished, migration flag taken down.')
            cache.delete('migration_in_progress')
            migration_in_progress = False
    return migration_in_progress
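
A caller-side sketch of the pattern the removed helper supported: the cache flag is raised while migrations run, and request handling can be short-circuited until the helper observes an empty migration plan and lowers it. Hypothetical view code, shown only to illustrate the flag's lifecycle:

from django.core.cache import cache
from django.http import HttpResponse


def migration_aware_view(request):
    # set by the app's ready() hook while a migration is ongoing and
    # cleared once the helper above sees an empty migration plan
    if cache.get('migration_in_progress', False):
        return HttpResponse('upgrade in progress', status=503)
    return HttpResponse('ok')
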

@@ -107,6 +107,17 @@ class LogstashFormatterBase(logging.Formatter):

class LogstashFormatter(LogstashFormatterBase):

    def __init__(self, *args, **kwargs):
        self.cluster_host_id = settings.CLUSTER_HOST_ID
        self.tower_uuid = None
        uuid = (
            getattr(settings, 'LOG_AGGREGATOR_TOWER_UUID', None) or
            getattr(settings, 'INSTALL_UUID', None)
        )
        if uuid:
            self.tower_uuid = uuid
        super(LogstashFormatter, self).__init__(*args, **kwargs)

    def reformat_data_for_log(self, raw_data, kind=None):
        '''
        Process dictionaries from various contexts (job events, activity stream

@@ -128,37 +139,6 @@ class LogstashFormatter(LogstashFormatterBase):
            data = json.loads(data)
        data_for_log = {}

        def index_by_name(alist):
            """Takes a list of dictionaries with `name` as a key in each dict
            and returns a dictionary indexed by those names"""
            adict = {}
            for item in alist:
                subdict = copy(item)
                if 'name' in subdict:
                    name = subdict.get('name', None)
                elif 'path' in subdict:
                    name = subdict.get('path', None)
                if name:
                    # Logstash v2 can not accept '.' in a name
                    name = name.replace('.', '_')
                adict[name] = subdict
            return adict

        def convert_to_type(t, val):
            if t is float:
                val = val[:-1] if val.endswith('s') else val
                try:
                    return float(val)
                except ValueError:
                    return val
            elif t is int:
                try:
                    return int(val)
                except ValueError:
                    return val
            elif t is str:
                return val

        if kind == 'job_events':
            job_event = raw_data['python_objects']['job_event']
            for field_object in job_event._meta.fields:

@@ -198,6 +178,21 @@ class LogstashFormatter(LogstashFormatterBase):
            data_for_log['host_name'] = raw_data['host_name']
            data_for_log['job_id'] = raw_data['job_id']
        elif kind == 'performance':
            def convert_to_type(t, val):
                if t is float:
                    val = val[:-1] if val.endswith('s') else val
                    try:
                        return float(val)
                    except ValueError:
                        return val
                elif t is int:
                    try:
                        return int(val)
                    except ValueError:
                        return val
                elif t is str:
                    return val

            request = raw_data['python_objects']['request']
            response = raw_data['python_objects']['response']

@@ -231,21 +226,8 @@ class LogstashFormatter(LogstashFormatterBase):
            log_kind = record.name[len('awx.analytics.'):]
            fields = self.reformat_data_for_log(fields, kind=log_kind)
        # General AWX metadata
        for log_name, setting_name in [
                ('type', 'LOG_AGGREGATOR_TYPE'),
                ('cluster_host_id', 'CLUSTER_HOST_ID'),
                ('tower_uuid', 'LOG_AGGREGATOR_TOWER_UUID')]:
            if hasattr(settings, setting_name):
                fields[log_name] = getattr(settings, setting_name, None)
            elif log_name == 'type':
                fields[log_name] = 'other'

        uuid = (
            getattr(settings, 'LOG_AGGREGATOR_TOWER_UUID', None) or
            getattr(settings, 'INSTALL_UUID', None)
        )
        if uuid:
            fields['tower_uuid'] = uuid
        fields['cluster_host_id'] = self.cluster_host_id
        fields['tower_uuid'] = self.tower_uuid
        return fields

    def format(self, record):
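
The __init__ added above trades repeated settings lookups at emit time for values captured once when the formatter is constructed. The same pattern in isolation, on a plain logging.Formatter (MetadataFormatter and its arguments are illustrative, not AWX code):

import logging


class MetadataFormatter(logging.Formatter):
    def __init__(self, cluster_host_id, tower_uuid=None, **kwargs):
        # capture settings-derived metadata once, at construction,
        # instead of re-reading it for every emitted record
        self.cluster_host_id = cluster_host_id
        self.tower_uuid = tower_uuid
        super().__init__(**kwargs)

    def format(self, record):
        record.cluster_host_id = self.cluster_host_id
        record.tower_uuid = self.tower_uuid
        return super().format(record)


handler = logging.StreamHandler()
handler.setFormatter(MetadataFormatter('awx-1', fmt='%(cluster_host_id)s %(message)s'))
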

@@ -4,6 +4,7 @@

# Python
import logging
import json
import os
import requests
import time
import threading

@@ -18,6 +19,7 @@ from django.conf import settings

# requests futures, a dependency used by these handlers
from requests_futures.sessions import FuturesSession
import cachetools

# AWX
from awx.main.utils.formatters import LogstashFormatter

@@ -273,6 +275,16 @@ HANDLER_MAPPING = {
}


TTLCache = cachetools.TTLCache

if 'py.test' in os.environ.get('_', ''):
    # don't cache settings in unit tests
    class TTLCache(TTLCache):

        def __getitem__(self, item):
            raise KeyError()


class AWXProxyHandler(logging.Handler):
    '''
    Handler specific to the AWX external logging feature

@@ -316,6 +328,7 @@ class AWXProxyHandler(logging.Handler):
    def get_handler_class(self, protocol):
        return HANDLER_MAPPING.get(protocol, AWXNullHandler)

    @cachetools.cached(cache=TTLCache(maxsize=1, ttl=3), key=lambda *args, **kw: 'get_handler')
    def get_handler(self, custom_settings=None, force_create=False):
        new_kwargs = {}
        use_settings = custom_settings or settings

@@ -342,10 +355,14 @@ class AWXProxyHandler(logging.Handler):
        self._handler.setFormatter(self.formatter)
        return self._handler

    @cachetools.cached(cache=TTLCache(maxsize=1, ttl=3), key=lambda *args, **kw: 'should_audit')
    def should_audit(self):
        return settings.LOG_AGGREGATOR_AUDIT

    def emit(self, record):
        if AWXProxyHandler.thread_local.enabled:
            actual_handler = self.get_handler()
            if settings.LOG_AGGREGATOR_AUDIT:
            if self.should_audit():
                self.auditor.setLevel(settings.LOG_AGGREGATOR_LEVEL)
                self.auditor.emit(record)
            return actual_handler.emit(record)
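
The decorators above are stock cachetools; the constant key collapses every call onto a single cache slot, so the wrapped method body runs at most once per TTL window. The same three-second memoization reproduced standalone (the toy should_audit below stands in for the settings read):

import time

import cachetools


@cachetools.cached(cache=cachetools.TTLCache(maxsize=1, ttl=3),
                   key=lambda *args, **kw: 'should_audit')
def should_audit():
    print('reading setting...')    # visible only on a cache miss
    return True


should_audit()    # miss: prints, result cached
should_audit()    # hit: served from cache
time.sleep(3.1)
should_audit()    # TTL expired: prints again
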

@@ -98,5 +98,6 @@ def handle_csp_violation(request):
    logger.error(json.loads(request.body))
    return HttpResponse(content=None)


def handle_login_redirect(request):
    return HttpResponseRedirect("/#/login")

@@ -366,6 +366,7 @@ class VMWareInventory(object):
    def _get_instances(self, inkwargs):
        ''' Make API calls '''
        instances = []
        si = None
        try:
            si = SmartConnect(**inkwargs)
        except ssl.SSLError as connection_error:

@@ -5,7 +5,6 @@ import os
import re  # noqa
import sys
from datetime import timedelta
from celery.schedules import crontab

# global settings
from django.conf import global_settings

@@ -310,6 +309,9 @@ REST_FRAMEWORK = {
    'VIEW_DESCRIPTION_FUNCTION': 'awx.api.generics.get_view_description',
    'NON_FIELD_ERRORS_KEY': '__all__',
    'DEFAULT_VERSION': 'v2',
    # For swagger schema generation
    # see https://github.com/encode/django-rest-framework/pull/6532
    'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema',
    #'URL_FORMAT_OVERRIDE': None,
}

@@ -375,7 +377,7 @@ AUTH_BASIC_ENABLED = True

# If set, specifies a URL that unauthenticated users will be redirected to
# when trying to access a UI page that requires authentication.
LOGIN_REDIRECT_OVERRIDE = None
LOGIN_REDIRECT_OVERRIDE = ''

# If set, serve only minified JS for UI.
USE_MINIFIED_JS = False

@@ -432,13 +434,9 @@ CELERYBEAT_SCHEDULE = {
        'schedule': timedelta(seconds=60),
        'options': {'expires': 50,}
    },
    'purge_stdout_files': {
        'task': 'awx.main.tasks.purge_old_stdout_files',
        'schedule': timedelta(days=7)
    },
    'gather_analytics': {
        'task': 'awx.main.tasks.gather_analytics',
        'schedule': crontab(hour='*/6')
        'schedule': timedelta(minutes=5)
    },
    'task_manager': {
        'task': 'awx.main.scheduler.tasks.run_task_manager',

@@ -452,7 +450,6 @@ CELERYBEAT_SCHEDULE = {
    },
    # 'isolated_heartbeat': set up at the end of production.py and development.py
}
AWX_INCONSISTENT_TASK_INTERVAL = 60 * 3

AWX_CELERY_QUEUES_STATIC = [
    CELERY_DEFAULT_QUEUE,

@@ -573,9 +570,6 @@ ANSIBLE_INVENTORY_UNPARSED_FAILED = True
# Additional environment variables to be passed to the ansible subprocesses
AWX_TASK_ENV = {}

# Flag to enable/disable updating hosts M2M when saving job events.
CAPTURE_JOB_EVENT_HOSTS = False

# Rebuild Host Smart Inventory memberships.
AWX_REBUILD_SMART_MEMBERSHIP = False

@@ -665,6 +659,9 @@ PENDO_TRACKING_STATE = "off"
# Note: This setting may be overridden by database settings.
INSIGHTS_TRACKING_STATE = False

# Last gather date for Analytics
AUTOMATION_ANALYTICS_LAST_GATHER = None
AUTOMATION_ANALYTICS_INTERVAL = 14400

# Default list of modules allowed for ad hoc commands.
# Note: This setting may be overridden by database settings.
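
Reading the schedule change and the new settings together: the beat entry now ticks every five minutes, while AUTOMATION_ANALYTICS_LAST_GATHER and AUTOMATION_ANALYTICS_INTERVAL (14400 seconds, i.e. four hours) plausibly gate how often a tick actually gathers. A sketch of such a gate, assuming the task compares timestamps this way; the real gather_analytics task may differ:

from datetime import datetime, timedelta

AUTOMATION_ANALYTICS_INTERVAL = 14400    # seconds (4 hours)


def should_gather(last_gather, now=None):
    # last_gather stays None until the first successful gather
    now = now or datetime.utcnow()
    if last_gather is None:
        return True
    return now - last_gather > timedelta(seconds=AUTOMATION_ANALYTICS_INTERVAL)


assert should_gather(None)                     # never gathered: go
assert not should_gather(datetime.utcnow())    # just gathered: skip this tick
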

@@ -1142,8 +1139,7 @@ LOGGING = {
        'handlers': ['null']
    },
    'awx.main.commands.run_callback_receiver': {
        'handlers': ['callback_receiver'],
        'level': 'INFO'  # in debug mode, includes full callback data
        'handlers': ['callback_receiver'],  # level handled by dynamic_level_filter
    },
    'awx.main.dispatch': {
        'handlers': ['dispatcher'],

@@ -1208,6 +1204,22 @@ SILENCED_SYSTEM_CHECKS = ['models.E006']
# Use middleware to get request statistics
AWX_REQUEST_PROFILE = False

#
# Optionally, AWX can generate DOT graphs
# (http://www.graphviz.org/doc/info/lang.html) for per-request profiling
# via gprof2dot (https://github.com/jrfonseca/gprof2dot)
#
# If you set this to True, you must `/var/lib/awx/venv/awx/bin/pip install gprof2dot`
# .dot files will be saved in `/var/log/tower/profile/` and can be converted e.g.,
#
# ~ yum install graphviz
# ~ dot -o profile.png -Tpng /var/log/tower/profile/some-profile-data.dot
#
AWX_REQUEST_PROFILE_WITH_DOT = False

# Allow profiling callback workers via SIGUSR1
AWX_CALLBACK_PROFILE = False

# Delete temporary directories created to store playbook run-time
AWX_CLEANUP_PATHS = True


@@ -179,3 +179,4 @@ else:
    os.environ['SDB_NOTIFY_HOST'] = os.popen('ip route').read().split(' ')[2]

WEBSOCKET_ORIGIN_WHITELIST = ['https://localhost:8043', 'https://localhost:3000']
AWX_CALLBACK_PROFILE = True

@@ -20,17 +20,7 @@ class SocialAuthMiddleware(SocialAuthExceptionMiddleware):

    def process_request(self, request):
        if request.path.startswith('/sso'):
            # django-social keeps a list of backends in memory that it gathers
            # based on the value of settings.AUTHENTICATION_BACKENDS *at import
            # time*:
            # https://github.com/python-social-auth/social-app-django/blob/c1e2795b00b753d58a81fa6a0261d8dae1d9c73d/social_django/utils.py#L13
            #
            # our settings.AUTHENTICATION_BACKENDS can *change*
            # dynamically as Tower settings are changed (i.e., if somebody
            # configures Github OAuth2 integration), so we need to
            # _overwrite_ this in-memory value at the top of every request so
            # that we have the latest version
            # see: https://github.com/ansible/tower/issues/1979
            # See upgrade blocker note in requirements/README.md
            utils.BACKENDS = settings.AUTHENTICATION_BACKENDS
        token_key = request.COOKIES.get('token', '')
        token_key = urllib.parse.quote(urllib.parse.unquote(token_key).strip('"'))

@@ -78,7 +78,7 @@ def _update_m2m_from_expression(user, related, expr, remove=True):
            related.remove(user)


def _update_org_from_attr(user, related, attr, remove, remove_admins):
def _update_org_from_attr(user, related, attr, remove, remove_admins, remove_auditors):
    from awx.main.models import Organization

    org_ids = []

@@ -97,6 +97,10 @@ def _update_org_from_attr(user, related, attr, remove, remove_admins):
        [o.admin_role.members.remove(user) for o in
         Organization.objects.filter(Q(admin_role__members=user) & ~Q(id__in=org_ids))]

    if remove_auditors:
        [o.auditor_role.members.remove(user) for o in
         Organization.objects.filter(Q(auditor_role__members=user) & ~Q(id__in=org_ids))]


def update_user_orgs(backend, details, user=None, *args, **kwargs):
    '''

@@ -162,9 +166,9 @@ def update_user_orgs_by_saml_attr(backend, details, user=None, *args, **kwargs):
    attr_admin_values = kwargs.get('response', {}).get('attributes', {}).get(org_map.get('saml_admin_attr'), [])
    attr_auditor_values = kwargs.get('response', {}).get('attributes', {}).get(org_map.get('saml_auditor_attr'), [])

    _update_org_from_attr(user, "member_role", attr_values, remove, False)
    _update_org_from_attr(user, "admin_role", attr_admin_values, False, remove_admins)
    _update_org_from_attr(user, "auditor_role", attr_auditor_values, False, remove_auditors)
    _update_org_from_attr(user, "member_role", attr_values, remove, False, False)
    _update_org_from_attr(user, "admin_role", attr_admin_values, False, remove_admins, False)
    _update_org_from_attr(user, "auditor_role", attr_auditor_values, False, False, remove_auditors)


def update_user_teams_by_saml_attr(backend, details, user=None, *args, **kwargs):
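
The new remove_auditors argument applies the same pruning pattern already used for admins: when the flag is set, any organization role the user holds that the SAML attribute no longer lists is revoked. The pattern in miniature, with plain-Python stand-ins for the ORM objects (Org and prune_role are hypothetical):

# Hypothetical stand-ins for Organization/Role; illustrative only.
class Org:
    def __init__(self, org_id, members=None):
        self.id = org_id
        self.members = set(members or [])


def prune_role(user, orgs, allowed_org_ids, remove):
    # revoke membership in every org the SAML attribute no longer lists
    if not remove:
        return
    for org in orgs:
        if user in org.members and org.id not in allowed_org_ids:
            org.members.remove(user)


orgs = [Org(1, ['alice']), Org(2, ['alice'])]
prune_role('alice', orgs, allowed_org_ids={1}, remove=True)
assert 'alice' in orgs[0].members and 'alice' not in orgs[1].members
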

@@ -153,7 +153,10 @@ function TemplatesStrings (BaseString) {
        TIMED_OUT: t.s('APPROVAL TIMED OUT'),
        TIMEOUT: t.s('Timeout'),
        APPROVED: t.s('APPROVED'),
        DENIED: t.s('DENIED')
        DENIED: t.s('DENIED'),
        CONVERGENCE: t.s('Convergence'),
        ALL: t.s('All'),
        ANY: t.s('Any'),
    };
}


@@ -598,6 +598,11 @@ table, tbody {
}

.List-staticColumnLayout--groups {
    display: grid;
    grid-template-columns: @at-space @at-space-5x auto;
}

.List-staticColumnLayout--hostNestedGroups {
    display: grid;
    grid-template-columns: @at-space @at-space-5x @at-space-5x auto;
}

@@ -58,6 +58,10 @@ export default ['i18n', function(i18n) {
        type: 'text',
        reset: 'ANSIBLE_FACT_CACHE_TIMEOUT',
    },
    MAX_FORKS: {
        type: 'text',
        reset: 'MAX_FORKS',
    },
    PROJECT_UPDATE_VVV: {
        type: 'toggleSwitch',
    },

@@ -79,6 +79,12 @@ export default ['i18n', function(i18n) {
    AUTOMATION_ANALYTICS_URL: {
        type: 'text',
        reset: 'AUTOMATION_ANALYTICS_URL',
    },
    AUTOMATION_ANALYTICS_GATHER_INTERVAL: {
        type: 'number',
        integer: true,
        min: 1800,
        reset: 'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
    }
},


@@ -39,7 +39,7 @@ export default
        label: i18n._("Hosts")
    },
    {
        url: "/#/hosts?host_search=has_active_failures:true",
        url: "/#/hosts?host_search=last_job_host_summary__failed:true",
        number: scope.data.hosts.failed,
        label: i18n._("Failed Hosts"),
        isFailureCount: true

@@ -5,11 +5,9 @@
 *************************************************/
export default
    ['$scope', '$rootScope', '$state', '$stateParams', 'HostsRelatedGroupsList', 'InventoryUpdate',
        'CancelSourceUpdate', 'rbacUiControlService', 'GetBasePath',
        'GetHostsStatusMsg', 'Dataset', 'Find', 'QuerySet', 'inventoryData', 'host', 'GroupsService',
        'CancelSourceUpdate', 'rbacUiControlService', 'GetBasePath', 'Dataset', 'Find', 'QuerySet', 'inventoryData', 'host', 'GroupsService',
        function($scope, $rootScope, $state, $stateParams, HostsRelatedGroupsList, InventoryUpdate,
            CancelSourceUpdate, rbacUiControlService, GetBasePath,
            GetHostsStatusMsg, Dataset, Find, qs, inventoryData, host, GroupsService){
            CancelSourceUpdate, rbacUiControlService, GetBasePath, Dataset, Find, qs, inventoryData, host, GroupsService){

            let list = HostsRelatedGroupsList;

@@ -29,27 +27,6 @@
                $scope[`${list.iterator}_dataset`] = Dataset.data;
                $scope[list.name] = $scope[`${list.iterator}_dataset`].results;

                $scope.$watchCollection(list.name, function(){
                    _.forEach($scope[list.name], buildStatusIndicators);
                });
            }

            function buildStatusIndicators(group){
                if (group === undefined || group === null) {
                    group = {};
                }

                let hosts_status;

                hosts_status = GetHostsStatusMsg({
                    active_failures: group.hosts_with_active_failures,
                    total_hosts: group.total_hosts,
                    inventory_id: $scope.inventory_id,
                    group_id: group.id
                });
                _.assign(group,
                    {hosts_status_tip: hosts_status.tooltip},
                    {hosts_status_class: hosts_status.class});
            }

            $scope.editGroup = function(id){

@@ -1,33 +0,0 @@
export default
    ['i18n', function(i18n) {
        return function(params) {
            var active_failures = params.active_failures,
                total_hosts = params.total_hosts,
                tip, failures, html_class;

            // Return values for use on host status indicator

            if (active_failures > 0) {
                tip = total_hosts + ((total_hosts === 1) ? ' host' : ' hosts') + '. ' + active_failures + i18n._(' with failed jobs.');
                html_class = 'error';
                failures = true;
            } else {
                failures = false;
                if (total_hosts === 0) {
                    // no hosts
                    tip = i18n._("Contains 0 hosts.");
                    html_class = 'none';
                } else {
                    // many hosts with 0 failures
                    tip = total_hosts + ((total_hosts === 1) ? ' host' : ' hosts') + '. ' + i18n._('No job failures');
                    html_class = 'success';
                }
            }

            return {
                tooltip: tip,
                failures: failures,
                'class': html_class
            };
        };
    }];

@@ -18,22 +18,6 @@
    basePath: 'api/v2/inventories/{{$stateParams.inventory_id}}/groups/',
    layoutClass: 'List-staticColumnLayout--groups',
    actionHolderClass: 'List-actionHolder List-actionHolder--rootGroups',
    staticColumns: [
        {
            field: 'failed_hosts',
            content: {
                label: '',
                nosort: true,
                mode: 'all',
                iconOnly: true,
                awToolTip: "{{ group.hosts_status_tip }}",
                dataPlacement: "top",
                icon: "{{ 'fa icon-job-' + group.hosts_status_class }}",
                columnClass: 'status-column'
            }
        }
    ],

    fields: {
        name: {
            label: i18n._('Groups'),

@@ -5,13 +5,11 @@
 *************************************************/
export default
    ['$scope', '$state', '$stateParams', 'listDefinition', 'InventoryUpdate',
        'GroupsService', 'CancelSourceUpdate',
        'GetHostsStatusMsg', 'Dataset', 'inventoryData', 'canAdd',
        'InventoryHostsStrings', '$transitions',
        'GroupsService', 'CancelSourceUpdate', 'Dataset', 'inventoryData', 'canAdd',
        'InventoryHostsStrings', '$transitions', 'GetBasePath', 'Rest',
        function($scope, $state, $stateParams, listDefinition, InventoryUpdate,
            GroupsService, CancelSourceUpdate,
            GetHostsStatusMsg, Dataset, inventoryData, canAdd,
            InventoryHostsStrings, $transitions){
            GroupsService, CancelSourceUpdate, Dataset, inventoryData, canAdd,
            InventoryHostsStrings, $transitions, GetBasePath, Rest){

            let list = listDefinition;

@@ -70,18 +68,6 @@
                        group.isSelected = true;
                    }
                });

                let hosts_status;

                hosts_status = GetHostsStatusMsg({
                    active_failures: group.hosts_with_active_failures,
                    total_hosts: group.total_hosts,
                    inventory_id: $scope.inventory_id,
                    group_id: group.id
                });
                _.assign(group,
                    {hosts_status_tip: hosts_status.tooltip},
                    {hosts_status_class: hosts_status.class});
            }

            $scope.createGroup = function(){

@@ -102,35 +88,51 @@
                $state.go('inventories.edit.groups.edit.nested_groups', {group_id: id});
            };
            $scope.deleteGroup = function(group){
                $scope.toDelete = {};
                $scope.strings.deleteModal = {};
                angular.extend($scope.toDelete, group);
                if($scope.toDelete.total_groups === 0 && $scope.toDelete.total_hosts === 0) {
                    // This group doesn't have any child groups or hosts - the user is just trying to delete
                    // the group
                    $scope.deleteOption = "delete";
                }
                else {
                    $scope.strings.deleteModal.group = InventoryHostsStrings.get('deletegroup.GROUP', $scope.toDelete.total_groups);
                    $scope.strings.deleteModal.host = InventoryHostsStrings.get('deletegroup.HOST', $scope.toDelete.total_hosts);
                const promises = [];

                Rest.setUrl(group.related.hosts);
                promises.push(Rest.get());

                Rest.setUrl(group.related.children);
                promises.push(Rest.get());

                Promise.all(promises)
                    .then(([hostResponse, groupResponse]) => {
                        $scope.toDelete = {};
                        $scope.strings.deleteModal = {};
                        $scope.toDelete.hostCount = _.get(hostResponse, ['data', 'count'], 0);
                        $scope.toDelete.groupCount = _.get(groupResponse, ['data', 'count'], 0);
                        angular.extend($scope.toDelete, group);

                        if($scope.toDelete.groupCount === 0 && $scope.toDelete.hostCount === 0) {
                            // This group doesn't have any child groups or hosts - the user is just trying to delete
                            // the group
                            $scope.deleteOption = "delete";
                        }
                        else {
                            $scope.strings.deleteModal.group = InventoryHostsStrings.get('deletegroup.GROUP', $scope.toDelete.groupCount);
                            $scope.strings.deleteModal.host = InventoryHostsStrings.get('deletegroup.HOST', $scope.toDelete.hostCount);
                            if($scope.toDelete.groupCount === 0 || $scope.toDelete.hostCount === 0) {
                                if($scope.toDelete.groupCount === 0) {
                                    $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_HOST', $scope.toDelete.hostCount);
                                    $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_HOST', $scope.toDelete.hostCount);
                                }
                                else if($scope.toDelete.hostCount === 0) {
                                    $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUP', $scope.toDelete.groupCount);
                                    $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUP', $scope.toDelete.groupCount);
                                }
                            }
                            else {
                                $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.groupCount, hosts: $scope.toDelete.hostCount});
                                $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.groupCount, hosts: $scope.toDelete.hostCount});
                            }
                        }

                        $('#group-delete-modal').modal('show');
                    });

                if($scope.toDelete.total_groups === 0 || $scope.toDelete.total_hosts === 0) {
                    if($scope.toDelete.total_groups === 0) {
                        $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_HOST', $scope.toDelete.total_hosts);
                        $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_HOST', $scope.toDelete.total_hosts);
                    }
                    else if($scope.toDelete.total_hosts === 0) {
                        $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUP', $scope.toDelete.total_groups);
                        $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUP', $scope.toDelete.total_groups);
                    }
                }
                else {
                    $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.total_groups, hosts: $scope.toDelete.total_hosts});
                    $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.total_groups, hosts: $scope.toDelete.total_hosts});
                }

                $('#group-delete-modal').modal('show');
            };
            $scope.confirmDelete = function(){
                let reloadListStateParams = null;
