mirror of
https://github.com/ansible/awx.git
synced 2026-02-06 03:54:44 -03:30
Compare commits
502 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ed1bacdc08 | ||
|
|
f39fa35d86 | ||
|
|
9266444b19 | ||
|
|
35230eded1 | ||
|
|
ecacf64c28 | ||
|
|
d01e6ab8b6 | ||
|
|
5653b47aa3 | ||
|
|
7bc3d85913 | ||
|
|
0a8df7fde2 | ||
|
|
b39269c4c2 | ||
|
|
09981c0020 | ||
|
|
81bdbef785 | ||
|
|
3c541a4695 | ||
|
|
5a1ae9b816 | ||
|
|
8c261892ee | ||
|
|
b89d4349c0 | ||
|
|
3e98363811 | ||
|
|
f24289b2ba | ||
|
|
9170c557a7 | ||
|
|
a47b403f8d | ||
|
|
83aa7bfac4 | ||
|
|
db0b2e6cb6 | ||
|
|
f391b7ace4 | ||
|
|
008c9e4320 | ||
|
|
8ddc1c61ef | ||
|
|
0aa6c7b83f | ||
|
|
e43879d44e | ||
|
|
2a6f6111dc | ||
|
|
6b0659d63a | ||
|
|
426e901cdf | ||
|
|
ac55f93cfb | ||
|
|
c32c3db35e | ||
|
|
20a999f846 | ||
|
|
81af34fce3 | ||
|
|
8fed469975 | ||
|
|
c6d4a62263 | ||
|
|
a9b77eb706 | ||
|
|
e642af82cc | ||
|
|
b0a755d7b5 | ||
|
|
6753f1ca35 | ||
|
|
f8d9d5f51a | ||
|
|
bad8c65321 | ||
|
|
6f0c937236 | ||
|
|
55a616cba6 | ||
|
|
87365e5969 | ||
|
|
7e829e3a9d | ||
|
|
b8cba916a5 | ||
|
|
dc96a1730e | ||
|
|
d4983ea10d | ||
|
|
209bdd00a1 | ||
|
|
c4efbd62bc | ||
|
|
287a3bc8d4 | ||
|
|
9fefc26528 | ||
|
|
e2d4ef31fd | ||
|
|
a15e257b9e | ||
|
|
a56370fed5 | ||
|
|
e7ed4811c1 | ||
|
|
9860b38438 | ||
|
|
ef80ecd3b6 | ||
|
|
50290a9063 | ||
|
|
fefa4a8bf4 | ||
|
|
546f88c74d | ||
|
|
afa1fb489c | ||
|
|
3571abb42b | ||
|
|
21425db889 | ||
|
|
cc64657749 | ||
|
|
7300c2ccc1 | ||
|
|
7c596039c5 | ||
|
|
9857c8272e | ||
|
|
797169317c | ||
|
|
67c6591f6f | ||
|
|
15906b7e3c | ||
|
|
fdd2b84804 | ||
|
|
ac3f7d0fac | ||
|
|
09d63b4883 | ||
|
|
b96e33ea50 | ||
|
|
71d23e8c81 | ||
|
|
073feb74cb | ||
|
|
43f19cc94b | ||
|
|
ef312f0030 | ||
|
|
d0fec0f19c | ||
|
|
1e14221625 | ||
|
|
b6a901ac51 | ||
|
|
1af0ee2f8c | ||
|
|
b62ac6fbe4 | ||
|
|
e5aaeedc43 | ||
|
|
fc5c5400cd | ||
|
|
95bead2bb2 | ||
|
|
bcbda23aee | ||
|
|
5a21783013 | ||
|
|
e33604de71 | ||
|
|
c50c63a9ff | ||
|
|
916d91cbc7 | ||
|
|
79bd8b2c72 | ||
|
|
5939116b0a | ||
|
|
6759e60428 | ||
|
|
ef8af79700 | ||
|
|
dbb4d2b011 | ||
|
|
4a28065dbb | ||
|
|
5387846cbb | ||
|
|
6b247f1f24 | ||
|
|
838b793704 | ||
|
|
3cb8c98a41 | ||
|
|
18f254fc28 | ||
|
|
9c6c6ce816 | ||
|
|
6699be95bf | ||
|
|
17cd0595d7 | ||
|
|
0402064c0f | ||
|
|
e33265e12c | ||
|
|
b8c76301de | ||
|
|
51f7907a01 | ||
|
|
1a98cedc0f | ||
|
|
db974d4fd4 | ||
|
|
d6e663eff0 | ||
|
|
ccb40c8c68 | ||
|
|
6eb04de1a7 | ||
|
|
cad5c5e79a | ||
|
|
97472cb91b | ||
|
|
0c63ea0052 | ||
|
|
2b1d2b2976 | ||
|
|
7d51b1cb9d | ||
|
|
52e531625c | ||
|
|
b5db652050 | ||
|
|
e699402115 | ||
|
|
d012f5cd99 | ||
|
|
4a2ca20b60 | ||
|
|
e49dfd6ee2 | ||
|
|
fb414802fa | ||
|
|
00f400e839 | ||
|
|
234e33df0e | ||
|
|
f9b0a3121f | ||
|
|
0afdca3674 | ||
|
|
03cef6fea3 | ||
|
|
7dc0fce1aa | ||
|
|
648d27f28d | ||
|
|
5a5e5bc121 | ||
|
|
aea37654e2 | ||
|
|
2ed97aeb0c | ||
|
|
9431b0b6ff | ||
|
|
a5007ccd41 | ||
|
|
81fc4219ae | ||
|
|
c3c4d79890 | ||
|
|
b01b229fea | ||
|
|
984b7e066d | ||
|
|
67d927121d | ||
|
|
ae06cff991 | ||
|
|
7ea6d7bf4d | ||
|
|
fad4a549d0 | ||
|
|
9365e477c5 | ||
|
|
d0b3cac72a | ||
|
|
de02138dfd | ||
|
|
44f0b003fc | ||
|
|
56aed597b2 | ||
|
|
f33ee03b98 | ||
|
|
69a3b0def6 | ||
|
|
6504972d82 | ||
|
|
4bb2b5768e | ||
|
|
c0a641ed52 | ||
|
|
1e8c89f536 | ||
|
|
54d3412820 | ||
|
|
1690938dfb | ||
|
|
0a9d3d47b9 | ||
|
|
2952b0a0fe | ||
|
|
1d3e8f8b87 | ||
|
|
97c040aaa1 | ||
|
|
818c95501a | ||
|
|
664bdec57f | ||
|
|
92068930a6 | ||
|
|
d07a946183 | ||
|
|
9d58b15135 | ||
|
|
a0038276a4 | ||
|
|
f0ff6ecb0a | ||
|
|
60743d6ba6 | ||
|
|
4707b5e020 | ||
|
|
ed7d7fcf00 | ||
|
|
6c2a7f3782 | ||
|
|
47875c5f9a | ||
|
|
f28f7c6184 | ||
|
|
1494c8395b | ||
|
|
2691e1d707 | ||
|
|
6d413bd412 | ||
|
|
54bf7e13d8 | ||
|
|
c6b6a3ad89 | ||
|
|
2bd656e61d | ||
|
|
35b8e40d3c | ||
|
|
c4d901bf2c | ||
|
|
1369f72885 | ||
|
|
0b30e7907b | ||
|
|
fc94b3a943 | ||
|
|
fde9099198 | ||
|
|
815cd829e0 | ||
|
|
28c612ae9c | ||
|
|
d6ed6a856d | ||
|
|
706b370f7e | ||
|
|
80a2d10742 | ||
|
|
f7259a1e78 | ||
|
|
08570fe785 | ||
|
|
987cdc6802 | ||
|
|
6e27294e2b | ||
|
|
3439ba5f3b | ||
|
|
c8e10adc96 | ||
|
|
7e261b5246 | ||
|
|
1e1839915d | ||
|
|
74bf058d62 | ||
|
|
5ec537bad2 | ||
|
|
568901af74 | ||
|
|
c2e9926330 | ||
|
|
c4ccfa1b27 | ||
|
|
478bcc0b07 | ||
|
|
0bb9c58e25 | ||
|
|
9c783aa0ce | ||
|
|
526391a072 | ||
|
|
98f8faa349 | ||
|
|
8a2a5b0fb1 | ||
|
|
07cfa6cba5 | ||
|
|
e188692acf | ||
|
|
ad70754b6a | ||
|
|
9fb24f1a4c | ||
|
|
aefa30e1e9 | ||
|
|
7eb2d86890 | ||
|
|
2fb0144914 | ||
|
|
e3a731bb9e | ||
|
|
451e9a7504 | ||
|
|
8311acfba2 | ||
|
|
77a1c405a6 | ||
|
|
1b0bca8229 | ||
|
|
bd91e8eb54 | ||
|
|
ea4cd99003 | ||
|
|
00ce244716 | ||
|
|
3b791609cd | ||
|
|
a8d4eb7c1d | ||
|
|
d35bfafcf5 | ||
|
|
9f8ef4d1e5 | ||
|
|
a978d094b4 | ||
|
|
47e422ba7a | ||
|
|
4b86815275 | ||
|
|
6c1c850c5f | ||
|
|
f4f1e0fd3c | ||
|
|
ca84e1c654 | ||
|
|
6b6e898882 | ||
|
|
9dbcc5934e | ||
|
|
fac7fd45f8 | ||
|
|
34c206fab0 | ||
|
|
a2f64f1053 | ||
|
|
334d47f3ab | ||
|
|
4724b6a3d6 | ||
|
|
ce94ba4c83 | ||
|
|
0dc4fa975b | ||
|
|
1fb890f4eb | ||
|
|
15e8fd5eca | ||
|
|
06e751fea1 | ||
|
|
fe93ef5488 | ||
|
|
9b05a41eec | ||
|
|
2c12f1b66e | ||
|
|
33dedc88c8 | ||
|
|
759867c863 | ||
|
|
d4613d448c | ||
|
|
dbd68c5747 | ||
|
|
d23d7c422d | ||
|
|
4b793dc58a | ||
|
|
112757e202 | ||
|
|
12380fe1b1 | ||
|
|
b987b7daa0 | ||
|
|
6c7851b51f | ||
|
|
1ff0591553 | ||
|
|
58ad214dcf | ||
|
|
a71cee9300 | ||
|
|
1057b93570 | ||
|
|
e0edfeac7c | ||
|
|
47f45bf9b3 | ||
|
|
8d162f9044 | ||
|
|
6269b43456 | ||
|
|
67867cf0c8 | ||
|
|
7538b4ce15 | ||
|
|
8c6a1e348d | ||
|
|
3cd80ef67a | ||
|
|
f3310236e4 | ||
|
|
ed28faa3db | ||
|
|
fc4b02b79f | ||
|
|
a3dd9eb4b7 | ||
|
|
079abc162f | ||
|
|
d773d163f7 | ||
|
|
68ada92f3b | ||
|
|
4c43afda19 | ||
|
|
91cc4689c9 | ||
|
|
febfcf709d | ||
|
|
cf1d5a29f6 | ||
|
|
1425021106 | ||
|
|
7b42316366 | ||
|
|
ce9d75c2e4 | ||
|
|
26845642f0 | ||
|
|
6fa0d9d4ed | ||
|
|
7accac2f63 | ||
|
|
044c047ac6 | ||
|
|
5a2ecd25e7 | ||
|
|
6c89935521 | ||
|
|
0641c6b0a6 | ||
|
|
4ea27e0d1b | ||
|
|
79c196fc08 | ||
|
|
249a5e5e4d | ||
|
|
51c73cb357 | ||
|
|
8d35b71321 | ||
|
|
a80d5b1b39 | ||
|
|
e5d86419c8 | ||
|
|
54a98ff612 | ||
|
|
e7077185bf | ||
|
|
4187d02b8a | ||
|
|
457359322f | ||
|
|
8a65c6e1c8 | ||
|
|
fb29f68efc | ||
|
|
1fcddba558 | ||
|
|
e20599d7bb | ||
|
|
9288b53015 | ||
|
|
82be0a8af2 | ||
|
|
35c374fc79 | ||
|
|
dbe135991b | ||
|
|
64f89b3fce | ||
|
|
aaaae87aa7 | ||
|
|
44a2d7a346 | ||
|
|
be00b1ca96 | ||
|
|
33574d70c8 | ||
|
|
bc705ad8ce | ||
|
|
78961c8037 | ||
|
|
e22486ada8 | ||
|
|
0051da95c9 | ||
|
|
122142c040 | ||
|
|
91ad0a9f89 | ||
|
|
6ea3ecbb26 | ||
|
|
e87dce023b | ||
|
|
89a05e9bbc | ||
|
|
96fbc9ea27 | ||
|
|
e70d377a53 | ||
|
|
f65ef9f75c | ||
|
|
7149c41804 | ||
|
|
1a5b5c32b8 | ||
|
|
1b44ca8ef4 | ||
|
|
d7f4707044 | ||
|
|
9d39ac83f9 | ||
|
|
ce393da6fd | ||
|
|
2f86774006 | ||
|
|
e2c63c41e7 | ||
|
|
f9685717b8 | ||
|
|
47a3ba9bd5 | ||
|
|
af3e6f792c | ||
|
|
fc56a1c170 | ||
|
|
84fb908261 | ||
|
|
cb4a38d7a7 | ||
|
|
9518c38bb8 | ||
|
|
5e37d6ea7e | ||
|
|
54e76b2534 | ||
|
|
b8ed41fa82 | ||
|
|
fbd03287ea | ||
|
|
7919433288 | ||
|
|
3568be84c8 | ||
|
|
8d2ab3de42 | ||
|
|
4c4cbaef9f | ||
|
|
aef224732c | ||
|
|
b0c1be7338 | ||
|
|
14a3a6073e | ||
|
|
fc7c2117e9 | ||
|
|
962de13965 | ||
|
|
7211ff22df | ||
|
|
003d7f0915 | ||
|
|
f019452207 | ||
|
|
c323a2393a | ||
|
|
85be3c7692 | ||
|
|
5f3ebc26e0 | ||
|
|
d282966aa1 | ||
|
|
71e132ce0f | ||
|
|
d6d84e8f5e | ||
|
|
fdc7f58bb4 | ||
|
|
6c597ad165 | ||
|
|
48ec69c4f5 | ||
|
|
1ea3d55167 | ||
|
|
7181bd1c9b | ||
|
|
9e8ac3b09b | ||
|
|
e24e1fc1f0 | ||
|
|
f28b48a473 | ||
|
|
4f58537949 | ||
|
|
0512f65c8f | ||
|
|
947bdeed3e | ||
|
|
d3a7bec674 | ||
|
|
652facba9f | ||
|
|
b1ef7506ea | ||
|
|
c95d7d465a | ||
|
|
70919638ba | ||
|
|
6ea48cd73e | ||
|
|
63ca8e4134 | ||
|
|
725cc469cf | ||
|
|
665a4d83e3 | ||
|
|
018514d657 | ||
|
|
71d428433f | ||
|
|
2f689fffbe | ||
|
|
3119d5ed22 | ||
|
|
aab27e9b93 | ||
|
|
b60a30cbd4 | ||
|
|
88acd95a72 | ||
|
|
c3fbb07535 | ||
|
|
8d043e6f85 | ||
|
|
31602c4b28 | ||
|
|
57cd8adc2d | ||
|
|
c1e20fe7a0 | ||
|
|
b1f5529aa4 | ||
|
|
350699eda8 | ||
|
|
10a7544d68 | ||
|
|
d3eea5e694 | ||
|
|
8fd9fea113 | ||
|
|
470a4b7746 | ||
|
|
38c2ea7025 | ||
|
|
5895654538 | ||
|
|
b402d9ba6d | ||
|
|
5db478a4a0 | ||
|
|
059347eec3 | ||
|
|
e8dbfa42cf | ||
|
|
3d12e040ed | ||
|
|
fceca3bcae | ||
|
|
fcd03fb1c2 | ||
|
|
2cab6982c1 | ||
|
|
3ede367df4 | ||
|
|
f6bf0ad21f | ||
|
|
817b397d20 | ||
|
|
b61fdaf721 | ||
|
|
1603106cb4 | ||
|
|
1454000b91 | ||
|
|
b2e63d5e47 | ||
|
|
e7ede6af4a | ||
|
|
5503d4efb4 | ||
|
|
54640dbca0 | ||
|
|
eab82f3efa | ||
|
|
9e3d90896b | ||
|
|
e66a1002ee | ||
|
|
82160e2072 | ||
|
|
e814f28039 | ||
|
|
03e58523b2 | ||
|
|
341ef411a4 | ||
|
|
8d19555cf1 | ||
|
|
d23fd0515d | ||
|
|
b9483c28b0 | ||
|
|
6f9fc0c3f8 | ||
|
|
766a088749 | ||
|
|
2b539cab85 | ||
|
|
2fb67a3648 | ||
|
|
64c5e3994e | ||
|
|
7b792926eb | ||
|
|
c067788428 | ||
|
|
b7071a48c2 | ||
|
|
dee4b72303 | ||
|
|
5994a77b84 | ||
|
|
f93506fe2c | ||
|
|
7c86e38b81 | ||
|
|
1c374fba7d | ||
|
|
2cc9e2ca0b | ||
|
|
335dfd564a | ||
|
|
5380d57ce8 | ||
|
|
a01f80db5b | ||
|
|
d7eba47adb | ||
|
|
5fffdec69d | ||
|
|
358ef76529 | ||
|
|
bb628c52ad | ||
|
|
d2e0b26287 | ||
|
|
f2d46baf09 | ||
|
|
c6fdadd7f2 | ||
|
|
cc8b115c6a | ||
|
|
82d05e0a10 | ||
|
|
9978b3f9ad | ||
|
|
4f4af058b3 | ||
|
|
b372cebf8d | ||
|
|
3df8e2beb1 | ||
|
|
c45fbcf2ee | ||
|
|
5efa50788f | ||
|
|
3abbe87e10 | ||
|
|
f26bdb3e96 | ||
|
|
4be4e3db7f | ||
|
|
4ea92f0dcb | ||
|
|
a0cfbb93e9 | ||
|
|
08a784d50c | ||
|
|
9ee18d02c8 | ||
|
|
4fd190e4c8 | ||
|
|
a11e33458f | ||
|
|
84fdfbb898 | ||
|
|
f4a252a331 | ||
|
|
d4fe60756b | ||
|
|
f4ab979b59 | ||
|
|
3d3d79b6b3 | ||
|
|
e06d4d7734 | ||
|
|
ab18a4a440 | ||
|
|
7438062b97 | ||
|
|
4510cd11db | ||
|
|
74f2509482 | ||
|
|
f84e42ed15 | ||
|
|
94b4dabee2 | ||
|
|
94d44e8791 | ||
|
|
d24166bd68 | ||
|
|
62f82e7a7e | ||
|
|
7a21a45781 | ||
|
|
91ec0a4482 | ||
|
|
c8f4320b58 | ||
|
|
71a725c5f8 | ||
|
|
96572fe3d4 | ||
|
|
554a9586c6 | ||
|
|
f41c8cf4f2 | ||
|
|
f2f42c2c8a |
@@ -57,7 +57,7 @@ For Linux platforms, refer to the following from Docker:
|
||||
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/fedora/
|
||||
|
||||
**Centos**
|
||||
**CentOS**
|
||||
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/centos/
|
||||
|
||||
@@ -217,7 +217,7 @@ If you want to start and use the development environment, you'll first need to b
|
||||
(container)# /bootstrap_development.sh
|
||||
```
|
||||
|
||||
The above will do all the setup tasks, including running database migrations, so it amy take a couple minutes.
|
||||
The above will do all the setup tasks, including running database migrations, so it may take a couple minutes.
|
||||
|
||||
Now you can start each service individually, or start all services in a pre-configured tmux session like so:
|
||||
|
||||
@@ -281,7 +281,7 @@ For feature work, take a look at the current [Enhancements](https://github.com/a
|
||||
|
||||
If it has someone assigned to it then that person is the person responsible for working the enhancement. If you feel like you could contribute then reach out to that person.
|
||||
|
||||
Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start.
|
||||
Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start. For extra information on debugging tools, see [Debugging](https://github.com/ansible/awx/blob/devel/docs/debugging.md).
|
||||
|
||||
**NOTE**
|
||||
|
||||
@@ -293,7 +293,7 @@ Fixing bugs, adding translations, and updating the documentation are always appr
|
||||
|
||||
## Submitting Pull Requests
|
||||
|
||||
Fixes and Features for AWX will go through the Github pull request process. Submit your pull request (PR) agains the `devel` branch.
|
||||
Fixes and Features for AWX will go through the Github pull request process. Submit your pull request (PR) against the `devel` branch.
|
||||
|
||||
Here are a few things you can do to help the visibility of your change, and increase the likelihood that it will be accepted:
|
||||
|
||||
@@ -312,7 +312,7 @@ It's generally a good idea to discuss features with us first by engaging us in t
|
||||
We like to keep our commit history clean, and will require resubmission of pull requests that contain merge commits. Use `git pull --rebase`, rather than
|
||||
`git pull`, and `git rebase`, rather than `git merge`.
|
||||
|
||||
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefuly. Please be patient.
|
||||
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient.
|
||||
|
||||
All submitted PRs will have the linter and unit tests run against them, and the status reported in the PR.
|
||||
|
||||
|
||||
133
INSTALL.md
133
INSTALL.md
@@ -13,24 +13,30 @@ This document provides a guide for installing AWX.
|
||||
- [Choose a deployment platform](#choose-a-deployment-platform)
|
||||
- [Official vs Building Images](#official-vs-building-images)
|
||||
- [OpenShift](#openshift)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Prerequisites](#prerequisites-1)
|
||||
- [Deploying to Minishift](#deploying-to-minishift)
|
||||
- [Pre-build steps](#pre-build-steps)
|
||||
- [PostgreSQL](#postgresql)
|
||||
- [Start the build](#start-the-build)
|
||||
- [Post build](#post-build)
|
||||
- [Accessing AWX](#accessing-awx)
|
||||
- [Docker](#docker)
|
||||
- [Kubernetes](#kubernetes)
|
||||
- [Prerequisites](#prerequisites-2)
|
||||
- [Pre-build steps](#pre-build-steps-1)
|
||||
- [Start the build](#start-the-build-1)
|
||||
- [Accessing AWX](#accessing-awx-1)
|
||||
- [SSL Termination](#ssl-termination)
|
||||
- [Docker or Docker Compose](#docker-or-docker-compose)
|
||||
- [Prerequisites](#prerequisites-3)
|
||||
- [Pre-build steps](#pre-build-steps-2)
|
||||
- [Deploying to a remote host](#deploying-to-a-remote-host)
|
||||
- [Inventory variables](#inventory-variables)
|
||||
- [Docker registry](#docker-registry)
|
||||
- [PostgreSQL](#postgresql-1)
|
||||
- [Proxy settings](#proxy-settings)
|
||||
- [Start the build](#start-the-build-1)
|
||||
- [Start the build](#start-the-build-2)
|
||||
- [Post build](#post-build-1)
|
||||
- [Accessing AWX](#accessing-awx-1)
|
||||
- [Accessing AWX](#accessing-awx-2)
|
||||
|
||||
## Getting started
|
||||
|
||||
@@ -54,7 +60,7 @@ Before you can run a deployment, you'll need the following installed in your loc
|
||||
- [Docker](https://docs.docker.com/engine/installation/)
|
||||
- [docker-py](https://github.com/docker/docker-py) Python module
|
||||
- [GNU Make](https://www.gnu.org/software/make/)
|
||||
- [Git](https://git-scm.com/)
|
||||
- [Git](https://git-scm.com/) Requires Version 1.8.4+
|
||||
|
||||
### System Requirements
|
||||
|
||||
@@ -63,7 +69,7 @@ The system that runs the AWX service will need to satisfy the following requirem
|
||||
- At leasts 4GB of memory
|
||||
- At least 2 cpu cores
|
||||
- At least 20GB of space
|
||||
- Running Docker or Openshift
|
||||
- Running Docker, Openshift, or Kubernetes
|
||||
|
||||
### AWX Tunables
|
||||
|
||||
@@ -71,11 +77,14 @@ The system that runs the AWX service will need to satisfy the following requirem
|
||||
|
||||
### Choose a deployment platform
|
||||
|
||||
We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster, or a standalone Docker daemon. The remainder of this document will walk you through the process of building the images, and deploying them to either platform.
|
||||
We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster, docker-compose or a standalone Docker daemon. The remainder of this document will walk you through the process of building the images, and deploying them to either platform.
|
||||
|
||||
The [installer](./installer) directory contains an [inventory](./installer/inventory) file, and a playbook, [install.yml](./installer/install.yml). You'll begin by setting variables in the inventory file according to the platform you wish to use, and then you'll start the image build and deployment process by running the playbook.
|
||||
|
||||
In the sections below, you'll find deployment details and instructions for each platform. To deploy to Docker, view the [Docker section](#docker), and for OpenShift, view the [OpenShift section](#openshift).
|
||||
In the sections below, you'll find deployment details and instructions for each platform:
|
||||
- [Docker and Docker Compose](#docker-and-docker-compose)
|
||||
- [OpenShift](#openshift)
|
||||
- [Kubernetes](#kubernetes).
|
||||
|
||||
### Official vs Building Images
|
||||
|
||||
@@ -133,10 +142,6 @@ Before starting the build process, review the [inventory](./installer/inventory)
|
||||
|
||||
> Name of the OpenShift project that will be created, and used as the namespace for the AWX app. Defaults to *awx*.
|
||||
|
||||
*awx_node_port*
|
||||
|
||||
> The web server port running inside the AWX pod. Defaults to *30083*.
|
||||
|
||||
*openshift_user*
|
||||
|
||||
> Username of the OpenShift user that will create the project, and deploy the application. Defaults to *developer*.
|
||||
@@ -144,7 +149,7 @@ Before starting the build process, review the [inventory](./installer/inventory)
|
||||
*docker_registry*
|
||||
|
||||
> IP address and port, or URL, for accessing a registry that the OpenShift cluster can access. Defaults to *172.30.1.1:5000*, the internal registry delivered with Minishift. This is not needed if you are using official hosted images.
|
||||
n
|
||||
|
||||
*docker_registry_repository*
|
||||
|
||||
> Namespace to use when pushing and pulling images to and from the registry. Generally this will match the project name. It defaults to *awx*. This is not needed if you are using official hosted images.
|
||||
@@ -271,16 +276,88 @@ The above example is taken from a Minishift instance. From a web browser, use `h
|
||||
|
||||
Once you access the AWX server, you will be prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.
|
||||
|
||||
## Docker
|
||||
## Kubernetes
|
||||
|
||||
### Prerequisites
|
||||
|
||||
You will need the following installed on the host where AWX will be deployed:
|
||||
A Kubernetes deployment will require you to have access to a Kubernetes cluster as well as the following tools:
|
||||
|
||||
- [Docker](https://docs.docker.com/engine/installation/)
|
||||
- [docker-py](https://github.com/docker/docker-py) Python module
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
|
||||
- [helm](https://docs.helm.sh/using_helm/#quickstart-guide)
|
||||
|
||||
Note: After installing Docker, the Docker service must be started.
|
||||
The installation program will reference `kubectl` directly. `helm` is only necessary if you are letting the installer configure PostgreSQL for you.
|
||||
|
||||
### Pre-build steps
|
||||
|
||||
Before starting the build process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section uncommenting when necessary. Make sure the openshift and standalone docker sections are commented out:
|
||||
|
||||
*kubernetes_context*
|
||||
|
||||
> Prior to running the installer, make sure you've configured the context for the cluster you'll be installing to. This is how the installer knows which cluster to connect to and what authentication to use
|
||||
|
||||
*awx_kubernetes_namespace*
|
||||
|
||||
> Name of the Kubernetes namespace where the AWX resources will be installed. This will be created if it doesn't exist
|
||||
|
||||
*docker_registry_*
|
||||
|
||||
> These settings should be used if building your own base images. You'll need access to an external registry and are responsible for making sure your kube cluster can talk to it and use it. If these are undefined and the dockerhub_ configuration settings are uncommented then the images will be pulled from dockerhub instead
|
||||
|
||||
### Start the build
|
||||
|
||||
After making changes to the `inventory` file use `ansible-playbook` to begin the install
|
||||
|
||||
```bash
|
||||
$ ansible-playbook -i inventory install.yml
|
||||
```
|
||||
|
||||
### Post build
|
||||
|
||||
After the playbook run completes, check the status of the deployment by running `kubectl get pods --namespace awx` (replace awx with the namespace you used):
|
||||
|
||||
```bash
|
||||
# View the running pods, it may take a few minutes for everything to be marked in the Running state
|
||||
$ kubectl get pods --namespace awx
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
awx-2558692395-2r8ss 4/4 Running 0 29s
|
||||
awx-postgresql-355348841-kltkn 1/1 Running 0 1m
|
||||
```
|
||||
|
||||
### Accessing AWX
|
||||
|
||||
The AWX web interface is running in the AWX pod behind the `awx-web-svc` service:
|
||||
|
||||
```bash
|
||||
# View available services
|
||||
$ kubectl get svc --namespace awx
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
awx-postgresql ClusterIP 10.7.250.208 <none> 5432/TCP 2m
|
||||
awx-web-svc NodePort 10.7.241.35 <none> 80:30177/TCP 1m
|
||||
```
|
||||
|
||||
The deployment process creates an `Ingress` named `awx-web-svc` also. Some kubernetes cloud providers will automatically handle routing configuration when an Ingress is created others may require that you more explicitly configure it. You can see what kubernetes knows about things with:
|
||||
|
||||
```bash
|
||||
kubectl get ing --namespace awx
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
awx-web-svc * 35.227.x.y 80 3m
|
||||
```
|
||||
|
||||
If your provider is able to allocate an IP Address from the Ingress controller then you can navigate to the address and access the AWX interface. For some providers it can take a few minutes to allocate and make this accessible. For other providers it may require you to manually intervene.
|
||||
|
||||
### SSL Termination
|
||||
|
||||
Unlike Openshift's `Route` the Kubernetes `Ingress` doesn't yet handle SSL termination. As such the default configuration will only expose AWX through HTTP on port 80. You are responsible for configuring SSL support until support is added (either to Kubernetes or AWX itself).
|
||||
|
||||
|
||||
## Docker or Docker-Compose
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- [Docker](https://docs.docker.com/engine/installation/) on the host where AWX will be deployed. After installing Docker, the Docker service must be started (depending on your OS, you may have to add the local user that uses Docker to the ``docker`` group, refer to the documentation for details)
|
||||
- [docker-py](https://github.com/docker/docker-py) Python module.
|
||||
|
||||
If you're installing using Docker Compose, you'll need [Docker Compose](https://docs.docker.com/compose/install/).
|
||||
|
||||
### Pre-build steps
|
||||
|
||||
@@ -323,6 +400,13 @@ Before starting the build process, review the [inventory](./installer/inventory)
|
||||
|
||||
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. Defaults to *80*.
|
||||
|
||||
*use_docker_compose*
|
||||
|
||||
> Switch to ``true`` to use Docker Compose instead of the standalone Docker install.
|
||||
|
||||
*docker_compose_dir*
|
||||
|
||||
When using docker-compose, the `docker-compose.yml` file will be created there (default `/var/lib/awx`).
|
||||
|
||||
#### Docker registry
|
||||
|
||||
@@ -404,6 +488,8 @@ e240ed8209cd awx_task:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago
|
||||
97e196120ab3 postgres:9.6 "docker-entrypoint..." 2 minutes ago Up 2 minutes 5432/tcp postgres
|
||||
```
|
||||
|
||||
If you're deploying using Docker Compose, container names will be prefixed by the name of the folder where the docker-compose.yml file is created (by default, `awx`).
|
||||
|
||||
Immediately after the containers start, the *awx_task* container will perform required setup tasks, including database migrations. These tasks need to complete before the web interface can be accessed. To monitor the progress, you can follow the container's STDOUT by running the following:
|
||||
|
||||
```bash
|
||||
@@ -466,3 +552,14 @@ Added instance awx to tower
|
||||
The AWX web server is accessible on the deployment host, using the *host_port* value set in the *inventory* file. The default URL is [http://localhost](http://localhost).
|
||||
|
||||
You will prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.
|
||||
|
||||
### Maintenance using docker-compose
|
||||
|
||||
After the installation, maintenance operations with docker-compose can be done by using the `docker-compose.yml` file created at the location pointed by `docker_compose_dir`.
|
||||
|
||||
Among the possible operations, you may:
|
||||
|
||||
- Stop AWX : `docker-compose stop`
|
||||
- Upgrade AWX : `docker-compose pull && docker-compose up --force-recreate`
|
||||
|
||||
See the [docker-compose documentation](https://docs.docker.com/compose/) for details.
|
||||
|
||||
15
Makefile
15
Makefile
@@ -12,10 +12,10 @@ MANAGEMENT_COMMAND ?= awx-manage
|
||||
IMAGE_REPOSITORY_AUTH ?=
|
||||
IMAGE_REPOSITORY_BASE ?= https://gcr.io
|
||||
|
||||
VERSION=$(shell git describe --long)
|
||||
VERSION3=$(shell git describe --long | sed 's/\-g.*//')
|
||||
VERSION3DOT=$(shell git describe --long | sed 's/\-g.*//' | sed 's/\-/\./')
|
||||
RELEASE_VERSION=$(shell git describe --long | sed 's@\([0-9.]\{1,\}\).*@\1@')
|
||||
VERSION=$(shell git describe --long --first-parent)
|
||||
VERSION3=$(shell git describe --long --first-parent | sed 's/\-g.*//')
|
||||
VERSION3DOT=$(shell git describe --long --first-parent | sed 's/\-g.*//' | sed 's/\-/\./')
|
||||
RELEASE_VERSION=$(shell git describe --long --first-parent | sed 's@\([0-9.]\{1,\}\).*@\1@')
|
||||
|
||||
# NOTE: This defaults the container image version to the branch that's active
|
||||
COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
@@ -299,7 +299,7 @@ uwsgi: collectstatic
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --master-fifo=/awxfifo --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)"
|
||||
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --master-fifo=/awxfifo --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1-once="exec:/bin/sh -c '[ -f /tmp/celery_pid ] && kill -1 `cat /tmp/celery_pid`'"
|
||||
|
||||
daphne:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -322,10 +322,11 @@ runserver:
|
||||
|
||||
# Run to start the background celery worker for development.
|
||||
celeryd:
|
||||
rm -f /tmp/celery_pid
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
celery worker -A awx -l DEBUG -B -Ofair --autoscale=100,4 --schedule=$(CELERY_SCHEDULE_FILE) -Q tower_scheduler,tower_broadcast_all,$(COMPOSE_HOST),$(AWX_GROUP_QUEUES) -n celery@$(COMPOSE_HOST)
|
||||
celery worker -A awx -l DEBUG -B -Ofair --autoscale=100,4 --schedule=$(CELERY_SCHEDULE_FILE) -Q tower_scheduler,tower_broadcast_all,$(COMPOSE_HOST),$(AWX_GROUP_QUEUES) -n celery@$(COMPOSE_HOST) --pidfile /tmp/celery_pid
|
||||
|
||||
# Run to start the zeromq callback receiver
|
||||
receiver:
|
||||
@@ -607,7 +608,7 @@ clean-elk:
|
||||
docker rm tools_kibana_1
|
||||
|
||||
psql-container:
|
||||
docker run -it --net tools_default --rm postgres:9.4.1 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
|
||||
docker run -it --net tools_default --rm postgres:9.6 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
|
||||
|
||||
VERSION:
|
||||
@echo $(VERSION_TARGET) > $@
|
||||
|
||||
@@ -166,7 +166,13 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
elif isinstance(field, models.BooleanField):
|
||||
return to_python_boolean(value)
|
||||
elif isinstance(field, (ForeignObjectRel, ManyToManyField, GenericForeignKey, ForeignKey)):
|
||||
return self.to_python_related(value)
|
||||
try:
|
||||
return self.to_python_related(value)
|
||||
except ValueError:
|
||||
raise ParseError(_('Invalid {field_name} id: {field_id}').format(
|
||||
field_name=getattr(field, 'name', 'related field'),
|
||||
field_id=value)
|
||||
)
|
||||
else:
|
||||
return field.to_python(value)
|
||||
|
||||
@@ -243,11 +249,10 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
# Search across related objects.
|
||||
if key.endswith('__search'):
|
||||
for value in values:
|
||||
for search_term in force_text(value).replace(',', ' ').split():
|
||||
search_value, new_keys = self.value_to_python(queryset.model, key, search_term)
|
||||
assert isinstance(new_keys, list)
|
||||
for new_key in new_keys:
|
||||
search_filters.append((new_key, search_value))
|
||||
search_value, new_keys = self.value_to_python(queryset.model, key, force_text(value))
|
||||
assert isinstance(new_keys, list)
|
||||
for new_key in new_keys:
|
||||
search_filters.append((new_key, search_value))
|
||||
continue
|
||||
|
||||
# Custom chain__ and or__ filters, mutually exclusive (both can
|
||||
|
||||
@@ -21,7 +21,7 @@ from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.authentication import get_authorization_header
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.exceptions import PermissionDenied, AuthenticationFailed
|
||||
from rest_framework import generics
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
@@ -30,6 +30,7 @@ from rest_framework import views
|
||||
# AWX
|
||||
from awx.api.filters import FieldLookupBackend
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.utils import * # noqa
|
||||
from awx.main.utils.db import get_all_field_names
|
||||
from awx.api.serializers import ResourceAccessListElementSerializer
|
||||
@@ -38,9 +39,10 @@ from awx.api.metadata import SublistAttachDetatchMetadata
|
||||
|
||||
__all__ = ['APIView', 'GenericAPIView', 'ListAPIView', 'SimpleListAPIView',
|
||||
'ListCreateAPIView', 'SubListAPIView', 'SubListCreateAPIView',
|
||||
'SubListDestroyAPIView',
|
||||
'SubListCreateAttachDetachAPIView', 'RetrieveAPIView',
|
||||
'RetrieveUpdateAPIView', 'RetrieveDestroyAPIView',
|
||||
'RetrieveUpdateDestroyAPIView', 'DestroyAPIView',
|
||||
'RetrieveUpdateDestroyAPIView',
|
||||
'SubDetailAPIView',
|
||||
'ResourceAccessList',
|
||||
'ParentMixin',
|
||||
@@ -115,6 +117,10 @@ class APIView(views.APIView):
|
||||
|
||||
drf_request = super(APIView, self).initialize_request(request, *args, **kwargs)
|
||||
request.drf_request = drf_request
|
||||
try:
|
||||
request.drf_request_user = getattr(drf_request, 'user', False)
|
||||
except AuthenticationFailed:
|
||||
request.drf_request_user = None
|
||||
return drf_request
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
@@ -140,7 +146,6 @@ class APIView(views.APIView):
|
||||
response['X-API-Query-Count'] = len(q_times)
|
||||
response['X-API-Query-Time'] = '%0.3fs' % sum(q_times)
|
||||
|
||||
analytics_logger.info("api response", extra=dict(python_objects=dict(request=request, response=response)))
|
||||
return response
|
||||
|
||||
def get_authenticate_header(self, request):
|
||||
@@ -269,12 +274,17 @@ class GenericAPIView(generics.GenericAPIView, APIView):
|
||||
return serializer
|
||||
|
||||
def get_queryset(self):
|
||||
#if hasattr(self.request.user, 'get_queryset'):
|
||||
# return self.request.user.get_queryset(self.model)
|
||||
if self.queryset is not None:
|
||||
return self.queryset._clone()
|
||||
elif self.model is not None:
|
||||
return self.model._default_manager.all()
|
||||
qs = self.model._default_manager
|
||||
if self.model in access_registry:
|
||||
access_class = access_registry[self.model]
|
||||
if access_class.select_related:
|
||||
qs = qs.select_related(*access_class.select_related)
|
||||
if access_class.prefetch_related:
|
||||
qs = qs.prefetch_related(*access_class.prefetch_related)
|
||||
return qs
|
||||
else:
|
||||
return super(GenericAPIView, self).get_queryset()
|
||||
|
||||
@@ -442,6 +452,41 @@ class SubListAPIView(ParentMixin, ListAPIView):
|
||||
return qs & sublist_qs
|
||||
|
||||
|
||||
class DestroyAPIView(generics.DestroyAPIView):
|
||||
|
||||
def has_delete_permission(self, obj):
|
||||
return self.request.user.can_access(self.model, 'delete', obj)
|
||||
|
||||
def perform_destroy(self, instance, check_permission=True):
|
||||
if check_permission and not self.has_delete_permission(instance):
|
||||
raise PermissionDenied()
|
||||
super(DestroyAPIView, self).perform_destroy(instance)
|
||||
|
||||
|
||||
class SubListDestroyAPIView(DestroyAPIView, SubListAPIView):
|
||||
"""
|
||||
Concrete view for deleting everything related by `relationship`.
|
||||
"""
|
||||
check_sub_obj_permission = True
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
instance_list = self.get_queryset()
|
||||
if (not self.check_sub_obj_permission and
|
||||
not request.user.can_access(self.parent_model, 'delete', self.get_parent_object())):
|
||||
raise PermissionDenied()
|
||||
self.perform_list_destroy(instance_list)
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
def perform_list_destroy(self, instance_list):
|
||||
if self.check_sub_obj_permission:
|
||||
# Check permissions for all before deleting, avoiding half-deleted lists
|
||||
for instance in instance_list:
|
||||
if self.has_delete_permission(instance):
|
||||
raise PermissionDenied()
|
||||
for instance in instance_list:
|
||||
self.perform_destroy(instance, check_permission=False)
|
||||
|
||||
|
||||
class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
|
||||
# Base class for a sublist view that allows for creating subobjects
|
||||
# associated with the parent object.
|
||||
@@ -680,22 +725,11 @@ class RetrieveUpdateAPIView(RetrieveAPIView, generics.RetrieveUpdateAPIView):
|
||||
pass
|
||||
|
||||
|
||||
class RetrieveDestroyAPIView(RetrieveAPIView, generics.RetrieveDestroyAPIView):
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
# somewhat lame that delete has to call it's own permissions check
|
||||
obj = self.get_object()
|
||||
if not request.user.can_access(self.model, 'delete', obj):
|
||||
raise PermissionDenied()
|
||||
obj.delete()
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, RetrieveDestroyAPIView):
|
||||
class RetrieveDestroyAPIView(RetrieveAPIView, DestroyAPIView):
|
||||
pass
|
||||
|
||||
|
||||
class DestroyAPIView(GenericAPIView, generics.DestroyAPIView):
|
||||
class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, DestroyAPIView):
|
||||
pass
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@ import re
|
||||
import six
|
||||
import urllib
|
||||
from collections import OrderedDict
|
||||
from dateutil import rrule
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -44,7 +43,7 @@ from awx.main.fields import ImplicitRoleField
|
||||
from awx.main.utils import (
|
||||
get_type_for_model, get_model_for_type, timestamp_apiformat,
|
||||
camelcase_to_underscore, getattrd, parse_yaml_or_json,
|
||||
has_model_field_prefetched, extract_ansible_vars)
|
||||
has_model_field_prefetched, extract_ansible_vars, encrypt_dict)
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.redact import REPLACE_STR
|
||||
|
||||
@@ -345,7 +344,9 @@ class BaseSerializer(serializers.ModelSerializer):
|
||||
continue
|
||||
summary_fields[fk] = OrderedDict()
|
||||
for field in related_fields:
|
||||
if field == 'credential_type_id' and fk == 'credential' and self.version < 2: # TODO: remove version check in 3.3
|
||||
if (
|
||||
self.version < 2 and field == 'credential_type_id' and
|
||||
fk in ['credential', 'vault_credential']): # TODO: remove version check in 3.3
|
||||
continue
|
||||
|
||||
fval = getattr(fkval, field, None)
|
||||
@@ -612,14 +613,12 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
|
||||
class UnifiedJobSerializer(BaseSerializer):
|
||||
show_capabilities = ['start', 'delete']
|
||||
|
||||
result_stdout = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = UnifiedJob
|
||||
fields = ('*', 'unified_job_template', 'launch_type', 'status',
|
||||
'failed', 'started', 'finished', 'elapsed', 'job_args',
|
||||
'job_cwd', 'job_env', 'job_explanation', 'result_stdout',
|
||||
'execution_node', 'result_traceback')
|
||||
'job_cwd', 'job_env', 'job_explanation', 'execution_node',
|
||||
'result_traceback')
|
||||
extra_kwargs = {
|
||||
'unified_job_template': {
|
||||
'source': 'unified_job_template_id',
|
||||
@@ -700,25 +699,17 @@ class UnifiedJobSerializer(BaseSerializer):
|
||||
|
||||
return ret
|
||||
|
||||
def get_result_stdout(self, obj):
|
||||
obj_size = obj.result_stdout_size
|
||||
if obj_size > settings.STDOUT_MAX_BYTES_DISPLAY:
|
||||
return _("Standard Output too large to display (%(text_size)d bytes), "
|
||||
"only download supported for sizes over %(supported_size)d bytes") % {
|
||||
'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY}
|
||||
return obj.result_stdout
|
||||
|
||||
|
||||
class UnifiedJobListSerializer(UnifiedJobSerializer):
|
||||
|
||||
class Meta:
|
||||
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback', '-result_stdout')
|
||||
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback')
|
||||
|
||||
def get_field_names(self, declared_fields, info):
|
||||
field_names = super(UnifiedJobListSerializer, self).get_field_names(declared_fields, info)
|
||||
# Meta multiple inheritance and -field_name options don't seem to be
|
||||
# taking effect above, so remove the undesired fields here.
|
||||
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback', 'result_stdout'))
|
||||
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback'))
|
||||
|
||||
def get_types(self):
|
||||
if type(self) is UnifiedJobListSerializer:
|
||||
@@ -758,14 +749,6 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
|
||||
class Meta:
|
||||
fields = ('result_stdout',)
|
||||
|
||||
def get_result_stdout(self, obj):
|
||||
obj_size = obj.result_stdout_size
|
||||
if obj_size > settings.STDOUT_MAX_BYTES_DISPLAY:
|
||||
return _("Standard Output too large to display (%(text_size)d bytes), "
|
||||
"only download supported for sizes over %(supported_size)d bytes") % {
|
||||
'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY}
|
||||
return obj.result_stdout
|
||||
|
||||
def get_types(self):
|
||||
if type(self) is UnifiedJobStdoutSerializer:
|
||||
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
|
||||
@@ -912,7 +895,7 @@ class OrganizationSerializer(BaseSerializer):
|
||||
|
||||
class Meta:
|
||||
model = Organization
|
||||
fields = ('*',)
|
||||
fields = ('*', 'custom_virtualenv',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(OrganizationSerializer, self).get_related(obj)
|
||||
@@ -1000,7 +983,7 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
|
||||
class Meta:
|
||||
model = Project
|
||||
fields = ('*', 'organization', 'scm_delete_on_next_update', 'scm_update_on_launch',
|
||||
'scm_update_cache_timeout', 'scm_revision',) + \
|
||||
'scm_update_cache_timeout', 'scm_revision', 'custom_virtualenv',) + \
|
||||
('last_update_failed', 'last_updated') # Backwards compatibility
|
||||
read_only_fields = ('scm_delete_on_next_update',)
|
||||
|
||||
@@ -1111,11 +1094,17 @@ class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(ProjectUpdateSerializer, self).get_related(obj)
|
||||
try:
|
||||
res.update(dict(
|
||||
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
|
||||
))
|
||||
except ObjectDoesNotExist:
|
||||
pass
|
||||
res.update(dict(
|
||||
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
|
||||
cancel = self.reverse('api:project_update_cancel', kwargs={'pk': obj.pk}),
|
||||
scm_inventory_updates = self.reverse('api:project_update_scm_inventory_updates', kwargs={'pk': obj.pk}),
|
||||
notifications = self.reverse('api:project_update_notifications_list', kwargs={'pk': obj.pk}),
|
||||
events = self.reverse('api:project_update_events_list', kwargs={'pk': obj.pk}),
|
||||
))
|
||||
return res
|
||||
|
||||
@@ -1234,8 +1223,9 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
model = Host
|
||||
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
|
||||
'has_active_failures', 'has_inventory_sources', 'last_job',
|
||||
'last_job_host_summary', 'insights_system_id')
|
||||
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',)
|
||||
'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified',)
|
||||
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',
|
||||
'ansible_facts_modified',)
|
||||
|
||||
def build_relational_field(self, field_name, relation_info):
|
||||
field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
|
||||
@@ -1726,10 +1716,18 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(InventoryUpdateSerializer, self).get_related(obj)
|
||||
try:
|
||||
res.update(dict(
|
||||
inventory_source = self.reverse(
|
||||
'api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}
|
||||
),
|
||||
))
|
||||
except ObjectDoesNotExist:
|
||||
pass
|
||||
res.update(dict(
|
||||
inventory_source = self.reverse('api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}),
|
||||
cancel = self.reverse('api:inventory_update_cancel', kwargs={'pk': obj.pk}),
|
||||
notifications = self.reverse('api:inventory_update_notifications_list', kwargs={'pk': obj.pk}),
|
||||
events = self.reverse('api:inventory_update_events_list', kwargs={'pk': obj.pk}),
|
||||
))
|
||||
if obj.source_project_update_id:
|
||||
res['source_project_update'] = self.reverse('api:project_update_detail',
|
||||
@@ -2125,7 +2123,7 @@ class CredentialSerializer(BaseSerializer):
|
||||
|
||||
def to_internal_value(self, data):
|
||||
# TODO: remove when API v1 is removed
|
||||
if 'credential_type' not in data:
|
||||
if 'credential_type' not in data and self.version == 1:
|
||||
# If `credential_type` is not provided, assume the payload is a
|
||||
# v1 credential payload that specifies a `kind` and a flat list
|
||||
# of field values
|
||||
@@ -2162,10 +2160,22 @@ class CredentialSerializer(BaseSerializer):
|
||||
|
||||
def validate_credential_type(self, credential_type):
|
||||
if self.instance and credential_type.pk != self.instance.credential_type.pk:
|
||||
raise ValidationError(
|
||||
_('You cannot change the credential type of the credential, as it may break the functionality'
|
||||
' of the resources using it.'),
|
||||
)
|
||||
for rel in (
|
||||
'ad_hoc_commands',
|
||||
'insights_inventories',
|
||||
'inventorysources',
|
||||
'inventoryupdates',
|
||||
'unifiedjobs',
|
||||
'unifiedjobtemplates',
|
||||
'projects',
|
||||
'projectupdates',
|
||||
'workflowjobnodes'
|
||||
):
|
||||
if getattr(self.instance, rel).count() > 0:
|
||||
raise ValidationError(
|
||||
_('You cannot change the credential type of the credential, as it may break the functionality'
|
||||
' of the resources using it.'),
|
||||
)
|
||||
return credential_type
|
||||
|
||||
|
||||
@@ -2346,14 +2356,30 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
|
||||
def get_related(self, obj):
|
||||
res = super(JobOptionsSerializer, self).get_related(obj)
|
||||
res['labels'] = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk})
|
||||
if obj.inventory:
|
||||
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
|
||||
if obj.project:
|
||||
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
|
||||
if obj.credential:
|
||||
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential})
|
||||
if obj.vault_credential:
|
||||
res['vault_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.vault_credential})
|
||||
try:
|
||||
if obj.inventory:
|
||||
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
|
||||
except ObjectDoesNotExist:
|
||||
setattr(obj, 'inventory', None)
|
||||
try:
|
||||
if obj.project:
|
||||
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
|
||||
except ObjectDoesNotExist:
|
||||
setattr(obj, 'project', None)
|
||||
try:
|
||||
if obj.credential:
|
||||
res['credential'] = self.reverse(
|
||||
'api:credential_detail', kwargs={'pk': obj.credential}
|
||||
)
|
||||
except ObjectDoesNotExist:
|
||||
setattr(obj, 'credential', None)
|
||||
try:
|
||||
if obj.vault_credential:
|
||||
res['vault_credential'] = self.reverse(
|
||||
'api:credential_detail', kwargs={'pk': obj.vault_credential}
|
||||
)
|
||||
except ObjectDoesNotExist:
|
||||
setattr(obj, 'vault_credential', None)
|
||||
if self.version > 1:
|
||||
if isinstance(obj, UnifiedJobTemplate):
|
||||
res['extra_credentials'] = self.reverse(
|
||||
@@ -2504,7 +2530,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
|
||||
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
|
||||
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
|
||||
'allow_simultaneous')
|
||||
'allow_simultaneous', 'custom_virtualenv')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(JobTemplateSerializer, self).get_related(obj)
|
||||
@@ -2608,15 +2634,23 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
|
||||
notifications = self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
|
||||
labels = self.reverse('api:job_label_list', kwargs={'pk': obj.pk}),
|
||||
))
|
||||
if obj.job_template:
|
||||
res['job_template'] = self.reverse('api:job_template_detail',
|
||||
kwargs={'pk': obj.job_template.pk})
|
||||
try:
|
||||
if obj.job_template:
|
||||
res['job_template'] = self.reverse('api:job_template_detail',
|
||||
kwargs={'pk': obj.job_template.pk})
|
||||
except ObjectDoesNotExist:
|
||||
setattr(obj, 'job_template', None)
|
||||
if (obj.can_start or True) and self.version == 1: # TODO: remove in 3.3
|
||||
res['start'] = self.reverse('api:job_start', kwargs={'pk': obj.pk})
|
||||
if obj.can_cancel or True:
|
||||
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
|
||||
if obj.project_update:
|
||||
res['project_update'] = self.reverse('api:project_update_detail', kwargs={'pk': obj.project_update.pk})
|
||||
try:
|
||||
if obj.project_update:
|
||||
res['project_update'] = self.reverse(
|
||||
'api:project_update_detail', kwargs={'pk': obj.project_update.pk}
|
||||
)
|
||||
except ObjectDoesNotExist:
|
||||
pass
|
||||
res['create_schedule'] = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk})
|
||||
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
|
||||
return res
|
||||
@@ -2756,8 +2790,10 @@ class JobRelaunchSerializer(BaseSerializer):
|
||||
|
||||
def validate(self, attrs):
|
||||
obj = self.context.get('obj')
|
||||
if not obj.credential:
|
||||
raise serializers.ValidationError(dict(credential=[_("Credential not found or deleted.")]))
|
||||
if not obj.credential and not obj.vault_credential:
|
||||
raise serializers.ValidationError(
|
||||
dict(credential=[_("Neither credential nor vault credential provided.")])
|
||||
)
|
||||
if obj.project is None:
|
||||
raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
|
||||
if obj.inventory is None or obj.inventory.pending_deletion:
|
||||
@@ -2914,9 +2950,11 @@ class SystemJobTemplateSerializer(UnifiedJobTemplateSerializer):
|
||||
|
||||
class SystemJobSerializer(UnifiedJobSerializer):
|
||||
|
||||
result_stdout = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = SystemJob
|
||||
fields = ('*', 'system_job_template', 'job_type', 'extra_vars')
|
||||
fields = ('*', 'system_job_template', 'job_type', 'extra_vars', 'result_stdout')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(SystemJobSerializer, self).get_related(obj)
|
||||
@@ -2926,8 +2964,12 @@ class SystemJobSerializer(UnifiedJobSerializer):
|
||||
res['notifications'] = self.reverse('api:system_job_notifications_list', kwargs={'pk': obj.pk})
|
||||
if obj.can_cancel or True:
|
||||
res['cancel'] = self.reverse('api:system_job_cancel', kwargs={'pk': obj.pk})
|
||||
res['events'] = self.reverse('api:system_job_events_list', kwargs={'pk': obj.pk})
|
||||
return res
|
||||
|
||||
def get_result_stdout(self, obj):
|
||||
return obj.result_stdout
|
||||
|
||||
|
||||
class SystemJobCancelSerializer(SystemJobSerializer):
|
||||
|
||||
@@ -3068,12 +3110,38 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
||||
def validate(self, attrs):
|
||||
attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)
|
||||
|
||||
# Build unsaved version of this config, use it to detect prompts errors
|
||||
ujt = None
|
||||
if 'unified_job_template' in attrs:
|
||||
ujt = attrs['unified_job_template']
|
||||
elif self.instance:
|
||||
ujt = self.instance.unified_job_template
|
||||
|
||||
# Insert survey_passwords to track redacted variables
|
||||
if 'extra_data' in attrs:
|
||||
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
|
||||
if hasattr(ujt, 'survey_password_variables'):
|
||||
password_dict = {}
|
||||
for key in ujt.survey_password_variables():
|
||||
if key in extra_data:
|
||||
password_dict[key] = REPLACE_STR
|
||||
if not self.instance or password_dict != self.instance.survey_passwords:
|
||||
attrs['survey_passwords'] = password_dict
|
||||
if not isinstance(attrs['extra_data'], dict):
|
||||
attrs['extra_data'] = parse_yaml_or_json(attrs['extra_data'])
|
||||
encrypt_dict(attrs['extra_data'], password_dict.keys())
|
||||
if self.instance:
|
||||
db_extra_data = parse_yaml_or_json(self.instance.extra_data)
|
||||
else:
|
||||
db_extra_data = {}
|
||||
for key in password_dict.keys():
|
||||
if attrs['extra_data'].get(key, None) == REPLACE_STR:
|
||||
if key not in db_extra_data:
|
||||
raise serializers.ValidationError(
|
||||
_('Provided variable {} has no database value to replace with.').format(key))
|
||||
else:
|
||||
attrs['extra_data'][key] = db_extra_data[key]
|
||||
|
||||
# Build unsaved version of this config, use it to detect prompts errors
|
||||
mock_obj = self._build_mock_obj(attrs)
|
||||
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(
|
||||
_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
|
||||
@@ -3085,19 +3153,9 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
||||
raise serializers.ValidationError(errors)
|
||||
|
||||
# Model `.save` needs the container dict, not the psuedo fields
|
||||
attrs['char_prompts'] = mock_obj.char_prompts
|
||||
if mock_obj.char_prompts:
|
||||
attrs['char_prompts'] = mock_obj.char_prompts
|
||||
|
||||
# Insert survey_passwords to track redacted variables
|
||||
# TODO: perform encryption on save
|
||||
if 'extra_data' in attrs:
|
||||
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
|
||||
if hasattr(ujt, 'survey_password_variables'):
|
||||
password_dict = {}
|
||||
for key in ujt.survey_password_variables():
|
||||
if key in extra_data:
|
||||
password_dict[key] = REPLACE_STR
|
||||
if not self.instance or password_dict != self.instance.survey_passwords:
|
||||
attrs['survey_passwords'] = password_dict
|
||||
return attrs
|
||||
|
||||
|
||||
@@ -3108,7 +3166,7 @@ class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
|
||||
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
|
||||
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
|
||||
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
|
||||
exclude_errors = ('required') # required variables may be provided by WFJT or on launch
|
||||
exclude_errors = ('required',) # required variables may be provided by WFJT or on launch
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJobTemplateNode
|
||||
@@ -3360,6 +3418,41 @@ class JobEventWebSocketSerializer(JobEventSerializer):
|
||||
return 'job_events'
|
||||
|
||||
|
||||
class ProjectUpdateEventSerializer(JobEventSerializer):
|
||||
|
||||
class Meta:
|
||||
model = ProjectUpdateEvent
|
||||
fields = ('*', '-name', '-description', '-job', '-job_id',
|
||||
'-parent_uuid', '-parent', '-host', 'project_update')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(JobEventSerializer, self).get_related(obj)
|
||||
res['project_update'] = self.reverse(
|
||||
'api:project_update_detail', kwargs={'pk': obj.project_update_id}
|
||||
)
|
||||
return res
|
||||
|
||||
|
||||
class ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):
|
||||
created = serializers.SerializerMethodField()
|
||||
modified = serializers.SerializerMethodField()
|
||||
event_name = serializers.CharField(source='event')
|
||||
group_name = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = ProjectUpdateEvent
|
||||
fields = ('*', 'event_name', 'group_name',)
|
||||
|
||||
def get_created(self, obj):
|
||||
return obj.created.isoformat()
|
||||
|
||||
def get_modified(self, obj):
|
||||
return obj.modified.isoformat()
|
||||
|
||||
def get_group_name(self, obj):
|
||||
return 'project_update_events'
|
||||
|
||||
|
||||
class AdHocCommandEventSerializer(BaseSerializer):
|
||||
|
||||
event_display = serializers.CharField(source='get_event_display', read_only=True)
|
||||
@@ -3419,6 +3512,76 @@ class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
|
||||
return 'ad_hoc_command_events'
|
||||
|
||||
|
||||
class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
|
||||
|
||||
class Meta:
|
||||
model = InventoryUpdateEvent
|
||||
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
|
||||
'-host_name', 'inventory_update')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(AdHocCommandEventSerializer, self).get_related(obj)
|
||||
res['inventory_update'] = self.reverse(
|
||||
'api:inventory_update_detail', kwargs={'pk': obj.inventory_update_id}
|
||||
)
|
||||
return res
|
||||
|
||||
|
||||
class InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):
|
||||
created = serializers.SerializerMethodField()
|
||||
modified = serializers.SerializerMethodField()
|
||||
event_name = serializers.CharField(source='event')
|
||||
group_name = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = InventoryUpdateEvent
|
||||
fields = ('*', 'event_name', 'group_name',)
|
||||
|
||||
def get_created(self, obj):
|
||||
return obj.created.isoformat()
|
||||
|
||||
def get_modified(self, obj):
|
||||
return obj.modified.isoformat()
|
||||
|
||||
def get_group_name(self, obj):
|
||||
return 'inventory_update_events'
|
||||
|
||||
|
||||
class SystemJobEventSerializer(AdHocCommandEventSerializer):
|
||||
|
||||
class Meta:
|
||||
model = SystemJobEvent
|
||||
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
|
||||
'-host_name', 'system_job')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(AdHocCommandEventSerializer, self).get_related(obj)
|
||||
res['system_job'] = self.reverse(
|
||||
'api:system_job_detail', kwargs={'pk': obj.system_job_id}
|
||||
)
|
||||
return res
|
||||
|
||||
|
||||
class SystemJobEventWebSocketSerializer(SystemJobEventSerializer):
|
||||
created = serializers.SerializerMethodField()
|
||||
modified = serializers.SerializerMethodField()
|
||||
event_name = serializers.CharField(source='event')
|
||||
group_name = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = SystemJobEvent
|
||||
fields = ('*', 'event_name', 'group_name',)
|
||||
|
||||
def get_created(self, obj):
|
||||
return obj.created.isoformat()
|
||||
|
||||
def get_modified(self, obj):
|
||||
return obj.modified.isoformat()
|
||||
|
||||
def get_group_name(self, obj):
|
||||
return 'system_job_events'
|
||||
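The three `*EventWebSocketSerializer` classes above all follow the same pattern: rename `event` to `event_name`, attach a fixed `group_name`, and emit `created`/`modified` as ISO-8601 strings. A minimal stand-alone sketch of the resulting payload shape (plain Python rather than the actual DRF serializers; the field values are illustrative):

    import datetime

    def websocket_event_payload(event_row, group_name):
        # Mimics the *_EventWebSocketSerializer output: ISO timestamps,
        # the 'event' key exposed as 'event_name', and a fixed group_name.
        return {
            'id': event_row['id'],
            'event_name': event_row['event'],
            'group_name': group_name,
            'created': event_row['created'].isoformat(),
            'modified': event_row['modified'].isoformat(),
        }

    row = {
        'id': 42,
        'event': 'runner_on_ok',
        'created': datetime.datetime(2018, 1, 1, 12, 0, 0),
        'modified': datetime.datetime(2018, 1, 1, 12, 0, 1),
    }
    print(websocket_event_payload(row, 'system_job_events'))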
|
||||
|
||||
class JobLaunchSerializer(BaseSerializer):
|
||||
|
||||
# Representational fields
|
||||
@@ -3483,15 +3646,16 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
id=getattrd(obj, '%s.pk' % field_name, None))
|
||||
elif field_name == 'credentials':
|
||||
if self.version > 1:
|
||||
defaults_dict[field_name] = [
|
||||
dict(
|
||||
for cred in obj.credentials.all():
|
||||
cred_dict = dict(
|
||||
id=cred.id,
|
||||
name=cred.name,
|
||||
credential_type=cred.credential_type.pk,
|
||||
passwords_needed=cred.passwords_needed
|
||||
)
|
||||
for cred in obj.credentials.all()
|
||||
]
|
||||
if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
|
||||
cred_dict['vault_id'] = cred.inputs.get('vault_id') or None
|
||||
defaults_dict.setdefault(field_name, []).append(cred_dict)
|
||||
else:
|
||||
defaults_dict[field_name] = getattr(obj, field_name)
|
||||
return defaults_dict
|
||||
@@ -3506,7 +3670,7 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
template = self.context.get('template')
|
||||
|
||||
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(
|
||||
_exclude_errors=['prompts', 'required'], # make several error types non-blocking
|
||||
_exclude_errors=['prompts'], # make several error types non-blocking
|
||||
**attrs)
|
||||
self._ignored_fields = rejected
|
||||
|
||||
@@ -3708,32 +3872,11 @@ class LabelSerializer(BaseSerializer):
|
||||
return res
|
||||
|
||||
|
||||
class ScheduleSerializer(LaunchConfigurationBaseSerializer):
|
||||
show_capabilities = ['edit', 'delete']
|
||||
class SchedulePreviewSerializer(BaseSerializer):
|
||||
|
||||
class Meta:
|
||||
model = Schedule
|
||||
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(ScheduleSerializer, self).get_related(obj)
|
||||
res.update(dict(
|
||||
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
|
||||
))
|
||||
if obj.unified_job_template:
|
||||
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
|
||||
return res
|
||||
|
||||
def validate_unified_job_template(self, value):
|
||||
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
|
||||
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
|
||||
elif type(value) == Project and value.scm_type == '':
|
||||
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
|
||||
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
|
||||
raise serializers.ValidationError(_(
|
||||
'Inventory sources with `update_on_project_update` cannot be scheduled. '
|
||||
'Schedule its source project `{}` instead.'.format(value.source_project.name)))
|
||||
return value
|
||||
fields = ('rrule',)
|
||||
|
||||
# We reject rrules if:
|
||||
# - DTSTART is not included
|
||||
@@ -3751,20 +3894,21 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer):
|
||||
multi_by_month = ".*?BYMONTH[\:\=][0-9]+,[0-9]+"
|
||||
by_day_with_numeric_prefix = ".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
|
||||
match_count = re.match(".*?(COUNT\=[0-9]+)", rrule_value)
|
||||
match_multiple_dtstart = re.findall(".*?(DTSTART\:[0-9]+T[0-9]+Z)", rrule_value)
|
||||
match_multiple_dtstart = re.findall(".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
|
||||
match_native_dtstart = re.findall(".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
|
||||
match_multiple_rrule = re.findall(".*?(RRULE\:)", rrule_value)
|
||||
if not len(match_multiple_dtstart):
|
||||
raise serializers.ValidationError(_('DTSTART required in rrule. Value should match: DTSTART:YYYYMMDDTHHMMSSZ'))
|
||||
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
|
||||
if len(match_native_dtstart):
|
||||
raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
|
||||
if len(match_multiple_dtstart) > 1:
|
||||
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
|
||||
if not len(match_multiple_rrule):
|
||||
raise serializers.ValidationError(_('RRULE require in rrule.'))
|
||||
raise serializers.ValidationError(_('RRULE required in rrule.'))
|
||||
if len(match_multiple_rrule) > 1:
|
||||
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
|
||||
if 'interval' not in rrule_value.lower():
|
||||
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
|
||||
if 'tzid' in rrule_value.lower():
|
||||
raise serializers.ValidationError(_('TZID is not supported.'))
|
||||
if 'secondly' in rrule_value.lower():
|
||||
raise serializers.ValidationError(_('SECONDLY is not supported.'))
|
||||
if re.match(multi_by_month_day, rrule_value):
|
||||
@@ -3782,9 +3926,46 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer):
|
||||
if int(count_val[1]) > 999:
|
||||
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
|
||||
try:
|
||||
rrule.rrulestr(rrule_value)
|
||||
except Exception:
|
||||
raise serializers.ValidationError(_("rrule parsing failed validation."))
|
||||
Schedule.rrulestr(rrule_value)
|
||||
except Exception as e:
|
||||
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
|
||||
return value
|
||||
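The regex checks above reject problem rrules before parsing; the final guard then hands the string to `Schedule.rrulestr`, which wraps dateutil's parser. A rough stand-alone approximation using dateutil directly (assumes python-dateutil is installed; `Schedule.rrulestr` itself may apply extra normalization not shown here):

    from dateutil import rrule

    good = "DTSTART:20180601T120000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5"
    rule = rrule.rrulestr(good)
    print(list(rule)[:3])   # first three occurrences as datetimes

    # dateutil happily parses an rrule without DTSTART (it defaults to "now"),
    # which is why the serializer's own regex check is needed to reject that case.
    print(rrule.rrulestr("RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2").count())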
|
||||
|
||||
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
|
||||
show_capabilities = ['edit', 'delete']
|
||||
|
||||
class Meta:
|
||||
model = Schedule
|
||||
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(ScheduleSerializer, self).get_related(obj)
|
||||
res.update(dict(
|
||||
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
|
||||
))
|
||||
if obj.unified_job_template:
|
||||
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
|
||||
try:
|
||||
if obj.unified_job_template.project:
|
||||
res['project'] = obj.unified_job_template.project.get_absolute_url(self.context.get('request'))
|
||||
except ObjectDoesNotExist:
|
||||
pass
|
||||
if obj.inventory:
|
||||
res['inventory'] = obj.inventory.get_absolute_url(self.context.get('request'))
|
||||
elif obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
|
||||
res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))
|
||||
return res
|
||||
|
||||
def validate_unified_job_template(self, value):
|
||||
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
|
||||
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
|
||||
elif type(value) == Project and value.scm_type == '':
|
||||
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
|
||||
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
|
||||
raise serializers.ValidationError(_(
|
||||
six.text_type('Inventory sources with `update_on_project_update` cannot be scheduled. '
|
||||
'Schedule its source project `{}` instead.').format(value.source_project.name)))
|
||||
return value
|
||||
|
||||
|
||||
@@ -3820,6 +4001,7 @@ class InstanceSerializer(BaseSerializer):
|
||||
|
||||
class InstanceGroupSerializer(BaseSerializer):
|
||||
|
||||
committed_capacity = serializers.SerializerMethodField()
|
||||
consumed_capacity = serializers.SerializerMethodField()
|
||||
percent_capacity_remaining = serializers.SerializerMethodField()
|
||||
jobs_running = serializers.SerializerMethodField()
|
||||
@@ -3827,7 +4009,8 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
|
||||
class Meta:
|
||||
model = InstanceGroup
|
||||
fields = ("id", "type", "url", "related", "name", "created", "modified", "capacity", "consumed_capacity",
|
||||
fields = ("id", "type", "url", "related", "name", "created", "modified",
|
||||
"capacity", "committed_capacity", "consumed_capacity",
|
||||
"percent_capacity_remaining", "jobs_running", "instances", "controller")
|
||||
|
||||
def get_related(self, obj):
|
||||
@@ -3856,7 +4039,10 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
return self.context['capacity_map']
|
||||
|
||||
def get_consumed_capacity(self, obj):
|
||||
return self.get_capacity_dict()[obj.name]['consumed_capacity']
|
||||
return self.get_capacity_dict()[obj.name]['running_capacity']
|
||||
|
||||
def get_committed_capacity(self, obj):
|
||||
return self.get_capacity_dict()[obj.name]['committed_capacity']
|
||||
|
||||
def get_percent_capacity_remaining(self, obj):
|
||||
if not obj.capacity:
|
||||
@@ -3954,6 +4140,11 @@ class ActivityStreamSerializer(BaseSerializer):
|
||||
|
||||
if fk == 'schedule':
|
||||
rel['unified_job_template'] = thisItem.unified_job_template.get_absolute_url(self.context.get('request'))
|
||||
if obj.setting and obj.setting.get('category', None):
|
||||
rel['setting'] = self.reverse(
|
||||
'api:setting_singleton_detail',
|
||||
kwargs={'category_slug': obj.setting['category']}
|
||||
)
|
||||
return rel
|
||||
|
||||
def _get_rel(self, obj, fk):
|
||||
@@ -4005,6 +4196,8 @@ class ActivityStreamSerializer(BaseSerializer):
|
||||
username = obj.actor.username,
|
||||
first_name = obj.actor.first_name,
|
||||
last_name = obj.actor.last_name)
|
||||
if obj.setting:
|
||||
summary_fields['setting'] = [obj.setting]
|
||||
return summary_fields
|
||||
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
The resulting data structure contains:
|
||||
|
||||
{
|
||||
"count": 99,
|
||||
"next": null,
|
||||
"previous": null,
|
||||
"count": 99,
|
||||
"next": null,
|
||||
"previous": null,
|
||||
"results": [
|
||||
...
|
||||
]
|
||||
@@ -60,6 +60,10 @@ _Added in AWX 1.4_
|
||||
|
||||
?related__search=findme
|
||||
|
||||
Note: If you want to provide more than one search term, use multiple
search fields with the same key, like `?related__search=foo&related__search=bar`.
All search terms with the same key will be ORed together.
|
||||
|
||||
## Filtering
|
||||
|
||||
Any additional query string parameters may be used to filter the list of
|
||||
@@ -70,7 +74,7 @@ in the specified value should be url-encoded. For example:
|
||||
?field=value%20xyz
|
||||
|
||||
Fields may also span relations, only for fields and relationships defined in
|
||||
the database:
|
||||
the database:
|
||||
|
||||
?other__field=value
|
||||
|
||||
|
||||
6
awx/api/templates/api/sub_list_destroy_api_view.md
Normal file
@@ -0,0 +1,6 @@
|
||||
{% include "api/sub_list_create_api_view.md" %}
|
||||
|
||||
# Delete all {{ model_verbose_name_plural }} of this {{ parent_model_verbose_name|title }}:
|
||||
|
||||
Make a DELETE request to this resource to delete all {{ model_verbose_name_plural }} shown in the list.
|
||||
The {{ parent_model_verbose_name|title }} will not be deleted by this request.
|
||||
@@ -9,6 +9,7 @@ from awx.api.views import (
|
||||
InventoryUpdateCancel,
|
||||
InventoryUpdateStdout,
|
||||
InventoryUpdateNotificationsList,
|
||||
InventoryUpdateEventsList,
|
||||
)
|
||||
|
||||
|
||||
@@ -18,6 +19,7 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/cancel/$', InventoryUpdateCancel.as_view(), name='inventory_update_cancel'),
|
||||
url(r'^(?P<pk>[0-9]+)/stdout/$', InventoryUpdateStdout.as_view(), name='inventory_update_stdout'),
|
||||
url(r'^(?P<pk>[0-9]+)/notifications/$', InventoryUpdateNotificationsList.as_view(), name='inventory_update_notifications_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/events/$', InventoryUpdateEventsList.as_view(), name='inventory_update_events_list'),
|
||||
]
|
||||
|
||||
__all__ = ['urls']
|
||||
|
||||
@@ -10,6 +10,7 @@ from awx.api.views import (
|
||||
ProjectUpdateStdout,
|
||||
ProjectUpdateScmInventoryUpdates,
|
||||
ProjectUpdateNotificationsList,
|
||||
ProjectUpdateEventsList,
|
||||
)
|
||||
|
||||
|
||||
@@ -20,6 +21,7 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/stdout/$', ProjectUpdateStdout.as_view(), name='project_update_stdout'),
|
||||
url(r'^(?P<pk>[0-9]+)/scm_inventory_updates/$', ProjectUpdateScmInventoryUpdates.as_view(), name='project_update_scm_inventory_updates'),
|
||||
url(r'^(?P<pk>[0-9]+)/notifications/$', ProjectUpdateNotificationsList.as_view(), name='project_update_notifications_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/events/$', ProjectUpdateEventsList.as_view(), name='project_update_events_list'),
|
||||
]
|
||||
|
||||
__all__ = ['urls']
|
||||
|
||||
@@ -8,6 +8,7 @@ from awx.api.views import (
|
||||
SystemJobDetail,
|
||||
SystemJobCancel,
|
||||
SystemJobNotificationsList,
|
||||
SystemJobEventsList
|
||||
)
|
||||
|
||||
|
||||
@@ -16,6 +17,7 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/$', SystemJobDetail.as_view(), name='system_job_detail'),
|
||||
url(r'^(?P<pk>[0-9]+)/cancel/$', SystemJobCancel.as_view(), name='system_job_cancel'),
|
||||
url(r'^(?P<pk>[0-9]+)/notifications/$', SystemJobNotificationsList.as_view(), name='system_job_notifications_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/events/$', SystemJobEventsList.as_view(), name='system_job_events_list'),
|
||||
]
|
||||
|
||||
__all__ = ['urls']
|
||||
|
||||
@@ -22,6 +22,8 @@ from awx.api.views import (
|
||||
JobExtraCredentialsList,
|
||||
JobTemplateCredentialsList,
|
||||
JobTemplateExtraCredentialsList,
|
||||
SchedulePreview,
|
||||
ScheduleZoneInfo,
|
||||
)
|
||||
|
||||
from .organization import urls as organization_urls
|
||||
@@ -113,6 +115,8 @@ v2_urls = [
|
||||
url(r'^jobs/(?P<pk>[0-9]+)/credentials/$', JobCredentialsList.as_view(), name='job_credentials_list'),
|
||||
url(r'^job_templates/(?P<pk>[0-9]+)/extra_credentials/$', JobTemplateExtraCredentialsList.as_view(), name='job_template_extra_credentials_list'),
|
||||
url(r'^job_templates/(?P<pk>[0-9]+)/credentials/$', JobTemplateCredentialsList.as_view(), name='job_template_credentials_list'),
|
||||
url(r'^schedules/preview/$', SchedulePreview.as_view(), name='schedule_rrule'),
|
||||
url(r'^schedules/zoneinfo/$', ScheduleZoneInfo.as_view(), name='schedule_zoneinfo'),
|
||||
]
|
||||
|
||||
app_name = 'api'
|
||||
|
||||
340
awx/api/views.py
@@ -2,24 +2,23 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import os
|
||||
import re
|
||||
import cgi
|
||||
import dateutil
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import logging
|
||||
import requests
|
||||
from base64 import b64encode
|
||||
from collections import OrderedDict, Iterable
|
||||
import six
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import FieldError
|
||||
from django.db.models import Q, Count, F
|
||||
from django.db import IntegrityError, transaction, connection
|
||||
from django.db import IntegrityError, transaction
|
||||
from django.shortcuts import get_object_or_404
|
||||
from django.utils.encoding import smart_text, force_text
|
||||
from django.utils.safestring import mark_safe
|
||||
@@ -54,6 +53,7 @@ import ansiconv
|
||||
# Python Social Auth
|
||||
from social_core.backends.utils import load_backends
|
||||
|
||||
import pytz
|
||||
from wsgiref.util import FileWrapper
|
||||
|
||||
# AWX
|
||||
@@ -72,6 +72,7 @@ from awx.main.utils import (
|
||||
extract_ansible_vars,
|
||||
decrypt_field,
|
||||
)
|
||||
from awx.main.utils.encryption import encrypt_value
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.insights import filter_insights_api_response
|
||||
|
||||
@@ -315,6 +316,7 @@ class ApiV1ConfigView(APIView):
|
||||
data.update(dict(
|
||||
project_base_dir = settings.PROJECTS_ROOT,
|
||||
project_local_paths = Project.get_local_path_choices(),
|
||||
custom_virtualenvs = get_custom_venv_choices(),
|
||||
))
|
||||
|
||||
return Response(data)
|
||||
@@ -607,6 +609,43 @@ class ScheduleDetail(RetrieveUpdateDestroyAPIView):
|
||||
new_in_148 = True
|
||||
|
||||
|
||||
class SchedulePreview(GenericAPIView):
|
||||
|
||||
model = Schedule
|
||||
view_name = _('Schedule Recurrence Rule Preview')
|
||||
serializer_class = SchedulePreviewSerializer
|
||||
new_in_api_v2 = True
|
||||
|
||||
def post(self, request):
|
||||
serializer = self.get_serializer(data=request.data)
|
||||
if serializer.is_valid():
|
||||
next_stamp = now()
|
||||
schedule = []
|
||||
gen = Schedule.rrulestr(serializer.validated_data['rrule']).xafter(next_stamp, count=20)
|
||||
|
||||
# loop across the entire generator and grab the first 10 events
|
||||
for event in gen:
|
||||
if len(schedule) >= 10:
|
||||
break
|
||||
if not dateutil.tz.datetime_exists(event):
|
||||
# skip imaginary dates, like 2:30 on DST boundaries
|
||||
continue
|
||||
schedule.append(event)
|
||||
|
||||
return Response({
|
||||
'local': schedule,
|
||||
'utc': [s.astimezone(pytz.utc) for s in schedule]
|
||||
})
|
||||
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
||||
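A hedged illustration of how a client might exercise the new preview endpoint; the host, credentials, and rrule below are placeholders, and the URL comes from the `schedules/preview/` route added above:

    import requests

    # Placeholder host/credentials -- adjust for a real AWX install.
    resp = requests.post(
        'https://awx.example.com/api/v2/schedules/preview/',
        auth=('admin', 'password'),
        json={'rrule': 'DTSTART:20180601T120000Z RRULE:FREQ=DAILY;INTERVAL=1'},
        verify=False,
    )
    data = resp.json()
    print(data['local'][:3])   # next occurrences in the rrule's own timezone
    print(data['utc'][:3])     # the same occurrences converted to UTC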
|
||||
|
||||
class ScheduleZoneInfo(APIView):
|
||||
|
||||
def get(self, request):
|
||||
from dateutil.zoneinfo import get_zonefile_instance
|
||||
return Response(sorted(get_zonefile_instance().zones.keys()))
|
||||
|
||||
|
||||
class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
|
||||
|
||||
model = Credential
|
||||
@@ -1366,6 +1405,45 @@ class ProjectUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
|
||||
new_in_13 = True
|
||||
|
||||
|
||||
class ProjectUpdateEventsList(SubListAPIView):
|
||||
|
||||
model = ProjectUpdateEvent
|
||||
serializer_class = ProjectUpdateEventSerializer
|
||||
parent_model = ProjectUpdate
|
||||
relationship = 'project_update_events'
|
||||
view_name = _('Project Update Events List')
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
|
||||
return super(ProjectUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)
|
||||
|
||||
|
||||
class SystemJobEventsList(SubListAPIView):
|
||||
|
||||
model = SystemJobEvent
|
||||
serializer_class = SystemJobEventSerializer
|
||||
parent_model = SystemJob
|
||||
relationship = 'system_job_events'
|
||||
view_name = _('System Job Events List')
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
|
||||
return super(SystemJobEventsList, self).finalize_response(request, response, *args, **kwargs)
|
||||
|
||||
|
||||
class InventoryUpdateEventsList(SubListAPIView):
|
||||
|
||||
model = InventoryUpdateEvent
|
||||
serializer_class = InventoryUpdateEventSerializer
|
||||
parent_model = InventoryUpdate
|
||||
relationship = 'inventory_update_events'
|
||||
view_name = _('Inventory Update Events List')
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
|
||||
return super(InventoryUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)
|
||||
|
||||
|
||||
class ProjectUpdateCancel(RetrieveAPIView):
|
||||
|
||||
model = ProjectUpdate
|
||||
@@ -1967,7 +2045,17 @@ class InventoryJobTemplateList(SubListAPIView):
|
||||
return qs.filter(inventory=parent)
|
||||
|
||||
|
||||
class HostList(ListCreateAPIView):
|
||||
class HostRelatedSearchMixin(object):
|
||||
|
||||
@property
|
||||
def related_search_fields(self):
|
||||
# Edge-case handle: https://github.com/ansible/ansible-tower/issues/7712
|
||||
ret = super(HostRelatedSearchMixin, self).related_search_fields
|
||||
ret.append('ansible_facts')
|
||||
return ret
|
||||
|
||||
|
||||
class HostList(HostRelatedSearchMixin, ListCreateAPIView):
|
||||
|
||||
always_allow_superuser = False
|
||||
model = Host
|
||||
@@ -2004,7 +2092,7 @@ class HostAnsibleFactsDetail(RetrieveAPIView):
|
||||
new_in_api_v2 = True
|
||||
|
||||
|
||||
class InventoryHostsList(SubListCreateAttachDetachAPIView):
|
||||
class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
|
||||
|
||||
model = Host
|
||||
serializer_class = HostSerializer
|
||||
@@ -2274,7 +2362,9 @@ class GroupPotentialChildrenList(SubListAPIView):
|
||||
return qs.exclude(pk__in=except_pks)
|
||||
|
||||
|
||||
class GroupHostsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
|
||||
class GroupHostsList(HostRelatedSearchMixin,
|
||||
ControlledByScmMixin,
|
||||
SubListCreateAttachDetachAPIView):
|
||||
''' the list of hosts directly below a group '''
|
||||
|
||||
model = Host
|
||||
@@ -2301,7 +2391,7 @@ class GroupHostsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
|
||||
return super(GroupHostsList, self).create(request, *args, **kwargs)
|
||||
|
||||
|
||||
class GroupAllHostsList(SubListAPIView):
|
||||
class GroupAllHostsList(HostRelatedSearchMixin, SubListAPIView):
|
||||
''' the list of all hosts below a group, even including subgroups '''
|
||||
|
||||
model = Host
|
||||
@@ -2419,6 +2509,8 @@ class InventoryScriptView(RetrieveAPIView):
|
||||
def retrieve(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
hostname = request.query_params.get('host', '')
|
||||
hostvars = bool(request.query_params.get('hostvars', ''))
|
||||
towervars = bool(request.query_params.get('towervars', ''))
|
||||
show_all = bool(request.query_params.get('all', ''))
|
||||
if hostname:
|
||||
hosts_q = dict(name=hostname)
|
||||
@@ -2427,7 +2519,8 @@ class InventoryScriptView(RetrieveAPIView):
|
||||
host = get_object_or_404(obj.hosts, **hosts_q)
|
||||
return Response(host.variables_dict)
|
||||
return Response(obj.get_script_data(
|
||||
hostvars=bool(request.query_params.get('hostvars', '')),
|
||||
hostvars=hostvars,
|
||||
towervars=towervars,
|
||||
show_all=show_all
|
||||
))
|
||||
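With the change above, `towervars` and `all` are honored alongside `hostvars` when generating the inventory script output. A hedged example of calling the endpoint (host, credentials, and the inventory id are placeholders):

    import requests

    resp = requests.get(
        'https://awx.example.com/api/v2/inventories/1/script/',
        params={'hostvars': 1, 'towervars': 1, 'all': 1},
        auth=('admin', 'password'),
        verify=False,
    )
    inventory = resp.json()
    print(sorted(inventory.keys()))             # group names plus '_meta'
    print(inventory.get('_meta', {}).keys())    # per-host variables when hostvars=1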
|
||||
@@ -2607,23 +2700,25 @@ class InventorySourceNotificationTemplatesSuccessList(InventorySourceNotificatio
|
||||
relationship = 'notification_templates_success'
|
||||
|
||||
|
||||
class InventorySourceHostsList(SubListAPIView):
|
||||
class InventorySourceHostsList(HostRelatedSearchMixin, SubListDestroyAPIView):
|
||||
|
||||
model = Host
|
||||
serializer_class = HostSerializer
|
||||
parent_model = InventorySource
|
||||
relationship = 'hosts'
|
||||
new_in_148 = True
|
||||
check_sub_obj_permission = False
|
||||
capabilities_prefetch = ['inventory.admin']
|
||||
|
||||
|
||||
class InventorySourceGroupsList(SubListAPIView):
|
||||
class InventorySourceGroupsList(SubListDestroyAPIView):
|
||||
|
||||
model = Group
|
||||
serializer_class = GroupSerializer
|
||||
parent_model = InventorySource
|
||||
relationship = 'groups'
|
||||
new_in_148 = True
|
||||
check_sub_obj_permission = False
|
||||
|
||||
|
||||
class InventorySourceUpdatesList(SubListAPIView):
|
||||
@@ -2918,13 +3013,8 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
if not feature_enabled('surveys'):
|
||||
raise LicenseForbids(_('Your license does not allow '
|
||||
'adding surveys.'))
|
||||
survey_spec = obj.survey_spec
|
||||
for pos, field in enumerate(survey_spec.get('spec', [])):
|
||||
if field.get('type') == 'password':
|
||||
if 'default' in field and field['default']:
|
||||
field['default'] = '$encrypted$'
|
||||
|
||||
return Response(survey_spec)
|
||||
return Response(obj.display_survey_spec())
|
||||
|
||||
def post(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
@@ -2937,7 +3027,14 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
|
||||
if not request.user.can_access(self.model, 'change', obj, None):
|
||||
raise PermissionDenied()
|
||||
new_spec = request.data
|
||||
response = self._validate_spec_data(request.data, obj.survey_spec)
|
||||
if response:
|
||||
return response
|
||||
obj.survey_spec = request.data
|
||||
obj.save(update_fields=['survey_spec'])
|
||||
return Response()
|
||||
|
||||
def _validate_spec_data(self, new_spec, old_spec):
|
||||
if "name" not in new_spec:
|
||||
return Response(dict(error=_("'name' missing from survey spec.")), status=status.HTTP_400_BAD_REQUEST)
|
||||
if "description" not in new_spec:
|
||||
@@ -2949,9 +3046,9 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
if len(new_spec["spec"]) < 1:
|
||||
return Response(dict(error=_("'spec' doesn't contain any items.")), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
idx = 0
|
||||
variable_set = set()
|
||||
for survey_item in new_spec["spec"]:
|
||||
old_spec_dict = JobTemplate.pivot_spec(old_spec)
|
||||
for idx, survey_item in enumerate(new_spec["spec"]):
|
||||
if not isinstance(survey_item, dict):
|
||||
return Response(dict(error=_("Survey question %s is not a json object.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
|
||||
if "type" not in survey_item:
|
||||
@@ -2968,21 +3065,41 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
if "required" not in survey_item:
|
||||
return Response(dict(error=_("'required' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
if survey_item["type"] == "password":
|
||||
if survey_item.get("default") and survey_item["default"].startswith('$encrypted$'):
|
||||
if not obj.survey_spec:
|
||||
return Response(dict(error=_("$encrypted$ is reserved keyword and may not be used as a default for password {}.".format(str(idx)))),
|
||||
status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
old_spec = obj.survey_spec
|
||||
for old_item in old_spec['spec']:
|
||||
if old_item['variable'] == survey_item['variable']:
|
||||
survey_item['default'] = old_item['default']
|
||||
idx += 1
|
||||
if survey_item["type"] == "password" and "default" in survey_item:
|
||||
if not isinstance(survey_item['default'], six.string_types):
|
||||
return Response(dict(error=_(
|
||||
"Value {question_default} for '{variable_name}' expected to be a string."
|
||||
).format(
|
||||
question_default=survey_item["default"], variable_name=survey_item["variable"])
|
||||
), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
obj.survey_spec = new_spec
|
||||
obj.save(update_fields=['survey_spec'])
|
||||
return Response()
|
||||
if ("default" in survey_item and isinstance(survey_item['default'], six.string_types) and
|
||||
survey_item['default'].startswith('$encrypted$')):
|
||||
# Submission expects the existence of encrypted DB value to replace given default
|
||||
if survey_item["type"] != "password":
|
||||
return Response(dict(error=_(
|
||||
"$encrypted$ is a reserved keyword for password question defaults, "
|
||||
"survey question {question_position} is type {question_type}."
|
||||
).format(
|
||||
question_position=str(idx), question_type=survey_item["type"])
|
||||
), status=status.HTTP_400_BAD_REQUEST)
|
||||
old_element = old_spec_dict.get(survey_item['variable'], {})
|
||||
encryptedish_default_exists = False
|
||||
if 'default' in old_element:
|
||||
old_default = old_element['default']
|
||||
if isinstance(old_default, six.string_types):
|
||||
if old_default.startswith('$encrypted$'):
|
||||
encryptedish_default_exists = True
|
||||
elif old_default == "": # unencrypted blank string is allowed as DB value as special case
|
||||
encryptedish_default_exists = True
|
||||
if not encryptedish_default_exists:
|
||||
return Response(dict(error=_(
|
||||
"$encrypted$ is a reserved keyword, may not be used for new default in position {question_position}."
|
||||
).format(question_position=str(idx))), status=status.HTTP_400_BAD_REQUEST)
|
||||
survey_item['default'] = old_element['default']
|
||||
elif survey_item["type"] == "password" and 'default' in survey_item:
|
||||
# Submission provides new encrypted default
|
||||
survey_item['default'] = encrypt_value(survey_item['default'])
|
||||
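The heart of the new password handling is: a submitted `$encrypted$` placeholder is only legal when the stored spec already holds an encrypted (or blank) default for that variable, in which case the stored value is preserved; a genuinely new password default gets encrypted. A stripped-down sketch of that decision, with `pivot_spec` and `encrypt_value` replaced by trivial stand-ins:

    def pivot_spec(spec):
        # Stand-in for JobTemplate.pivot_spec: index questions by variable name.
        return {item['variable']: item for item in spec.get('spec', [])}

    def encrypt_value(value):
        # Stand-in for awx.main.utils.encryption.encrypt_value.
        return '$encrypted$' + value[::-1]

    def merge_password_default(new_item, old_spec):
        old = pivot_spec(old_spec).get(new_item['variable'], {})
        default = new_item.get('default', '')
        if isinstance(default, str) and default.startswith('$encrypted$'):
            old_default = old.get('default')
            if old_default == '' or (isinstance(old_default, str) and
                                     old_default.startswith('$encrypted$')):
                new_item['default'] = old_default        # keep the stored secret
            else:
                raise ValueError('$encrypted$ is a reserved keyword')
        elif 'default' in new_item:
            new_item['default'] = encrypt_value(default)  # new secret submitted
        return new_item

    old = {'spec': [{'variable': 'pw', 'type': 'password', 'default': '$encrypted$abc'}]}
    print(merge_password_default({'variable': 'pw', 'type': 'password',
                                  'default': '$encrypted$'}, old))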
|
||||
def delete(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
@@ -3206,7 +3323,9 @@ class JobTemplateCallback(GenericAPIView):
|
||||
for inventory_source in inventory_sources:
|
||||
if inventory_source.needs_update_on_launch:
|
||||
# FIXME: Doesn't check for any existing updates.
|
||||
inventory_update = inventory_source.create_inventory_update(launch_type='callback')
|
||||
inventory_update = inventory_source.create_inventory_update(
|
||||
**{'_eager_fields': {'launch_type': 'callback'}}
|
||||
)
|
||||
inventory_update.signal_start()
|
||||
inventory_update_pks.add(inventory_update.pk)
|
||||
inventory_update_qs = InventoryUpdate.objects.filter(pk__in=inventory_update_pks, status__in=('pending', 'waiting', 'running'))
|
||||
@@ -3240,7 +3359,8 @@ class JobTemplateCallback(GenericAPIView):
|
||||
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Everything is fine; actually create the job.
|
||||
kv = {"limit": limit, "launch_type": 'callback'}
|
||||
kv = {"limit": limit}
|
||||
kv.setdefault('_eager_fields', {})['launch_type'] = 'callback'
|
||||
if extra_vars is not None and job_template.ask_variables_on_launch:
|
||||
extra_vars_redacted, removed = extract_ansible_vars(extra_vars)
|
||||
kv['extra_vars'] = extra_vars_redacted
|
||||
@@ -4121,7 +4241,7 @@ class JobEventChildrenList(SubListAPIView):
|
||||
view_name = _('Job Event Children List')
|
||||
|
||||
|
||||
class JobEventHostsList(SubListAPIView):
|
||||
class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
|
||||
|
||||
model = Host
|
||||
serializer_class = HostSerializer
|
||||
@@ -4141,7 +4261,7 @@ class BaseJobEventsList(SubListAPIView):
|
||||
search_fields = ('stdout',)
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
response['X-UI-Max-Events'] = settings.RECOMMENDED_MAX_EVENTS_DISPLAY_HEADER
|
||||
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
|
||||
return super(BaseJobEventsList, self).finalize_response(request, response, *args, **kwargs)
|
||||
|
||||
|
||||
@@ -4457,7 +4577,7 @@ class StdoutANSIFilter(object):
|
||||
def __init__(self, fileobj):
|
||||
self.fileobj = fileobj
|
||||
self.extra_data = ''
|
||||
if hasattr(fileobj,'close'):
|
||||
if hasattr(fileobj, 'close'):
|
||||
self.close = fileobj.close
|
||||
|
||||
def read(self, size=-1):
|
||||
@@ -4491,97 +4611,69 @@ class UnifiedJobStdout(RetrieveAPIView):
|
||||
|
||||
def retrieve(self, request, *args, **kwargs):
|
||||
unified_job = self.get_object()
|
||||
obj_size = unified_job.result_stdout_size
|
||||
if request.accepted_renderer.format not in {'txt_download', 'ansi_download'} and obj_size > settings.STDOUT_MAX_BYTES_DISPLAY:
|
||||
response_message = _("Standard Output too large to display (%(text_size)d bytes), "
|
||||
"only download supported for sizes over %(supported_size)d bytes") % {
|
||||
'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY}
|
||||
try:
|
||||
target_format = request.accepted_renderer.format
|
||||
if target_format in ('html', 'api', 'json'):
|
||||
content_format = request.query_params.get('content_format', 'html')
|
||||
content_encoding = request.query_params.get('content_encoding', None)
|
||||
start_line = request.query_params.get('start_line', 0)
|
||||
end_line = request.query_params.get('end_line', None)
|
||||
dark_val = request.query_params.get('dark', '')
|
||||
dark = bool(dark_val and dark_val[0].lower() in ('1', 't', 'y'))
|
||||
content_only = bool(target_format in ('api', 'json'))
|
||||
dark_bg = (content_only and dark) or (not content_only and (dark or not dark_val))
|
||||
content, start, end, absolute_end = unified_job.result_stdout_raw_limited(start_line, end_line)
|
||||
|
||||
# Remove any ANSI escape sequences containing job event data.
|
||||
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
|
||||
|
||||
body = ansiconv.to_html(cgi.escape(content))
|
||||
|
||||
context = {
|
||||
'title': get_view_name(self.__class__),
|
||||
'body': mark_safe(body),
|
||||
'dark': dark_bg,
|
||||
'content_only': content_only,
|
||||
}
|
||||
data = render_to_string('api/stdout.html', context).strip()
|
||||
|
||||
if target_format == 'api':
|
||||
return Response(mark_safe(data))
|
||||
if target_format == 'json':
|
||||
if content_encoding == 'base64' and content_format == 'ansi':
|
||||
return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': b64encode(content.encode('utf-8'))})
|
||||
elif content_format == 'html':
|
||||
return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': body})
|
||||
return Response(data)
|
||||
elif target_format == 'txt':
|
||||
return Response(unified_job.result_stdout)
|
||||
elif target_format == 'ansi':
|
||||
return Response(unified_job.result_stdout_raw)
|
||||
elif target_format in {'txt_download', 'ansi_download'}:
|
||||
filename = '{type}_{pk}{suffix}.txt'.format(
|
||||
type=camelcase_to_underscore(unified_job.__class__.__name__),
|
||||
pk=unified_job.id,
|
||||
suffix='.ansi' if target_format == 'ansi_download' else ''
|
||||
)
|
||||
content_fd = unified_job.result_stdout_raw_handle(enforce_max_bytes=False)
|
||||
if target_format == 'txt_download':
|
||||
content_fd = StdoutANSIFilter(content_fd)
|
||||
response = HttpResponse(FileWrapper(content_fd), content_type='text/plain')
|
||||
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
|
||||
return response
|
||||
else:
|
||||
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
|
||||
except StdoutMaxBytesExceeded as e:
|
||||
response_message = _(
|
||||
"Standard Output too large to display ({text_size} bytes), "
|
||||
"only download supported for sizes over {supported_size} bytes").format(
|
||||
text_size=e.total, supported_size=e.supported
|
||||
)
|
||||
if request.accepted_renderer.format == 'json':
|
||||
return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message})
|
||||
else:
|
||||
return Response(response_message)
|
||||
|
||||
if request.accepted_renderer.format in ('html', 'api', 'json'):
|
||||
content_format = request.query_params.get('content_format', 'html')
|
||||
content_encoding = request.query_params.get('content_encoding', None)
|
||||
start_line = request.query_params.get('start_line', 0)
|
||||
end_line = request.query_params.get('end_line', None)
|
||||
dark_val = request.query_params.get('dark', '')
|
||||
dark = bool(dark_val and dark_val[0].lower() in ('1', 't', 'y'))
|
||||
content_only = bool(request.accepted_renderer.format in ('api', 'json'))
|
||||
dark_bg = (content_only and dark) or (not content_only and (dark or not dark_val))
|
||||
content, start, end, absolute_end = unified_job.result_stdout_raw_limited(start_line, end_line)
|
||||
|
||||
# Remove any ANSI escape sequences containing job event data.
|
||||
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
|
||||
|
||||
body = ansiconv.to_html(cgi.escape(content))
|
||||
|
||||
context = {
|
||||
'title': get_view_name(self.__class__),
|
||||
'body': mark_safe(body),
|
||||
'dark': dark_bg,
|
||||
'content_only': content_only,
|
||||
}
|
||||
data = render_to_string('api/stdout.html', context).strip()
|
||||
|
||||
if request.accepted_renderer.format == 'api':
|
||||
return Response(mark_safe(data))
|
||||
if request.accepted_renderer.format == 'json':
|
||||
if content_encoding == 'base64' and content_format == 'ansi':
|
||||
return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': b64encode(content)})
|
||||
elif content_format == 'html':
|
||||
return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': body})
|
||||
return Response(data)
|
||||
elif request.accepted_renderer.format == 'txt':
|
||||
return Response(unified_job.result_stdout)
|
||||
elif request.accepted_renderer.format == 'ansi':
|
||||
return Response(unified_job.result_stdout_raw)
|
||||
elif request.accepted_renderer.format in {'txt_download', 'ansi_download'}:
|
||||
if not os.path.exists(unified_job.result_stdout_file):
|
||||
write_fd = open(unified_job.result_stdout_file, 'w')
|
||||
with connection.cursor() as cursor:
|
||||
try:
|
||||
tablename, related_name = {
|
||||
Job: ('main_jobevent', 'job_id'),
|
||||
AdHocCommand: ('main_adhoccommandevent', 'ad_hoc_command_id'),
|
||||
}.get(unified_job.__class__, (None, None))
|
||||
if tablename is None:
|
||||
# stdout job event reconstruction isn't supported
|
||||
# for certain job types (such as inventory syncs),
|
||||
# so just grab the raw stdout from the DB
|
||||
write_fd.write(unified_job.result_stdout_text)
|
||||
write_fd.close()
|
||||
else:
|
||||
cursor.copy_expert(
|
||||
"copy (select stdout from {} where {}={} order by start_line) to stdout".format(
|
||||
tablename,
|
||||
related_name,
|
||||
unified_job.id
|
||||
),
|
||||
write_fd
|
||||
)
|
||||
write_fd.close()
|
||||
subprocess.Popen("sed -i 's/\\\\r\\\\n/\\n/g' {}".format(unified_job.result_stdout_file),
|
||||
shell=True).wait()
|
||||
except Exception as e:
|
||||
return Response({"error": _("Error generating stdout download file: {}".format(e))})
|
||||
try:
|
||||
content_fd = open(unified_job.result_stdout_file, 'r')
|
||||
if request.accepted_renderer.format == 'txt_download':
|
||||
# For txt downloads, filter out ANSI escape sequences.
|
||||
content_fd = StdoutANSIFilter(content_fd)
|
||||
suffix = ''
|
||||
else:
|
||||
suffix = '_ansi'
|
||||
response = HttpResponse(FileWrapper(content_fd), content_type='text/plain')
|
||||
response["Content-Disposition"] = 'attachment; filename="job_%s%s.txt"' % (str(unified_job.id), suffix)
|
||||
return response
|
||||
except Exception as e:
|
||||
return Response({"error": _("Error generating stdout download file: %s") % str(e)}, status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
|
||||
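A hedged example of pulling raw ANSI stdout through the JSON renderer with base64 encoding, matching the `content_format`/`content_encoding` parameters handled above (the host and job id are placeholders):

    import base64
    import requests

    resp = requests.get(
        'https://awx.example.com/api/v2/jobs/42/stdout/',
        params={'format': 'json', 'content_format': 'ansi',
                'content_encoding': 'base64', 'start_line': 0, 'end_line': 100},
        auth=('admin', 'password'),
        verify=False,
    )
    payload = resp.json()
    print(payload['range'])       # {'start': ..., 'end': ..., 'absolute_end': ...}
    print(base64.b64decode(payload['content']).decode('utf-8')[:200])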
|
||||
|
||||
class ProjectUpdateStdout(UnifiedJobStdout):
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Python
|
||||
import logging
|
||||
import urlparse
|
||||
from collections import OrderedDict
|
||||
|
||||
# Django
|
||||
from django.core.validators import URLValidator
|
||||
@@ -139,6 +140,8 @@ class KeyValueField(DictField):
|
||||
ret = super(KeyValueField, self).to_internal_value(data)
|
||||
for value in data.values():
|
||||
if not isinstance(value, six.string_types + six.integer_types + (float,)):
|
||||
if isinstance(value, OrderedDict):
|
||||
value = dict(value)
|
||||
self.fail('invalid_child', input=value)
|
||||
return ret
|
||||
|
||||
|
||||
@@ -120,6 +120,9 @@ class SettingsRegistry(object):
|
||||
def is_setting_read_only(self, setting):
|
||||
return bool(self._registry.get(setting, {}).get('read_only', False))
|
||||
|
||||
def get_setting_category(self, setting):
|
||||
return self._registry.get(setting, {}).get('category_slug', None)
|
||||
|
||||
def get_setting_field(self, setting, mixin_class=None, for_user=False, **kwargs):
|
||||
from rest_framework.fields import empty
|
||||
field_kwargs = {}
|
||||
|
||||
@@ -87,8 +87,10 @@ class SettingSingletonSerializer(serializers.Serializer):
|
||||
if self.instance and not hasattr(self.instance, key):
|
||||
continue
|
||||
extra_kwargs = {}
|
||||
# Make LICENSE read-only here; update via /api/v1/config/ only.
|
||||
if key == 'LICENSE':
|
||||
# Make LICENSE and AWX_ISOLATED_KEY_GENERATION read-only here;
|
||||
# LICENSE is only updated via /api/v1/config/
|
||||
# AWX_ISOLATED_KEY_GENERATION is only set/unset via the setup playbook
|
||||
if key in ('LICENSE', 'AWX_ISOLATED_KEY_GENERATION'):
|
||||
extra_kwargs['read_only'] = True
|
||||
field = settings_registry.get_setting_field(key, mixin_class=SettingFieldMixin, for_user=bool(category_slug == 'user'), **extra_kwargs)
|
||||
fields[key] = field
|
||||
|
||||
@@ -14,6 +14,7 @@ from django.conf import settings, UserSettingsHolder
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db import ProgrammingError, OperationalError
|
||||
from django.utils.functional import cached_property
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.fields import empty, SkipField
|
||||
@@ -230,7 +231,8 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
self.__dict__['cache'] = EncryptedCacheProxy(cache, registry)
|
||||
self.__dict__['registry'] = registry
|
||||
|
||||
def _get_supported_settings(self):
|
||||
@cached_property
|
||||
def all_supported_settings(self):
|
||||
return self.registry.get_registered_settings()
|
||||
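Swapping the `_get_supported_settings()` helper for a `cached_property` means the registry lookup happens once per wrapper instance instead of on every attribute access. A minimal stand-alone illustration of the caching behavior using `functools.cached_property` (Python 3.8+ stdlib; Django's `cached_property` used above behaves the same way):

    import functools

    class Registry(object):
        calls = 0
        def get_registered_settings(self):
            Registry.calls += 1
            return ['LICENSE', 'AWX_ISOLATED_KEY_GENERATION']

    class SettingsWrapper(object):
        def __init__(self, registry):
            self.registry = registry

        @functools.cached_property
        def all_supported_settings(self):
            return self.registry.get_registered_settings()

    w = SettingsWrapper(Registry())
    w.all_supported_settings
    w.all_supported_settings
    print(Registry.calls)   # 1 -- the registry is only consulted once per instance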
|
||||
def _preload_cache(self):
|
||||
@@ -382,7 +384,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
|
||||
def __getattr__(self, name):
|
||||
value = empty
|
||||
if name in self._get_supported_settings():
|
||||
if name in self.all_supported_settings:
|
||||
with _log_database_error():
|
||||
value = self._get_local(name)
|
||||
if value is not empty:
|
||||
@@ -414,7 +416,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
# post_save handler will delete from cache when changed.
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if name in self._get_supported_settings():
|
||||
if name in self.all_supported_settings:
|
||||
with _log_database_error():
|
||||
self._set_local(name, value)
|
||||
else:
|
||||
@@ -430,7 +432,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
# pre_delete handler will delete from cache.
|
||||
|
||||
def __delattr__(self, name):
|
||||
if name in self._get_supported_settings():
|
||||
if name in self.all_supported_settings:
|
||||
with _log_database_error():
|
||||
self._del_local(name)
|
||||
else:
|
||||
@@ -440,7 +442,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
keys = []
|
||||
with _log_database_error():
|
||||
for setting in Setting.objects.filter(
|
||||
key__in=self._get_supported_settings(), user__isnull=True):
|
||||
key__in=self.all_supported_settings, user__isnull=True):
|
||||
# Skip returning settings that have been overridden but are
|
||||
# considered to be "not set".
|
||||
if setting.value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
|
||||
@@ -454,7 +456,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
|
||||
def is_overridden(self, setting):
|
||||
set_locally = False
|
||||
if setting in self._get_supported_settings():
|
||||
if setting in self.all_supported_settings:
|
||||
with _log_database_error():
|
||||
set_locally = Setting.objects.filter(key=setting, user__isnull=True).exists()
|
||||
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
|
||||
|
||||
@@ -6,10 +6,10 @@ import glob
|
||||
import os
|
||||
import shutil
|
||||
|
||||
# RedBaron
|
||||
from redbaron import RedBaron, indent
|
||||
# AWX
|
||||
from awx.conf.registry import settings_registry
|
||||
|
||||
__all__ = ['comment_assignments']
|
||||
__all__ = ['comment_assignments', 'conf_to_dict']
|
||||
|
||||
|
||||
def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix='.old'):
|
||||
@@ -30,6 +30,8 @@ def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix=
|
||||
|
||||
|
||||
def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup_filename=None):
|
||||
from redbaron import RedBaron, indent
|
||||
|
||||
if isinstance(assignment_names, basestring):
|
||||
assignment_names = [assignment_names]
|
||||
else:
|
||||
@@ -103,6 +105,13 @@ def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup
|
||||
return '\n'.join(diff_lines)
|
||||
|
||||
|
||||
def conf_to_dict(obj):
|
||||
return {
|
||||
'category': settings_registry.get_setting_category(obj.key),
|
||||
'name': obj.key,
|
||||
}
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
pattern = os.path.join(os.path.dirname(__file__), '..', 'settings', 'local_*.py')
|
||||
diffs = comment_assignments(pattern, ['AUTH_LDAP_ORGANIZATION_MAP'])
|
||||
|
||||
@@ -123,6 +123,8 @@ class EventContext(object):
|
||||
event_data['job_id'] = int(os.getenv('JOB_ID', '0'))
|
||||
if os.getenv('AD_HOC_COMMAND_ID', ''):
|
||||
event_data['ad_hoc_command_id'] = int(os.getenv('AD_HOC_COMMAND_ID', '0'))
|
||||
if os.getenv('PROJECT_UPDATE_ID', ''):
|
||||
event_data['project_update_id'] = int(os.getenv('PROJECT_UPDATE_ID', '0'))
|
||||
event_data.setdefault('pid', os.getpid())
|
||||
event_data.setdefault('uuid', str(uuid.uuid4()))
|
||||
event_data.setdefault('created', datetime.datetime.utcnow().isoformat())
|
||||
@@ -145,7 +147,7 @@ class EventContext(object):
|
||||
event_data['res'] = {}
|
||||
event_dict = dict(event=event, event_data=event_data)
|
||||
for key in event_data.keys():
|
||||
if key in ('job_id', 'ad_hoc_command_id', 'uuid', 'parent_uuid', 'created',):
|
||||
if key in ('job_id', 'ad_hoc_command_id', 'project_update_id', 'uuid', 'parent_uuid', 'created',):
|
||||
event_dict[key] = event_data.pop(key)
|
||||
elif key in ('verbosity', 'pid'):
|
||||
event_dict[key] = event_data[key]
|
||||
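The callback change above reads `PROJECT_UPDATE_ID` from the environment and promotes it, like `job_id` and `ad_hoc_command_id`, from `event_data` to the top level of the emitted event. A rough stand-alone sketch of that promotion step (only the environment variable name is taken from the snippet above; the rest is illustrative):

    import os

    TOP_LEVEL_KEYS = ('job_id', 'ad_hoc_command_id', 'project_update_id',
                      'uuid', 'parent_uuid', 'created')

    def build_event(event, event_data):
        if os.getenv('PROJECT_UPDATE_ID', ''):
            event_data['project_update_id'] = int(os.getenv('PROJECT_UPDATE_ID', '0'))
        event_dict = dict(event=event, event_data=event_data)
        for key in list(event_data.keys()):
            if key in TOP_LEVEL_KEYS:
                event_dict[key] = event_data.pop(key)
        return event_dict

    os.environ['PROJECT_UPDATE_ID'] = '7'
    print(build_event('runner_on_ok', {'res': {}, 'task': 'update scm'}))
    # -> project_update_id ends up alongside 'event', not inside 'event_data'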
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -12,6 +12,7 @@ from django.db.models import Q, Prefetch
|
||||
from django.contrib.auth.models import User
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.exceptions import ParseError, PermissionDenied, ValidationError
|
||||
@@ -31,7 +32,7 @@ from awx.conf.license import LicenseForbids, feature_enabled
|
||||
|
||||
__all__ = ['get_user_queryset', 'check_user_access', 'check_user_access_with_errors',
|
||||
'user_accessible_objects', 'consumer_access',
|
||||
'user_admin_role', 'StateConflict',]
|
||||
'user_admin_role', 'ActiveJobConflict',]
|
||||
|
||||
logger = logging.getLogger('awx.main.access')
|
||||
|
||||
@@ -71,9 +72,15 @@ def get_object_from_data(field, Model, data, obj=None):
|
||||
raise ParseError(_("Bad data found in related field %s." % field))
|
||||
|
||||
|
||||
class StateConflict(ValidationError):
|
||||
class ActiveJobConflict(ValidationError):
|
||||
status_code = 409
|
||||
|
||||
def __init__(self, active_jobs):
|
||||
super(ActiveJobConflict, self).__init__({
|
||||
"conflict": _("Resource is being used by running jobs."),
|
||||
"active_jobs": active_jobs
|
||||
})
|
||||
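Folding the payload into the exception keeps the 409 responses uniform across the access classes below, which previously built the dict by hand. A minimal stand-alone imitation of the class and the body a client would receive (a plain Exception stands in for DRF's ValidationError):

    import json

    class ActiveJobConflict(Exception):
        status_code = 409

        def __init__(self, active_jobs):
            self.detail = {
                "conflict": "Resource is being used by running jobs.",
                "active_jobs": active_jobs,
            }
            super(ActiveJobConflict, self).__init__(self.detail)

    try:
        raise ActiveJobConflict([{"type": "job", "id": 12}])
    except ActiveJobConflict as e:
        print(e.status_code)          # 409
        print(json.dumps(e.detail))   # body the API client would see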
|
||||
|
||||
def register_access(model_class, access_class):
|
||||
access_registry[model_class] = access_class
|
||||
@@ -568,8 +575,7 @@ class OrganizationAccess(BaseAccess):
|
||||
active_jobs.extend([dict(type="inventory_update", id=o.id)
|
||||
for o in InventoryUpdate.objects.filter(inventory_source__inventory__organization=obj, status__in=ACTIVE_STATES)])
|
||||
if len(active_jobs) > 0:
|
||||
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
|
||||
"active_jobs": active_jobs})
|
||||
raise ActiveJobConflict(active_jobs)
|
||||
return True
|
||||
|
||||
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
@@ -662,8 +668,7 @@ class InventoryAccess(BaseAccess):
|
||||
active_jobs.extend([dict(type="ad_hoc_command", id=o.id)
|
||||
for o in AdHocCommand.objects.filter(inventory=obj, status__in=ACTIVE_STATES)])
|
||||
if len(active_jobs) > 0:
|
||||
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
|
||||
"active_jobs": active_jobs})
|
||||
raise ActiveJobConflict(active_jobs)
|
||||
return True
|
||||
|
||||
def can_run_ad_hoc_commands(self, obj):
|
||||
@@ -788,8 +793,7 @@ class GroupAccess(BaseAccess):
|
||||
active_jobs.extend([dict(type="inventory_update", id=o.id)
|
||||
for o in InventoryUpdate.objects.filter(inventory_source__in=obj.inventory_sources.all(), status__in=ACTIVE_STATES)])
|
||||
if len(active_jobs) > 0:
|
||||
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
|
||||
"active_jobs": active_jobs})
|
||||
raise ActiveJobConflict(active_jobs)
|
||||
return True
|
||||
|
||||
def can_start(self, obj, validate_license=True):
|
||||
@@ -839,8 +843,7 @@ class InventorySourceAccess(BaseAccess):
|
||||
return False
|
||||
active_jobs_qs = InventoryUpdate.objects.filter(inventory_source=obj, status__in=ACTIVE_STATES)
|
||||
if active_jobs_qs.exists():
|
||||
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
|
||||
"active_jobs": [dict(type="inventory_update", id=o.id) for o in active_jobs_qs.all()]})
|
||||
raise ActiveJobConflict([dict(type="inventory_update", id=o.id) for o in active_jobs_qs.all()])
|
||||
return True
|
||||
|
||||
@check_superuser
|
||||
@@ -943,7 +946,8 @@ class CredentialAccess(BaseAccess):
|
||||
model = Credential
|
||||
select_related = ('created_by', 'modified_by',)
|
||||
prefetch_related = ('admin_role', 'use_role', 'read_role',
|
||||
'admin_role__parents', 'admin_role__members',)
|
||||
'admin_role__parents', 'admin_role__members',
|
||||
'credential_type', 'organization')
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.accessible_objects(self.user, 'read_role')
|
||||
@@ -1090,8 +1094,7 @@ class ProjectAccess(BaseAccess):
|
||||
active_jobs.extend([dict(type="project_update", id=o.id)
|
||||
for o in ProjectUpdate.objects.filter(project=obj, status__in=ACTIVE_STATES)])
|
||||
if len(active_jobs) > 0:
|
||||
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
|
||||
"active_jobs": active_jobs})
|
||||
raise ActiveJobConflict(active_jobs)
|
||||
return True
|
||||
|
||||
@check_superuser
|
||||
@@ -1124,8 +1127,11 @@ class ProjectUpdateAccess(BaseAccess):
|
||||
|
||||
def can_start(self, obj, validate_license=True):
|
||||
# for relaunching
|
||||
if obj and obj.project:
|
||||
return self.user in obj.project.update_role
|
||||
try:
|
||||
if obj and obj.project:
|
||||
return self.user in obj.project.update_role
|
||||
except ObjectDoesNotExist:
|
||||
pass
|
||||
return False
|
||||
|
||||
@check_superuser
|
||||
@@ -1142,7 +1148,11 @@ class JobTemplateAccess(BaseAccess):
|
||||
model = JobTemplate
|
||||
select_related = ('created_by', 'modified_by', 'inventory', 'project',
|
||||
'next_schedule',)
|
||||
prefetch_related = ('credentials__credential_type',)
|
||||
prefetch_related = (
|
||||
'instance_groups',
|
||||
'credentials__credential_type',
|
||||
Prefetch('labels', queryset=Label.objects.all().order_by('name')),
|
||||
)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.accessible_objects(self.user, 'read_role')
|
||||
@@ -1265,8 +1275,7 @@ class JobTemplateAccess(BaseAccess):
|
||||
active_jobs = [dict(type="job", id=o.id)
|
||||
for o in obj.jobs.filter(status__in=ACTIVE_STATES)]
|
||||
if len(active_jobs) > 0:
|
||||
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
|
||||
"active_jobs": active_jobs})
|
||||
raise ActiveJobConflict(active_jobs)
|
||||
return True
|
||||
|
||||
@check_superuser
|
||||
@@ -1305,7 +1314,7 @@ class JobAccess(BaseAccess):
|
||||
|
||||
model = Job
|
||||
select_related = ('created_by', 'modified_by', 'job_template', 'inventory',
|
||||
'project', 'job_template',)
|
||||
'project', 'project_update',)
|
||||
prefetch_related = (
|
||||
'unified_job_template',
|
||||
'instance_group',
|
||||
@@ -1771,8 +1780,7 @@ class WorkflowJobTemplateAccess(BaseAccess):
|
||||
active_jobs = [dict(type="workflow_job", id=o.id)
|
||||
for o in obj.workflow_jobs.filter(status__in=ACTIVE_STATES)]
|
||||
if len(active_jobs) > 0:
|
||||
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
|
||||
"active_jobs": active_jobs})
|
||||
raise ActiveJobConflict(active_jobs)
|
||||
return True
|
||||
|
||||
|
||||
@@ -1979,6 +1987,64 @@ class JobEventAccess(BaseAccess):
|
||||
return False
|
||||
|
||||
|
||||
class ProjectUpdateEventAccess(BaseAccess):
|
||||
'''
|
||||
I can see project update event records whenever I can access the project update
|
||||
'''
|
||||
|
||||
model = ProjectUpdateEvent
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
Q(project_update__in=ProjectUpdate.accessible_pk_qs(self.user, 'read_role')))
|
||||
|
||||
def can_add(self, data):
|
||||
return False
|
||||
|
||||
def can_change(self, obj, data):
|
||||
return False
|
||||
|
||||
def can_delete(self, obj):
|
||||
return False
|
||||
|
||||
|
||||
class InventoryUpdateEventAccess(BaseAccess):
|
||||
'''
|
||||
I can see inventory update event records whenever I can access the inventory update
|
||||
'''
|
||||
|
||||
model = InventoryUpdateEvent
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
Q(inventory_update__in=InventoryUpdate.accessible_pk_qs(self.user, 'read_role')))
|
||||
|
||||
def can_add(self, data):
|
||||
return False
|
||||
|
||||
def can_change(self, obj, data):
|
||||
return False
|
||||
|
||||
def can_delete(self, obj):
|
||||
return False
|
||||
|
||||
|
||||
class SystemJobEventAccess(BaseAccess):
|
||||
'''
|
||||
I can only see and manage System Job events if I'm a superuser
|
||||
'''
|
||||
model = SystemJobEvent
|
||||
|
||||
def can_add(self, data):
|
||||
return False
|
||||
|
||||
def can_change(self, obj, data):
|
||||
return False
|
||||
|
||||
def can_delete(self, obj):
|
||||
return False
|
||||
|
||||
|
||||
class UnifiedJobTemplateAccess(BaseAccess):
|
||||
'''
|
||||
I can see a unified job template whenever I can see the same project,
|
||||
|
||||
@@ -5,7 +5,7 @@ import re
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'satellite6', 'cloudforms')
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'cloudforms', 'tower')
|
||||
SCHEDULEABLE_PROVIDERS = CLOUD_PROVIDERS + ('custom', 'scm',)
|
||||
PRIVILEGE_ESCALATION_METHODS = [
|
||||
('sudo', _('Sudo')), ('su', _('Su')), ('pbrun', _('Pbrun')), ('pfexec', _('Pfexec')),
|
||||
|
||||
@@ -1,24 +1,36 @@
class AwxTaskError(Exception):
    """Base exception for errors in unified job runs"""
    def __init__(self, task, message=None):
# Copyright (c) 2018 Ansible by Red Hat
# All Rights Reserved.

# Celery does not respect exception type when using a serializer different than pickle;
# and awx uses the json serializer
# https://github.com/celery/celery/issues/3586


class _AwxTaskError():
    def build_exception(self, task, message=None):
        if message is None:
            message = "Execution error running {}".format(task.log_format)
        super(AwxTaskError, self).__init__(message)
        self.task = task


class TaskCancel(AwxTaskError):
    """Canceled flag caused run_pexpect to kill the job run"""
    def __init__(self, task, rc):
        super(TaskCancel, self).__init__(
            task, message="{} was canceled (rc={})".format(task.log_format, rc))
        self.rc = rc
        e = Exception(message)
        e.task = task
        e.is_awx_task_error = True
        return e

    def TaskCancel(self, task, rc):
        """Canceled flag caused run_pexpect to kill the job run"""
        message = "{} was canceled (rc={})".format(task.log_format, rc)
        e = self.build_exception(task, message)
        e.rc = rc
        e.awx_task_error_type = "TaskCancel"
        return e

    def TaskError(self, task, rc):
        """Userspace error (non-zero exit code) in run_pexpect subprocess"""
        message = "{} encountered an error (rc={}), please see task stdout for details.".format(task.log_format, rc)
        e = self.build_exception(task, message)
        e.rc = rc
        e.awx_task_error_type = "TaskError"
        return e


class TaskError(AwxTaskError):
    """Userspace error (non-zero exit code) in run_pexpect subprocess"""
    def __init__(self, task, rc):
        super(TaskError, self).__init__(
            task, message="%s encountered an error (rc=%s), please see task stdout for details.".format(task.log_format, rc))
        self.rc = rc
AwxTaskError = _AwxTaskError()
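
Because this refactor swaps real exception subclasses for plain Exception instances carrying marker attributes (so they survive Celery's JSON serializer), callers can no longer catch TaskError by type. A minimal sketch of how a call site might raise and inspect these errors, assuming only the factory above; FakeTask and run_task are illustrative stand-ins, not AWX code:

# Minimal sketch, not AWX code: `task` just needs a `log_format` attribute.
class FakeTask(object):
    log_format = 'job 42 (running)'

def run_task(task, rc):
    # Hypothetical call site that raises the factory-built error on a bad exit code.
    if rc != 0:
        raise AwxTaskError.TaskError(task, rc)

try:
    run_task(FakeTask(), rc=1)
except Exception as exc:
    # Type information is lost over the JSON serializer, so check the marker
    # attributes set by build_exception() rather than the exception class.
    if getattr(exc, 'is_awx_task_error', False):
        print('{} rc={} for {}'.format(exc.awx_task_error_type, exc.rc, exc.task.log_format))
    else:
        raise
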
@@ -1,5 +1,4 @@
|
||||
import base64
|
||||
import cStringIO
|
||||
import codecs
|
||||
import StringIO
|
||||
import json
|
||||
@@ -143,7 +142,7 @@ class IsolatedManager(object):
|
||||
|
||||
# if an ssh private key fifo exists, read its contents and delete it
|
||||
if self.ssh_key_path:
|
||||
buff = cStringIO.StringIO()
|
||||
buff = StringIO.StringIO()
|
||||
with open(self.ssh_key_path, 'r') as fifo:
|
||||
for line in fifo:
|
||||
buff.write(line)
|
||||
@@ -183,7 +182,7 @@ class IsolatedManager(object):
|
||||
job_timeout=settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
|
||||
pexpect_timeout=5
|
||||
)
|
||||
output = buff.getvalue()
|
||||
output = buff.getvalue().encode('utf-8')
|
||||
playbook_logger.info('Isolated job {} dispatch:\n{}'.format(self.instance.id, output))
|
||||
if status != 'successful':
|
||||
self.stdout_handle.write(output)
|
||||
@@ -283,7 +282,7 @@ class IsolatedManager(object):
|
||||
status = 'failed'
|
||||
output = ''
|
||||
rc = None
|
||||
buff = cStringIO.StringIO()
|
||||
buff = StringIO.StringIO()
|
||||
last_check = time.time()
|
||||
seek = 0
|
||||
job_timeout = remaining = self.job_timeout
|
||||
@@ -304,7 +303,7 @@ class IsolatedManager(object):
|
||||
time.sleep(1)
|
||||
continue
|
||||
|
||||
buff = cStringIO.StringIO()
|
||||
buff = StringIO.StringIO()
|
||||
logger.debug('Checking on isolated job {} with `check_isolated.yml`.'.format(self.instance.id))
|
||||
status, rc = IsolatedManager.run_pexpect(
|
||||
args, self.awx_playbook_path(), self.management_env, buff,
|
||||
@@ -314,7 +313,7 @@ class IsolatedManager(object):
|
||||
pexpect_timeout=5,
|
||||
proot_cmd=self.proot_cmd
|
||||
)
|
||||
output = buff.getvalue()
|
||||
output = buff.getvalue().encode('utf-8')
|
||||
playbook_logger.info('Isolated job {} check:\n{}'.format(self.instance.id, output))
|
||||
|
||||
path = self.path_to('artifacts', 'stdout')
|
||||
@@ -356,14 +355,14 @@ class IsolatedManager(object):
|
||||
}
|
||||
args = self._build_args('clean_isolated.yml', '%s,' % self.host, extra_vars)
|
||||
logger.debug('Cleaning up job {} on isolated host with `clean_isolated.yml` playbook.'.format(self.instance.id))
|
||||
buff = cStringIO.StringIO()
|
||||
buff = StringIO.StringIO()
|
||||
timeout = max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT)
|
||||
status, rc = IsolatedManager.run_pexpect(
|
||||
args, self.awx_playbook_path(), self.management_env, buff,
|
||||
idle_timeout=timeout, job_timeout=timeout,
|
||||
pexpect_timeout=5
|
||||
)
|
||||
output = buff.getvalue()
|
||||
output = buff.getvalue().encode('utf-8')
|
||||
playbook_logger.info('Isolated job {} cleanup:\n{}'.format(self.instance.id, output))
|
||||
|
||||
if status != 'successful':
|
||||
@@ -406,14 +405,14 @@ class IsolatedManager(object):
|
||||
env = cls._base_management_env()
|
||||
env['ANSIBLE_STDOUT_CALLBACK'] = 'json'
|
||||
|
||||
buff = cStringIO.StringIO()
|
||||
buff = StringIO.StringIO()
|
||||
timeout = max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT)
|
||||
status, rc = IsolatedManager.run_pexpect(
|
||||
args, cls.awx_playbook_path(), env, buff,
|
||||
idle_timeout=timeout, job_timeout=timeout,
|
||||
pexpect_timeout=5
|
||||
)
|
||||
output = buff.getvalue()
|
||||
output = buff.getvalue().encode('utf-8')
|
||||
buff.close()
|
||||
|
||||
try:
|
||||
@@ -445,7 +444,7 @@ class IsolatedManager(object):
|
||||
instance.hostname, instance.modified))
|
||||
|
||||
@staticmethod
|
||||
def wrap_stdout_handle(instance, private_data_dir, stdout_handle, event_data_key='job_id'):
|
||||
def get_stdout_handle(instance, private_data_dir, event_data_key='job_id'):
|
||||
dispatcher = CallbackQueueDispatcher()
|
||||
|
||||
def job_event_callback(event_data):
|
||||
@@ -463,7 +462,7 @@ class IsolatedManager(object):
|
||||
event_data.get('event', ''), event_data['uuid'], instance.id, event_data))
|
||||
dispatcher.dispatch(event_data)
|
||||
|
||||
return OutputEventFilter(stdout_handle, job_event_callback)
|
||||
return OutputEventFilter(job_event_callback)
|
||||
|
||||
def run(self, instance, host, private_data_dir, proot_temp_dir):
|
||||
"""
|
||||
|
||||
@@ -99,7 +99,6 @@ def run_pexpect(args, cwd, env, logfile,
|
||||
password_patterns = expect_passwords.keys()
|
||||
password_values = expect_passwords.values()
|
||||
|
||||
logfile_pos = logfile.tell()
|
||||
child = pexpect.spawn(
|
||||
args[0], args[1:], cwd=cwd, env=env, ignore_sighup=True,
|
||||
encoding='utf-8', echo=False,
|
||||
@@ -116,8 +115,6 @@ def run_pexpect(args, cwd, env, logfile,
|
||||
password = password_values[result_id]
|
||||
if password is not None:
|
||||
child.sendline(password)
|
||||
if logfile_pos != logfile.tell():
|
||||
logfile_pos = logfile.tell()
|
||||
last_stdout_update = time.time()
|
||||
if cancelled_callback:
|
||||
try:
|
||||
|
||||
@@ -6,6 +6,7 @@ import copy
|
||||
import json
|
||||
import re
|
||||
import six
|
||||
import urllib
|
||||
|
||||
from jinja2 import Environment, StrictUndefined
|
||||
from jinja2.exceptions import UndefinedError
|
||||
@@ -352,6 +353,7 @@ class SmartFilterField(models.TextField):
|
||||
# https://docs.python.org/2/library/stdtypes.html#truth-value-testing
|
||||
if not value:
|
||||
return None
|
||||
value = urllib.unquote(value)
|
||||
try:
|
||||
SmartFilter().query_from_string(value)
|
||||
except RuntimeError, e:
|
||||
|
||||
@@ -173,6 +173,7 @@ class AnsibleInventoryLoader(object):
|
||||
def load(self):
|
||||
base_args = self.get_base_args()
|
||||
logger.info('Reading Ansible inventory source: %s', self.source)
|
||||
|
||||
data = self.command_to_json(base_args + ['--list'])
|
||||
|
||||
# TODO: remove after we run custom scripts through ansible-inventory
|
||||
@@ -225,6 +226,7 @@ def load_inventory_source(source, group_filter_re=None,
|
||||
'''
|
||||
# Sanity check: We sanitize these module names for our API but Ansible proper doesn't follow
|
||||
# good naming conventions
|
||||
source = source.replace('rhv.py', 'ovirt4.py')
|
||||
source = source.replace('satellite6.py', 'foreman.py')
|
||||
source = source.replace('vmware.py', 'vmware_inventory.py')
|
||||
if not os.path.exists(source):
|
||||
@@ -600,27 +602,20 @@ class Command(BaseCommand):
|
||||
|
||||
def _update_inventory(self):
|
||||
'''
|
||||
Update/overwrite variables from "all" group. If importing from a
|
||||
cloud source attached to a specific group, variables will be set on
|
||||
the base group, otherwise they will be set on the whole inventory.
|
||||
Update inventory variables from "all" group.
|
||||
'''
|
||||
# FIXME: figure out how "all" variables are handled in the new inventory source system
|
||||
# TODO: We disable variable overwrite here in case user-defined inventory variables get
|
||||
# mangled. But we still need to figure out a better way of processing multiple inventory
|
||||
# update variables mixing with each other.
|
||||
all_obj = self.inventory
|
||||
all_name = 'inventory'
|
||||
db_variables = all_obj.variables_dict
|
||||
if self.overwrite_vars:
|
||||
db_variables = self.all_group.variables
|
||||
else:
|
||||
db_variables.update(self.all_group.variables)
|
||||
db_variables.update(self.all_group.variables)
|
||||
if db_variables != all_obj.variables_dict:
|
||||
all_obj.variables = json.dumps(db_variables)
|
||||
all_obj.save(update_fields=['variables'])
|
||||
if self.overwrite_vars:
|
||||
logger.info('%s variables replaced from "all" group', all_name.capitalize())
|
||||
else:
|
||||
logger.info('%s variables updated from "all" group', all_name.capitalize())
|
||||
logger.info('Inventory variables updated from "all" group')
|
||||
else:
|
||||
logger.info('%s variables unmodified', all_name.capitalize())
|
||||
logger.info('Inventory variables unmodified')
|
||||
|
||||
def _create_update_groups(self):
|
||||
'''
|
||||
|
||||
@@ -12,11 +12,17 @@ from awx.main.models import (
|
||||
UnifiedJob,
|
||||
Job,
|
||||
AdHocCommand,
|
||||
ProjectUpdate,
|
||||
InventoryUpdate,
|
||||
SystemJob
|
||||
)
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.api.serializers import (
|
||||
JobEventWebSocketSerializer,
|
||||
AdHocCommandEventWebSocketSerializer,
|
||||
ProjectUpdateEventWebSocketSerializer,
|
||||
InventoryUpdateEventWebSocketSerializer,
|
||||
SystemJobEventWebSocketSerializer
|
||||
)
|
||||
|
||||
|
||||
@@ -60,7 +66,16 @@ class ReplayJobEvents():
|
||||
return self.replay_elapsed().total_seconds() - (self.recording_elapsed(created).total_seconds() * (1.0 / speed))
|
||||
|
||||
def get_job_events(self, job):
|
||||
job_events = job.job_events.order_by('created')
|
||||
if type(job) is Job:
|
||||
job_events = job.job_events.order_by('created')
|
||||
elif type(job) is AdHocCommand:
|
||||
job_events = job.ad_hoc_command_events.order_by('created')
|
||||
elif type(job) is ProjectUpdate:
|
||||
job_events = job.project_update_events.order_by('created')
|
||||
elif type(job) is InventoryUpdate:
|
||||
job_events = job.inventory_update_events.order_by('created')
|
||||
elif type(job) is SystemJob:
|
||||
job_events = job.system_job_events.order_by('created')
|
||||
if job_events.count() == 0:
|
||||
raise RuntimeError("No events for job id {}".format(job.id))
|
||||
return job_events
|
||||
@@ -70,6 +85,12 @@ class ReplayJobEvents():
|
||||
return JobEventWebSocketSerializer
|
||||
elif type(job) is AdHocCommand:
|
||||
return AdHocCommandEventWebSocketSerializer
|
||||
elif type(job) is ProjectUpdate:
|
||||
return ProjectUpdateEventWebSocketSerializer
|
||||
elif type(job) is InventoryUpdate:
|
||||
return InventoryUpdateEventWebSocketSerializer
|
||||
elif type(job) is SystemJob:
|
||||
return SystemJobEventWebSocketSerializer
|
||||
else:
|
||||
raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job)))
|
||||
sys.exit(1)
|
||||
|
||||
@@ -3,13 +3,14 @@
|
||||
|
||||
# Python
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import time
|
||||
from uuid import UUID
|
||||
from multiprocessing import Process
|
||||
from multiprocessing import Queue as MPQueue
|
||||
from Queue import Empty as QueueEmpty
|
||||
from Queue import Full as QueueFull
|
||||
import os
|
||||
|
||||
from kombu import Connection, Exchange, Queue
|
||||
from kombu.mixins import ConsumerMixin
|
||||
@@ -18,11 +19,13 @@ from kombu.mixins import ConsumerMixin
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection
|
||||
from django.db import DatabaseError
|
||||
from django.db import DatabaseError, OperationalError
|
||||
from django.db.utils import InterfaceError, InternalError
|
||||
from django.core.cache import cache as django_cache
|
||||
|
||||
# AWX
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
|
||||
@@ -39,6 +42,9 @@ class WorkerSignalHandler:
|
||||
|
||||
|
||||
class CallbackBrokerWorker(ConsumerMixin):
|
||||
|
||||
MAX_RETRIES = 2
|
||||
|
||||
def __init__(self, connection, use_workers=True):
|
||||
self.connection = connection
|
||||
self.worker_queues = []
|
||||
@@ -123,8 +129,17 @@ class CallbackBrokerWorker(ConsumerMixin):
|
||||
logger.error("Exception on worker thread, restarting: " + str(e))
|
||||
continue
|
||||
try:
|
||||
if 'job_id' not in body and 'ad_hoc_command_id' not in body:
|
||||
raise Exception('Payload does not have a job_id or ad_hoc_command_id')
|
||||
|
||||
event_map = {
|
||||
'job_id': JobEvent,
|
||||
'ad_hoc_command_id': AdHocCommandEvent,
|
||||
'project_update_id': ProjectUpdateEvent,
|
||||
'inventory_update_id': InventoryUpdateEvent,
|
||||
'system_job_id': SystemJobEvent,
|
||||
}
|
||||
|
||||
if not any([key in body for key in event_map]):
|
||||
raise Exception('Payload does not have a job identifier')
|
||||
if settings.DEBUG:
|
||||
from pygments import highlight
|
||||
from pygments.lexers import PythonLexer
|
||||
@@ -132,14 +147,51 @@ class CallbackBrokerWorker(ConsumerMixin):
|
||||
from pprint import pformat
|
||||
logger.info('Body: {}'.format(
|
||||
highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
|
||||
))
|
||||
try:
|
||||
if 'job_id' in body:
|
||||
JobEvent.create_from_data(**body)
|
||||
elif 'ad_hoc_command_id' in body:
|
||||
AdHocCommandEvent.create_from_data(**body)
|
||||
except DatabaseError as e:
|
||||
logger.error('Database Error Saving Job Event: {}'.format(e))
|
||||
)[:1024 * 4])
|
||||
|
||||
def _save_event_data():
|
||||
for key, cls in event_map.items():
|
||||
if key in body:
|
||||
cls.create_from_data(**body)
|
||||
|
||||
job_identifier = 'unknown job'
|
||||
for key in event_map.keys():
|
||||
if key in body:
|
||||
job_identifier = body[key]
|
||||
break
|
||||
|
||||
if body.get('event') == 'EOF':
|
||||
# EOF events are sent when stdout for the running task is
|
||||
# closed. don't actually persist them to the database; we
|
||||
# just use them to report `summary` websocket events as an
|
||||
# approximation for when a job is "done"
|
||||
emit_channel_notification(
|
||||
'jobs-summary',
|
||||
dict(group_name='jobs', unified_job_id=job_identifier)
|
||||
)
|
||||
continue
|
||||
|
||||
retries = 0
|
||||
while retries <= self.MAX_RETRIES:
|
||||
try:
|
||||
_save_event_data()
|
||||
break
|
||||
except (OperationalError, InterfaceError, InternalError) as e:
|
||||
if retries >= self.MAX_RETRIES:
|
||||
logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
|
||||
os.kill(os.getppid(), signal.SIGINT)
|
||||
return
|
||||
delay = 60 * retries
|
||||
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
|
||||
i=retries + 1,
|
||||
delay=delay
|
||||
))
|
||||
django_connection.close()
|
||||
time.sleep(delay)
|
||||
retries += 1
|
||||
except DatabaseError as e:
|
||||
logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
|
||||
break
|
||||
except Exception as exc:
|
||||
import traceback
|
||||
tb = traceback.format_exc()
|
||||
|
||||
awx/main/management/commands/test_isolated_connection.py (new file, 50 lines)
@@ -0,0 +1,50 @@
import os
import shutil
import subprocess
import sys
import tempfile
from optparse import make_option

from django.conf import settings
from django.core.management.base import BaseCommand, CommandError

from awx.main.expect import run


class Command(BaseCommand):
    """Tests SSH connectivity between a controller and target isolated node"""
    help = 'Tests SSH connectivity between a controller and target isolated node'

    option_list = BaseCommand.option_list + (
        make_option('--hostname', dest='hostname', type='string',
                    help='Hostname of an isolated node'),
    )

    def handle(self, *args, **options):
        hostname = options.get('hostname')
        if not hostname:
            raise CommandError("--hostname is a required argument")

        try:
            path = tempfile.mkdtemp(prefix='awx_isolated_ssh', dir=settings.AWX_PROOT_BASE_PATH)
            args = [
                'ansible', 'all', '-i', '{},'.format(hostname), '-u',
                settings.AWX_ISOLATED_USERNAME, '-T5', '-m', 'shell',
                '-a', 'hostname', '-vvv'
            ]
            if all([
                getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True,
                getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', None)
            ]):
                ssh_key_path = os.path.join(path, '.isolated')
                ssh_auth_sock = os.path.join(path, 'ssh_auth.sock')
                run.open_fifo_write(ssh_key_path, settings.AWX_ISOLATED_PRIVATE_KEY)
                args = run.wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock)
            try:
                print ' '.join(args)
                subprocess.check_call(args)
            except subprocess.CalledProcessError as e:
                sys.exit(e.returncode)
        finally:
            shutil.rmtree(path)
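
One way to exercise this command without a shell is Django's management API. A minimal usage sketch; the hostname is a placeholder and Django settings must already be configured:

# Usage sketch only; the isolated hostname below is made up.
from django.core.management import call_command

call_command('test_isolated_connection', hostname='isolated-node.example.org')
# On SSH failure the command exits with ansible's return code via sys.exit().
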
@@ -21,11 +21,15 @@ class HostManager(models.Manager):
|
||||
"""Custom manager class for Hosts model."""
|
||||
|
||||
def active_count(self):
|
||||
"""Return count of active, unique hosts for licensing."""
|
||||
try:
|
||||
return self.order_by('name').distinct('name').count()
|
||||
except NotImplementedError: # For unit tests only, SQLite doesn't support distinct('name')
|
||||
return len(set(self.values_list('name', flat=True)))
|
||||
"""Return count of active, unique hosts for licensing.
|
||||
Construction of query involves:
|
||||
- remove any ordering specified in model's Meta
|
||||
- Exclude hosts sourced from another Tower
|
||||
- Restrict the query to only return the name column
|
||||
- Only consider results that are unique
|
||||
- Return the count of this query
|
||||
"""
|
||||
return self.order_by().exclude(inventory_sources__source='tower').values('name').distinct().count()
|
||||
|
||||
def get_queryset(self):
|
||||
"""When the parent instance of the host query set has a `kind=smart` and a `host_filter`
|
||||
|
||||
@@ -5,6 +5,10 @@ import logging
|
||||
import threading
|
||||
import uuid
|
||||
import six
|
||||
import time
|
||||
import cProfile
|
||||
import pstats
|
||||
import os
|
||||
|
||||
from django.conf import settings
|
||||
from django.contrib.auth.models import User
|
||||
@@ -25,6 +29,40 @@ from awx.conf import fields, register
|
||||
|
||||
logger = logging.getLogger('awx.main.middleware')
|
||||
analytics_logger = logging.getLogger('awx.analytics.activity_stream')
|
||||
perf_logger = logging.getLogger('awx.analytics.performance')
|
||||
|
||||
|
||||
class TimingMiddleware(threading.local):
|
||||
|
||||
dest = '/var/lib/awx/profile'
|
||||
|
||||
def process_request(self, request):
|
||||
self.start_time = time.time()
|
||||
if settings.AWX_REQUEST_PROFILE:
|
||||
self.prof = cProfile.Profile()
|
||||
self.prof.enable()
|
||||
|
||||
def process_response(self, request, response):
|
||||
if not hasattr(self, 'start_time'): # some tools may not invoke process_request
|
||||
return response
|
||||
total_time = time.time() - self.start_time
|
||||
response['X-API-Total-Time'] = '%0.3fs' % total_time
|
||||
if settings.AWX_REQUEST_PROFILE:
|
||||
self.prof.disable()
|
||||
cprofile_file = self.save_profile_file(request)
|
||||
response['cprofile_file'] = cprofile_file
|
||||
perf_logger.info('api response times', extra=dict(python_objects=dict(request=request, response=response)))
|
||||
return response
|
||||
|
||||
def save_profile_file(self, request):
|
||||
if not os.path.isdir(self.dest):
|
||||
os.makedirs(self.dest)
|
||||
filename = '%.3fs-%s' % (pstats.Stats(self.prof).total_tt, uuid.uuid4())
|
||||
filepath = os.path.join(self.dest, filename)
|
||||
with open(filepath, 'w') as f:
|
||||
f.write('%s %s\n' % (request.method, request.get_full_path()))
|
||||
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
|
||||
return filepath
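
To make the middleware's behaviour concrete, here is a rough sketch of driving it directly with Django's test utilities. It assumes AWX settings are loaded (AWX_REQUEST_PROFILE is referenced above); the URL is illustrative:

# Rough sketch, assuming configured Django/AWX settings; URL is illustrative.
from django.http import HttpResponse
from django.test import RequestFactory

mw = TimingMiddleware()
request = RequestFactory().get('/api/v2/ping/')
mw.process_request(request)
response = mw.process_response(request, HttpResponse('ok'))
print(response['X-API-Total-Time'])  # e.g. '0.002s'
# With settings.AWX_REQUEST_PROFILE enabled, response['cprofile_file'] names the
# text dump written under /var/lib/awx/profile by save_profile_file().
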
|
||||
|
||||
|
||||
class ActivityStreamMiddleware(threading.local):
|
||||
|
||||
@@ -8,14 +8,9 @@ from __future__ import unicode_literals
|
||||
from django.db import migrations, models
|
||||
from django.conf import settings
|
||||
import awx.main.fields
|
||||
import jsonfield.fields
|
||||
|
||||
|
||||
def update_dashed_host_variables(apps, schema_editor):
|
||||
Host = apps.get_model('main', 'Host')
|
||||
for host in Host.objects.filter(variables='---'):
|
||||
host.variables = ''
|
||||
host.save()
|
||||
import _squashed
|
||||
from _squashed_30 import SQUASHED_30
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
@@ -27,13 +22,7 @@ class Migration(migrations.Migration):
|
||||
(b'main', '0025_v300_update_rbac_parents'),
|
||||
(b'main', '0026_v300_credential_unique'),
|
||||
(b'main', '0027_v300_team_migrations'),
|
||||
(b'main', '0028_v300_org_team_cascade'),
|
||||
(b'main', '0029_v302_add_ask_skip_tags'),
|
||||
(b'main', '0030_v302_job_survey_passwords'),
|
||||
(b'main', '0031_v302_migrate_survey_passwords'),
|
||||
(b'main', '0032_v302_credential_permissions_update'),
|
||||
(b'main', '0033_v303_v245_host_variable_fix'),]
|
||||
|
||||
(b'main', '0028_v300_org_team_cascade')] + _squashed.replaces(SQUASHED_30, applied=True)
|
||||
|
||||
dependencies = [
|
||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||
@@ -130,27 +119,4 @@ class Migration(migrations.Migration):
|
||||
field=models.ForeignKey(related_name='teams', to='main.Organization'),
|
||||
preserve_default=False,
|
||||
),
|
||||
# add ask skip tags
|
||||
migrations.AddField(
|
||||
model_name='jobtemplate',
|
||||
name='ask_skip_tags_on_launch',
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
# job survey passwords
|
||||
migrations.AddField(
|
||||
model_name='job',
|
||||
name='survey_passwords',
|
||||
field=jsonfield.fields.JSONField(default={}, editable=False, blank=True),
|
||||
),
|
||||
# RBAC credential permission updates
|
||||
migrations.AlterField(
|
||||
model_name='credential',
|
||||
name='admin_role',
|
||||
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_administrator', b'organization.admin_role'], to='main.Role', null=b'True'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='credential',
|
||||
name='use_role',
|
||||
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', null=b'True'),
|
||||
),
|
||||
]
|
||||
] + _squashed.operations(SQUASHED_30, applied=True)
|
||||
|
||||
@@ -8,6 +8,9 @@ import django.db.models.deletion
|
||||
import awx.main.models.workflow
|
||||
import awx.main.fields
|
||||
|
||||
import _squashed
|
||||
from _squashed_30 import SQUASHED_30
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
@@ -15,11 +18,11 @@ class Migration(migrations.Migration):
|
||||
('main', '0003_squashed_v300_v303_updates'),
|
||||
]
|
||||
|
||||
replaces = [
|
||||
replaces = _squashed.replaces(SQUASHED_30) + [
|
||||
(b'main', '0034_v310_release'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
operations = _squashed.operations(SQUASHED_30) + [
|
||||
# Create ChannelGroup table
|
||||
migrations.CreateModel(
|
||||
name='ChannelGroup',
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
from django.db import migrations
|
||||
|
||||
from django.db import migrations, models
|
||||
import _squashed
|
||||
from _squashed_31 import SQUASHED_31
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
@@ -10,28 +12,5 @@ class Migration(migrations.Migration):
|
||||
('main', '0004_squashed_v310_release'),
|
||||
]
|
||||
|
||||
replaces = [
|
||||
(b'main', '0035_v310_remove_tower_settings'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
# Remove Tower settings, these settings are now in separate awx.conf app.
|
||||
migrations.RemoveField(
|
||||
model_name='towersettings',
|
||||
name='user',
|
||||
),
|
||||
migrations.DeleteModel(
|
||||
name='TowerSettings',
|
||||
),
|
||||
|
||||
migrations.AlterField(
|
||||
model_name='project',
|
||||
name='scm_type',
|
||||
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='projectupdate',
|
||||
name='scm_type',
|
||||
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
|
||||
),
|
||||
]
|
||||
replaces = _squashed.replaces(SQUASHED_31)
|
||||
operations = _squashed.operations(SQUASHED_31)
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0005_squashed_v310_v313_updates'),
|
||||
]
|
||||
|
||||
replaces = [
|
||||
(b'main', '0036_v311_insights'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='project',
|
||||
name='scm_type',
|
||||
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='projectupdate',
|
||||
name='scm_type',
|
||||
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
|
||||
),
|
||||
]
|
||||
@@ -1,24 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0005a_squashed_v310_v313_updates'),
|
||||
]
|
||||
|
||||
replaces = [
|
||||
(b'main', '0037_v313_instance_version'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
# Add version field to the Instance model.
|
||||
migrations.AddField(
|
||||
model_name='instance',
|
||||
name='version',
|
||||
field=models.CharField(max_length=24, blank=True),
|
||||
),
|
||||
]
|
||||
@@ -6,7 +6,13 @@ from __future__ import unicode_literals
|
||||
from psycopg2.extensions import AsIs
|
||||
|
||||
# Django
|
||||
from django.db import migrations, models
|
||||
from django.db import (
|
||||
connection,
|
||||
migrations,
|
||||
models,
|
||||
OperationalError,
|
||||
ProgrammingError
|
||||
)
|
||||
from django.conf import settings
|
||||
import taggit.managers
|
||||
|
||||
@@ -15,12 +21,24 @@ import awx.main.fields
|
||||
from awx.main.models import Host
|
||||
|
||||
|
||||
def replaces():
|
||||
squashed = ['0005a_squashed_v310_v313_updates', '0005b_squashed_v310_v313_updates']
|
||||
try:
|
||||
recorder = migrations.recorder.MigrationRecorder(connection)
|
||||
result = recorder.migration_qs.filter(app='main').filter(name__in=squashed).all()
|
||||
return [('main', m.name) for m in result]
|
||||
except (OperationalError, ProgrammingError):
|
||||
return []
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0005b_squashed_v310_v313_updates'),
|
||||
('main', '0005_squashed_v310_v313_updates'),
|
||||
]
|
||||
|
||||
replaces = replaces()
|
||||
|
||||
operations = [
|
||||
# Release UJT unique_together constraint
|
||||
migrations.AlterUniqueTogether(
|
||||
|
||||
@@ -6,6 +6,7 @@ from __future__ import unicode_literals
|
||||
from django.db import migrations, models
|
||||
|
||||
# AWX
|
||||
from awx.main.migrations import ActivityStreamDisabledMigration
|
||||
from awx.main.migrations import _inventory_source as invsrc
|
||||
from awx.main.migrations import _migration_utils as migration_utils
|
||||
from awx.main.migrations import _reencrypt as reencrypt
|
||||
@@ -15,7 +16,7 @@ from awx.main.migrations import _azure_credentials as azurecreds
|
||||
import awx.main.fields
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
class Migration(ActivityStreamDisabledMigration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0006_v320_release'),
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
import awx.main.fields
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0008_v320_drop_v1_credential_fields'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='activitystream',
|
||||
name='setting',
|
||||
field=awx.main.fields.JSONField(default=dict, blank=True),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0010_v322_add_ovirt4_tower_inventory.py (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
# AWX
|
||||
from awx.main.migrations import _credentialtypes as credentialtypes
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0009_v322_add_setting_field_for_activity_stream'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(credentialtypes.create_rhv_tower_credtype),
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a Project'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'rhv', 'Red Hat Virtualization'), (b'tower', 'Ansible Tower'), (b'custom', 'Custom Script')]),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a Project'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'rhv', 'Red Hat Virtualization'), (b'tower', 'Ansible Tower'), (b'custom', 'Custom Script')]),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0011_v322_encrypt_survey_passwords.py (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
from awx.main.migrations import ActivityStreamDisabledMigration
|
||||
from awx.main.migrations import _reencrypt as reencrypt
|
||||
from awx.main.migrations import _migration_utils as migration_utils
|
||||
|
||||
|
||||
class Migration(ActivityStreamDisabledMigration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0010_v322_add_ovirt4_tower_inventory'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
|
||||
migrations.RunPython(reencrypt.encrypt_survey_passwords),
|
||||
]
|
||||
awx/main/migrations/0012_v322_update_cred_types.py (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
# AWX
|
||||
from awx.main.migrations import _credentialtypes as credentialtypes
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0011_v322_encrypt_survey_passwords'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(credentialtypes.add_azure_cloud_environment_field),
|
||||
]
|
||||
@@ -11,7 +11,7 @@ from awx.main.migrations._multi_cred import migrate_to_multi_cred
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0008_v320_drop_v1_credential_fields'),
|
||||
('main', '0012_v322_update_cred_types'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
@@ -13,7 +13,7 @@ from awx.main.migrations._scan_jobs import remove_scan_type_nodes
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0009_v330_multi_credential'),
|
||||
('main', '0013_v330_multi_credential'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
@@ -13,7 +13,7 @@ from awx.main.migrations._reencrypt import blank_old_start_args
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0010_saved_launchtime_configs'),
|
||||
('main', '0014_v330_saved_launchtime_configs'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
@@ -10,7 +10,7 @@ import django.db.models.deletion
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0011_blank_start_args'),
|
||||
('main', '0015_v330_blank_start_args'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
@@ -8,7 +8,7 @@ from django.db import migrations, models
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0012_non_blank_workflow'),
|
||||
('main', '0016_v330_non_blank_workflow'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
@@ -0,0 +1,85 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.7 on 2017-12-14 15:13
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import awx.main.fields
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0017_v330_move_deprecated_stdout'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='InventoryUpdateEvent',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('created', models.DateTimeField(default=None, editable=False)),
|
||||
('modified', models.DateTimeField(default=None, editable=False)),
|
||||
('event_data', awx.main.fields.JSONField(blank=True, default={})),
|
||||
('uuid', models.CharField(default=b'', editable=False, max_length=1024)),
|
||||
('counter', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('stdout', models.TextField(default=b'', editable=False)),
|
||||
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('start_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('end_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('inventory_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.InventoryUpdate')),
|
||||
],
|
||||
options={
|
||||
'ordering': ('-pk',),
|
||||
},
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='ProjectUpdateEvent',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('created', models.DateTimeField(default=None, editable=False)),
|
||||
('modified', models.DateTimeField(default=None, editable=False)),
|
||||
('event', models.CharField(choices=[(b'runner_on_failed', 'Host Failed'), (b'runner_on_ok', 'Host OK'), (b'runner_on_error', 'Host Failure'), (b'runner_on_skipped', 'Host Skipped'), (b'runner_on_unreachable', 'Host Unreachable'), (b'runner_on_no_hosts', 'No Hosts Remaining'), (b'runner_on_async_poll', 'Host Polling'), (b'runner_on_async_ok', 'Host Async OK'), (b'runner_on_async_failed', 'Host Async Failure'), (b'runner_item_on_ok', 'Item OK'), (b'runner_item_on_failed', 'Item Failed'), (b'runner_item_on_skipped', 'Item Skipped'), (b'runner_retry', 'Host Retry'), (b'runner_on_file_diff', 'File Difference'), (b'playbook_on_start', 'Playbook Started'), (b'playbook_on_notify', 'Running Handlers'), (b'playbook_on_include', 'Including File'), (b'playbook_on_no_hosts_matched', 'No Hosts Matched'), (b'playbook_on_no_hosts_remaining', 'No Hosts Remaining'), (b'playbook_on_task_start', 'Task Started'), (b'playbook_on_vars_prompt', 'Variables Prompted'), (b'playbook_on_setup', 'Gathering Facts'), (b'playbook_on_import_for_host', 'internal: on Import for Host'), (b'playbook_on_not_import_for_host', 'internal: on Not Import for Host'), (b'playbook_on_play_start', 'Play Started'), (b'playbook_on_stats', 'Playbook Complete'), (b'debug', 'Debug'), (b'verbose', 'Verbose'), (b'deprecated', 'Deprecated'), (b'warning', 'Warning'), (b'system_warning', 'System Warning'), (b'error', 'Error')], max_length=100)),
|
||||
('event_data', awx.main.fields.JSONField(blank=True, default={})),
|
||||
('failed', models.BooleanField(default=False, editable=False)),
|
||||
('changed', models.BooleanField(default=False, editable=False)),
|
||||
('uuid', models.CharField(default=b'', editable=False, max_length=1024)),
|
||||
('playbook', models.CharField(default=b'', editable=False, max_length=1024)),
|
||||
('play', models.CharField(default=b'', editable=False, max_length=1024)),
|
||||
('role', models.CharField(default=b'', editable=False, max_length=1024)),
|
||||
('task', models.CharField(default=b'', editable=False, max_length=1024)),
|
||||
('counter', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('stdout', models.TextField(default=b'', editable=False)),
|
||||
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('start_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('end_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('project_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.ProjectUpdate')),
|
||||
],
|
||||
options={
|
||||
'ordering': ('pk',),
|
||||
},
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='SystemJobEvent',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('created', models.DateTimeField(default=None, editable=False)),
|
||||
('modified', models.DateTimeField(default=None, editable=False)),
|
||||
('event_data', awx.main.fields.JSONField(blank=True, default={})),
|
||||
('uuid', models.CharField(default=b'', editable=False, max_length=1024)),
|
||||
('counter', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('stdout', models.TextField(default=b'', editable=False)),
|
||||
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('start_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('end_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('system_job', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.SystemJob')),
|
||||
],
|
||||
options={
|
||||
'ordering': ('-pk',),
|
||||
},
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='unifiedjob',
|
||||
name='result_stdout_file',
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0019_v330_custom_virtualenv.py (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.7 on 2018-01-09 21:30
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0018_v330_add_additional_stdout_events'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='jobtemplate',
|
||||
name='custom_virtualenv',
|
||||
field=models.CharField(blank=True, default=None, max_length=100, null=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='organization',
|
||||
name='custom_virtualenv',
|
||||
field=models.CharField(blank=True, default=None, max_length=100, null=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='project',
|
||||
name='custom_virtualenv',
|
||||
field=models.CharField(blank=True, default=None, max_length=100, null=True),
|
||||
),
|
||||
]
|
||||
@@ -1,2 +1,12 @@
|
||||
# Copyright (c) 2016 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
from django.db.migrations import Migration
|
||||
|
||||
|
||||
class ActivityStreamDisabledMigration(Migration):
|
||||
|
||||
def apply(self, project_state, schema_editor, collect_sql=False):
|
||||
from awx.main.signals import disable_activity_stream
|
||||
with disable_activity_stream():
|
||||
return Migration.apply(self, project_state, schema_editor, collect_sql)
|
||||
|
||||
@@ -178,3 +178,14 @@ def add_vault_id_field(apps, schema_editor):
|
||||
vault_credtype = CredentialType.objects.get(kind='vault')
|
||||
vault_credtype.inputs = CredentialType.defaults.get('vault')().inputs
|
||||
vault_credtype.save()
|
||||
|
||||
|
||||
def create_rhv_tower_credtype(apps, schema_editor):
|
||||
CredentialType.setup_tower_managed_defaults()
|
||||
|
||||
|
||||
def add_azure_cloud_environment_field(apps, schema_editor):
|
||||
azure_rm_credtype = CredentialType.objects.get(kind='cloud',
|
||||
name='Microsoft Azure Resource Manager')
|
||||
azure_rm_credtype.inputs = CredentialType.defaults.get('azure_rm')().inputs
|
||||
azure_rm_credtype.save()
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import logging
|
||||
|
||||
from django.db.models import Q
|
||||
import six
|
||||
|
||||
logger = logging.getLogger('awx.main.migrations')
|
||||
|
||||
@@ -38,8 +39,10 @@ def rename_inventory_sources(apps, schema_editor):
|
||||
Q(deprecated_group__inventory__organization=org)).distinct().all()):
|
||||
|
||||
inventory = invsrc.deprecated_group.inventory if invsrc.deprecated_group else invsrc.inventory
|
||||
name = '{0} - {1} - {2}'.format(invsrc.name, inventory.name, i)
|
||||
logger.debug("Renaming InventorySource({0}) {1} -> {2}".format(invsrc.pk, invsrc.name, name))
|
||||
name = six.text_type('{0} - {1} - {2}').format(invsrc.name, inventory.name, i)
|
||||
logger.debug(six.text_type("Renaming InventorySource({0}) {1} -> {2}").format(
|
||||
invsrc.pk, invsrc.name, name
|
||||
))
|
||||
invsrc.name = name
|
||||
invsrc.save()
|
||||
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import logging
|
||||
import json
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
import six
|
||||
|
||||
from awx.conf.migrations._reencrypt import (
|
||||
decrypt_field,
|
||||
@@ -65,7 +67,6 @@ def _credentials(apps):
|
||||
credential.save()
|
||||
|
||||
|
||||
|
||||
def _unified_jobs(apps):
|
||||
UnifiedJob = apps.get_model('main', 'UnifiedJob')
|
||||
for uj in UnifiedJob.objects.all():
|
||||
@@ -91,3 +92,53 @@ def blank_old_start_args(apps, schema_editor):
|
||||
logger.debug('Blanking job args for %s', uj.pk)
|
||||
uj.start_args = ''
|
||||
uj.save()
|
||||
|
||||
|
||||
def encrypt_survey_passwords(apps, schema_editor):
|
||||
_encrypt_survey_passwords(
|
||||
apps.get_model('main', 'Job'),
|
||||
apps.get_model('main', 'JobTemplate'),
|
||||
apps.get_model('main', 'WorkflowJob'),
|
||||
apps.get_model('main', 'WorkflowJobTemplate'),
|
||||
)
|
||||
|
||||
|
||||
def _encrypt_survey_passwords(Job, JobTemplate, WorkflowJob, WorkflowJobTemplate):
|
||||
from awx.main.utils.encryption import encrypt_value
|
||||
for _type in (JobTemplate, WorkflowJobTemplate):
|
||||
for jt in _type.objects.exclude(survey_spec={}):
|
||||
changed = False
|
||||
if jt.survey_spec.get('spec', []):
|
||||
for field in jt.survey_spec['spec']:
|
||||
if field.get('type') == 'password' and field.get('default', ''):
|
||||
default = field['default']
|
||||
if default.startswith('$encrypted$'):
|
||||
if default == '$encrypted$':
|
||||
# If you have a survey_spec with a literal
|
||||
# '$encrypted$' as the default, you have
|
||||
# encountered a known bug in awx/Tower
|
||||
# https://github.com/ansible/ansible-tower/issues/7800
|
||||
logger.error(
|
||||
'{}.pk={} survey_spec has ambiguous $encrypted$ default for {}, needs attention...'.format(jt, jt.pk, field['variable'])
|
||||
)
|
||||
field['default'] = ''
|
||||
changed = True
|
||||
continue
|
||||
field['default'] = encrypt_value(field['default'], pk=None)
|
||||
changed = True
|
||||
if changed:
|
||||
jt.save()
|
||||
|
||||
for _type in (Job, WorkflowJob):
|
||||
for job in _type.objects.defer('result_stdout_text').exclude(survey_passwords={}).iterator():
|
||||
changed = False
|
||||
for key in job.survey_passwords:
|
||||
if key in job.extra_vars:
|
||||
extra_vars = json.loads(job.extra_vars)
|
||||
if not extra_vars.get(key, '') or extra_vars[key].startswith('$encrypted$'):
|
||||
continue
|
||||
extra_vars[key] = encrypt_value(extra_vars[key], pk=None)
|
||||
job.extra_vars = json.dumps(extra_vars)
|
||||
changed = True
|
||||
if changed:
|
||||
job.save()
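
To illustrate the default-rewriting rule above, a toy walk-through with a stand-in for awx.main.utils.encryption.encrypt_value; the spec contents are invented:

# Toy illustration only; fake_encrypt_value stands in for encrypt_value().
def fake_encrypt_value(value, pk=None):
    return '$encrypted$<ciphertext>'

survey_spec = {'spec': [
    {'type': 'password', 'variable': 'vault_pass', 'default': 'sekrit'},
    {'type': 'text', 'variable': 'region', 'default': 'us-east-1'},
]}
for field in survey_spec['spec']:
    if field.get('type') == 'password' and field.get('default', ''):
        if not field['default'].startswith('$encrypted$'):
            field['default'] = fake_encrypt_value(field['default'], pk=None)
# Only the password default changes; it now starts with '$encrypted$', which is
# how later code recognizes an already-encrypted value.
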
awx/main/migrations/_squashed.py (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
from itertools import chain
|
||||
from django.db import (
|
||||
connection,
|
||||
migrations,
|
||||
OperationalError,
|
||||
ProgrammingError,
|
||||
)
|
||||
|
||||
|
||||
def squash_data(squashed):
|
||||
'''Returns a tuple of the squashed_keys and the key position to begin
|
||||
processing replace and operation lists'''
|
||||
|
||||
cm = current_migration()
|
||||
squashed_keys = sorted(squashed.keys())
|
||||
if cm is None:
|
||||
return squashed_keys, 0
|
||||
|
||||
try:
|
||||
key_index = squashed_keys.index(cm.name) + 1
|
||||
except ValueError:
|
||||
key_index = 0
|
||||
return squashed_keys, key_index
|
||||
|
||||
|
||||
def current_migration(exclude_squashed=True):
|
||||
'''Get the latest non-squashed migration'''
|
||||
try:
|
||||
recorder = migrations.recorder.MigrationRecorder(connection)
|
||||
migration_qs = recorder.migration_qs.filter(app='main')
|
||||
if exclude_squashed:
|
||||
migration_qs = migration_qs.exclude(name__contains='squashed')
|
||||
return migration_qs.latest('id')
|
||||
except (recorder.Migration.DoesNotExist, OperationalError, ProgrammingError):
|
||||
return None
|
||||
|
||||
|
||||
def replaces(squashed, applied=False):
|
||||
'''Build a list of replacement migrations based on the most recent non-squashed migration
|
||||
and the provided list of SQUASHED migrations. If the most recent non-squashed migration
|
||||
is not present anywhere in the SQUASHED dictionary, assume they have all been applied.
|
||||
|
||||
If applied is True, this will return a list of all the migrations that have already
|
||||
been applied.
|
||||
'''
|
||||
squashed_keys, key_index = squash_data(squashed)
|
||||
if applied:
|
||||
return [(b'main', key) for key in squashed_keys[:key_index]]
|
||||
return [(b'main', key) for key in squashed_keys[key_index:]]
|
||||
|
||||
|
||||
def operations(squashed, applied=False):
|
||||
'''Build a list of migration operations based on the most recent non-squashed migration
|
||||
and the provided list of squashed migrations. If the most recent non-squashed migration
|
||||
is not present anywhere in the `squashed` dictionary, assume they have all been applied.
|
||||
|
||||
If applied is True, this will return a list of all the operations that have
|
||||
already been applied.
|
||||
'''
|
||||
squashed_keys, key_index = squash_data(squashed)
|
||||
op_keys = squashed_keys[:key_index] if applied else squashed_keys[key_index:]
|
||||
ops = [squashed[op_key] for op_key in op_keys]
|
||||
return [op for op in chain.from_iterable(ops)]
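
To make the applied/pending split concrete, a small illustration of the slicing that replaces() and operations() perform, assuming the migration recorder reports '0030_v302_job_survey_passwords' as the newest applied non-squashed migration (key names taken from SQUASHED_30 below):

# Illustration only; in real use squash_data() derives key_index from the
# MigrationRecorder rather than hard-coding the current migration name.
squashed_keys = sorted([
    '0029_v302_add_ask_skip_tags',
    '0030_v302_job_survey_passwords',
    '0031_v302_migrate_survey_passwords',
])
key_index = squashed_keys.index('0030_v302_job_survey_passwords') + 1

already_applied = [('main', key) for key in squashed_keys[:key_index]]   # replaces(..., applied=True)
still_pending = [('main', key) for key in squashed_keys[key_index:]]     # replaces(..., applied=False)
# already_applied == [('main', '0029_...'), ('main', '0030_...')]
# still_pending   == [('main', '0031_...')]
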
awx/main/migrations/_squashed_30.py (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
from django.db import (
|
||||
migrations,
|
||||
models,
|
||||
)
|
||||
import jsonfield.fields
|
||||
import awx.main.fields
|
||||
|
||||
from awx.main.migrations import _save_password_keys
|
||||
from awx.main.migrations import _migration_utils as migration_utils
|
||||
|
||||
|
||||
def update_dashed_host_variables(apps, schema_editor):
|
||||
Host = apps.get_model('main', 'Host')
|
||||
for host in Host.objects.filter(variables='---'):
|
||||
host.variables = ''
|
||||
host.save()
|
||||
|
||||
|
||||
SQUASHED_30 = {
|
||||
'0029_v302_add_ask_skip_tags': [
|
||||
# add ask skip tags
|
||||
migrations.AddField(
|
||||
model_name='jobtemplate',
|
||||
name='ask_skip_tags_on_launch',
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
],
|
||||
'0030_v302_job_survey_passwords': [
|
||||
# job survey passwords
|
||||
migrations.AddField(
|
||||
model_name='job',
|
||||
name='survey_passwords',
|
||||
field=jsonfield.fields.JSONField(default={}, editable=False, blank=True),
|
||||
),
|
||||
],
|
||||
'0031_v302_migrate_survey_passwords': [
|
||||
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
|
||||
migrations.RunPython(_save_password_keys.migrate_survey_passwords),
|
||||
],
|
||||
'0032_v302_credential_permissions_update': [
|
||||
# RBAC credential permission updates
|
||||
migrations.AlterField(
|
||||
model_name='credential',
|
||||
name='admin_role',
|
||||
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_administrator', b'organization.admin_role'], to='main.Role', null=b'True'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='credential',
|
||||
name='use_role',
|
||||
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', null=b'True'),
|
||||
),
|
||||
],
|
||||
'0033_v303_v245_host_variable_fix': [
|
||||
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
|
||||
migrations.RunPython(update_dashed_host_variables),
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
__all__ = ['SQUASHED_30']
|
||||
awx/main/migrations/_squashed_31.py (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
from django.db import (
|
||||
migrations,
|
||||
models,
|
||||
)
|
||||
|
||||
SQUASHED_31 = {
|
||||
'0035_v310_remove_tower_settings': [
|
||||
# Remove Tower settings, these settings are now in separate awx.conf app.
|
||||
migrations.RemoveField(
|
||||
model_name='towersettings',
|
||||
name='user',
|
||||
),
|
||||
migrations.DeleteModel(
|
||||
name='TowerSettings',
|
||||
),
|
||||
|
||||
migrations.AlterField(
|
||||
model_name='project',
|
||||
name='scm_type',
|
||||
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='projectupdate',
|
||||
name='scm_type',
|
||||
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
|
||||
),
|
||||
],
|
||||
'0036_v311_insights': [
|
||||
migrations.AlterField(
|
||||
model_name='project',
|
||||
name='scm_type',
|
||||
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='projectupdate',
|
||||
name='scm_type',
|
||||
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
|
||||
),
|
||||
],
|
||||
'0037_v313_instance_version': [
|
||||
# Add version field to the Instance model.
|
||||
migrations.AddField(
|
||||
model_name='instance',
|
||||
name='version',
|
||||
field=models.CharField(max_length=24, blank=True),
|
||||
),
|
||||
],
|
||||
}
|
||||
|
||||
__all__ = ['SQUASHED_31']
|
||||
@@ -12,6 +12,7 @@ from awx.main.models.credential import * # noqa
|
||||
from awx.main.models.projects import * # noqa
|
||||
from awx.main.models.inventory import * # noqa
|
||||
from awx.main.models.jobs import * # noqa
|
||||
from awx.main.models.events import * # noqa
|
||||
from awx.main.models.ad_hoc_commands import * # noqa
|
||||
from awx.main.models.schedules import * # noqa
|
||||
from awx.main.models.activity_stream import * # noqa
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
# Tower
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.fields import JSONField
|
||||
|
||||
# Django
|
||||
from django.db import models
|
||||
@@ -66,6 +67,8 @@ class ActivityStream(models.Model):
|
||||
role = models.ManyToManyField("Role", blank=True)
|
||||
instance_group = models.ManyToManyField("InstanceGroup", blank=True)
|
||||
|
||||
setting = JSONField(blank=True)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:activity_stream_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
|
||||
@@ -2,29 +2,26 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import datetime
|
||||
import logging
|
||||
from urlparse import urljoin
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import models
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
from django.utils.timezone import utc
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.core.exceptions import ValidationError
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.base import * # noqa
|
||||
from awx.main.models.events import AdHocCommandEvent
|
||||
from awx.main.models.unified_jobs import * # noqa
|
||||
from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate
|
||||
from awx.main.fields import JSONField
|
||||
|
||||
logger = logging.getLogger('awx.main.models.ad_hoc_commands')
|
||||
|
||||
__all__ = ['AdHocCommand', 'AdHocCommandEvent']
|
||||
__all__ = ['AdHocCommand']
|
||||
|
||||
|
||||
class AdHocCommand(UnifiedJob, JobNotificationMixin):
|
||||
@@ -127,6 +124,10 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
|
||||
raise ValidationError(_('No argument passed to %s module.') % self.module_name)
|
||||
return module_args
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
return AdHocCommandEvent
|
||||
|
||||
@property
|
||||
def passwords_needed_to_start(self):
|
||||
'''Return list of password field names needed to start the job.'''
|
||||
@@ -224,169 +225,3 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
|
||||
|
||||
def get_notification_friendly_name(self):
|
||||
return "AdHoc Command"
|
||||
|
||||
|
||||
class AdHocCommandEvent(CreatedModifiedModel):
|
||||
'''
|
||||
An event/message logged from the ad hoc event callback for each host.
|
||||
'''
|
||||
|
||||
EVENT_TYPES = [
|
||||
# (event, verbose name, failed)
|
||||
('runner_on_failed', _('Host Failed'), True),
|
||||
('runner_on_ok', _('Host OK'), False),
|
||||
('runner_on_unreachable', _('Host Unreachable'), True),
|
||||
# Tower won't see no_hosts (check is done earlier without callback).
|
||||
# ('runner_on_no_hosts', _('No Hosts Matched'), False),
|
||||
# Tower will see skipped (when running in check mode for a module that
|
||||
# does not support check mode).
|
||||
('runner_on_skipped', _('Host Skipped'), False),
|
||||
# Tower does not support async for ad hoc commands (not used in v2).
|
||||
# ('runner_on_async_poll', _('Host Polling'), False),
|
||||
# ('runner_on_async_ok', _('Host Async OK'), False),
|
||||
# ('runner_on_async_failed', _('Host Async Failure'), True),
|
||||
# Tower does not yet support --diff mode.
|
||||
# ('runner_on_file_diff', _('File Difference'), False),
|
||||
|
||||
# Additional event types for captured stdout not directly related to
|
||||
# runner events.
|
||||
('debug', _('Debug'), False),
|
||||
('verbose', _('Verbose'), False),
|
||||
('deprecated', _('Deprecated'), False),
|
||||
('warning', _('Warning'), False),
|
||||
('system_warning', _('System Warning'), False),
|
||||
('error', _('Error'), False),
|
||||
]
|
||||
FAILED_EVENTS = [x[0] for x in EVENT_TYPES if x[2]]
|
||||
EVENT_CHOICES = [(x[0], x[1]) for x in EVENT_TYPES]
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
index_together = [
|
||||
('ad_hoc_command', 'event'),
|
||||
('ad_hoc_command', 'uuid'),
|
||||
('ad_hoc_command', 'start_line'),
|
||||
('ad_hoc_command', 'end_line'),
|
||||
]
|
||||
|
||||
ad_hoc_command = models.ForeignKey(
|
||||
'AdHocCommand',
|
||||
related_name='ad_hoc_command_events',
|
||||
on_delete=models.CASCADE,
|
||||
editable=False,
|
||||
)
|
||||
host = models.ForeignKey(
|
||||
'Host',
|
||||
related_name='ad_hoc_command_events',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
)
|
||||
host_name = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
event = models.CharField(
|
||||
max_length=100,
|
||||
choices=EVENT_CHOICES,
|
||||
)
|
||||
event_data = JSONField(
|
||||
blank=True,
|
||||
default={},
|
||||
)
|
||||
failed = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
)
|
||||
changed = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
)
|
||||
uuid = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
counter = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
stdout = models.TextField(
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
verbosity = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
start_line = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
end_line = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:ad_hoc_command_event_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def __unicode__(self):
|
||||
return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
res = self.event_data.get('res', None)
|
||||
if self.event in self.FAILED_EVENTS:
|
||||
if not self.event_data.get('ignore_errors', False):
|
||||
self.failed = True
|
||||
if 'failed' not in update_fields:
|
||||
update_fields.append('failed')
|
||||
if isinstance(res, dict) and res.get('changed', False):
|
||||
self.changed = True
|
||||
if 'changed' not in update_fields:
|
||||
update_fields.append('changed')
|
||||
self.host_name = self.event_data.get('host', '').strip()
|
||||
if 'host_name' not in update_fields:
|
||||
update_fields.append('host_name')
|
||||
if not self.host_id and self.host_name:
|
||||
host_qs = self.ad_hoc_command.inventory.hosts.filter(name=self.host_name)
|
||||
try:
|
||||
host_id = host_qs.only('id').values_list('id', flat=True)
|
||||
if host_id.exists():
|
||||
self.host_id = host_id[0]
|
||||
if 'host_id' not in update_fields:
|
||||
update_fields.append('host_id')
|
||||
except (IndexError, AttributeError):
|
||||
pass
|
||||
super(AdHocCommandEvent, self).save(*args, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def create_from_data(self, **kwargs):
|
||||
# Convert the datetime for the ad hoc command event's creation
|
||||
# appropriately, and include a time zone for it.
|
||||
#
|
||||
# In the event of any issue, throw it out, and Django will just save
|
||||
# the current time.
|
||||
try:
|
||||
if not isinstance(kwargs['created'], datetime.datetime):
|
||||
kwargs['created'] = parse_datetime(kwargs['created'])
|
||||
if not kwargs['created'].tzinfo:
|
||||
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
# Sanity check: Don't honor keys that we don't recognize.
|
||||
valid_keys = {'ad_hoc_command_id', 'event', 'event_data', 'created',
|
||||
'counter', 'uuid', 'stdout', 'start_line', 'end_line',
|
||||
'verbosity'}
|
||||
for key in kwargs.keys():
|
||||
if key not in valid_keys:
|
||||
kwargs.pop(key)
|
||||
|
||||
return AdHocCommandEvent.objects.create(**kwargs)
|
||||
|
||||
@@ -50,7 +50,7 @@ PROJECT_UPDATE_JOB_TYPE_CHOICES = [
|
||||
(PERM_INVENTORY_CHECK, _('Check')),
|
||||
]
|
||||
|
||||
CLOUD_INVENTORY_SOURCES = ['ec2', 'vmware', 'gce', 'azure_rm', 'openstack', 'custom', 'satellite6', 'cloudforms', 'scm',]
|
||||
CLOUD_INVENTORY_SOURCES = ['ec2', 'vmware', 'gce', 'azure_rm', 'openstack', 'rhv', 'custom', 'satellite6', 'cloudforms', 'scm', 'tower',]
|
||||
|
||||
VERBOSITY_CHOICES = [
|
||||
(0, '0 (Normal)'),
|
||||
@@ -288,7 +288,10 @@ class PrimordialModel(CreatedModifiedModel):
|
||||
continue
|
||||
if not (self.pk and self.pk == obj.pk):
|
||||
errors.append(
|
||||
'%s with this (%s) combination already exists.' % (model.__name__, ', '.join(ut))
|
||||
'%s with this (%s) combination already exists.' % (
|
||||
model.__name__,
|
||||
', '.join(set(ut) - {'polymorphic_ctype'})
|
||||
)
|
||||
)
|
||||
if errors:
|
||||
raise ValidationError(errors)
|
||||
|
||||
@@ -3,8 +3,10 @@
|
||||
from collections import OrderedDict
|
||||
import functools
|
||||
import json
|
||||
import logging
|
||||
import operator
|
||||
import os
|
||||
import re
|
||||
import stat
|
||||
import tempfile
|
||||
|
||||
@@ -32,8 +34,33 @@ from awx.main.models.rbac import (
|
||||
ROLE_SINGLETON_SYSTEM_AUDITOR,
|
||||
)
|
||||
from awx.main.utils import encrypt_field
|
||||
from . import injectors as builtin_injectors
|
||||
|
||||
__all__ = ['Credential', 'CredentialType', 'V1Credential']
|
||||
__all__ = ['Credential', 'CredentialType', 'V1Credential', 'build_safe_env']
|
||||
|
||||
logger = logging.getLogger('awx.main.models.credential')
|
||||
|
||||
HIDDEN_PASSWORD = '**********'
|
||||
|
||||
|
||||
def build_safe_env(env):
|
||||
'''
|
||||
Build environment dictionary, hiding potentially sensitive information
|
||||
such as passwords or keys.
|
||||
'''
|
||||
hidden_re = re.compile(r'API|TOKEN|KEY|SECRET|PASS', re.I)
|
||||
urlpass_re = re.compile(r'^.*?://[^:]+:(.*?)@.*?$')
|
||||
safe_env = dict(env)
|
||||
for k, v in safe_env.items():
|
||||
if k == 'AWS_ACCESS_KEY_ID':
|
||||
continue
|
||||
elif k.startswith('ANSIBLE_') and not k.startswith('ANSIBLE_NET'):
|
||||
continue
|
||||
elif hidden_re.search(k):
|
||||
safe_env[k] = HIDDEN_PASSWORD
|
||||
elif type(v) == str and urlpass_re.match(v):
|
||||
safe_env[k] = urlpass_re.sub(HIDDEN_PASSWORD, v)
|
||||
return safe_env
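For reference, a minimal usage sketch of build_safe_env; the variable names and values below are made-up examples, not taken from AWX:

from awx.main.models.credential import build_safe_env

env = {
    'ANSIBLE_HOST_KEY_CHECKING': 'False',                        # ANSIBLE_* passes through untouched
    'AWS_ACCESS_KEY_ID': 'AKIAEXAMPLE',                          # explicitly skipped above
    'AWS_SECRET_ACCESS_KEY': 'example-secret',                   # matches the KEY/SECRET pattern -> masked
    'SCM_URL': 'https://user:hunter2@git.example.com/repo.git',  # URL with embedded password -> masked
}
print(build_safe_env(env))
# {'ANSIBLE_HOST_KEY_CHECKING': 'False', 'AWS_ACCESS_KEY_ID': 'AKIAEXAMPLE',
#  'AWS_SECRET_ACCESS_KEY': '**********', 'SCM_URL': '**********'}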
|
||||
|
||||
|
||||
class V1Credential(object):
|
||||
@@ -59,7 +86,9 @@ class V1Credential(object):
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('insights', 'Insights'),
|
||||
('tower', 'Ansible Tower'),
|
||||
]
|
||||
FIELDS = {
|
||||
'kind': models.CharField(
|
||||
@@ -413,8 +442,8 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
ENV_BLACKLIST = set((
|
||||
'VIRTUAL_ENV', 'PATH', 'PYTHONPATH', 'PROOT_TMP_DIR', 'JOB_ID',
|
||||
'INVENTORY_ID', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID',
|
||||
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'TOWER_HOST',
|
||||
'AWX_HOST', 'MAX_EVENT_RES', 'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
|
||||
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
|
||||
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
|
||||
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS', 'FACT_QUEUE',
|
||||
))
|
||||
|
||||
@@ -498,6 +527,11 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
for default in cls.defaults.values():
|
||||
default_ = default()
|
||||
if persisted:
|
||||
if CredentialType.objects.filter(name=default_.name, kind=default_.kind).count():
|
||||
continue
|
||||
logger.debug(_(
|
||||
"adding %s credential type" % default_.name
|
||||
))
|
||||
default_.save()
|
||||
|
||||
@classmethod
|
||||
@@ -552,6 +586,11 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
files)
|
||||
"""
|
||||
if not self.injectors:
|
||||
if self.managed_by_tower and credential.kind in dir(builtin_injectors):
|
||||
injected_env = {}
|
||||
getattr(builtin_injectors, credential.kind)(credential, injected_env)
|
||||
env.update(injected_env)
|
||||
safe_env.update(build_safe_env(injected_env))
|
||||
return
|
||||
|
||||
class TowerNamespace:
|
||||
@@ -1009,6 +1048,12 @@ def azure_rm(cls):
|
||||
'id': 'tenant',
|
||||
'label': 'Tenant ID',
|
||||
'type': 'string'
|
||||
}, {
|
||||
'id': 'cloud_environment',
|
||||
'label': 'Azure Cloud Environment',
|
||||
'type': 'string',
|
||||
'help_text': ('Environment variable AZURE_CLOUD_ENVIRONMENT when'
|
||||
' using Azure GovCloud or Azure stack.')
|
||||
}],
|
||||
'required': ['subscription'],
|
||||
}
|
||||
@@ -1041,3 +1086,89 @@ def insights(cls):
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@CredentialType.default
|
||||
def rhv(cls):
|
||||
return cls(
|
||||
kind='cloud',
|
||||
name='Red Hat Virtualization',
|
||||
managed_by_tower=True,
|
||||
inputs={
|
||||
'fields': [{
|
||||
'id': 'host',
|
||||
'label': 'Host (Authentication URL)',
|
||||
'type': 'string',
|
||||
'help_text': ('The host to authenticate with.')
|
||||
}, {
|
||||
'id': 'username',
|
||||
'label': 'Username',
|
||||
'type': 'string'
|
||||
}, {
|
||||
'id': 'password',
|
||||
'label': 'Password',
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
}, {
|
||||
'id': 'ca_file',
|
||||
'label': 'CA File',
|
||||
'type': 'string',
|
||||
'help_text': ('Absolute file path to the CA file to use (optional)')
|
||||
}],
|
||||
'required': ['host', 'username', 'password'],
|
||||
},
|
||||
injectors={
|
||||
# The duplication here is intentional; the ovirt4 inventory plugin
|
||||
# writes a .ini file for authentication, while the ansible modules for
|
||||
# ovirt4 use a separate authentication process that supports
|
||||
# environment variables; by injecting both, we support both
|
||||
'file': {
|
||||
'template': '\n'.join([
|
||||
'[ovirt]',
|
||||
'ovirt_url={{host}}',
|
||||
'ovirt_username={{username}}',
|
||||
'ovirt_password={{password}}',
|
||||
'{% if ca_file %}ovirt_ca_file={{ca_file}}{% endif %}'])
|
||||
},
|
||||
'env': {
|
||||
'OVIRT_INI_PATH': '{{tower.filename}}',
|
||||
'OVIRT_URL': '{{host}}',
|
||||
'OVIRT_USERNAME': '{{username}}',
|
||||
'OVIRT_PASSWORD': '{{password}}'
|
||||
}
|
||||
},
|
||||
)
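Since the 'file' and 'env' injectors above intentionally duplicate the same credential inputs, here is a rough sketch of the ovirt.ini the file template would produce, assuming the template is rendered as ordinary Jinja2 with example input values (the real code also exposes a tower.* namespace, e.g. tower.filename, which is omitted here):

from jinja2 import Template

ovirt_ini_template = '\n'.join([
    '[ovirt]',
    'ovirt_url={{host}}',
    'ovirt_username={{username}}',
    'ovirt_password={{password}}',
    '{% if ca_file %}ovirt_ca_file={{ca_file}}{% endif %}'])

print(Template(ovirt_ini_template).render(
    host='https://rhv.example.com/ovirt-engine/api',  # example values only
    username='admin@internal',
    password='secret',
    ca_file='/etc/pki/ovirt/ca.pem'))
# [ovirt]
# ovirt_url=https://rhv.example.com/ovirt-engine/api
# ovirt_username=admin@internal
# ovirt_password=secret
# ovirt_ca_file=/etc/pki/ovirt/ca.pem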
|
||||
|
||||
|
||||
@CredentialType.default
|
||||
def tower(cls):
|
||||
return cls(
|
||||
kind='cloud',
|
||||
name='Ansible Tower',
|
||||
managed_by_tower=True,
|
||||
inputs={
|
||||
'fields': [{
|
||||
'id': 'host',
|
||||
'label': 'Ansible Tower Hostname',
|
||||
'type': 'string',
|
||||
'help_text': ('The Ansible Tower base URL to authenticate with.')
|
||||
}, {
|
||||
'id': 'username',
|
||||
'label': 'Username',
|
||||
'type': 'string'
|
||||
}, {
|
||||
'id': 'password',
|
||||
'label': 'Password',
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
}],
|
||||
'required': ['host', 'username', 'password'],
|
||||
},
|
||||
injectors={
|
||||
'env': {
|
||||
'TOWER_HOST': '{{host}}',
|
||||
'TOWER_USERNAME': '{{username}}',
|
||||
'TOWER_PASSWORD': '{{password}}',
|
||||
}
|
||||
},
|
||||
)
|
||||
awx/main/models/credential/injectors.py (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
from awx.main.utils import decrypt_field
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
def aws(cred, env):
|
||||
env['AWS_ACCESS_KEY_ID'] = cred.username
|
||||
env['AWS_SECRET_ACCESS_KEY'] = decrypt_field(cred, 'password')
|
||||
if len(cred.security_token) > 0:
|
||||
env['AWS_SECURITY_TOKEN'] = decrypt_field(cred, 'security_token')
|
||||
|
||||
|
||||
def gce(cred, env):
|
||||
env['GCE_EMAIL'] = cred.username
|
||||
env['GCE_PROJECT'] = cred.project
|
||||
|
||||
|
||||
def azure_rm(cred, env):
|
||||
if len(cred.client) and len(cred.tenant):
|
||||
env['AZURE_CLIENT_ID'] = cred.client
|
||||
env['AZURE_SECRET'] = decrypt_field(cred, 'secret')
|
||||
env['AZURE_TENANT'] = cred.tenant
|
||||
env['AZURE_SUBSCRIPTION_ID'] = cred.subscription
|
||||
else:
|
||||
env['AZURE_SUBSCRIPTION_ID'] = cred.subscription
|
||||
env['AZURE_AD_USER'] = cred.username
|
||||
env['AZURE_PASSWORD'] = decrypt_field(cred, 'password')
|
||||
if cred.inputs.get('cloud_environment', None):
|
||||
env['AZURE_CLOUD_ENVIRONMENT'] = cred.inputs['cloud_environment']
|
||||
|
||||
|
||||
def vmware(cred, env):
|
||||
env['VMWARE_USER'] = cred.username
|
||||
env['VMWARE_PASSWORD'] = decrypt_field(cred, 'password')
|
||||
env['VMWARE_HOST'] = cred.host
|
||||
env['VMWARE_VALIDATE_CERTS'] = str(settings.VMWARE_VALIDATE_CERTS)
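These builtin injectors are dispatched purely by credential kind, as the inject_credential excerpt above does with getattr(builtin_injectors, credential.kind). A minimal sketch of that dispatch, using a stand-in object instead of a real Credential model instance (FakeCred is hypothetical, for illustration only):

from awx.main.models.credential import injectors as builtin_injectors

class FakeCred(object):
    kind = 'gce'
    username = 'svc-account@example-project.iam.gserviceaccount.com'
    project = 'example-project'

env = {}
injector = getattr(builtin_injectors, FakeCred.kind, None)
if injector:
    injector(FakeCred(), env)  # calls gce(cred, env) defined above
print(env)
# {'GCE_EMAIL': 'svc-account@example-project.iam.gserviceaccount.com',
#  'GCE_PROJECT': 'example-project'}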
|
||||
awx/main/models/events.py (new file, 774 lines)
@@ -0,0 +1,774 @@
|
||||
import datetime
|
||||
import logging
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import models
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.timezone import utc
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.encoding import force_text
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.fields import JSONField
|
||||
from awx.main.models.base import CreatedModifiedModel
|
||||
from awx.main.utils import ignore_inventory_computed_fields
|
||||
|
||||
analytics_logger = logging.getLogger('awx.analytics.job_events')
|
||||
|
||||
|
||||
__all__ = ['JobEvent', 'ProjectUpdateEvent', 'AdHocCommandEvent',
|
||||
'InventoryUpdateEvent', 'SystemJobEvent']
|
||||
|
||||
|
||||
class BasePlaybookEvent(CreatedModifiedModel):
|
||||
'''
|
||||
An event/message logged from a playbook callback for each host.
|
||||
'''
|
||||
|
||||
VALID_KEYS = [
|
||||
'event', 'event_data', 'playbook', 'play', 'role', 'task', 'created',
|
||||
'counter', 'uuid', 'stdout', 'parent_uuid', 'start_line', 'end_line',
|
||||
'verbosity'
|
||||
]
|
||||
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
# Playbook events will be structured to form the following hierarchy:
|
||||
# - playbook_on_start (once for each playbook file)
|
||||
# - playbook_on_vars_prompt (for each play, but before play starts, we
|
||||
# currently don't handle responding to these prompts)
|
||||
# - playbook_on_play_start (once for each play)
|
||||
# - playbook_on_import_for_host (not logged, not used for v2)
|
||||
# - playbook_on_not_import_for_host (not logged, not used for v2)
|
||||
# - playbook_on_no_hosts_matched
|
||||
# - playbook_on_no_hosts_remaining
|
||||
# - playbook_on_include (only v2 - only used for handlers?)
|
||||
# - playbook_on_setup (not used for v2)
|
||||
# - runner_on*
|
||||
# - playbook_on_task_start (once for each task within a play)
|
||||
# - runner_on_failed
|
||||
# - runner_on_ok
|
||||
# - runner_on_error (not used for v2)
|
||||
# - runner_on_skipped
|
||||
# - runner_on_unreachable
|
||||
# - runner_on_no_hosts (not used for v2)
|
||||
# - runner_on_async_poll (not used for v2)
|
||||
# - runner_on_async_ok (not used for v2)
|
||||
# - runner_on_async_failed (not used for v2)
|
||||
# - runner_on_file_diff (v2 event is v2_on_file_diff)
|
||||
# - runner_item_on_ok (v2 only)
|
||||
# - runner_item_on_failed (v2 only)
|
||||
# - runner_item_on_skipped (v2 only)
|
||||
# - runner_retry (v2 only)
|
||||
# - playbook_on_notify (once for each notification from the play, not used for v2)
|
||||
# - playbook_on_stats
|
||||
|
||||
EVENT_TYPES = [
|
||||
# (level, event, verbose name, failed)
|
||||
(3, 'runner_on_failed', _('Host Failed'), True),
|
||||
(3, 'runner_on_ok', _('Host OK'), False),
|
||||
(3, 'runner_on_error', _('Host Failure'), True),
|
||||
(3, 'runner_on_skipped', _('Host Skipped'), False),
|
||||
(3, 'runner_on_unreachable', _('Host Unreachable'), True),
|
||||
(3, 'runner_on_no_hosts', _('No Hosts Remaining'), False),
|
||||
(3, 'runner_on_async_poll', _('Host Polling'), False),
|
||||
(3, 'runner_on_async_ok', _('Host Async OK'), False),
|
||||
(3, 'runner_on_async_failed', _('Host Async Failure'), True),
|
||||
(3, 'runner_item_on_ok', _('Item OK'), False),
|
||||
(3, 'runner_item_on_failed', _('Item Failed'), True),
|
||||
(3, 'runner_item_on_skipped', _('Item Skipped'), False),
|
||||
(3, 'runner_retry', _('Host Retry'), False),
|
||||
# Tower does not yet support --diff mode.
|
||||
(3, 'runner_on_file_diff', _('File Difference'), False),
|
||||
(0, 'playbook_on_start', _('Playbook Started'), False),
|
||||
(2, 'playbook_on_notify', _('Running Handlers'), False),
|
||||
(2, 'playbook_on_include', _('Including File'), False),
|
||||
(2, 'playbook_on_no_hosts_matched', _('No Hosts Matched'), False),
|
||||
(2, 'playbook_on_no_hosts_remaining', _('No Hosts Remaining'), False),
|
||||
(2, 'playbook_on_task_start', _('Task Started'), False),
|
||||
# Tower does not yet support vars_prompt (and will probably hang :)
|
||||
(1, 'playbook_on_vars_prompt', _('Variables Prompted'), False),
|
||||
(2, 'playbook_on_setup', _('Gathering Facts'), False),
|
||||
(2, 'playbook_on_import_for_host', _('internal: on Import for Host'), False),
|
||||
(2, 'playbook_on_not_import_for_host', _('internal: on Not Import for Host'), False),
|
||||
(1, 'playbook_on_play_start', _('Play Started'), False),
|
||||
(1, 'playbook_on_stats', _('Playbook Complete'), False),
|
||||
|
||||
# Additional event types for captured stdout not directly related to
|
||||
# playbook or runner events.
|
||||
(0, 'debug', _('Debug'), False),
|
||||
(0, 'verbose', _('Verbose'), False),
|
||||
(0, 'deprecated', _('Deprecated'), False),
|
||||
(0, 'warning', _('Warning'), False),
|
||||
(0, 'system_warning', _('System Warning'), False),
|
||||
(0, 'error', _('Error'), True),
|
||||
]
|
||||
FAILED_EVENTS = [x[1] for x in EVENT_TYPES if x[3]]
|
||||
EVENT_CHOICES = [(x[1], x[2]) for x in EVENT_TYPES]
|
||||
LEVEL_FOR_EVENT = dict([(x[1], x[0]) for x in EVENT_TYPES])
|
||||
|
||||
event = models.CharField(
|
||||
max_length=100,
|
||||
choices=EVENT_CHOICES,
|
||||
)
|
||||
event_data = JSONField(
|
||||
blank=True,
|
||||
default={},
|
||||
)
|
||||
failed = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
)
|
||||
changed = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
)
|
||||
uuid = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
playbook = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
play = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
role = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
task = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
counter = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
stdout = models.TextField(
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
verbosity = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
start_line = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
end_line = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
|
||||
@property
|
||||
def event_level(self):
|
||||
return self.LEVEL_FOR_EVENT.get(self.event, 0)
|
||||
|
||||
def get_event_display2(self):
|
||||
msg = self.get_event_display()
|
||||
if self.event == 'playbook_on_play_start':
|
||||
if self.play:
|
||||
msg = "%s (%s)" % (msg, self.play)
|
||||
elif self.event == 'playbook_on_task_start':
|
||||
if self.task:
|
||||
if self.event_data.get('is_conditional', False):
|
||||
msg = 'Handler Notified'
|
||||
if self.role:
|
||||
msg = '%s (%s | %s)' % (msg, self.role, self.task)
|
||||
else:
|
||||
msg = "%s (%s)" % (msg, self.task)
|
||||
|
||||
# Change display for runner events triggered by async polling. Some of
|
||||
# these events may not show in most cases, due to filtering them out
|
||||
# of the job event queryset returned to the user.
|
||||
res = self.event_data.get('res', {})
|
||||
# Fix for existing records before we added the workaround on save
|
||||
# to change async_ok to async_failed.
|
||||
if self.event == 'runner_on_async_ok':
|
||||
try:
|
||||
if res.get('failed', False) or res.get('rc', 0) != 0:
|
||||
msg = 'Host Async Failed'
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
# Runner events with ansible_job_id are part of async starting/polling.
|
||||
if self.event in ('runner_on_ok', 'runner_on_failed'):
|
||||
try:
|
||||
module_name = res['invocation']['module_name']
|
||||
job_id = res['ansible_job_id']
|
||||
except (TypeError, KeyError, AttributeError):
|
||||
module_name = None
|
||||
job_id = None
|
||||
if module_name and job_id:
|
||||
if module_name == 'async_status':
|
||||
msg = 'Host Async Checking'
|
||||
else:
|
||||
msg = 'Host Async Started'
|
||||
# Handle both 1.2 on_failed and 1.3+ on_async_failed events when an
|
||||
# async task times out.
|
||||
if self.event in ('runner_on_failed', 'runner_on_async_failed'):
|
||||
try:
|
||||
if res['msg'] == 'timed out':
|
||||
msg = 'Host Async Timeout'
|
||||
except (TypeError, KeyError, AttributeError):
|
||||
pass
|
||||
return msg
|
||||
|
||||
def _update_from_event_data(self):
|
||||
# Update event model fields from event data.
|
||||
updated_fields = set()
|
||||
event_data = self.event_data
|
||||
res = event_data.get('res', None)
|
||||
if self.event in self.FAILED_EVENTS and not event_data.get('ignore_errors', False):
|
||||
self.failed = True
|
||||
updated_fields.add('failed')
|
||||
if isinstance(res, dict):
|
||||
if res.get('changed', False):
|
||||
self.changed = True
|
||||
updated_fields.add('changed')
|
||||
# If we're not in verbose mode, wipe out any module arguments.
|
||||
invocation = res.get('invocation', None)
|
||||
if isinstance(invocation, dict) and self.job_verbosity == 0 and 'module_args' in invocation:
|
||||
event_data['res']['invocation']['module_args'] = ''
|
||||
self.event_data = event_data
|
||||
updated_fields.add('event_data')
|
||||
if self.event == 'playbook_on_stats':
|
||||
try:
|
||||
failures_dict = event_data.get('failures', {})
|
||||
dark_dict = event_data.get('dark', {})
|
||||
self.failed = bool(sum(failures_dict.values()) +
|
||||
sum(dark_dict.values()))
|
||||
updated_fields.add('failed')
|
||||
changed_dict = event_data.get('changed', {})
|
||||
self.changed = bool(sum(changed_dict.values()))
|
||||
updated_fields.add('changed')
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
for field in ('playbook', 'play', 'task', 'role'):
|
||||
value = force_text(event_data.get(field, '')).strip()
|
||||
if value != getattr(self, field):
|
||||
setattr(self, field, value)
|
||||
updated_fields.add(field)
|
||||
return updated_fields
|
||||
|
||||
@classmethod
|
||||
def create_from_data(self, **kwargs):
|
||||
pk = None
|
||||
for key in ('job_id', 'project_update_id'):
|
||||
if key in kwargs:
|
||||
pk = key
|
||||
if pk is None:
|
||||
# payload must contain either a job_id or a project_update_id
|
||||
return
|
||||
|
||||
# Convert the datetime for the job event's creation appropriately,
|
||||
# and include a time zone for it.
|
||||
#
|
||||
# In the event of any issue, throw it out, and Django will just save
|
||||
# the current time.
|
||||
try:
|
||||
if not isinstance(kwargs['created'], datetime.datetime):
|
||||
kwargs['created'] = parse_datetime(kwargs['created'])
|
||||
if not kwargs['created'].tzinfo:
|
||||
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
# Sanity check: Don't honor keys that we don't recognize.
|
||||
for key in kwargs.keys():
|
||||
if key not in self.VALID_KEYS:
|
||||
kwargs.pop(key)
|
||||
|
||||
event_data = kwargs.get('event_data', None)
|
||||
artifact_dict = None
|
||||
if event_data:
|
||||
artifact_dict = event_data.pop('artifact_data', None)
|
||||
|
||||
job_event = self.objects.create(**kwargs)
|
||||
|
||||
analytics_logger.info('Event data saved.', extra=dict(python_objects=dict(job_event=job_event)))
|
||||
|
||||
# Save artifact data to parent job (if provided).
|
||||
if artifact_dict:
|
||||
if event_data and isinstance(event_data, dict):
|
||||
# Note: Core has not added support for marking artifacts as
|
||||
# sensitive yet. Going forward, core will not use
|
||||
# _ansible_no_log to denote sensitive set_stats calls.
|
||||
# Instead, they plan to add a flag outside of the traditional
|
||||
# no_log mechanism. no_log will not work for this feature,
|
||||
# in core, because sensitive data is scrubbed before sending
|
||||
# data to the callback. The playbook_on_stats is the callback
|
||||
# in which the set_stats data is used.
|
||||
|
||||
# Again, the sensitive artifact feature has not yet landed in
|
||||
# core. The below is how we mark artifacts payload as
|
||||
# sensitive
|
||||
# artifact_dict['_ansible_no_log'] = True
|
||||
#
|
||||
parent_job = self.objects.filter(pk=pk).first()
|
||||
if hasattr(parent_job, 'artifacts') and parent_job.artifacts != artifact_dict:
|
||||
parent_job.artifacts = artifact_dict
|
||||
parent_job.save(update_fields=['artifacts'])
|
||||
|
||||
return job_event
|
||||
|
||||
@property
|
||||
def job_verbosity(self):
|
||||
return 0
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
# Update model fields and related objects unless we're only updating
|
||||
# failed/changed flags triggered from a child event.
|
||||
from_parent_update = kwargs.pop('from_parent_update', False)
|
||||
if not from_parent_update:
|
||||
# Update model fields from event data.
|
||||
updated_fields = self._update_from_event_data()
|
||||
for field in updated_fields:
|
||||
if field not in update_fields:
|
||||
update_fields.append(field)
|
||||
|
||||
# Update host related field from host_name.
|
||||
if hasattr(self, 'job') and not self.host_id and self.host_name:
|
||||
host_qs = self.job.inventory.hosts.filter(name=self.host_name)
|
||||
host_id = host_qs.only('id').values_list('id', flat=True).first()
|
||||
if host_id != self.host_id:
|
||||
self.host_id = host_id
|
||||
if 'host_id' not in update_fields:
|
||||
update_fields.append('host_id')
|
||||
super(BasePlaybookEvent, self).save(*args, **kwargs)
|
||||
|
||||
# Update related objects after this event is saved.
|
||||
if hasattr(self, 'job') and not from_parent_update:
|
||||
if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False):
|
||||
self._update_hosts()
|
||||
if self.event == 'playbook_on_stats':
|
||||
self._update_parents_failed_and_changed()
|
||||
|
||||
hostnames = self._hostnames()
|
||||
self._update_host_summary_from_stats(hostnames)
|
||||
self.job.inventory.update_computed_fields()
|
||||
|
||||
|
||||
|
||||
class JobEvent(BasePlaybookEvent):
|
||||
'''
|
||||
An event/message logged from the callback when running a job.
|
||||
'''
|
||||
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id']
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('pk',)
|
||||
index_together = [
|
||||
('job', 'event'),
|
||||
('job', 'uuid'),
|
||||
('job', 'start_line'),
|
||||
('job', 'end_line'),
|
||||
('job', 'parent_uuid'),
|
||||
]
|
||||
|
||||
job = models.ForeignKey(
|
||||
'Job',
|
||||
related_name='job_events',
|
||||
on_delete=models.CASCADE,
|
||||
editable=False,
|
||||
)
|
||||
host = models.ForeignKey(
|
||||
'Host',
|
||||
related_name='job_events_as_primary_host',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
)
|
||||
host_name = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
hosts = models.ManyToManyField(
|
||||
'Host',
|
||||
related_name='job_events',
|
||||
editable=False,
|
||||
)
|
||||
parent = models.ForeignKey(
|
||||
'self',
|
||||
related_name='children',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
)
|
||||
parent_uuid = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:job_event_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def __unicode__(self):
|
||||
return u'%s @ %s' % (self.get_event_display2(), self.created.isoformat())
|
||||
|
||||
def _update_from_event_data(self):
|
||||
# Update job event hostname
|
||||
updated_fields = super(JobEvent, self)._update_from_event_data()
|
||||
value = force_text(self.event_data.get('host', '')).strip()
|
||||
if value != getattr(self, 'host_name'):
|
||||
setattr(self, 'host_name', value)
|
||||
updated_fields.add('host_name')
|
||||
return updated_fields
|
||||
|
||||
def _update_parents_failed_and_changed(self):
|
||||
# Update parent events to reflect failed, changed
|
||||
runner_events = JobEvent.objects.filter(job=self.job,
|
||||
event__startswith='runner_on')
|
||||
changed_events = runner_events.filter(changed=True)
|
||||
failed_events = runner_events.filter(failed=True)
|
||||
JobEvent.objects.filter(uuid__in=changed_events.values_list('parent_uuid', flat=True)).update(changed=True)
|
||||
JobEvent.objects.filter(uuid__in=failed_events.values_list('parent_uuid', flat=True)).update(failed=True)
|
||||
|
||||
def _update_hosts(self, extra_host_pks=None):
|
||||
# Update job event hosts m2m from host_name, propagate to parent events.
|
||||
extra_host_pks = set(extra_host_pks or [])
|
||||
hostnames = set()
|
||||
if self.host_name:
|
||||
hostnames.add(self.host_name)
|
||||
if self.event == 'playbook_on_stats':
|
||||
try:
|
||||
for v in self.event_data.values():
|
||||
hostnames.update(v.keys())
|
||||
except AttributeError: # In case event_data or v isn't a dict.
|
||||
pass
|
||||
qs = self.job.inventory.hosts.all()
|
||||
qs = qs.filter(models.Q(name__in=hostnames) | models.Q(pk__in=extra_host_pks))
|
||||
qs = qs.exclude(job_events__pk=self.id).only('id')
|
||||
for host in qs:
|
||||
self.hosts.add(host)
|
||||
if self.parent_uuid:
|
||||
parent = JobEvent.objects.filter(uuid=self.parent_uuid)
|
||||
if parent.exists():
|
||||
parent = parent[0]
|
||||
parent._update_hosts(qs.values_list('id', flat=True))
|
||||
|
||||
def _hostnames(self):
|
||||
hostnames = set()
|
||||
try:
|
||||
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
|
||||
hostnames.update(self.event_data.get(stat, {}).keys())
|
||||
except AttributeError: # In case event_data or event_data[stat] isn't a dict.
|
||||
pass
|
||||
return hostnames
|
||||
|
||||
def _update_host_summary_from_stats(self, hostnames):
|
||||
with ignore_inventory_computed_fields():
|
||||
qs = self.job.inventory.hosts.filter(name__in=hostnames)
|
||||
job = self.job
|
||||
for host in hostnames:
|
||||
host_stats = {}
|
||||
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
|
||||
try:
|
||||
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
|
||||
except AttributeError: # in case event_data[stat] isn't a dict.
|
||||
pass
|
||||
if qs.filter(name=host).exists():
|
||||
host_actual = qs.get(name=host)
|
||||
host_summary, created = job.job_host_summaries.get_or_create(host=host_actual, host_name=host_actual.name, defaults=host_stats)
|
||||
else:
|
||||
host_summary, created = job.job_host_summaries.get_or_create(host_name=host, defaults=host_stats)
|
||||
|
||||
if not created:
|
||||
update_fields = []
|
||||
for stat, value in host_stats.items():
|
||||
if getattr(host_summary, stat) != value:
|
||||
setattr(host_summary, stat, value)
|
||||
update_fields.append(stat)
|
||||
if update_fields:
|
||||
host_summary.save(update_fields=update_fields)
|
||||
|
||||
@property
|
||||
def job_verbosity(self):
|
||||
return self.job.verbosity
|
||||
|
||||
|
||||
class ProjectUpdateEvent(BasePlaybookEvent):
|
||||
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id']
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('pk',)
|
||||
index_together = [
|
||||
('project_update', 'event'),
|
||||
('project_update', 'uuid'),
|
||||
('project_update', 'start_line'),
|
||||
('project_update', 'end_line'),
|
||||
]
|
||||
|
||||
project_update = models.ForeignKey(
|
||||
'ProjectUpdate',
|
||||
related_name='project_update_events',
|
||||
on_delete=models.CASCADE,
|
||||
editable=False,
|
||||
)
|
||||
|
||||
@property
|
||||
def host_name(self):
|
||||
return 'localhost'
|
||||
|
||||
|
||||
class BaseCommandEvent(CreatedModifiedModel):
|
||||
'''
|
||||
An event/message logged from a command for each host.
|
||||
'''
|
||||
|
||||
VALID_KEYS = [
|
||||
'event_data', 'created', 'counter', 'uuid', 'stdout', 'start_line',
|
||||
'end_line', 'verbosity'
|
||||
]
|
||||
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
event_data = JSONField(
|
||||
blank=True,
|
||||
default={},
|
||||
)
|
||||
uuid = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
counter = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
stdout = models.TextField(
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
verbosity = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
start_line = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
end_line = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
|
||||
def __unicode__(self):
|
||||
return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())
|
||||
|
||||
@classmethod
|
||||
def create_from_data(self, **kwargs):
|
||||
# Convert the datetime for the event's creation
|
||||
# appropriately, and include a time zone for it.
|
||||
#
|
||||
# In the event of any issue, throw it out, and Django will just save
|
||||
# the current time.
|
||||
try:
|
||||
if not isinstance(kwargs['created'], datetime.datetime):
|
||||
kwargs['created'] = parse_datetime(kwargs['created'])
|
||||
if not kwargs['created'].tzinfo:
|
||||
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
# Sanity check: Don't honor keys that we don't recognize.
|
||||
for key in kwargs.keys():
|
||||
if key not in self.VALID_KEYS:
|
||||
kwargs.pop(key)
|
||||
|
||||
return self.objects.create(**kwargs)
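The created-timestamp handling above can be illustrated standalone: string timestamps are parsed and naive datetimes are coerced to UTC before the event row is created (the timestamp below is an arbitrary example):

from django.utils.dateparse import parse_datetime
from django.utils.timezone import utc

created = parse_datetime('2017-10-06T12:34:56')  # returns a naive datetime
if created and not created.tzinfo:
    created = created.replace(tzinfo=utc)
print(created.isoformat())
# 2017-10-06T12:34:56+00:00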
|
||||
|
||||
|
||||
class AdHocCommandEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event']
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
index_together = [
|
||||
('ad_hoc_command', 'event'),
|
||||
('ad_hoc_command', 'uuid'),
|
||||
('ad_hoc_command', 'start_line'),
|
||||
('ad_hoc_command', 'end_line'),
|
||||
]
|
||||
|
||||
EVENT_TYPES = [
|
||||
# (event, verbose name, failed)
|
||||
('runner_on_failed', _('Host Failed'), True),
|
||||
('runner_on_ok', _('Host OK'), False),
|
||||
('runner_on_unreachable', _('Host Unreachable'), True),
|
||||
# Tower won't see no_hosts (check is done earlier without callback).
|
||||
# ('runner_on_no_hosts', _('No Hosts Matched'), False),
|
||||
# Tower will see skipped (when running in check mode for a module that
|
||||
# does not support check mode).
|
||||
('runner_on_skipped', _('Host Skipped'), False),
|
||||
# Tower does not support async for ad hoc commands (not used in v2).
|
||||
# ('runner_on_async_poll', _('Host Polling'), False),
|
||||
# ('runner_on_async_ok', _('Host Async OK'), False),
|
||||
# ('runner_on_async_failed', _('Host Async Failure'), True),
|
||||
# Tower does not yet support --diff mode.
|
||||
# ('runner_on_file_diff', _('File Difference'), False),
|
||||
|
||||
# Additional event types for captured stdout not directly related to
|
||||
# runner events.
|
||||
('debug', _('Debug'), False),
|
||||
('verbose', _('Verbose'), False),
|
||||
('deprecated', _('Deprecated'), False),
|
||||
('warning', _('Warning'), False),
|
||||
('system_warning', _('System Warning'), False),
|
||||
('error', _('Error'), False),
|
||||
]
|
||||
FAILED_EVENTS = [x[0] for x in EVENT_TYPES if x[2]]
|
||||
EVENT_CHOICES = [(x[0], x[1]) for x in EVENT_TYPES]
|
||||
|
||||
event = models.CharField(
|
||||
max_length=100,
|
||||
choices=EVENT_CHOICES,
|
||||
)
|
||||
failed = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
)
|
||||
changed = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
)
|
||||
ad_hoc_command = models.ForeignKey(
|
||||
'AdHocCommand',
|
||||
related_name='ad_hoc_command_events',
|
||||
on_delete=models.CASCADE,
|
||||
editable=False,
|
||||
)
|
||||
host = models.ForeignKey(
|
||||
'Host',
|
||||
related_name='ad_hoc_command_events',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
)
|
||||
host_name = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:ad_hoc_command_event_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
res = self.event_data.get('res', None)
|
||||
if self.event in self.FAILED_EVENTS:
|
||||
if not self.event_data.get('ignore_errors', False):
|
||||
self.failed = True
|
||||
if 'failed' not in update_fields:
|
||||
update_fields.append('failed')
|
||||
if isinstance(res, dict) and res.get('changed', False):
|
||||
self.changed = True
|
||||
if 'changed' not in update_fields:
|
||||
update_fields.append('changed')
|
||||
self.host_name = self.event_data.get('host', '').strip()
|
||||
if 'host_name' not in update_fields:
|
||||
update_fields.append('host_name')
|
||||
if not self.host_id and self.host_name:
|
||||
host_qs = self.ad_hoc_command.inventory.hosts.filter(name=self.host_name)
|
||||
try:
|
||||
host_id = host_qs.only('id').values_list('id', flat=True)
|
||||
if host_id.exists():
|
||||
self.host_id = host_id[0]
|
||||
if 'host_id' not in update_fields:
|
||||
update_fields.append('host_id')
|
||||
except (IndexError, AttributeError):
|
||||
pass
|
||||
super(AdHocCommandEvent, self).save(*args, **kwargs)
|
||||
|
||||
|
||||
class InventoryUpdateEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id']
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
index_together = [
|
||||
('inventory_update', 'uuid'),
|
||||
('inventory_update', 'start_line'),
|
||||
('inventory_update', 'end_line'),
|
||||
]
|
||||
|
||||
inventory_update = models.ForeignKey(
|
||||
'InventoryUpdate',
|
||||
related_name='inventory_update_events',
|
||||
on_delete=models.CASCADE,
|
||||
editable=False,
|
||||
)
|
||||
|
||||
@property
|
||||
def event(self):
|
||||
return 'verbose'
|
||||
|
||||
@property
|
||||
def failed(self):
|
||||
return False
|
||||
|
||||
@property
|
||||
def changed(self):
|
||||
return False
|
||||
|
||||
|
||||
class SystemJobEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id']
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
index_together = [
|
||||
('system_job', 'uuid'),
|
||||
('system_job', 'start_line'),
|
||||
('system_job', 'end_line'),
|
||||
]
|
||||
|
||||
system_job = models.ForeignKey(
|
||||
'SystemJob',
|
||||
related_name='system_job_events',
|
||||
on_delete=models.CASCADE,
|
||||
editable=False,
|
||||
)
|
||||
|
||||
@property
|
||||
def event(self):
|
||||
return 'verbose'
|
||||
|
||||
@property
|
||||
def failed(self):
|
||||
return False
|
||||
|
||||
@property
|
||||
def changed(self):
|
||||
return False
|
||||
@@ -8,6 +8,7 @@ import re
|
||||
import copy
|
||||
from urlparse import urljoin
|
||||
import os.path
|
||||
import six
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -29,6 +30,7 @@ from awx.main.fields import (
|
||||
)
|
||||
from awx.main.managers import HostManager
|
||||
from awx.main.models.base import * # noqa
|
||||
from awx.main.models.events import InventoryUpdateEvent
|
||||
from awx.main.models.unified_jobs import * # noqa
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerInventoryUpdateMixin
|
||||
from awx.main.models.notifications import (
|
||||
@@ -209,7 +211,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
|
||||
group_children.add(from_group_id)
|
||||
return group_children_map
|
||||
|
||||
def get_script_data(self, hostvars=False, show_all=False):
|
||||
def get_script_data(self, hostvars=False, towervars=False, show_all=False):
|
||||
if show_all:
|
||||
hosts_q = dict()
|
||||
else:
|
||||
@@ -271,6 +273,10 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
|
||||
data['_meta'].setdefault('hostvars', dict())
|
||||
for host in self.hosts.filter(**hosts_q):
|
||||
data['_meta']['hostvars'][host.name] = host.variables_dict
|
||||
if towervars:
|
||||
tower_dict = dict(remote_tower_enabled=str(host.enabled).lower(),
|
||||
remote_tower_id=host.id)
|
||||
data['_meta']['hostvars'][host.name].update(tower_dict)
|
||||
|
||||
return data
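When towervars is True, each host's hostvars entry is extended with two extra keys; an illustrative shape of the returned structure (the host name and id are made-up example values):

example_script_data = {
    '_meta': {
        'hostvars': {
            'web01.example.com': {
                # ...the host's own variables_dict...
                'remote_tower_enabled': 'true',  # str(host.enabled).lower()
                'remote_tower_id': 42,
            },
        },
    },
}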
|
||||
|
||||
@@ -399,8 +405,13 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
|
||||
active_hosts = self.hosts
|
||||
failed_hosts = active_hosts.filter(has_active_failures=True)
|
||||
active_groups = self.groups
|
||||
if self.kind == 'smart':
|
||||
active_groups = active_groups.none()
|
||||
failed_groups = active_groups.filter(has_active_failures=True)
|
||||
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
|
||||
if self.kind == 'smart':
|
||||
active_inventory_sources = self.inventory_sources.none()
|
||||
else:
|
||||
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
|
||||
failed_inventory_sources = active_inventory_sources.filter(last_job_failed=True)
|
||||
computed_fields = {
|
||||
'has_active_failures': bool(failed_hosts.count()),
|
||||
@@ -417,6 +428,8 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
|
||||
for field, value in computed_fields.items():
|
||||
if getattr(iobj, field) != value:
|
||||
setattr(iobj, field, value)
|
||||
# update in-memory object
|
||||
setattr(self, field, value)
|
||||
else:
|
||||
computed_fields.pop(field)
|
||||
if computed_fields:
|
||||
@@ -464,6 +477,10 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
|
||||
def save(self, *args, **kwargs):
|
||||
self._update_host_smart_inventory_memeberships()
|
||||
super(Inventory, self).save(*args, **kwargs)
|
||||
if (self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and
|
||||
connection.vendor != 'sqlite'):
|
||||
# Minimal update of host_count for smart inventory host filter changes
|
||||
self.update_computed_fields(update_groups=False, update_hosts=False)
|
||||
|
||||
def delete(self, *args, **kwargs):
|
||||
self._update_host_smart_inventory_memeberships()
|
||||
@@ -937,6 +954,8 @@ class InventorySourceOptions(BaseModel):
|
||||
('satellite6', _('Red Hat Satellite 6')),
|
||||
('cloudforms', _('Red Hat CloudForms')),
|
||||
('openstack', _('OpenStack')),
|
||||
('rhv', _('Red Hat Virtualization')),
|
||||
('tower', _('Ansible Tower')),
|
||||
('custom', _('Custom Script')),
|
||||
]
|
||||
|
||||
@@ -1185,6 +1204,16 @@ class InventorySourceOptions(BaseModel):
|
||||
"""Red Hat CloudForms region choices (not implemented)"""
|
||||
return [('all', 'All')]
|
||||
|
||||
@classmethod
|
||||
def get_rhv_region_choices(self):
|
||||
"""No region supprt"""
|
||||
return [('all', 'All')]
|
||||
|
||||
@classmethod
|
||||
def get_tower_region_choices(self):
|
||||
"""No region supprt"""
|
||||
return [('all', 'All')]
|
||||
|
||||
def clean_credential(self):
|
||||
if not self.source:
|
||||
return None
|
||||
@@ -1256,7 +1285,7 @@ class InventorySourceOptions(BaseModel):
|
||||
raise ValidationError(_('Invalid filter expression: %(filter)s') %
|
||||
{'filter': ', '.join(invalid_filters)})
|
||||
return instance_filters
|
||||
elif self.source == 'vmware':
|
||||
elif self.source in ('vmware', 'tower'):
|
||||
return instance_filters
|
||||
else:
|
||||
return ''
|
||||
@@ -1337,9 +1366,9 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return ['name', 'description', 'source', 'source_path', 'source_script', 'source_vars', 'schedule',
|
||||
'credential', 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
|
||||
'timeout', 'verbosity', 'source_project_update',]
|
||||
return set(f.name for f in InventorySourceOptions._meta.fields) | set(
|
||||
['name', 'description', 'schedule']
|
||||
)
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
@@ -1413,6 +1442,19 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
|
||||
def create_inventory_update(self, **kwargs):
|
||||
return self.create_unified_job(**kwargs)
|
||||
|
||||
def create_unified_job(self, **kwargs):
|
||||
# Use a special name if one is not already specified
|
||||
if self.inventory:
|
||||
if '_eager_fields' not in kwargs:
|
||||
kwargs['_eager_fields'] = {}
|
||||
if 'name' not in kwargs['_eager_fields']:
|
||||
name = six.text_type('{} - {}').format(self.inventory.name, self.name)
|
||||
name_field = self._meta.get_field('name')
|
||||
if len(name) > name_field.max_length:
|
||||
name = name[:name_field.max_length]
|
||||
kwargs['_eager_fields']['name'] = name
|
||||
return super(InventorySource, self).create_unified_job(**kwargs)
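A small sketch of the naming rule above: the update is named "<inventory name> - <source name>" and truncated to the name field's limit (the 512-character limit and the names below are assumptions for illustration):

name = u'{} - {}'.format('Production Inventory', 'AWS us-east-1')
max_length = 512  # assumed to match the name field's max_length
if len(name) > max_length:
    name = name[:max_length]
print(name)
# Production Inventory - AWS us-east-1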
|
||||
|
||||
@property
|
||||
def cache_timeout_blocked(self):
|
||||
if not self.last_job_run:
|
||||
@@ -1540,15 +1582,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
websocket_data.update(dict(group_id=self.inventory_source.deprecated_group.id))
|
||||
return websocket_data
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
inventory_source = self.inventory_source
|
||||
if inventory_source.inventory and self.name == inventory_source.name:
|
||||
self.name = inventory_source.inventory.name
|
||||
if 'name' not in update_fields:
|
||||
update_fields.append('name')
|
||||
super(InventoryUpdate, self).save(*args, **kwargs)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:inventory_update_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
@@ -1563,6 +1596,10 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
self.inventory_source.source_project.get_project_path(check_if_exists=False),
|
||||
self.source_path)
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
return InventoryUpdateEvent
|
||||
|
||||
@property
|
||||
def task_impact(self):
|
||||
return 50
|
||||
|
||||
@@ -14,12 +14,9 @@ from django.conf import settings
|
||||
from django.db import models
|
||||
#from django.core.cache import cache
|
||||
import memcache
|
||||
from django.db.models import Q, Count
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from dateutil import parser
|
||||
from dateutil.tz import tzutc
|
||||
from django.utils.encoding import force_text, smart_str
|
||||
from django.utils.timezone import utc
|
||||
from django.utils.encoding import smart_str
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.core.exceptions import ValidationError, FieldDoesNotExist
|
||||
|
||||
@@ -29,27 +26,23 @@ from rest_framework.exceptions import ParseError
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.base import * # noqa
|
||||
from awx.main.models.events import JobEvent, SystemJobEvent
|
||||
from awx.main.models.unified_jobs import * # noqa
|
||||
from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
JobNotificationMixin,
|
||||
)
|
||||
from awx.main.utils import (
|
||||
ignore_inventory_computed_fields,
|
||||
parse_yaml_or_json,
|
||||
)
|
||||
from awx.main.utils import parse_yaml_or_json
|
||||
from awx.main.fields import ImplicitRoleField
|
||||
from awx.main.models.mixins import ResourceMixin, SurveyJobTemplateMixin, SurveyJobMixin, TaskManagerJobMixin
|
||||
from awx.main.models.mixins import ResourceMixin, SurveyJobTemplateMixin, SurveyJobMixin, TaskManagerJobMixin, CustomVirtualEnvMixin
|
||||
from awx.main.fields import JSONField, AskForField
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.models.jobs')
|
||||
analytics_logger = logging.getLogger('awx.analytics.job_events')
|
||||
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
|
||||
|
||||
__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'JobEvent', 'SystemJobTemplate', 'SystemJob']
|
||||
__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'SystemJobTemplate', 'SystemJob']
|
||||
|
||||
|
||||
class JobOptions(BaseModel):
|
||||
@@ -222,7 +215,7 @@ class JobOptions(BaseModel):
|
||||
return needed
|
||||
|
||||
|
||||
class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, ResourceMixin):
|
||||
class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, ResourceMixin, CustomVirtualEnvMixin):
|
||||
'''
|
||||
A job template is a reusable job definition for applying a project (with
|
||||
playbook) to an inventory source with a given credential.
|
||||
@@ -289,13 +282,9 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return ['name', 'description', 'job_type', 'inventory', 'project',
|
||||
'playbook', 'credentials', 'forks', 'schedule', 'limit',
|
||||
'verbosity', 'job_tags', 'extra_vars',
|
||||
'force_handlers', 'skip_tags', 'start_at_task',
|
||||
'become_enabled', 'labels', 'survey_passwords',
|
||||
'allow_simultaneous', 'timeout', 'use_fact_cache',
|
||||
'diff_mode',]
|
||||
return set(f.name for f in JobOptions._meta.fields) | set(
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials']
|
||||
)
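A hedged sketch of the pattern used here: the copy-able field names are derived from the options model's _meta rather than being maintained as a hand-written list (unified_field_names is a hypothetical helper, not AWX code):

def unified_field_names(options_model, extra=('name', 'description', 'schedule')):
    # every concrete field on the options model, plus a few template-level extras
    return set(f.name for f in options_model._meta.fields) | set(extra)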
|
||||
|
||||
@property
|
||||
def validation_errors(self):
|
||||
@@ -342,6 +331,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
# not block a provisioning callback from creating/launching jobs.
|
||||
if callback_extra_vars is None:
|
||||
for ask_field_name in set(self.get_ask_mapping().values()):
|
||||
if ask_field_name == 'ask_credential_on_launch':
|
||||
# if ask_credential_on_launch is True, it just means it can
|
||||
# optionally be specified at launch time, not that it's *required*
|
||||
# to launch
|
||||
continue
|
||||
if getattr(self, ask_field_name):
|
||||
prompting_needed = True
|
||||
break
|
||||
@@ -355,7 +349,8 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
rejected_data = {}
|
||||
accepted_vars, rejected_vars, errors_dict = self.accept_or_ignore_variables(
|
||||
kwargs.get('extra_vars', {}),
|
||||
_exclude_errors=exclude_errors)
|
||||
_exclude_errors=exclude_errors,
|
||||
extra_passwords=kwargs.get('survey_passwords', {}))
|
||||
if accepted_vars:
|
||||
prompted_data['extra_vars'] = accepted_vars
|
||||
if rejected_vars:
|
||||
@@ -519,6 +514,22 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
def get_ui_url(self):
|
||||
return urljoin(settings.TOWER_URL_BASE, "/#/jobs/{}".format(self.pk))
|
||||
|
||||
@property
|
||||
def ansible_virtualenv_path(self):
|
||||
# the order here enforces precedence (it matters)
|
||||
for virtualenv in (
|
||||
self.job_template.custom_virtualenv if self.job_template else None,
|
||||
self.project.custom_virtualenv,
|
||||
self.project.organization.custom_virtualenv
|
||||
):
|
||||
if virtualenv:
|
||||
return virtualenv
|
||||
return settings.ANSIBLE_VENV_PATH
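The precedence described in the comment above amounts to: job template, then project, then the project's organization, then the global default. A minimal standalone sketch (the paths here are illustrative, not real settings values):

def pick_virtualenv(jt_venv, project_venv, org_venv, default='/var/lib/awx/venv/ansible'):
    # first non-empty custom virtualenv wins; otherwise fall back to the default
    for candidate in (jt_venv, project_venv, org_venv):
        if candidate:
            return candidate
    return default

print(pick_virtualenv(None, '/var/lib/awx/venv/legacy', None))
# /var/lib/awx/venv/legacy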
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
return JobEvent
|
||||
|
||||
@property
|
||||
def ask_diff_mode_on_launch(self):
|
||||
if self.job_template is not None:
|
||||
@@ -664,7 +675,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
data.update(dict(inventory=self.inventory.name if self.inventory else None,
|
||||
project=self.project.name if self.project else None,
|
||||
playbook=self.playbook,
|
||||
credential=self.credential.name if self.credential else None,
|
||||
credential=getattr(self.get_deprecated_credential('ssh'), 'name', None),
|
||||
limit=self.limit,
|
||||
extra_vars=self.display_extra_vars(),
|
||||
hosts=all_hosts))
|
||||
@@ -801,7 +812,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
smart_str(host.inventory.name), smart_str(host.name)),
|
||||
extra=dict(inventory_id=host.inventory.id, host_name=host.name,
|
||||
ansible_facts=host.ansible_facts,
|
||||
ansible_facts_modified=host.ansible_facts_modified.isoformat()))
|
||||
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
|
||||
job_id=self.id))
|
||||
|
||||
|
||||
# Add on aliases for the non-related-model fields
|
||||
@@ -892,7 +904,7 @@ class LaunchTimeConfig(BaseModel):
|
||||
Hides fields marked as passwords in survey.
|
||||
'''
|
||||
if self.survey_passwords:
|
||||
extra_data = parse_yaml_or_json(self.extra_data)
|
||||
extra_data = parse_yaml_or_json(self.extra_data).copy()
|
||||
for key, value in self.survey_passwords.items():
|
||||
if key in extra_data:
|
||||
extra_data[key] = value
|
||||
@@ -1031,477 +1043,6 @@ class JobHostSummary(CreatedModifiedModel):
|
||||
#self.host.update_computed_fields()
|
||||
|
||||
|
||||
class JobEvent(CreatedModifiedModel):
|
||||
'''
|
||||
An event/message logged from the callback when running a job.
|
||||
'''
|
||||
|
||||
# Playbook events will be structured to form the following hierarchy:
|
||||
# - playbook_on_start (once for each playbook file)
|
||||
# - playbook_on_vars_prompt (for each play, but before play starts, we
|
||||
# currently don't handle responding to these prompts)
|
||||
# - playbook_on_play_start (once for each play)
|
||||
# - playbook_on_import_for_host (not logged, not used for v2)
|
||||
# - playbook_on_not_import_for_host (not logged, not used for v2)
|
||||
# - playbook_on_no_hosts_matched
|
||||
# - playbook_on_no_hosts_remaining
|
||||
# - playbook_on_include (only v2 - only used for handlers?)
|
||||
# - playbook_on_setup (not used for v2)
|
||||
# - runner_on*
|
||||
# - playbook_on_task_start (once for each task within a play)
|
||||
# - runner_on_failed
|
||||
# - runner_on_ok
|
||||
# - runner_on_error (not used for v2)
|
||||
# - runner_on_skipped
|
||||
# - runner_on_unreachable
|
||||
# - runner_on_no_hosts (not used for v2)
|
||||
# - runner_on_async_poll (not used for v2)
|
||||
# - runner_on_async_ok (not used for v2)
|
||||
# - runner_on_async_failed (not used for v2)
|
||||
# - runner_on_file_diff (v2 event is v2_on_file_diff)
|
||||
# - runner_item_on_ok (v2 only)
|
||||
# - runner_item_on_failed (v2 only)
|
||||
# - runner_item_on_skipped (v2 only)
|
||||
# - runner_retry (v2 only)
|
||||
# - playbook_on_notify (once for each notification from the play, not used for v2)
|
||||
# - playbook_on_stats
|
||||
|
||||
EVENT_TYPES = [
|
||||
# (level, event, verbose name, failed)
|
||||
(3, 'runner_on_failed', _('Host Failed'), True),
|
||||
(3, 'runner_on_ok', _('Host OK'), False),
|
||||
(3, 'runner_on_error', _('Host Failure'), True),
|
||||
(3, 'runner_on_skipped', _('Host Skipped'), False),
|
||||
(3, 'runner_on_unreachable', _('Host Unreachable'), True),
|
||||
(3, 'runner_on_no_hosts', _('No Hosts Remaining'), False),
|
||||
(3, 'runner_on_async_poll', _('Host Polling'), False),
|
||||
(3, 'runner_on_async_ok', _('Host Async OK'), False),
|
||||
(3, 'runner_on_async_failed', _('Host Async Failure'), True),
|
||||
(3, 'runner_item_on_ok', _('Item OK'), False),
|
||||
(3, 'runner_item_on_failed', _('Item Failed'), True),
|
||||
(3, 'runner_item_on_skipped', _('Item Skipped'), False),
|
||||
(3, 'runner_retry', _('Host Retry'), False),
|
||||
# Tower does not yet support --diff mode.
|
||||
(3, 'runner_on_file_diff', _('File Difference'), False),
|
||||
(0, 'playbook_on_start', _('Playbook Started'), False),
|
||||
(2, 'playbook_on_notify', _('Running Handlers'), False),
|
||||
(2, 'playbook_on_include', _('Including File'), False),
|
||||
(2, 'playbook_on_no_hosts_matched', _('No Hosts Matched'), False),
|
||||
(2, 'playbook_on_no_hosts_remaining', _('No Hosts Remaining'), False),
|
||||
(2, 'playbook_on_task_start', _('Task Started'), False),
|
||||
# Tower does not yet support vars_prompt (and will probably hang :)
|
||||
(1, 'playbook_on_vars_prompt', _('Variables Prompted'), False),
|
||||
(2, 'playbook_on_setup', _('Gathering Facts'), False),
|
||||
(2, 'playbook_on_import_for_host', _('internal: on Import for Host'), False),
|
||||
(2, 'playbook_on_not_import_for_host', _('internal: on Not Import for Host'), False),
|
||||
(1, 'playbook_on_play_start', _('Play Started'), False),
|
||||
(1, 'playbook_on_stats', _('Playbook Complete'), False),
|
||||
|
||||
# Additional event types for captured stdout not directly related to
|
||||
# playbook or runner events.
|
||||
(0, 'debug', _('Debug'), False),
|
||||
(0, 'verbose', _('Verbose'), False),
|
||||
(0, 'deprecated', _('Deprecated'), False),
|
||||
(0, 'warning', _('Warning'), False),
|
||||
(0, 'system_warning', _('System Warning'), False),
|
||||
(0, 'error', _('Error'), True),
|
||||
]
|
||||
FAILED_EVENTS = [x[1] for x in EVENT_TYPES if x[3]]
|
||||
EVENT_CHOICES = [(x[1], x[2]) for x in EVENT_TYPES]
|
||||
LEVEL_FOR_EVENT = dict([(x[1], x[0]) for x in EVENT_TYPES])
|
||||
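As a quick illustration of the derived lookups above, here is a standalone sketch using a truncated `EVENT_TYPES` table (not the full list, and plain strings in place of the `_()` wrappers):

```python
# Standalone sketch: how the derived lookups are built from
# (level, event, verbose name, failed) tuples. Truncated table for brevity.
EVENT_TYPES = [
    (3, 'runner_on_failed', 'Host Failed', True),
    (3, 'runner_on_ok', 'Host OK', False),
    (0, 'error', 'Error', True),
]

FAILED_EVENTS = [x[1] for x in EVENT_TYPES if x[3]]
EVENT_CHOICES = [(x[1], x[2]) for x in EVENT_TYPES]
LEVEL_FOR_EVENT = dict([(x[1], x[0]) for x in EVENT_TYPES])

assert FAILED_EVENTS == ['runner_on_failed', 'error']
assert EVENT_CHOICES[1] == ('runner_on_ok', 'Host OK')
assert LEVEL_FOR_EVENT['error'] == 0
```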
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('pk',)
|
||||
index_together = [
|
||||
('job', 'event'),
|
||||
('job', 'uuid'),
|
||||
('job', 'start_line'),
|
||||
('job', 'end_line'),
|
||||
('job', 'parent_uuid'),
|
||||
]
|
||||
|
||||
job = models.ForeignKey(
|
||||
'Job',
|
||||
related_name='job_events',
|
||||
on_delete=models.CASCADE,
|
||||
editable=False,
|
||||
)
|
||||
event = models.CharField(
|
||||
max_length=100,
|
||||
choices=EVENT_CHOICES,
|
||||
)
|
||||
event_data = JSONField(
|
||||
blank=True,
|
||||
default={},
|
||||
)
|
||||
failed = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
)
|
||||
changed = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
)
|
||||
uuid = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
host = models.ForeignKey(
|
||||
'Host',
|
||||
related_name='job_events_as_primary_host',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
)
|
||||
host_name = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
hosts = models.ManyToManyField(
|
||||
'Host',
|
||||
related_name='job_events',
|
||||
editable=False,
|
||||
)
|
||||
playbook = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
play = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
role = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
task = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
parent = models.ForeignKey(
|
||||
'self',
|
||||
related_name='children',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
)
|
||||
parent_uuid = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
counter = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
stdout = models.TextField(
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
verbosity = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
start_line = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
end_line = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:job_event_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def __unicode__(self):
|
||||
return u'%s @ %s' % (self.get_event_display2(), self.created.isoformat())
|
||||
|
||||
@property
|
||||
def event_level(self):
|
||||
return self.LEVEL_FOR_EVENT.get(self.event, 0)
|
||||
|
||||
def get_event_display2(self):
|
||||
msg = self.get_event_display()
|
||||
if self.event == 'playbook_on_play_start':
|
||||
if self.play:
|
||||
msg = "%s (%s)" % (msg, self.play)
|
||||
elif self.event == 'playbook_on_task_start':
|
||||
if self.task:
|
||||
if self.event_data.get('is_conditional', False):
|
||||
msg = 'Handler Notified'
|
||||
if self.role:
|
||||
msg = '%s (%s | %s)' % (msg, self.role, self.task)
|
||||
else:
|
||||
msg = "%s (%s)" % (msg, self.task)
|
||||
|
||||
# Change display for runner events triggered by async polling. Some of
# these events may not show in most cases, due to filtering them out
# of the job event queryset returned to the user.
|
||||
res = self.event_data.get('res', {})
|
||||
# Fix for existing records before we had added the workaround on save
|
||||
# to change async_ok to async_failed.
|
||||
if self.event == 'runner_on_async_ok':
|
||||
try:
|
||||
if res.get('failed', False) or res.get('rc', 0) != 0:
|
||||
msg = 'Host Async Failed'
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
# Runner events with ansible_job_id are part of async starting/polling.
|
||||
if self.event in ('runner_on_ok', 'runner_on_failed'):
|
||||
try:
|
||||
module_name = res['invocation']['module_name']
|
||||
job_id = res['ansible_job_id']
|
||||
except (TypeError, KeyError, AttributeError):
|
||||
module_name = None
|
||||
job_id = None
|
||||
if module_name and job_id:
|
||||
if module_name == 'async_status':
|
||||
msg = 'Host Async Checking'
|
||||
else:
|
||||
msg = 'Host Async Started'
|
||||
# Handle both 1.2 on_failed and 1.3+ on_async_failed events when an
|
||||
# async task times out.
|
||||
if self.event in ('runner_on_failed', 'runner_on_async_failed'):
|
||||
try:
|
||||
if res['msg'] == 'timed out':
|
||||
msg = 'Host Async Timeout'
|
||||
except (TypeError, KeyError, AttributeError):
|
||||
pass
|
||||
return msg
|
||||
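For orientation, this is what `get_event_display2()` yields for a hypothetical task-start event (illustrative values only, not taken from a real job):

```python
# Illustrative sketch (values are made up): a task-start event that
# belongs to a role renders as "<verbose name> (<role> | <task>)".
event = JobEvent(event='playbook_on_task_start',
                 task='install packages',
                 role='common',
                 event_data={})
# Django's get_event_display() maps 'playbook_on_task_start' to
# 'Task Started', so:
assert event.get_event_display2() == 'Task Started (common | install packages)'
```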
|
||||
def _update_from_event_data(self):
|
||||
# Update job event model fields from event data.
|
||||
updated_fields = set()
|
||||
job = self.job
|
||||
verbosity = job.verbosity
|
||||
event_data = self.event_data
|
||||
res = event_data.get('res', None)
|
||||
if self.event in self.FAILED_EVENTS and not event_data.get('ignore_errors', False):
|
||||
self.failed = True
|
||||
updated_fields.add('failed')
|
||||
if isinstance(res, dict):
|
||||
if res.get('changed', False):
|
||||
self.changed = True
|
||||
updated_fields.add('changed')
|
||||
# If we're not in verbose mode, wipe out any module arguments.
|
||||
invocation = res.get('invocation', None)
|
||||
if isinstance(invocation, dict) and verbosity == 0 and 'module_args' in invocation:
|
||||
event_data['res']['invocation']['module_args'] = ''
|
||||
self.event_data = event_data
|
||||
updated_fields.add('event_data')
|
||||
if self.event == 'playbook_on_stats':
|
||||
try:
|
||||
failures_dict = event_data.get('failures', {})
|
||||
dark_dict = event_data.get('dark', {})
|
||||
self.failed = bool(sum(failures_dict.values()) +
|
||||
sum(dark_dict.values()))
|
||||
updated_fields.add('failed')
|
||||
changed_dict = event_data.get('changed', {})
|
||||
self.changed = bool(sum(changed_dict.values()))
|
||||
updated_fields.add('changed')
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
for field in ('playbook', 'play', 'task', 'role', 'host'):
|
||||
value = force_text(event_data.get(field, '')).strip()
|
||||
if field == 'host':
|
||||
field = 'host_name'
|
||||
if value != getattr(self, field):
|
||||
setattr(self, field, value)
|
||||
updated_fields.add(field)
|
||||
return updated_fields
|
||||
|
||||
def _update_parents_failed_and_changed(self):
|
||||
# Update parent events to reflect failed, changed
|
||||
runner_events = JobEvent.objects.filter(job=self.job,
|
||||
event__startswith='runner_on')
|
||||
changed_events = runner_events.filter(changed=True)
|
||||
failed_events = runner_events.filter(failed=True)
|
||||
JobEvent.objects.filter(uuid__in=changed_events.values_list('parent_uuid', flat=True)).update(changed=True)
|
||||
JobEvent.objects.filter(uuid__in=failed_events.values_list('parent_uuid', flat=True)).update(failed=True)
|
||||
|
||||
def _update_hosts(self, extra_host_pks=None):
|
||||
# Update job event hosts m2m from host_name, propagate to parent events.
|
||||
extra_host_pks = set(extra_host_pks or [])
|
||||
hostnames = set()
|
||||
if self.host_name:
|
||||
hostnames.add(self.host_name)
|
||||
if self.event == 'playbook_on_stats':
|
||||
try:
|
||||
for v in self.event_data.values():
|
||||
hostnames.update(v.keys())
|
||||
except AttributeError: # In case event_data or v isn't a dict.
|
||||
pass
|
||||
qs = self.job.inventory.hosts.all()
|
||||
qs = qs.filter(Q(name__in=hostnames) | Q(pk__in=extra_host_pks))
|
||||
qs = qs.exclude(job_events__pk=self.id).only('id')
|
||||
for host in qs:
|
||||
self.hosts.add(host)
|
||||
if self.parent_uuid:
|
||||
parent = JobEvent.objects.filter(uuid=self.parent_uuid)
|
||||
if parent.exists():
|
||||
parent = parent[0]
|
||||
parent._update_hosts(qs.values_list('id', flat=True))
|
||||
|
||||
def _hostnames(self):
|
||||
hostnames = set()
|
||||
try:
|
||||
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
|
||||
hostnames.update(self.event_data.get(stat, {}).keys())
|
||||
except AttributeError: # In case event_data or v isn't a dict.
|
||||
pass
|
||||
return hostnames
|
||||
|
||||
def _update_host_summary_from_stats(self, hostnames):
|
||||
with ignore_inventory_computed_fields():
|
||||
qs = self.job.inventory.hosts.filter(name__in=hostnames)
|
||||
job = self.job
|
||||
for host in hostnames:
|
||||
host_stats = {}
|
||||
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
|
||||
try:
|
||||
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
|
||||
except AttributeError: # in case event_data[stat] isn't a dict.
|
||||
pass
|
||||
if qs.filter(name=host).exists():
|
||||
host_actual = qs.get(name=host)
|
||||
host_summary, created = job.job_host_summaries.get_or_create(host=host_actual, host_name=host_actual.name, defaults=host_stats)
|
||||
else:
|
||||
host_summary, created = job.job_host_summaries.get_or_create(host_name=host, defaults=host_stats)
|
||||
|
||||
if not created:
|
||||
update_fields = []
|
||||
for stat, value in host_stats.items():
|
||||
if getattr(host_summary, stat) != value:
|
||||
setattr(host_summary, stat, value)
|
||||
update_fields.append(stat)
|
||||
if update_fields:
|
||||
host_summary.save(update_fields=update_fields)
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
# Update model fields and related objects unless we're only updating
|
||||
# failed/changed flags triggered from a child event.
|
||||
from_parent_update = kwargs.pop('from_parent_update', False)
|
||||
if not from_parent_update:
|
||||
# Update model fields from event data.
|
||||
updated_fields = self._update_from_event_data()
|
||||
for field in updated_fields:
|
||||
if field not in update_fields:
|
||||
update_fields.append(field)
|
||||
# Update host related field from host_name.
|
||||
if not self.host_id and self.host_name:
|
||||
host_qs = self.job.inventory.hosts.filter(name=self.host_name)
|
||||
host_id = host_qs.only('id').values_list('id', flat=True).first()
|
||||
if host_id != self.host_id:
|
||||
self.host_id = host_id
|
||||
if 'host_id' not in update_fields:
|
||||
update_fields.append('host_id')
|
||||
super(JobEvent, self).save(*args, **kwargs)
|
||||
# Update related objects after this event is saved.
|
||||
if not from_parent_update:
|
||||
if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False):
|
||||
self._update_hosts()
|
||||
if self.event == 'playbook_on_stats':
|
||||
self._update_parents_failed_and_changed()
|
||||
|
||||
hostnames = self._hostnames()
|
||||
self._update_host_summary_from_stats(hostnames)
|
||||
self.job.inventory.update_computed_fields()
|
||||
|
||||
emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=self.job.id))
|
||||
|
||||
@classmethod
|
||||
def create_from_data(self, **kwargs):
|
||||
# Must have a job_id specified.
|
||||
if not kwargs.get('job_id', None):
|
||||
return
|
||||
|
||||
# Convert the datetime for the job event's creation appropriately,
|
||||
# and include a time zone for it.
|
||||
#
|
||||
# In the event of any issue, throw it out, and Django will just save
|
||||
# the current time.
|
||||
try:
|
||||
if not isinstance(kwargs['created'], datetime.datetime):
|
||||
kwargs['created'] = parse_datetime(kwargs['created'])
|
||||
if not kwargs['created'].tzinfo:
|
||||
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
# Sanity check: Don't honor keys that we don't recognize.
|
||||
valid_keys = {'job_id', 'event', 'event_data', 'playbook', 'play',
|
||||
'role', 'task', 'created', 'counter', 'uuid', 'stdout',
|
||||
'parent_uuid', 'start_line', 'end_line', 'verbosity'}
|
||||
for key in kwargs.keys():
|
||||
if key not in valid_keys:
|
||||
kwargs.pop(key)
|
||||
|
||||
event_data = kwargs.get('event_data', None)
|
||||
artifact_dict = None
|
||||
if event_data:
|
||||
artifact_dict = event_data.pop('artifact_data', None)
|
||||
|
||||
job_event = JobEvent.objects.create(**kwargs)
|
||||
|
||||
analytics_logger.info('Job event data saved.', extra=dict(python_objects=dict(job_event=job_event)))
|
||||
|
||||
# Save artifact data to parent job (if provided).
|
||||
if artifact_dict:
|
||||
if event_data and isinstance(event_data, dict):
|
||||
# Note: Core has not added support for marking artifacts as
|
||||
# sensitive yet. Going forward, core will not use
|
||||
# _ansible_no_log to denote sensitive set_stats calls.
|
||||
# Instead, they plan to add a flag outside of the traditional
|
||||
# no_log mechanism. no_log will not work for this feature,
|
||||
# in core, because sensitive data is scrubbed before sending
|
||||
# data to the callback. The playbook_on_stats is the callback
|
||||
# in which the set_stats data is used.
|
||||
|
||||
# Again, the sensitive artifact feature has not yet landed in
|
||||
# core. The below is how we mark the artifacts payload as
# sensitive:
|
||||
# artifact_dict['_ansible_no_log'] = True
|
||||
#
|
||||
parent_job = Job.objects.filter(pk=kwargs['job_id']).first()
|
||||
if parent_job and parent_job.artifacts != artifact_dict:
|
||||
parent_job.artifacts = artifact_dict
|
||||
parent_job.save(update_fields=['artifacts'])
|
||||
|
||||
return job_event
|
||||
|
||||
@classmethod
|
||||
def get_startevent_queryset(cls, parent_task, starting_events, ordering=None):
|
||||
'''
|
||||
We need to pull information about each start event.
|
||||
|
||||
This is super tricky, because this table has a one-to-many
|
||||
relationship with itself (parent-child), and we're getting
|
||||
information for an arbitrary number of children. This means we
|
||||
need stats on grandchildren, sorted by child.
|
||||
'''
|
||||
qs = (JobEvent.objects.filter(parent__parent=parent_task,
|
||||
parent__event__in=starting_events)
|
||||
.values('parent__id', 'event', 'changed')
|
||||
.annotate(num=Count('event'))
|
||||
.order_by('parent__id'))
|
||||
if ordering is not None:
|
||||
qs = qs.order_by(ordering)
|
||||
return qs
|
||||
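A hypothetical usage sketch for the helper above (`play_event` is a placeholder for a `playbook_on_play_start` JobEvent); it returns one row per (child, event, changed) combination, with a count:

```python
# Hypothetical usage: summarize grandchild runner events grouped by each
# task-start child of a play.
rows = JobEvent.get_startevent_queryset(
    parent_task=play_event,
    starting_events=['playbook_on_task_start'],
)
for row in rows:
    # Each row looks like:
    # {'parent__id': 42, 'event': 'runner_on_ok', 'changed': False, 'num': 7}
    print(row['parent__id'], row['event'], row['num'])
```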
|
||||
|
||||
class SystemJobOptions(BaseModel):
|
||||
'''
|
||||
Common fields for SystemJobTemplate and SystemJob.
|
||||
@@ -1643,6 +1184,10 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):
|
||||
def get_ui_url(self):
|
||||
return urljoin(settings.TOWER_URL_BASE, "/#/management_jobs/{}".format(self.pk))
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
return SystemJobEvent
|
||||
|
||||
@property
|
||||
def task_impact(self):
|
||||
return 150
|
||||
|
||||
@@ -1,25 +1,29 @@
|
||||
# Python
|
||||
import os
|
||||
import json
|
||||
from copy import copy
|
||||
from copy import copy, deepcopy
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import models
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.contrib.auth.models import User # noqa
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.core.exceptions import ValidationError
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import prevent_search
|
||||
from awx.main.models.rbac import (
|
||||
Role, RoleAncestorEntry, get_roles_on_resource
|
||||
)
|
||||
from awx.main.utils import parse_yaml_or_json
|
||||
from awx.main.utils import parse_yaml_or_json, get_custom_venv_choices
|
||||
from awx.main.utils.encryption import decrypt_value, get_encryption_key, is_encrypted
|
||||
from awx.main.fields import JSONField, AskForField
|
||||
|
||||
|
||||
__all__ = ['ResourceMixin', 'SurveyJobTemplateMixin', 'SurveyJobMixin',
|
||||
'TaskManagerUnifiedJobMixin', 'TaskManagerJobMixin', 'TaskManagerProjectUpdateMixin',
|
||||
'TaskManagerInventoryUpdateMixin',]
|
||||
'TaskManagerInventoryUpdateMixin', 'CustomVirtualEnvMixin']
|
||||
|
||||
|
||||
class ResourceMixin(models.Model):
|
||||
@@ -141,21 +145,27 @@ class SurveyJobTemplateMixin(models.Model):
|
||||
else:
|
||||
runtime_extra_vars = {}
|
||||
|
||||
# Overwrite with job template extra vars with survey default vars
|
||||
# Overwrite job template extra vars with survey default vars
|
||||
if self.survey_enabled and 'spec' in self.survey_spec:
|
||||
for survey_element in self.survey_spec.get("spec", []):
|
||||
default = survey_element.get('default')
|
||||
variable_key = survey_element.get('variable')
|
||||
|
||||
if survey_element.get('type') == 'password':
|
||||
if variable_key in runtime_extra_vars and default:
|
||||
if variable_key in runtime_extra_vars:
|
||||
kw_value = runtime_extra_vars[variable_key]
|
||||
if kw_value.startswith('$encrypted$') and kw_value != default:
|
||||
runtime_extra_vars[variable_key] = default
|
||||
if kw_value == '$encrypted$':
|
||||
runtime_extra_vars.pop(variable_key)
|
||||
|
||||
if default is not None:
|
||||
data = {variable_key: default}
|
||||
errors = self._survey_element_validation(survey_element, data)
|
||||
decrypted_default = default
|
||||
if (
|
||||
survey_element['type'] == "password" and
|
||||
isinstance(decrypted_default, basestring) and
|
||||
decrypted_default.startswith('$encrypted$')
|
||||
):
|
||||
decrypted_default = decrypt_value(get_encryption_key('value', pk=None), decrypted_default)
|
||||
errors = self._survey_element_validation(survey_element, {variable_key: decrypted_default})
|
||||
if not errors:
|
||||
survey_defaults[variable_key] = default
|
||||
extra_vars.update(survey_defaults)
|
||||
@@ -166,10 +176,25 @@ class SurveyJobTemplateMixin(models.Model):
|
||||
create_kwargs['extra_vars'] = json.dumps(extra_vars)
|
||||
return create_kwargs
|
||||
|
||||
def _survey_element_validation(self, survey_element, data):
|
||||
def _survey_element_validation(self, survey_element, data, validate_required=True):
|
||||
# Don't apply validation to the `$encrypted$` placeholder; the decrypted
|
||||
# default (if any) will be validated against instead
|
||||
errors = []
|
||||
|
||||
if (survey_element['type'] == "password"):
|
||||
password_value = data.get(survey_element['variable'])
|
||||
if (
|
||||
isinstance(password_value, basestring) and
|
||||
password_value == '$encrypted$'
|
||||
):
|
||||
if survey_element.get('default') is None and survey_element['required']:
|
||||
if validate_required:
|
||||
errors.append("'%s' value missing" % survey_element['variable'])
|
||||
return errors
|
||||
|
||||
if survey_element['variable'] not in data and survey_element['required']:
|
||||
errors.append("'%s' value missing" % survey_element['variable'])
|
||||
if validate_required:
|
||||
errors.append("'%s' value missing" % survey_element['variable'])
|
||||
elif survey_element['type'] in ["textarea", "text", "password"]:
|
||||
if survey_element['variable'] in data:
|
||||
if type(data[survey_element['variable']]) not in (str, unicode):
|
||||
@@ -233,7 +258,7 @@ class SurveyJobTemplateMixin(models.Model):
|
||||
choice_list))
|
||||
return errors
|
||||
|
||||
def _accept_or_ignore_variables(self, data, errors=None, _exclude_errors=()):
|
||||
def _accept_or_ignore_variables(self, data, errors=None, _exclude_errors=(), extra_passwords=None):
|
||||
survey_is_enabled = (self.survey_enabled and self.survey_spec)
|
||||
extra_vars = data.copy()
|
||||
if errors is None:
|
||||
@@ -245,8 +270,16 @@ class SurveyJobTemplateMixin(models.Model):
|
||||
# Check for data violation of survey rules
|
||||
survey_errors = []
|
||||
for survey_element in self.survey_spec.get("spec", []):
|
||||
element_errors = self._survey_element_validation(survey_element, data)
|
||||
key = survey_element.get('variable', None)
|
||||
value = data.get(key, None)
|
||||
validate_required = 'required' not in _exclude_errors
|
||||
if extra_passwords and key in extra_passwords and is_encrypted(value):
|
||||
element_errors = self._survey_element_validation(survey_element, {
|
||||
key: decrypt_value(get_encryption_key('value', pk=None), value)
|
||||
}, validate_required=validate_required)
|
||||
else:
|
||||
element_errors = self._survey_element_validation(
|
||||
survey_element, data, validate_required=validate_required)
|
||||
|
||||
if element_errors:
|
||||
survey_errors += element_errors
|
||||
@@ -267,11 +300,46 @@ class SurveyJobTemplateMixin(models.Model):
|
||||
rejected.update(extra_vars)
|
||||
# ignored variables do not block manual launch
|
||||
if 'prompts' not in _exclude_errors:
|
||||
errors['extra_vars'] = [_('Variables {list_of_keys} are not allowed on launch.').format(
|
||||
errors['extra_vars'] = [_('Variables {list_of_keys} are not allowed on launch. Check the Prompt on Launch setting '+
|
||||
'on the Job Template to include Extra Variables.').format(
|
||||
list_of_keys=', '.join(extra_vars.keys()))]
|
||||
|
||||
return (accepted, rejected, errors)
|
||||
|
||||
@staticmethod
|
||||
def pivot_spec(spec):
|
||||
'''
|
||||
Utility method that will return a dictionary keyed off variable names
|
||||
'''
|
||||
pivoted = {}
|
||||
for element_data in spec.get('spec', []):
|
||||
if 'variable' in element_data:
|
||||
pivoted[element_data['variable']] = element_data
|
||||
return pivoted
|
||||
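A minimal sketch of what `pivot_spec()` returns for a two-question survey (example data only):

```python
# Example input/output for pivot_spec() (illustrative survey only).
spec = {
    'name': 'demo survey',
    'spec': [
        {'variable': 'region', 'type': 'text', 'default': 'us-east-1'},
        {'variable': 'token', 'type': 'password', 'required': True},
    ],
}
pivoted = SurveyJobTemplateMixin.pivot_spec(spec)
assert set(pivoted) == {'region', 'token'}
assert pivoted['token']['type'] == 'password'
```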
|
||||
def survey_variable_validation(self, data):
|
||||
errors = []
|
||||
if not self.survey_enabled:
|
||||
return errors
|
||||
if 'name' not in self.survey_spec:
|
||||
errors.append("'name' missing from survey spec.")
|
||||
if 'description' not in self.survey_spec:
|
||||
errors.append("'description' missing from survey spec.")
|
||||
for survey_element in self.survey_spec.get("spec", []):
|
||||
errors += self._survey_element_validation(survey_element, data)
|
||||
return errors
|
||||
|
||||
def display_survey_spec(self):
|
||||
'''
|
||||
Hide encrypted default passwords in survey specs
|
||||
'''
|
||||
survey_spec = deepcopy(self.survey_spec) if self.survey_spec else {}
|
||||
for field in survey_spec.get('spec', []):
|
||||
if field.get('type') == 'password':
|
||||
if 'default' in field and field['default']:
|
||||
field['default'] = '$encrypted$'
|
||||
return survey_spec
|
||||
|
||||
|
||||
class SurveyJobMixin(models.Model):
|
||||
class Meta:
|
||||
@@ -296,6 +364,20 @@ class SurveyJobMixin(models.Model):
|
||||
else:
|
||||
return self.extra_vars
|
||||
|
||||
def decrypted_extra_vars(self):
|
||||
'''
|
||||
Decrypts fields marked as passwords in survey.
|
||||
'''
|
||||
if self.survey_passwords:
|
||||
extra_vars = json.loads(self.extra_vars)
|
||||
for key in self.survey_passwords:
|
||||
value = extra_vars.get(key)
|
||||
if value and isinstance(value, basestring) and value.startswith('$encrypted$'):
|
||||
extra_vars[key] = decrypt_value(get_encryption_key('value', pk=None), value)
|
||||
return json.dumps(extra_vars)
|
||||
else:
|
||||
return self.extra_vars
|
||||
|
||||
|
||||
class TaskManagerUnifiedJobMixin(models.Model):
|
||||
class Meta:
|
||||
@@ -312,6 +394,9 @@ class TaskManagerJobMixin(TaskManagerUnifiedJobMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
def get_jobs_fail_chain(self):
|
||||
return [self.project_update] if self.project_update else []
|
||||
|
||||
def dependent_jobs_finished(self):
|
||||
for j in self.dependent_jobs.all():
|
||||
if j.status in ['pending', 'waiting', 'running']:
|
||||
@@ -335,3 +420,23 @@ class TaskManagerProjectUpdateMixin(TaskManagerUpdateOnLaunchMixin):
|
||||
class TaskManagerInventoryUpdateMixin(TaskManagerUpdateOnLaunchMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
|
||||
class CustomVirtualEnvMixin(models.Model):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
custom_virtualenv = models.CharField(
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
max_length=100
|
||||
)
|
||||
|
||||
def clean_custom_virtualenv(self):
|
||||
value = self.custom_virtualenv
|
||||
if value and os.path.join(value, '') not in get_custom_venv_choices():
|
||||
raise ValidationError(
|
||||
_('{} is not a valid virtualenv in {}').format(value, settings.BASE_VENV_PATH)
|
||||
)
|
||||
return os.path.join(value or '', '')
|
||||
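The `os.path.join(value, '')` trick above normalizes paths to a slash-terminated form before comparing against `get_custom_venv_choices()`; a quick illustration (paths are hypothetical):

```python
# os.path.join(path, '') appends a trailing separator if one is missing,
# so both spellings of the same virtualenv path compare equal.
import os

assert os.path.join('/var/lib/awx/venv/demo', '') == '/var/lib/awx/venv/demo/'
assert os.path.join('/var/lib/awx/venv/demo/', '') == '/var/lib/awx/venv/demo/'
```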
|
||||
@@ -22,12 +22,12 @@ from awx.main.models.rbac import (
|
||||
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
|
||||
ROLE_SINGLETON_SYSTEM_AUDITOR,
|
||||
)
|
||||
from awx.main.models.mixins import ResourceMixin
|
||||
from awx.main.models.mixins import ResourceMixin, CustomVirtualEnvMixin
|
||||
|
||||
__all__ = ['Organization', 'Team', 'Profile', 'AuthToken']
|
||||
|
||||
|
||||
class Organization(CommonModel, NotificationFieldsModel, ResourceMixin):
|
||||
class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVirtualEnvMixin):
|
||||
'''
|
||||
An organization is the basic unit of multi-tenancy divisions
|
||||
'''
|
||||
|
||||
@@ -18,12 +18,13 @@ from django.utils.timezone import now, make_aware, get_default_timezone
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.base import * # noqa
|
||||
from awx.main.models.events import ProjectUpdateEvent
|
||||
from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
JobNotificationMixin,
|
||||
)
|
||||
from awx.main.models.unified_jobs import * # noqa
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin, CustomVirtualEnvMixin
|
||||
from awx.main.utils import update_scm_url
|
||||
from awx.main.utils.ansible import skip_directory, could_be_inventory, could_be_playbook
|
||||
from awx.main.fields import ImplicitRoleField
|
||||
@@ -222,7 +223,7 @@ class ProjectOptions(models.Model):
|
||||
return proj_path + '.lock'
|
||||
|
||||
|
||||
class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
|
||||
class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEnvMixin):
|
||||
'''
|
||||
A project represents a playbook git repo that can access a set of inventories
|
||||
'''
|
||||
@@ -306,9 +307,9 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return ['name', 'description', 'local_path', 'scm_type', 'scm_url',
|
||||
'scm_branch', 'scm_clean', 'scm_delete_on_update',
|
||||
'credential', 'schedule', 'timeout',]
|
||||
return set(f.name for f in ProjectOptions._meta.fields) | set(
|
||||
['name', 'description', 'schedule']
|
||||
)
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
new_instance = not bool(self.pk)
|
||||
@@ -485,6 +486,10 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
|
||||
websocket_data.update(dict(project_id=self.project.id))
|
||||
return websocket_data
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
return ProjectUpdateEvent
|
||||
|
||||
@property
|
||||
def task_impact(self):
|
||||
return 0 if self.job_type == 'run' else 20
|
||||
|
||||
@@ -5,11 +5,12 @@ import re
|
||||
import logging
|
||||
import datetime
|
||||
import dateutil.rrule
|
||||
from dateutil.tz import gettz, datetime_exists
|
||||
|
||||
# Django
|
||||
from django.db import models
|
||||
from django.db.models.query import QuerySet
|
||||
from django.utils.timezone import now, make_aware, get_default_timezone
|
||||
from django.utils.timezone import now
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# AWX
|
||||
@@ -19,6 +20,9 @@ from awx.main.models.jobs import LaunchTimeConfig
|
||||
from awx.main.utils import ignore_inventory_computed_fields
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
|
||||
import pytz
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.models.schedule')
|
||||
|
||||
__all__ = ['Schedule']
|
||||
@@ -53,6 +57,10 @@ class ScheduleManager(ScheduleFilterMethods, models.Manager):
|
||||
|
||||
class Schedule(CommonModel, LaunchTimeConfig):
|
||||
|
||||
TZID_REGEX = re.compile(
|
||||
"^(DTSTART;TZID=(?P<tzid>[^:]+)(?P<stamp>\:[0-9]+T[0-9]+))(?P<rrule> .*)$"
|
||||
)
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ['-next_run']
|
||||
@@ -91,6 +99,67 @@ class Schedule(CommonModel, LaunchTimeConfig):
|
||||
help_text=_("The next time that the scheduled action will run.")
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def rrulestr(cls, rrule, **kwargs):
|
||||
"""
|
||||
Apply our own custom rrule parsing logic to support TZID=
|
||||
|
||||
python-dateutil doesn't _natively_ support `DTSTART;TZID=`; this
|
||||
function parses out the TZID= component and uses it to produce the
|
||||
`tzinfos` keyword argument to `dateutil.rrule.rrulestr()`. In this
|
||||
way, we translate:
|
||||
|
||||
DTSTART;TZID=America/New_York:20180601T120000 RRULE:FREQ=DAILY;INTERVAL=1
|
||||
|
||||
...into...
|
||||
|
||||
DTSTART:20180601T120000TZI RRULE:FREQ=DAILY;INTERVAL=1
|
||||
|
||||
...and we pass a hint about the local timezone to dateutil's parser:
|
||||
`dateutil.rrule.rrulestr(rrule, {
|
||||
'tzinfos': {
|
||||
'TZI': dateutil.tz.gettz('America/New_York')
|
||||
}
|
||||
})`
|
||||
|
||||
it's likely that we can remove the custom code that performs this
|
||||
parsing if TZID= gains support in upstream dateutil:
|
||||
https://github.com/dateutil/dateutil/pull/619
|
||||
"""
|
||||
kwargs['forceset'] = True
|
||||
kwargs['tzinfos'] = {x: dateutil.tz.tzutc() for x in dateutil.parser.parserinfo().UTCZONE}
|
||||
match = cls.TZID_REGEX.match(rrule)
|
||||
if match is not None:
|
||||
rrule = cls.TZID_REGEX.sub("DTSTART\g<stamp>TZI\g<rrule>", rrule)
|
||||
timezone = gettz(match.group('tzid'))
|
||||
kwargs['tzinfos']['TZI'] = timezone
|
||||
x = dateutil.rrule.rrulestr(rrule, **kwargs)
|
||||
|
||||
for r in x._rrule:
|
||||
if r._dtstart and r._until:
|
||||
if all((
|
||||
r._dtstart.tzinfo != dateutil.tz.tzlocal(),
|
||||
r._until.tzinfo != dateutil.tz.tzutc(),
|
||||
)):
|
||||
# According to RFC5545 Section 3.3.10:
|
||||
# https://tools.ietf.org/html/rfc5545#section-3.3.10
|
||||
#
|
||||
# > If the "DTSTART" property is specified as a date with UTC
|
||||
# > time or a date with local time and time zone reference,
|
||||
# > then the UNTIL rule part MUST be specified as a date with
|
||||
# > UTC time.
|
||||
raise ValueError('RRULE UNTIL values must be specified in UTC')
|
||||
|
||||
try:
|
||||
first_event = x[0]
|
||||
if first_event < now() - datetime.timedelta(days=365 * 5):
|
||||
# For older DTSTART values, if there are more than 1000 recurrences...
|
||||
if len(x[:1001]) > 1000:
|
||||
raise ValueError('RRULE values that yield more than 1000 events are not allowed.')
|
||||
except IndexError:
|
||||
pass
|
||||
return x
|
||||
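The `DTSTART;TZID=` rewrite can be exercised on its own; a standalone sketch follows (hypothetical rule string, regex copied from `TZID_REGEX` above):

```python
# Standalone sketch of the DTSTART;TZID= rewrite performed above.
import re
import dateutil.rrule
import dateutil.tz

TZID_REGEX = re.compile(
    r"^(DTSTART;TZID=(?P<tzid>[^:]+)(?P<stamp>\:[0-9]+T[0-9]+))(?P<rrule> .*)$"
)
rule = 'DTSTART;TZID=America/New_York:20300601T120000 RRULE:FREQ=DAILY;INTERVAL=1'
match = TZID_REGEX.match(rule)
rewritten = TZID_REGEX.sub(r"DTSTART\g<stamp>TZI\g<rrule>", rule)
assert rewritten == 'DTSTART:20300601T120000TZI RRULE:FREQ=DAILY;INTERVAL=1'

# The TZI token is resolved through tzinfos, so occurrences come back
# localized to America/New_York rather than naive.
ruleset = dateutil.rrule.rrulestr(
    rewritten,
    forceset=True,
    tzinfos={'TZI': dateutil.tz.gettz(match.group('tzid'))},
)
assert ruleset[0].tzinfo is not None
```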
|
||||
def __unicode__(self):
|
||||
return u'%s_t%s_%s_%s' % (self.name, self.unified_job_template.id, self.id, self.next_run)
|
||||
|
||||
@@ -106,21 +175,26 @@ class Schedule(CommonModel, LaunchTimeConfig):
|
||||
return job_kwargs
|
||||
|
||||
def update_computed_fields(self):
|
||||
future_rs = dateutil.rrule.rrulestr(self.rrule, forceset=True)
|
||||
future_rs = Schedule.rrulestr(self.rrule)
|
||||
next_run_actual = future_rs.after(now())
|
||||
|
||||
if next_run_actual is not None:
|
||||
if not datetime_exists(next_run_actual):
|
||||
# skip imaginary dates, like 2:30 on DST boundaries
|
||||
next_run_actual = future_rs.after(next_run_actual)
|
||||
next_run_actual = next_run_actual.astimezone(pytz.utc)
|
||||
|
||||
self.next_run = next_run_actual
|
||||
try:
|
||||
self.dtstart = future_rs[0]
|
||||
self.dtstart = future_rs[0].astimezone(pytz.utc)
|
||||
except IndexError:
|
||||
self.dtstart = None
|
||||
self.dtend = None
|
||||
if 'until' in self.rrule.lower():
|
||||
match_until = re.match(".*?(UNTIL\=[0-9]+T[0-9]+Z)", self.rrule)
|
||||
until_date = match_until.groups()[0].split("=")[1]
|
||||
self.dtend = make_aware(datetime.datetime.strptime(until_date, "%Y%m%dT%H%M%SZ"), get_default_timezone())
|
||||
if 'count' in self.rrule.lower():
|
||||
self.dtend = future_rs[-1]
|
||||
if 'until' in self.rrule.lower() or 'count' in self.rrule.lower():
|
||||
try:
|
||||
self.dtend = future_rs[-1].astimezone(pytz.utc)
|
||||
except IndexError:
|
||||
self.dtend = None
|
||||
emit_channel_notification('schedules-changed', dict(id=self.id, group_name='schedules'))
|
||||
with ignore_inventory_computed_fields():
|
||||
self.unified_job_template.update_computed_fields()
|
||||
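The `datetime_exists()` guard above skips wall-clock times that fall into a daylight-saving gap; a small illustration (date chosen only for the example):

```python
# 2018-03-11 02:30 does not exist in America/New_York (clocks jump from
# 02:00 straight to 03:00), so datetime_exists() reports it as imaginary.
from datetime import datetime
from dateutil.tz import gettz, datetime_exists

tz = gettz('America/New_York')
assert not datetime_exists(datetime(2018, 3, 11, 2, 30, tzinfo=tz))
assert datetime_exists(datetime(2018, 3, 11, 3, 30, tzinfo=tz))
```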
|
||||
@@ -2,14 +2,14 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import codecs
|
||||
from StringIO import StringIO
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import subprocess
|
||||
import tempfile
|
||||
from collections import OrderedDict
|
||||
from StringIO import StringIO
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -34,7 +34,7 @@ from django_celery_results.models import TaskResult
|
||||
from awx.main.models.base import * # noqa
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
|
||||
from awx.main.utils import (
|
||||
decrypt_field, _inventory_updates,
|
||||
encrypt_dict, decrypt_field, _inventory_updates,
|
||||
copy_model_by_class, copy_m2m_relationships,
|
||||
get_type_for_model, parse_yaml_or_json
|
||||
)
|
||||
@@ -42,7 +42,7 @@ from awx.main.redact import UriCleaner, REPLACE_STR
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.fields import JSONField, AskForField
|
||||
|
||||
__all__ = ['UnifiedJobTemplate', 'UnifiedJob']
|
||||
__all__ = ['UnifiedJobTemplate', 'UnifiedJob', 'StdoutMaxBytesExceeded']
|
||||
|
||||
logger = logging.getLogger('awx.main.models.unified_jobs')
|
||||
|
||||
@@ -345,11 +345,18 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
'''
|
||||
new_job_passwords = kwargs.pop('survey_passwords', {})
|
||||
eager_fields = kwargs.pop('_eager_fields', None)
|
||||
|
||||
# automatically encrypt survey fields
|
||||
if hasattr(self, 'survey_spec') and getattr(self, 'survey_enabled', False):
|
||||
password_list = self.survey_password_variables()
|
||||
encrypt_dict(kwargs.get('extra_vars', {}), password_list)
|
||||
|
||||
unified_job_class = self._get_unified_job_class()
|
||||
fields = self._get_unified_job_field_names()
|
||||
unallowed_fields = set(kwargs.keys()) - set(fields)
|
||||
if unallowed_fields:
|
||||
raise Exception('Fields {} are not allowed as overrides.'.format(unallowed_fields))
|
||||
logger.warn('Fields {} are not allowed as overrides.'.format(unallowed_fields))
|
||||
map(kwargs.pop, unallowed_fields)
|
||||
|
||||
unified_job = copy_model_by_class(self, unified_job_class, fields, kwargs)
|
||||
|
||||
@@ -435,7 +442,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
errors[field_name] = [_("Field is not allowed on launch.")]
|
||||
return ({}, kwargs, errors)
|
||||
|
||||
def accept_or_ignore_variables(self, data, errors=None, _exclude_errors=()):
|
||||
def accept_or_ignore_variables(self, data, errors=None, _exclude_errors=(), extra_passwords=None):
|
||||
'''
|
||||
If subclasses accept any `variables` or `extra_vars`, they should
|
||||
define _accept_or_ignore_variables to place those variables in the accepted dict,
|
||||
@@ -453,7 +460,11 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
# SurveyJobTemplateMixin cannot override any methods because of
|
||||
# resolution order, forced by how metaclass processes fields,
|
||||
# thus the need for hasattr check
|
||||
return self._accept_or_ignore_variables(data, errors, _exclude_errors=_exclude_errors)
|
||||
if extra_passwords:
|
||||
return self._accept_or_ignore_variables(
|
||||
data, errors, _exclude_errors=_exclude_errors, extra_passwords=extra_passwords)
|
||||
else:
|
||||
return self._accept_or_ignore_variables(data, errors, _exclude_errors=_exclude_errors)
|
||||
elif data:
|
||||
errors['extra_vars'] = [
|
||||
_('Variables {list_of_keys} provided, but this template cannot accept variables.'.format(
|
||||
@@ -504,6 +515,13 @@ class UnifiedJobDeprecatedStdout(models.Model):
|
||||
)
|
||||
|
||||
|
||||
class StdoutMaxBytesExceeded(Exception):
|
||||
|
||||
def __init__(self, total, supported):
|
||||
self.total = total
|
||||
self.supported = supported
|
||||
|
||||
|
||||
class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique, UnifiedJobTypeStringMixin, TaskManagerUnifiedJobMixin):
|
||||
'''
|
||||
Concrete base class for unified job run by the task engine.
|
||||
@@ -632,11 +650,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
default='',
|
||||
editable=False,
|
||||
))
|
||||
result_stdout_file = models.TextField( # FilePathfield?
|
||||
blank=True,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
result_traceback = models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
@@ -812,14 +825,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
# Done.
|
||||
return result
|
||||
|
||||
def delete(self):
|
||||
if self.result_stdout_file != "":
|
||||
try:
|
||||
os.remove(self.result_stdout_file)
|
||||
except Exception:
|
||||
pass
|
||||
super(UnifiedJob, self).delete()
|
||||
|
||||
def copy_unified_job(self, limit=None):
|
||||
'''
|
||||
Returns saved object, including related fields.
|
||||
@@ -828,7 +833,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
unified_job_class = self.__class__
|
||||
unified_jt_class = self._get_unified_job_template_class()
|
||||
parent_field_name = unified_job_class._get_parent_field_name()
|
||||
fields = unified_jt_class._get_unified_job_field_names() + [parent_field_name]
|
||||
fields = unified_jt_class._get_unified_job_field_names() | set([parent_field_name])
|
||||
|
||||
create_data = {"launch_type": "relaunch"}
|
||||
if limit:
|
||||
@@ -889,6 +894,10 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
config.credentials.add(*job_creds)
|
||||
return config
|
||||
|
||||
@property
|
||||
def event_class(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
@property
|
||||
def result_stdout_text(self):
|
||||
related = UnifiedJobDeprecatedStdout.objects.get(pk=self.pk)
|
||||
@@ -902,36 +911,100 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
related.result_stdout_text = value
|
||||
related.save()
|
||||
|
||||
def result_stdout_raw_handle(self, attempt=0):
|
||||
"""Return a file-like object containing the standard out of the
|
||||
job's result.
|
||||
def result_stdout_raw_handle(self, enforce_max_bytes=True):
|
||||
"""
|
||||
msg = {
|
||||
'pending': 'Waiting for results...',
|
||||
'missing': 'stdout capture is missing',
|
||||
}
|
||||
if self.result_stdout_text:
|
||||
return StringIO(self.result_stdout_text)
|
||||
else:
|
||||
if not os.path.exists(self.result_stdout_file) or os.stat(self.result_stdout_file).st_size < 1:
|
||||
return StringIO(msg['missing' if self.finished else 'pending'])
|
||||
This method returns a file-like object ready to be read which contains
|
||||
all stdout for the UnifiedJob.
|
||||
|
||||
# There is a potential timing issue here, because another
|
||||
# process may be deleting the stdout file after it is written
|
||||
# to the database.
|
||||
#
|
||||
# Therefore, if we get an IOError (which generally means the
|
||||
# file does not exist), reload info from the database and
|
||||
# try again.
|
||||
try:
|
||||
return codecs.open(self.result_stdout_file, "r",
|
||||
encoding='utf-8')
|
||||
except IOError:
|
||||
if attempt < 3:
|
||||
self.result_stdout_text = type(self).objects.get(id=self.id).result_stdout_text
|
||||
return self.result_stdout_raw_handle(attempt=attempt + 1)
|
||||
If the size of the file is greater than
|
||||
`settings.STDOUT_MAX_BYTES_DISPLAY`, a StdoutMaxBytesExceeded exception
|
||||
will be raised.
|
||||
"""
|
||||
max_supported = settings.STDOUT_MAX_BYTES_DISPLAY
|
||||
|
||||
if enforce_max_bytes:
|
||||
# If enforce_max_bytes is True, we're not grabbing the whole file,
|
||||
# just the first <settings.STDOUT_MAX_BYTES_DISPLAY> bytes;
|
||||
# in this scenario, it's probably safe to use a StringIO.
|
||||
fd = StringIO()
|
||||
else:
|
||||
# If enforce_max_bytes = False, that means they're downloading
|
||||
# the entire file. To avoid ballooning memory, let's write the
|
||||
# stdout content to a temporary disk location
|
||||
if not os.path.exists(settings.JOBOUTPUT_ROOT):
|
||||
os.makedirs(settings.JOBOUTPUT_ROOT)
|
||||
fd = tempfile.NamedTemporaryFile(
|
||||
prefix='{}-{}-'.format(self.model_to_str(), self.pk),
|
||||
suffix='.out',
|
||||
dir=settings.JOBOUTPUT_ROOT
|
||||
)
|
||||
|
||||
# Before the addition of event-based stdout, older versions of
|
||||
# awx stored stdout as raw text blobs in a certain database column
|
||||
# (`main_unifiedjob.result_stdout_text`)
|
||||
# For older installs, this data still exists in the database; check for
|
||||
# it and use if it exists
|
||||
legacy_stdout_text = self.result_stdout_text
|
||||
if legacy_stdout_text:
|
||||
if enforce_max_bytes and len(legacy_stdout_text) > max_supported:
|
||||
raise StdoutMaxBytesExceeded(len(legacy_stdout_text), max_supported)
|
||||
fd.write(legacy_stdout_text)
|
||||
if hasattr(fd, 'name'):
|
||||
fd.flush()
|
||||
return open(fd.name, 'r')
|
||||
else:
|
||||
# we just wrote to this StringIO, so rewind it
|
||||
fd.seek(0)
|
||||
return fd
|
||||
else:
|
||||
# Note: the code in this block _intentionally_ does not use the
|
||||
# Django ORM because of the potential size (many MB+) of
|
||||
# `main_jobevent.stdout`; we *do not* want to generate queries
|
||||
# here that construct model objects by fetching large gobs of
|
||||
# data (and potentially ballooning memory usage); instead, we
|
||||
# just want to write concatenated values of a certain column
|
||||
# (`stdout`) directly to a file
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
tablename = self._meta.db_table
|
||||
related_name = {
|
||||
'main_job': 'job_id',
|
||||
'main_adhoccommand': 'ad_hoc_command_id',
|
||||
'main_projectupdate': 'project_update_id',
|
||||
'main_inventoryupdate': 'inventory_update_id',
|
||||
'main_systemjob': 'system_job_id',
|
||||
}[tablename]
|
||||
|
||||
if enforce_max_bytes:
|
||||
# detect the length of all stdout for this UnifiedJob, and
|
||||
# if it exceeds settings.STDOUT_MAX_BYTES_DISPLAY bytes,
|
||||
# don't bother actually fetching the data
|
||||
total = self.event_class.objects.filter(**{related_name: self.id}).aggregate(
|
||||
total=models.Sum(models.Func(models.F('stdout'), function='LENGTH'))
|
||||
)['total']
|
||||
if total > max_supported:
|
||||
raise StdoutMaxBytesExceeded(total, max_supported)
|
||||
|
||||
cursor.copy_expert(
|
||||
"copy (select stdout from {} where {}={} order by start_line) to stdout".format(
|
||||
tablename + 'event',
|
||||
related_name,
|
||||
self.id
|
||||
),
|
||||
fd
|
||||
)
|
||||
|
||||
if hasattr(fd, 'name'):
|
||||
# If we're dealing with a physical file, use `sed` to clean
|
||||
# up escaped line sequences
|
||||
fd.flush()
|
||||
subprocess.Popen("sed -i 's/\\\\r\\\\n/\\n/g' {}".format(fd.name), shell=True).wait()
|
||||
return open(fd.name, 'r')
|
||||
else:
|
||||
return StringIO(msg['missing' if self.finished else 'pending'])
|
||||
# If we're dealing with an in-memory string buffer, use
|
||||
# string.replace()
|
||||
fd = StringIO(fd.getvalue().replace('\\r\\n', '\n'))
|
||||
return fd
|
||||
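A hypothetical caller sketch for the handle above, showing how the size guard surfaces (`unified_job` is a placeholder for any UnifiedJob instance):

```python
# Hypothetical caller: read a job's stdout for display, falling back to
# a truncation notice when it exceeds settings.STDOUT_MAX_BYTES_DISPLAY.
try:
    fd = unified_job.result_stdout_raw_handle()  # enforce_max_bytes=True by default
    content = fd.read()
except StdoutMaxBytesExceeded as exc:
    content = 'Standard output is {} bytes; only {} bytes can be shown.'.format(
        exc.total, exc.supported)
```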
|
||||
def _escape_ascii(self, content):
|
||||
# Remove ANSI escape sequences used to embed event data.
|
||||
@@ -941,7 +1014,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
return content
|
||||
|
||||
def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False):
|
||||
content = self.result_stdout_raw_handle().read()
|
||||
content = self.result_stdout_raw_handle().read().decode('utf-8')
|
||||
if redact_sensitive:
|
||||
content = UriCleaner.remove_sensitive(content)
|
||||
if escape_ascii:
|
||||
@@ -956,21 +1029,14 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
def result_stdout(self):
|
||||
return self._result_stdout_raw(escape_ascii=True)
|
||||
|
||||
@property
|
||||
def result_stdout_size(self):
|
||||
try:
|
||||
return os.stat(self.result_stdout_file).st_size
|
||||
except Exception:
|
||||
return len(self.result_stdout)
|
||||
|
||||
def _result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True, escape_ascii=False):
|
||||
return_buffer = u""
|
||||
return_buffer = StringIO()
|
||||
if end_line is not None:
|
||||
end_line = int(end_line)
|
||||
stdout_lines = self.result_stdout_raw_handle().readlines()
|
||||
absolute_end = len(stdout_lines)
|
||||
for line in stdout_lines[int(start_line):end_line]:
|
||||
return_buffer += line
|
||||
return_buffer.write(line)
|
||||
if int(start_line) < 0:
|
||||
start_actual = len(stdout_lines) + int(start_line)
|
||||
end_actual = len(stdout_lines)
|
||||
@@ -981,6 +1047,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
else:
|
||||
end_actual = len(stdout_lines)
|
||||
|
||||
return_buffer = return_buffer.getvalue().decode('utf-8')
|
||||
if redact_sensitive:
|
||||
return_buffer = UriCleaner.remove_sensitive(return_buffer)
|
||||
if escape_ascii:
|
||||
@@ -1052,6 +1119,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
def can_schedule(self):
|
||||
if getattr(self, 'passwords_needed_to_start', None):
|
||||
return False
|
||||
if getattr(self, 'inventory', None) is None:
|
||||
return False
|
||||
JobLaunchConfig = self._meta.get_field('launch_config').related_model
|
||||
try:
|
||||
self.launch_config
|
||||
|
||||
@@ -316,15 +316,16 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return ['name', 'description', 'extra_vars', 'labels', 'survey_passwords',
|
||||
'schedule', 'launch_type', 'allow_simultaneous']
|
||||
return set(f.name for f in WorkflowJobOptions._meta.fields) | set(
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels']
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _get_unified_jt_copy_names(cls):
|
||||
base_list = super(WorkflowJobTemplate, cls)._get_unified_jt_copy_names()
|
||||
base_list.remove('labels')
|
||||
return (base_list +
|
||||
['survey_spec', 'survey_enabled', 'ask_variables_on_launch', 'organization'])
|
||||
return (base_list |
|
||||
set(['survey_spec', 'survey_enabled', 'ask_variables_on_launch', 'organization']))
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:workflow_job_template_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
@@ -24,7 +24,9 @@ from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_gr
|
||||
from awx.main.tasks import update_inventory_computed_fields
|
||||
from awx.main.fields import is_implicit_parent
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main import consumers
|
||||
|
||||
from awx.conf.utils import conf_to_dict
|
||||
|
||||
__all__ = []
|
||||
|
||||
@@ -41,20 +43,35 @@ def get_current_user_or_none():
|
||||
return u
|
||||
|
||||
|
||||
def emit_job_event_detail(sender, **kwargs):
|
||||
def emit_event_detail(serializer, relation, **kwargs):
|
||||
instance = kwargs['instance']
|
||||
created = kwargs['created']
|
||||
if created:
|
||||
event_serialized = JobEventWebSocketSerializer(instance).data
|
||||
emit_channel_notification('job_events-' + str(instance.job.id), event_serialized)
|
||||
event_serializer = serializer(instance)
|
||||
consumers.emit_channel_notification(
|
||||
'-'.join([event_serializer.get_group_name(instance), str(getattr(instance, relation))]),
|
||||
event_serializer.data
|
||||
)
|
||||
|
||||
|
||||
def emit_job_event_detail(sender, **kwargs):
|
||||
emit_event_detail(JobEventWebSocketSerializer, 'job_id', **kwargs)
|
||||
|
||||
|
||||
def emit_ad_hoc_command_event_detail(sender, **kwargs):
|
||||
instance = kwargs['instance']
|
||||
created = kwargs['created']
|
||||
if created:
|
||||
event_serialized = AdHocCommandEventWebSocketSerializer(instance).data
|
||||
emit_channel_notification('ad_hoc_command_events-' + str(instance.ad_hoc_command_id), event_serialized)
|
||||
emit_event_detail(AdHocCommandEventWebSocketSerializer, 'ad_hoc_command_id', **kwargs)
|
||||
|
||||
|
||||
def emit_project_update_event_detail(sender, **kwargs):
|
||||
emit_event_detail(ProjectUpdateEventWebSocketSerializer, 'project_update_id', **kwargs)
|
||||
|
||||
|
||||
def emit_inventory_update_event_detail(sender, **kwargs):
|
||||
emit_event_detail(InventoryUpdateEventWebSocketSerializer, 'inventory_update_id', **kwargs)
|
||||
|
||||
|
||||
def emit_system_job_event_detail(sender, **kwargs):
|
||||
emit_event_detail(SystemJobEventWebSocketSerializer, 'system_job_id', **kwargs)
|
||||
|
||||
|
||||
def emit_update_inventory_computed_fields(sender, **kwargs):
|
||||
@@ -220,6 +237,9 @@ connect_computed_field_signals()
|
||||
|
||||
post_save.connect(emit_job_event_detail, sender=JobEvent)
|
||||
post_save.connect(emit_ad_hoc_command_event_detail, sender=AdHocCommandEvent)
|
||||
post_save.connect(emit_project_update_event_detail, sender=ProjectUpdateEvent)
|
||||
post_save.connect(emit_inventory_update_event_detail, sender=InventoryUpdateEvent)
|
||||
post_save.connect(emit_system_job_event_detail, sender=SystemJobEvent)
|
||||
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
|
||||
m2m_changed.connect(org_admin_edit_members, Role.members.through)
|
||||
m2m_changed.connect(rbac_activity_stream, Role.members.through)
|
||||
@@ -284,7 +304,12 @@ def _update_host_last_jhs(host):
|
||||
except IndexError:
|
||||
jhs = None
|
||||
update_fields = []
|
||||
last_job = jhs.job if jhs else None
|
||||
try:
|
||||
last_job = jhs.job if jhs else None
|
||||
except Job.DoesNotExist:
|
||||
# The job (and its summaries) have already been/are currently being
|
||||
# deleted, so there's no need to update the host w/ a reference to it
|
||||
return
|
||||
if host.last_job != last_job:
|
||||
host.last_job = last_job
|
||||
update_fields.append('last_job')
|
||||
@@ -392,12 +417,15 @@ def activity_stream_create(sender, instance, created, **kwargs):
|
||||
object1=object1,
|
||||
changes=json.dumps(changes),
|
||||
actor=get_current_user_or_none())
|
||||
activity_entry.save()
|
||||
#TODO: Weird situation where cascade SETNULL doesn't work
|
||||
# it might actually be a good idea to remove all of these FK references since
|
||||
# we don't really use them anyway.
|
||||
if instance._meta.model_name != 'setting': # Is not conf.Setting instance
|
||||
activity_entry.save()
|
||||
getattr(activity_entry, object1).add(instance)
|
||||
else:
|
||||
activity_entry.setting = conf_to_dict(instance)
|
||||
activity_entry.save()
|
||||
|
||||
|
||||
def activity_stream_update(sender, instance, **kwargs):
|
||||
@@ -423,9 +451,12 @@ def activity_stream_update(sender, instance, **kwargs):
|
||||
object1=object1,
|
||||
changes=json.dumps(changes),
|
||||
actor=get_current_user_or_none())
|
||||
activity_entry.save()
|
||||
if instance._meta.model_name != 'setting': # Is not conf.Setting instance
|
||||
activity_entry.save()
|
||||
getattr(activity_entry, object1).add(instance)
|
||||
else:
|
||||
activity_entry.setting = conf_to_dict(instance)
|
||||
activity_entry.save()
|
||||
|
||||
|
||||
def activity_stream_delete(sender, instance, **kwargs):
|
||||
@@ -535,8 +566,8 @@ def get_current_user_from_drf_request(sender, **kwargs):
|
||||
drf_request on the underlying Django Request object.
|
||||
'''
|
||||
request = get_current_request()
|
||||
drf_request = getattr(request, 'drf_request', None)
|
||||
return (getattr(drf_request, 'user', False), 0)
|
||||
drf_request_user = getattr(request, 'drf_request_user', False)
|
||||
return (drf_request_user, 0)
|
||||
|
||||
|
||||
@receiver(pre_delete, sender=Organization)
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import codecs
|
||||
from collections import OrderedDict
|
||||
import ConfigParser
|
||||
import cStringIO
|
||||
@@ -17,7 +16,6 @@ import tempfile
|
||||
import time
|
||||
import traceback
|
||||
import urlparse
|
||||
import uuid
|
||||
from distutils.version import LooseVersion as Version
|
||||
import yaml
|
||||
import fcntl
|
||||
@@ -50,7 +48,7 @@ from awx import celery_app
|
||||
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.models.unified_jobs import ACTIVE_STATES
|
||||
from awx.main.exceptions import AwxTaskError, TaskCancel, TaskError
|
||||
from awx.main.exceptions import AwxTaskError
|
||||
from awx.main.queue import CallbackQueueDispatcher
|
||||
from awx.main.expect import run, isolated_manager
|
||||
from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url,
|
||||
@@ -81,7 +79,7 @@ logger = logging.getLogger('awx.main.tasks')
|
||||
|
||||
class LogErrorsTask(Task):
|
||||
def on_failure(self, exc, task_id, args, kwargs, einfo):
|
||||
if isinstance(exc, AwxTaskError):
|
||||
if getattr(exc, 'is_awx_task_error', False):
|
||||
# Error caused by user / tracked in job output
|
||||
logger.warning(str(exc))
|
||||
elif isinstance(self, BaseTask):
|
||||
@@ -363,8 +361,9 @@ def handle_work_success(self, result, task_actual):
|
||||
|
||||
|
||||
@shared_task(queue='tower', base=LogErrorsTask)
|
||||
def handle_work_error(request, exc, traceback, task_id, subtasks=None):
|
||||
logger.debug('Executing error task id %s, subtasks: %s' % (request.id, str(subtasks)))
|
||||
def handle_work_error(task_id, *args, **kwargs):
|
||||
subtasks = kwargs.get('subtasks', None)
|
||||
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
|
||||
first_instance = None
|
||||
first_instance_type = ''
|
||||
if subtasks is not None:
|
||||
@@ -432,13 +431,22 @@ def update_host_smart_inventory_memberships():
|
||||
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
|
||||
SmartInventoryMembership.objects.all().delete()
|
||||
memberships = []
|
||||
changed_inventories = set([])
|
||||
for smart_inventory in smart_inventories:
|
||||
memberships.extend([SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id[0])
|
||||
for host_id in smart_inventory.hosts.values_list('id')])
|
||||
add_for_inventory = [
|
||||
SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id[0])
|
||||
for host_id in smart_inventory.hosts.values_list('id')
|
||||
]
|
||||
memberships.extend(add_for_inventory)
|
||||
if add_for_inventory:
|
||||
changed_inventories.add(smart_inventory)
|
||||
SmartInventoryMembership.objects.bulk_create(memberships)
|
||||
except IntegrityError as e:
|
||||
logger.error("Update Host Smart Inventory Memberships failed due to an exception: " + str(e))
|
||||
return
|
||||
# Update computed fields for changed inventories outside atomic action
|
||||
for smart_inventory in changed_inventories:
|
||||
smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
|
||||
|
||||
|
||||
@shared_task(bind=True, queue='tower', base=LogErrorsTask, max_retries=5)
|
||||
@@ -489,6 +497,7 @@ def with_path_cleanup(f):
|
||||
class BaseTask(LogErrorsTask):
|
||||
name = None
|
||||
model = None
|
||||
event_model = None
|
||||
abstract = True
|
||||
cleanup_paths = []
|
||||
proot_show_paths = []
|
||||
@@ -509,17 +518,13 @@ class BaseTask(LogErrorsTask):
|
||||
if updates:
|
||||
update_fields = ['modified']
|
||||
for field, value in updates.items():
|
||||
if field in ('result_stdout', 'result_traceback'):
|
||||
if field in ('result_traceback'):
|
||||
for srch, repl in output_replacements:
|
||||
value = value.replace(srch, repl)
|
||||
setattr(instance, field, value)
|
||||
update_fields.append(field)
|
||||
if field == 'status':
|
||||
update_fields.append('failed')
|
||||
if 'result_stdout_text' in update_fields:
|
||||
# result_stdout_text is now deprecated, and is no longer
|
||||
# an actual Django field (it's a property)
|
||||
update_fields.remove('result_stdout_text')
|
||||
instance.save(update_fields=update_fields)
|
||||
return instance
|
||||
except DatabaseError as e:
|
||||
@@ -621,10 +626,16 @@ class BaseTask(LogErrorsTask):
|
||||
'': '',
|
||||
}
|
||||
|
||||
def add_ansible_venv(self, env, add_awx_lib=True):
|
||||
env['VIRTUAL_ENV'] = settings.ANSIBLE_VENV_PATH
|
||||
env['PATH'] = os.path.join(settings.ANSIBLE_VENV_PATH, "bin") + ":" + env['PATH']
|
||||
venv_libdir = os.path.join(settings.ANSIBLE_VENV_PATH, "lib")
|
||||
def add_ansible_venv(self, venv_path, env, add_awx_lib=True):
|
||||
env['VIRTUAL_ENV'] = venv_path
|
||||
env['PATH'] = os.path.join(venv_path, "bin") + ":" + env['PATH']
|
||||
venv_libdir = os.path.join(venv_path, "lib")
|
||||
|
||||
if not os.path.exists(venv_libdir):
|
||||
raise RuntimeError(
|
||||
'a valid Python virtualenv does not exist at {}'.format(venv_path)
|
||||
)
|
||||
|
||||
env.pop('PYTHONPATH', None) # default to none if no python_ver matches
|
||||
if os.path.isdir(os.path.join(venv_libdir, "python2.7")):
|
||||
env['PYTHONPATH'] = os.path.join(venv_libdir, "python2.7", "site-packages") + ":"
|
||||
@@ -659,25 +670,6 @@ class BaseTask(LogErrorsTask):
env['PROOT_TMP_DIR'] = settings.AWX_PROOT_BASE_PATH
return env

def build_safe_env(self, env, **kwargs):
'''
Build environment dictionary, hiding potentially sensitive information
such as passwords or keys.
'''
hidden_re = re.compile(r'API|TOKEN|KEY|SECRET|PASS', re.I)
urlpass_re = re.compile(r'^.*?://[^:]+:(.*?)@.*?$')
safe_env = dict(env)
for k,v in safe_env.items():
if k == 'AWS_ACCESS_KEY_ID':
continue
elif k.startswith('ANSIBLE_') and not k.startswith('ANSIBLE_NET'):
continue
elif hidden_re.search(k):
safe_env[k] = HIDDEN_PASSWORD
elif type(v) == str and urlpass_re.match(v):
safe_env[k] = urlpass_re.sub(HIDDEN_PASSWORD, v)
return safe_env

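The method above is removed from BaseTask; a later hunk in this file switches callers to a module-level build_safe_env(env) instead. As a hedged illustration of the masking those regex rules perform (the variable names and values below are invented for the example):

env = {
    'MY_SECRET_TOKEN': 'abc123',                    # key matches TOKEN/SECRET -> masked
    'ANSIBLE_HOST_KEY_CHECKING': 'False',           # ANSIBLE_* (not ANSIBLE_NET*) -> skipped
    'AWS_ACCESS_KEY_ID': 'AKIAEXAMPLE',             # explicitly skipped -> left intact
    'SCM_URL': 'https://user:hunter2@git.example.com/repo.git',  # user:pass URL -> masked
}
safe_env = build_safe_env(env)   # module-level helper, per the later hunk
# safe_env['MY_SECRET_TOKEN'] and safe_env['SCM_URL'] now contain HIDDEN_PASSWORD;
# the ANSIBLE_* and AWS_ACCESS_KEY_ID values pass through unchanged.
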
def should_use_proot(self, instance, **kwargs):
|
||||
'''
|
||||
Return whether this task should use proot.
|
||||
@@ -729,14 +721,19 @@ class BaseTask(LogErrorsTask):

def get_stdout_handle(self, instance):
'''
Return an open file object for capturing stdout.
Return a virtual file object for capturing stdout and events.
'''
if not os.path.exists(settings.JOBOUTPUT_ROOT):
os.makedirs(settings.JOBOUTPUT_ROOT)
stdout_filename = os.path.join(settings.JOBOUTPUT_ROOT, "%d-%s.out" % (instance.pk, str(uuid.uuid1())))
stdout_handle = codecs.open(stdout_filename, 'w', encoding='utf-8')
assert stdout_handle.name == stdout_filename
return stdout_handle
dispatcher = CallbackQueueDispatcher()

def event_callback(event_data):
event_data.setdefault(self.event_data_key, instance.id)
if 'uuid' in event_data:
cache_event = cache.get('ev-{}'.format(event_data['uuid']), None)
if cache_event is not None:
event_data.update(cache_event)
dispatcher.dispatch(event_data)

return OutputEventFilter(event_callback)

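A hedged sketch of how the handle returned above is consumed; OutputEventFilter is defined in awx.main.utils and its full interface is not shown in this diff, but the post-run hunk later in this changeset flushes and closes the handle and reads an _event_ct attribute from it.

# Sketch only -- not the actual run loop.  The handle acts like a writable
# file: event markers embedded in ansible's stdout are decoded and each
# event dict is passed to event_callback, which enriches it from the cache
# and pushes it onto the callback queue.
stdout_handle = self.get_stdout_handle(instance)
stdout_handle.write(ansible_stdout_chunk)   # write() assumed file-like; chunk is a placeholder
stdout_handle.flush()
stdout_handle.close()
logger.info('%s finished running, producing %s events.',
            instance.log_format, stdout_handle._event_ct)
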
def pre_run_hook(self, instance, **kwargs):
|
||||
'''
|
||||
@@ -792,12 +789,14 @@ class BaseTask(LogErrorsTask):
|
||||
kwargs['private_data_files'] = self.build_private_data_files(instance, **kwargs)
|
||||
kwargs['passwords'] = self.build_passwords(instance, **kwargs)
|
||||
kwargs['proot_show_paths'] = self.proot_show_paths
|
||||
if getattr(instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH) != settings.ANSIBLE_VENV_PATH:
|
||||
kwargs['proot_custom_virtualenv'] = instance.ansible_virtualenv_path
|
||||
args = self.build_args(instance, **kwargs)
|
||||
safe_args = self.build_safe_args(instance, **kwargs)
|
||||
output_replacements = self.build_output_replacements(instance, **kwargs)
|
||||
cwd = self.build_cwd(instance, **kwargs)
|
||||
env = self.build_env(instance, **kwargs)
|
||||
safe_env = self.build_safe_env(env, **kwargs)
|
||||
safe_env = build_safe_env(env)
|
||||
|
||||
# handle custom injectors specified on the CredentialType
|
||||
credentials = []
|
||||
@@ -818,10 +817,8 @@ class BaseTask(LogErrorsTask):
|
||||
if isolated_host is None:
|
||||
stdout_handle = self.get_stdout_handle(instance)
|
||||
else:
|
||||
base_handle = super(self.__class__, self).get_stdout_handle(instance)
|
||||
stdout_handle = isolated_manager.IsolatedManager.wrap_stdout_handle(
|
||||
instance, kwargs['private_data_dir'], base_handle,
|
||||
event_data_key=self.event_data_key)
|
||||
stdout_handle = isolated_manager.IsolatedManager.get_stdout_handle(
|
||||
instance, kwargs['private_data_dir'], event_data_key=self.event_data_key)
|
||||
if self.should_use_proot(instance, **kwargs):
|
||||
if not check_proot_installed():
|
||||
raise RuntimeError('bubblewrap is not installed')
|
||||
@@ -838,7 +835,7 @@ class BaseTask(LogErrorsTask):
|
||||
args = run.wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock)
|
||||
safe_args = run.wrap_args_with_ssh_agent(safe_args, ssh_key_path, ssh_auth_sock)
|
||||
instance = self.update_model(pk, job_args=json.dumps(safe_args),
|
||||
job_cwd=cwd, job_env=safe_env, result_stdout_file=stdout_handle.name)
|
||||
job_cwd=cwd, job_env=safe_env)
|
||||
|
||||
expect_passwords = {}
|
||||
for k, v in self.get_password_prompts(**kwargs).items():
|
||||
@@ -874,6 +871,12 @@ class BaseTask(LogErrorsTask):
|
||||
try:
|
||||
stdout_handle.flush()
|
||||
stdout_handle.close()
|
||||
# If stdout_handle was wrapped with event filter, log data
|
||||
if hasattr(stdout_handle, '_event_ct'):
|
||||
logger.info('%s finished running, producing %s events.',
|
||||
instance.log_format, stdout_handle._event_ct)
|
||||
else:
|
||||
logger.info('%s finished running', instance.log_format)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
@@ -897,9 +900,9 @@ class BaseTask(LogErrorsTask):
|
||||
# Raising an exception will mark the job as 'failed' in celery
|
||||
# and will stop a task chain from continuing to execute
|
||||
if status == 'canceled':
|
||||
raise TaskCancel(instance, rc)
|
||||
raise AwxTaskError.TaskCancel(instance, rc)
|
||||
else:
|
||||
raise TaskError(instance, rc)
|
||||
raise AwxTaskError.TaskError(instance, rc)
|
||||
|
||||
def get_ssh_key_path(self, instance, **kwargs):
|
||||
'''
|
||||
@@ -925,7 +928,8 @@ class RunJob(BaseTask):
|
||||
|
||||
name = 'awx.main.tasks.run_job'
|
||||
model = Job
|
||||
event_data_key= 'job_id'
|
||||
event_model = JobEvent
|
||||
event_data_key = 'job_id'
|
||||
|
||||
def build_private_data(self, job, **kwargs):
|
||||
'''
|
||||
@@ -1006,7 +1010,7 @@ class RunJob(BaseTask):
|
||||
plugin_dirs.extend(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
|
||||
plugin_path = ':'.join(plugin_dirs)
|
||||
env = super(RunJob, self).build_env(job, **kwargs)
|
||||
env = self.add_ansible_venv(env, add_awx_lib=kwargs.get('isolated', False))
|
||||
env = self.add_ansible_venv(job.ansible_virtualenv_path, env, add_awx_lib=kwargs.get('isolated', False))
|
||||
# Set environment variables needed for inventory and job event
|
||||
# callbacks to work.
|
||||
env['JOB_ID'] = str(job.pk)
|
||||
@@ -1026,13 +1030,7 @@ class RunJob(BaseTask):
|
||||
env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
|
||||
env['TOWER_HOST'] = settings.TOWER_URL_BASE
|
||||
env['AWX_HOST'] = settings.TOWER_URL_BASE
|
||||
env['CALLBACK_QUEUE'] = settings.CALLBACK_QUEUE
|
||||
env['CALLBACK_CONNECTION'] = settings.CELERY_BROKER_URL
|
||||
env['CACHE'] = settings.CACHES['default']['LOCATION'] if 'LOCATION' in settings.CACHES['default'] else ''
|
||||
if getattr(settings, 'JOB_CALLBACK_DEBUG', False):
|
||||
env['JOB_CALLBACK_DEBUG'] = '2'
|
||||
elif settings.DEBUG:
|
||||
env['JOB_CALLBACK_DEBUG'] = '1'
|
||||
|
||||
# Create a directory for ControlPath sockets that is unique to each
|
||||
# job and visible inside the proot environment (when enabled).
|
||||
@@ -1047,31 +1045,8 @@ class RunJob(BaseTask):
|
||||
# Set environment variables for cloud credentials.
|
||||
cred_files = kwargs.get('private_data_files', {}).get('credentials', {})
|
||||
for cloud_cred in job.cloud_credentials:
|
||||
if cloud_cred and cloud_cred.kind == 'aws':
|
||||
env['AWS_ACCESS_KEY_ID'] = cloud_cred.username
|
||||
env['AWS_SECRET_ACCESS_KEY'] = decrypt_field(cloud_cred, 'password')
|
||||
if len(cloud_cred.security_token) > 0:
|
||||
env['AWS_SECURITY_TOKEN'] = decrypt_field(cloud_cred, 'security_token')
|
||||
# FIXME: Add EC2_URL, maybe EC2_REGION!
|
||||
elif cloud_cred and cloud_cred.kind == 'gce':
|
||||
env['GCE_EMAIL'] = cloud_cred.username
|
||||
env['GCE_PROJECT'] = cloud_cred.project
|
||||
if cloud_cred and cloud_cred.kind == 'gce':
|
||||
env['GCE_PEM_FILE_PATH'] = cred_files.get(cloud_cred, '')
|
||||
elif cloud_cred and cloud_cred.kind == 'azure_rm':
|
||||
if len(cloud_cred.client) and len(cloud_cred.tenant):
|
||||
env['AZURE_CLIENT_ID'] = cloud_cred.client
|
||||
env['AZURE_SECRET'] = decrypt_field(cloud_cred, 'secret')
|
||||
env['AZURE_TENANT'] = cloud_cred.tenant
|
||||
env['AZURE_SUBSCRIPTION_ID'] = cloud_cred.subscription
|
||||
else:
|
||||
env['AZURE_SUBSCRIPTION_ID'] = cloud_cred.subscription
|
||||
env['AZURE_AD_USER'] = cloud_cred.username
|
||||
env['AZURE_PASSWORD'] = decrypt_field(cloud_cred, 'password')
|
||||
elif cloud_cred and cloud_cred.kind == 'vmware':
|
||||
env['VMWARE_USER'] = cloud_cred.username
|
||||
env['VMWARE_PASSWORD'] = decrypt_field(cloud_cred, 'password')
|
||||
env['VMWARE_HOST'] = cloud_cred.host
|
||||
env['VMWARE_VALIDATE_CERTS'] = str(settings.VMWARE_VALIDATE_CERTS)
|
||||
elif cloud_cred and cloud_cred.kind == 'openstack':
|
||||
env['OS_CLIENT_CONFIG_FILE'] = cred_files.get(cloud_cred, '')
|
||||
|
||||
@@ -1159,7 +1134,7 @@ class RunJob(BaseTask):
|
||||
if kwargs.get('display', False) and job.job_template:
|
||||
extra_vars.update(json.loads(job.display_extra_vars()))
|
||||
else:
|
||||
extra_vars.update(job.extra_vars_dict)
|
||||
extra_vars.update(json.loads(job.decrypted_extra_vars()))
|
||||
args.extend(['-e', json.dumps(extra_vars)])
|
||||
|
||||
# Add path to playbook (relative to project.local_path).
|
||||
@@ -1196,29 +1171,6 @@ class RunJob(BaseTask):
|
||||
d[re.compile(r'Vault password \({}\):\s*?$'.format(vault_id), re.M)] = k
|
||||
return d
|
||||
|
||||
def get_stdout_handle(self, instance):
|
||||
'''
|
||||
Wrap stdout file object to capture events.
|
||||
'''
|
||||
stdout_handle = super(RunJob, self).get_stdout_handle(instance)
|
||||
|
||||
if getattr(settings, 'USE_CALLBACK_QUEUE', False):
|
||||
dispatcher = CallbackQueueDispatcher()
|
||||
|
||||
def job_event_callback(event_data):
|
||||
event_data.setdefault(self.event_data_key, instance.id)
|
||||
if 'uuid' in event_data:
|
||||
cache_event = cache.get('ev-{}'.format(event_data['uuid']), None)
|
||||
if cache_event is not None:
|
||||
event_data.update(cache_event)
|
||||
dispatcher.dispatch(event_data)
|
||||
else:
|
||||
def job_event_callback(event_data):
|
||||
event_data.setdefault(self.event_data_key, instance.id)
|
||||
JobEvent.create_from_data(**event_data)
|
||||
|
||||
return OutputEventFilter(stdout_handle, job_event_callback)
|
||||
|
||||
def should_use_proot(self, instance, **kwargs):
|
||||
'''
|
||||
Return whether this task should use proot.
|
||||
@@ -1226,6 +1178,10 @@ class RunJob(BaseTask):
|
||||
return getattr(settings, 'AWX_PROOT_ENABLED', False)
|
||||
|
||||
def pre_run_hook(self, job, **kwargs):
|
||||
if job.inventory is None:
|
||||
error = _('Job could not start because it does not have a valid inventory.')
|
||||
self.update_model(job.pk, status='failed', job_explanation=error)
|
||||
raise RuntimeError(error)
|
||||
if job.project and job.project.scm_type:
|
||||
job_request_id = '' if self.request.id is None else self.request.id
|
||||
pu_ig = job.instance_group
|
||||
@@ -1252,10 +1208,12 @@ class RunJob(BaseTask):
|
||||
task_instance.run(local_project_sync.id)
|
||||
job = self.update_model(job.pk, scm_revision=job.project.scm_revision)
|
||||
except Exception:
|
||||
job = self.update_model(job.pk, status='failed',
|
||||
job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' %
|
||||
('project_update', local_project_sync.name, local_project_sync.id)))
|
||||
raise
|
||||
local_project_sync.refresh_from_db()
|
||||
if local_project_sync.status != 'canceled':
|
||||
job = self.update_model(job.pk, status='failed',
|
||||
job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' %
|
||||
('project_update', local_project_sync.name, local_project_sync.id)))
|
||||
raise
|
||||
|
||||
if job.use_fact_cache and not kwargs.get('isolated'):
|
||||
job.start_job_fact_cache()
|
||||
@@ -1277,6 +1235,8 @@ class RunProjectUpdate(BaseTask):
|
||||
|
||||
name = 'awx.main.tasks.run_project_update'
|
||||
model = ProjectUpdate
|
||||
event_model = ProjectUpdateEvent
|
||||
event_data_key = 'project_update_id'
|
||||
|
||||
@property
|
||||
def proot_show_paths(self):
|
||||
@@ -1322,11 +1282,18 @@ class RunProjectUpdate(BaseTask):
|
||||
Build environment dictionary for ansible-playbook.
|
||||
'''
|
||||
env = super(RunProjectUpdate, self).build_env(project_update, **kwargs)
|
||||
env = self.add_ansible_venv(env)
|
||||
env = self.add_ansible_venv(settings.ANSIBLE_VENV_PATH, env)
|
||||
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
|
||||
env['ANSIBLE_ASK_PASS'] = str(False)
|
||||
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
|
||||
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
|
||||
# give ansible a hint about the intended tmpdir to work around issues
|
||||
# like https://github.com/ansible/ansible/issues/30064
|
||||
env['TMP'] = settings.AWX_PROOT_BASE_PATH
|
||||
env['CACHE'] = settings.CACHES['default']['LOCATION'] if 'LOCATION' in settings.CACHES['default'] else ''
|
||||
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
|
||||
env['ANSIBLE_CALLBACK_PLUGINS'] = self.get_path_to('..', 'plugins', 'callback')
|
||||
env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
|
||||
return env
|
||||
|
||||
def _build_scm_url_extra_vars(self, project_update, **kwargs):
|
||||
@@ -1464,16 +1431,6 @@ class RunProjectUpdate(BaseTask):
|
||||
def get_idle_timeout(self):
|
||||
return getattr(settings, 'PROJECT_UPDATE_IDLE_TIMEOUT', None)
|
||||
|
||||
def get_stdout_handle(self, instance):
|
||||
stdout_handle = super(RunProjectUpdate, self).get_stdout_handle(instance)
|
||||
pk = instance.pk
|
||||
|
||||
def raw_callback(data):
|
||||
instance_actual = self.update_model(pk)
|
||||
result_stdout_text = instance_actual.result_stdout_text + data
|
||||
self.update_model(pk, result_stdout_text=result_stdout_text)
|
||||
return OutputEventFilter(stdout_handle, raw_callback=raw_callback)
|
||||
|
||||
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
|
||||
project_request_id = '' if self.request.id is None else self.request.id
|
||||
scm_revision = project_update.project.scm_revision
|
||||
@@ -1519,11 +1476,11 @@ class RunProjectUpdate(BaseTask):
|
||||
except InventoryUpdate.DoesNotExist:
|
||||
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
|
||||
continue
|
||||
if project_update.cancel_flag or local_inv_update.cancel_flag:
|
||||
if not project_update.cancel_flag:
|
||||
self.update_model(project_update.pk, cancel_flag=True, job_explanation=_(
|
||||
'Dependent inventory update {} was canceled.'.format(local_inv_update.name)))
|
||||
break # Stop rest of updates if project or inventory update was canceled
|
||||
if project_update.cancel_flag:
|
||||
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
|
||||
break
|
||||
if local_inv_update.cancel_flag:
|
||||
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
|
||||
if local_inv_update.status == 'successful':
|
||||
inv_src.scm_last_revision = scm_revision
|
||||
inv_src.save(update_fields=['scm_last_revision'])
|
||||
@@ -1599,6 +1556,8 @@ class RunInventoryUpdate(BaseTask):
|
||||
|
||||
name = 'awx.main.tasks.run_inventory_update'
|
||||
model = InventoryUpdate
|
||||
event_model = InventoryUpdateEvent
|
||||
event_data_key = 'inventory_update_id'
|
||||
|
||||
def build_private_data(self, inventory_update, **kwargs):
|
||||
"""
|
||||
@@ -1764,7 +1723,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
cp.set(section, 'ssl_verify', "false")
|
||||
|
||||
cloudforms_opts = dict(inventory_update.source_vars_dict.items())
|
||||
for opt in ['version', 'purge_actions', 'clean_group_keys', 'nest_tags']:
|
||||
for opt in ['version', 'purge_actions', 'clean_group_keys', 'nest_tags', 'suffix']:
|
||||
if opt in cloudforms_opts:
|
||||
cp.set(section, opt, cloudforms_opts[opt])
|
||||
|
||||
@@ -1840,45 +1799,39 @@ class RunInventoryUpdate(BaseTask):
|
||||
# The inventory modules are vendored in AWX in the
|
||||
# `awx/plugins/inventory` directory; those files should be kept in
|
||||
# sync with those in Ansible core at all times.
|
||||
passwords = kwargs.get('passwords', {})
|
||||
cred_data = kwargs.get('private_data_files', {}).get('credentials', '')
|
||||
cloud_credential = cred_data.get(inventory_update.credential, '')
|
||||
if inventory_update.source == 'ec2':
|
||||
if passwords.get('source_username', '') and passwords.get('source_password', ''):
|
||||
env['AWS_ACCESS_KEY_ID'] = passwords['source_username']
|
||||
env['AWS_SECRET_ACCESS_KEY'] = passwords['source_password']
|
||||
if len(passwords['source_security_token']) > 0:
|
||||
env['AWS_SECURITY_TOKEN'] = passwords['source_security_token']
|
||||
env['EC2_INI_PATH'] = cloud_credential
|
||||
elif inventory_update.source == 'vmware':
|
||||
env['VMWARE_INI_PATH'] = cloud_credential
|
||||
elif inventory_update.source == 'azure_rm':
|
||||
if len(passwords.get('source_client', '')) and \
|
||||
len(passwords.get('source_tenant', '')):
|
||||
env['AZURE_CLIENT_ID'] = passwords.get('source_client', '')
|
||||
env['AZURE_SECRET'] = passwords.get('source_secret', '')
|
||||
env['AZURE_TENANT'] = passwords.get('source_tenant', '')
|
||||
env['AZURE_SUBSCRIPTION_ID'] = passwords.get('source_subscription', '')
|
||||
else:
|
||||
env['AZURE_SUBSCRIPTION_ID'] = passwords.get('source_subscription', '')
|
||||
env['AZURE_AD_USER'] = passwords.get('source_username', '')
|
||||
env['AZURE_PASSWORD'] = passwords.get('source_password', '')
|
||||
env['AZURE_INI_PATH'] = cloud_credential
|
||||
elif inventory_update.source == 'gce':
|
||||
env['GCE_EMAIL'] = passwords.get('source_username', '')
|
||||
env['GCE_PROJECT'] = passwords.get('source_project', '')
|
||||
env['GCE_PEM_FILE_PATH'] = cloud_credential
|
||||
env['GCE_ZONE'] = inventory_update.source_regions if inventory_update.source_regions != 'all' else ''
|
||||
elif inventory_update.source == 'openstack':
|
||||
env['OS_CLIENT_CONFIG_FILE'] = cloud_credential
|
||||
elif inventory_update.source == 'satellite6':
|
||||
env['FOREMAN_INI_PATH'] = cloud_credential
|
||||
elif inventory_update.source == 'cloudforms':
|
||||
env['CLOUDFORMS_INI_PATH'] = cloud_credential
|
||||
|
||||
ini_mapping = {
|
||||
'ec2': 'EC2_INI_PATH',
|
||||
'vmware': 'VMWARE_INI_PATH',
|
||||
'azure_rm': 'AZURE_INI_PATH',
|
||||
'gce': 'GCE_PEM_FILE_PATH',
|
||||
'openstack': 'OS_CLIENT_CONFIG_FILE',
|
||||
'satellite6': 'FOREMAN_INI_PATH',
|
||||
'cloudforms': 'CLOUDFORMS_INI_PATH'
|
||||
}
|
||||
if inventory_update.source in ini_mapping:
|
||||
cred_data = kwargs.get('private_data_files', {}).get('credentials', '')
|
||||
env[ini_mapping[inventory_update.source]] = cred_data.get(inventory_update.credential, '')
|
||||
|
||||
if inventory_update.source == 'gce':
|
||||
env['GCE_ZONE'] = inventory_update.source_regions if inventory_update.source_regions != 'all' else '' # noqa
|
||||
|
||||
# by default, the GCE inventory source caches results on disk for
|
||||
# 5 minutes; disable this behavior
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.add_section('cache')
|
||||
cp.set('cache', 'cache_max_age', '0')
|
||||
handle, path = tempfile.mkstemp(dir=kwargs.get('private_data_dir', None))
|
||||
cp.write(os.fdopen(handle, 'w'))
|
||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||
env['GCE_INI_PATH'] = path
|
||||
elif inventory_update.source in ['scm', 'custom']:
|
||||
for env_k in inventory_update.source_vars_dict:
|
||||
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLACKLIST:
|
||||
env[str(env_k)] = unicode(inventory_update.source_vars_dict[env_k])
|
||||
elif inventory_update.source == 'tower':
|
||||
env['TOWER_INVENTORY'] = inventory_update.instance_filters
|
||||
env['TOWER_LICENSE_TYPE'] = get_licenser().validate()['license_type']
|
||||
elif inventory_update.source == 'file':
|
||||
raise NotImplementedError('Cannot update file sources through the task system.')
|
||||
# add private_data_files
|
||||
@@ -1954,16 +1907,6 @@ class RunInventoryUpdate(BaseTask):
|
||||
args.append('--traceback')
|
||||
return args
|
||||
|
||||
def get_stdout_handle(self, instance):
|
||||
stdout_handle = super(RunInventoryUpdate, self).get_stdout_handle(instance)
|
||||
pk = instance.pk
|
||||
|
||||
def raw_callback(data):
|
||||
instance_actual = self.update_model(pk)
|
||||
result_stdout_text = instance_actual.result_stdout_text + data
|
||||
self.update_model(pk, result_stdout_text=result_stdout_text)
|
||||
return OutputEventFilter(stdout_handle, raw_callback=raw_callback)
|
||||
|
||||
def build_cwd(self, inventory_update, **kwargs):
|
||||
return self.get_path_to('..', 'plugins', 'inventory')
|
||||
|
||||
@@ -2010,6 +1953,7 @@ class RunAdHocCommand(BaseTask):
|
||||
|
||||
name = 'awx.main.tasks.run_ad_hoc_command'
|
||||
model = AdHocCommand
|
||||
event_model = AdHocCommandEvent
|
||||
event_data_key = 'ad_hoc_command_id'
|
||||
|
||||
def build_private_data(self, ad_hoc_command, **kwargs):
|
||||
@@ -2057,7 +2001,7 @@ class RunAdHocCommand(BaseTask):
|
||||
'''
|
||||
plugin_dir = self.get_path_to('..', 'plugins', 'callback')
|
||||
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, **kwargs)
|
||||
env = self.add_ansible_venv(env)
|
||||
env = self.add_ansible_venv(settings.ANSIBLE_VENV_PATH, env)
|
||||
# Set environment variables needed for inventory and ad hoc event
|
||||
# callbacks to work.
|
||||
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
|
||||
@@ -2066,14 +2010,8 @@ class RunAdHocCommand(BaseTask):
|
||||
env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_dir
|
||||
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
|
||||
env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal' # Hardcoded by Ansible for ad-hoc commands (either minimal or oneline).
|
||||
env['CALLBACK_QUEUE'] = settings.CALLBACK_QUEUE
|
||||
env['CALLBACK_CONNECTION'] = settings.CELERY_BROKER_URL
|
||||
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
|
||||
env['CACHE'] = settings.CACHES['default']['LOCATION'] if 'LOCATION' in settings.CACHES['default'] else ''
|
||||
if getattr(settings, 'JOB_CALLBACK_DEBUG', False):
|
||||
env['JOB_CALLBACK_DEBUG'] = '2'
|
||||
elif settings.DEBUG:
|
||||
env['JOB_CALLBACK_DEBUG'] = '1'
|
||||
|
||||
# Specify empty SSH args (should disable ControlPersist entirely for
|
||||
# ad hoc commands).
|
||||
@@ -2124,14 +2062,27 @@ class RunAdHocCommand(BaseTask):
|
||||
if ad_hoc_command.verbosity:
|
||||
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
|
||||
|
||||
# Define special extra_vars for AWX, combine with ad_hoc_command.extra_vars
|
||||
extra_vars = {
|
||||
'tower_job_id': ad_hoc_command.pk,
|
||||
'awx_job_id': ad_hoc_command.pk,
|
||||
}
|
||||
if ad_hoc_command.created_by:
|
||||
extra_vars.update({
|
||||
'tower_user_id': ad_hoc_command.created_by.pk,
|
||||
'tower_user_name': ad_hoc_command.created_by.username,
|
||||
'awx_user_id': ad_hoc_command.created_by.pk,
|
||||
'awx_user_name': ad_hoc_command.created_by.username,
|
||||
})
|
||||
|
||||
if ad_hoc_command.extra_vars_dict:
|
||||
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
|
||||
if removed_vars:
|
||||
raise ValueError(_(
|
||||
"{} are prohibited from use in ad hoc commands."
|
||||
).format(", ".join(removed_vars)))
|
||||
|
||||
args.extend(['-e', json.dumps(ad_hoc_command.extra_vars_dict)])
|
||||
extra_vars.update(ad_hoc_command.extra_vars_dict)
|
||||
args.extend(['-e', json.dumps(extra_vars)])
|
||||
|
||||
args.extend(['-m', ad_hoc_command.module_name])
|
||||
args.extend(['-a', ad_hoc_command.module_args])
|
||||
@@ -2160,29 +2111,6 @@ class RunAdHocCommand(BaseTask):
|
||||
d[re.compile(r'Password:\s*?$', re.M)] = 'ssh_password'
|
||||
return d
|
||||
|
||||
def get_stdout_handle(self, instance):
|
||||
'''
|
||||
Wrap stdout file object to capture events.
|
||||
'''
|
||||
stdout_handle = super(RunAdHocCommand, self).get_stdout_handle(instance)
|
||||
|
||||
if getattr(settings, 'USE_CALLBACK_QUEUE', False):
|
||||
dispatcher = CallbackQueueDispatcher()
|
||||
|
||||
def ad_hoc_command_event_callback(event_data):
|
||||
event_data.setdefault(self.event_data_key, instance.id)
|
||||
if 'uuid' in event_data:
|
||||
cache_event = cache.get('ev-{}'.format(event_data['uuid']), None)
|
||||
if cache_event is not None:
|
||||
event_data.update(cache_event)
|
||||
dispatcher.dispatch(event_data)
|
||||
else:
|
||||
def ad_hoc_command_event_callback(event_data):
|
||||
event_data.setdefault(self.event_data_key, instance.id)
|
||||
AdHocCommandEvent.create_from_data(**event_data)
|
||||
|
||||
return OutputEventFilter(stdout_handle, ad_hoc_command_event_callback)
|
||||
|
||||
def should_use_proot(self, instance, **kwargs):
|
||||
'''
|
||||
Return whether this task should use proot.
|
||||
@@ -2194,6 +2122,8 @@ class RunSystemJob(BaseTask):
|
||||
|
||||
name = 'awx.main.tasks.run_system_job'
|
||||
model = SystemJob
|
||||
event_model = SystemJobEvent
|
||||
event_data_key = 'system_job_id'
|
||||
|
||||
def build_args(self, system_job, **kwargs):
|
||||
args = ['awx-manage', system_job.job_type]
|
||||
@@ -2220,16 +2150,6 @@ class RunSystemJob(BaseTask):
|
||||
logger.exception("%s Failed to parse system job", system_job.log_format)
|
||||
return args
|
||||
|
||||
def get_stdout_handle(self, instance):
|
||||
stdout_handle = super(RunSystemJob, self).get_stdout_handle(instance)
|
||||
pk = instance.pk
|
||||
|
||||
def raw_callback(data):
|
||||
instance_actual = self.update_model(pk)
|
||||
result_stdout_text = instance_actual.result_stdout_text + data
|
||||
self.update_model(pk, result_stdout_text=result_stdout_text)
|
||||
return OutputEventFilter(stdout_handle, raw_callback=raw_callback)
|
||||
|
||||
def build_env(self, instance, **kwargs):
|
||||
env = super(RunSystemJob, self).build_env(instance,
|
||||
**kwargs)
|
||||
|
||||
163 awx/main/tests/data/ldap_ansible.ldif Normal file
@@ -0,0 +1,163 @@
|
||||
|
||||
dn: dc=ansible,dc=com
|
||||
dc: ansible
|
||||
description: My wonderful company as much text as you want to place
|
||||
in this line up to 32K continuation data for the line above must
|
||||
have <CR> or <CR><LF> i.e. ENTER work
|
||||
on both Windows and *nix system - new line MUST begin with ONE SPACE
|
||||
objectClass: dcObject
|
||||
objectClass: organization
|
||||
o: ansible.com
|
||||
|
||||
# groups
|
||||
|
||||
dn: ou=groups,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: organizationalUnit
|
||||
ou: groups
|
||||
|
||||
# group: Superusers
|
||||
|
||||
dn: cn=superusers,ou=groups,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: superusers
|
||||
member: cn=super_user1,ou=people,dc=ansible,dc=com
|
||||
|
||||
# group: Engineering
|
||||
|
||||
dn: cn=engineering,ou=groups,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: engineering
|
||||
member: cn=eng_admin1,ou=people,dc=ansible,dc=com
|
||||
member: cn=eng_user1,ou=people,dc=ansible,dc=com
|
||||
member: cn=eng_user2,ou=people,dc=ansible,dc=com
|
||||
|
||||
dn: cn=engineering_admins,ou=groups,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: engineering_admins
|
||||
member: cn=eng_admin1,ou=people,dc=ansible,dc=com
|
||||
|
||||
# group: Sales
|
||||
|
||||
dn: cn=sales,ou=groups,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: sales
|
||||
member: cn=sales_user1,ou=people,dc=ansible,dc=com
|
||||
member: cn=sales_user2,ou=people,dc=ansible,dc=com
|
||||
|
||||
# group: IT
|
||||
|
||||
dn: cn=it,ou=groups,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: it
|
||||
member: cn=it_user1,ou=people,dc=ansible,dc=com
|
||||
member: cn=it_user2,ou=people,dc=ansible,dc=com
|
||||
|
||||
|
||||
# users
|
||||
|
||||
dn: ou=people,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: organizationalUnit
|
||||
ou: people
|
||||
|
||||
# users - superusers
|
||||
|
||||
dn: cn=super_user1,ou=people,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: super_user1
|
||||
sn: User 1
|
||||
givenName: Super
|
||||
mail: super_user1@ansible.com
|
||||
userPassword: password
|
||||
|
||||
# users - engineering
|
||||
|
||||
dn: cn=eng_user1,ou=people,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: eng_user1
|
||||
sn: User 1
|
||||
givenName: Engineering
|
||||
mail: eng_user1@ansible.com
|
||||
userPassword: password
|
||||
|
||||
dn: cn=eng_user2,ou=people,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: eng_user2
|
||||
sn: User 2
|
||||
givenName: Engineering
|
||||
mail: eng_user2@ansible.com
|
||||
userPassword: password
|
||||
|
||||
dn: cn=eng_admin1,ou=people,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: eng_admin1
|
||||
sn: Admin 1
|
||||
givenName: Engineering
|
||||
mail: eng_admin1@ansible.com
|
||||
userPassword: password
|
||||
|
||||
# users - IT
|
||||
|
||||
dn: cn=it_user1,ou=people,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: it_user1
|
||||
sn: Technology User 1
|
||||
givenName: Information
|
||||
mail: it_user1@ansible.com
|
||||
userPassword: password
|
||||
|
||||
dn: cn=it_user2,ou=people,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: it_user2
|
||||
sn: Technology User 2
|
||||
givenName: Information
|
||||
mail: it_user2@ansible.com
|
||||
userPassword: password
|
||||
|
||||
# users - Sales
|
||||
|
||||
dn: cn=sales_user1,ou=people,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: sales_user1
|
||||
sn: Person 1
|
||||
givenName: Sales
|
||||
mail: sales_user1@ansible.com
|
||||
userPassword: password
|
||||
|
||||
dn: cn=sales_user2,ou=people,dc=ansible,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: sales_user2
|
||||
sn: Person 2
|
||||
givenName: Sales
|
||||
mail: sales_user2@ansible.com
|
||||
userPassword: password
|
||||
78 awx/main/tests/data/ldap_example.ldif Normal file
@@ -0,0 +1,78 @@
|
||||
|
||||
dn: dc=example,dc=com
|
||||
dc: example
|
||||
description: My wonderful company as much text as you want to place
|
||||
in this line up to 32K continuation data for the line above must
|
||||
have <CR> or <CR><LF> i.e. ENTER work
|
||||
on both Windows and *nix system - new line MUST begin with ONE SPACE
|
||||
objectClass: dcObject
|
||||
objectClass: organization
|
||||
o: example.com
|
||||
|
||||
# groups
|
||||
|
||||
dn: ou=groups,dc=example,dc=com
|
||||
objectClass: top
|
||||
objectClass: organizationalUnit
|
||||
ou: groups
|
||||
|
||||
# group: Superusers
|
||||
|
||||
dn: cn=superusers,ou=groups,dc=example,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: superusers
|
||||
member: cn=super_user1,ou=people,dc=example,dc=com
|
||||
|
||||
# group: Sales
|
||||
|
||||
dn: cn=sales,ou=groups,dc=example,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: sales
|
||||
member: cn=sales_user1,ou=people,dc=example,dc=com
|
||||
member: cn=sales_user2,ou=people,dc=example,dc=com
|
||||
|
||||
# users
|
||||
|
||||
dn: ou=people,dc=example,dc=com
|
||||
objectClass: top
|
||||
objectClass: organizationalUnit
|
||||
ou: people
|
||||
|
||||
# users - superusers
|
||||
|
||||
dn: cn=super_user1,ou=people,dc=example,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: super_user1
|
||||
sn: User 1
|
||||
givenName: Super
|
||||
mail: super_user1@example.com
|
||||
userPassword: password
|
||||
|
||||
# users - Sales
|
||||
|
||||
dn: cn=sales_user1,ou=people,dc=example,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: sales_user1
|
||||
sn: Person 1
|
||||
givenName: Sales
|
||||
mail: sales_user1@example.com
|
||||
userPassword: password
|
||||
|
||||
dn: cn=sales_user2,ou=people,dc=example,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: sales_user2
|
||||
sn: Person 2
|
||||
givenName: Sales
|
||||
mail: sales_user2@example.com
|
||||
userPassword: password
|
||||
163 awx/main/tests/data/ldap_redhat.ldif Normal file
@@ -0,0 +1,163 @@
|
||||
|
||||
dn: dc=redhat,dc=com
|
||||
dc: redhat
|
||||
description: My wonderful company as much text as you want to place
|
||||
in this line up to 32K continuation data for the line above must
|
||||
have <CR> or <CR><LF> i.e. ENTER work
|
||||
on both Windows and *nix system - new line MUST begin with ONE SPACE
|
||||
objectClass: dcObject
|
||||
objectClass: organization
|
||||
o: redhat.com
|
||||
|
||||
# groups
|
||||
|
||||
dn: ou=groups,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: organizationalUnit
|
||||
ou: groups
|
||||
|
||||
# group: Superusers
|
||||
|
||||
dn: cn=superusers,ou=groups,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: superusers
|
||||
member: cn=super_user1,ou=people,dc=redhat,dc=com
|
||||
|
||||
# group: Engineering
|
||||
|
||||
dn: cn=engineering,ou=groups,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: engineering
|
||||
member: cn=eng_admin1,ou=people,dc=redhat,dc=com
|
||||
member: cn=eng_user1,ou=people,dc=redhat,dc=com
|
||||
member: cn=eng_user2,ou=people,dc=redhat,dc=com
|
||||
|
||||
dn: cn=engineering_admins,ou=groups,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: engineering_admins
|
||||
member: cn=eng_admin1,ou=people,dc=redhat,dc=com
|
||||
|
||||
# group: Sales
|
||||
|
||||
dn: cn=sales,ou=groups,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: sales
|
||||
member: cn=sales_user1,ou=people,dc=redhat,dc=com
|
||||
member: cn=sales_user2,ou=people,dc=redhat,dc=com
|
||||
|
||||
# group: IT
|
||||
|
||||
dn: cn=it,ou=groups,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: groupOfNames
|
||||
cn: it
|
||||
member: cn=it_user1,ou=people,dc=redhat,dc=com
|
||||
member: cn=it_user2,ou=people,dc=redhat,dc=com
|
||||
|
||||
|
||||
# users
|
||||
|
||||
dn: ou=people,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: organizationalUnit
|
||||
ou: people
|
||||
|
||||
# users - superusers
|
||||
|
||||
dn: cn=super_user1,ou=people,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: super_user1
|
||||
sn: User 1
|
||||
givenName: Super
|
||||
mail: super_user1@redhat.com
|
||||
userPassword: password
|
||||
|
||||
# users - engineering
|
||||
|
||||
dn: cn=eng_user1,ou=people,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: eng_user1
|
||||
sn: User 1
|
||||
givenName: Engineering
|
||||
mail: eng_user1@redhat.com
|
||||
userPassword: password
|
||||
|
||||
dn: cn=eng_user2,ou=people,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: eng_user2
|
||||
sn: User 2
|
||||
givenName: Engineering
|
||||
mail: eng_user2@redhat.com
|
||||
userPassword: password
|
||||
|
||||
dn: cn=eng_admin1,ou=people,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: eng_admin1
|
||||
sn: Admin 1
|
||||
givenName: Engineering
|
||||
mail: eng_admin1@redhat.com
|
||||
userPassword: password
|
||||
|
||||
# users - IT
|
||||
|
||||
dn: cn=it_user1,ou=people,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: it_user1
|
||||
sn: Technology User 1
|
||||
givenName: Information
|
||||
mail: it_user1@redhat.com
|
||||
userPassword: password
|
||||
|
||||
dn: cn=it_user2,ou=people,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: it_user2
|
||||
sn: Technology User 2
|
||||
givenName: Information
|
||||
mail: it_user2@redhat.com
|
||||
userPassword: password
|
||||
|
||||
# users - Sales
|
||||
|
||||
dn: cn=sales_user1,ou=people,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: sales_user1
|
||||
sn: Person 1
|
||||
givenName: Sales
|
||||
mail: sales_user1@redhat.com
|
||||
userPassword: password
|
||||
|
||||
dn: cn=sales_user2,ou=people,dc=redhat,dc=com
|
||||
objectClass: top
|
||||
objectClass: person
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
cn: sales_user2
|
||||
sn: Person 2
|
||||
givenName: Sales
|
||||
mail: sales_user2@redhat.com
|
||||
userPassword: password
|
||||
@@ -5,6 +5,7 @@ from awx.api.versioning import reverse
|
||||
from awx.main.middleware import ActivityStreamMiddleware
|
||||
from awx.main.models.activity_stream import ActivityStream
|
||||
from awx.main.access import ActivityStreamAccess
|
||||
from awx.conf.models import Setting
|
||||
|
||||
|
||||
def mock_feature_enabled(feature):
|
||||
@@ -47,6 +48,26 @@ def test_basic_fields(monkeypatch, organization, get, user, settings):
|
||||
assert response.data['summary_fields']['organization'][0]['name'] == 'test-org'
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled)
|
||||
@pytest.mark.django_db
|
||||
def test_ctint_activity_stream(monkeypatch, get, user, settings):
|
||||
Setting.objects.create(key="FOO", value="bar")
|
||||
settings.ACTIVITY_STREAM_ENABLED = True
|
||||
u = user('admin', True)
|
||||
activity_stream = ActivityStream.objects.filter(setting__icontains="FOO").latest('pk')
|
||||
activity_stream.actor = u
|
||||
activity_stream.save()
|
||||
|
||||
aspk = activity_stream.pk
|
||||
url = reverse('api:activity_stream_detail', kwargs={'pk': aspk})
|
||||
response = get(url, user('admin', True))
|
||||
|
||||
assert response.status_code == 200
|
||||
assert 'summary_fields' in response.data
|
||||
assert 'setting' in response.data['summary_fields']
|
||||
assert response.data['summary_fields']['setting'][0]['name'] == 'FOO'
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled)
|
||||
@pytest.mark.django_db
|
||||
def test_middleware_actor_added(monkeypatch, post, get, user, settings):
|
||||
|
||||
@@ -4,7 +4,9 @@ import re
|
||||
import mock # noqa
|
||||
import pytest
|
||||
|
||||
from awx.main.models.credential import Credential, CredentialType
|
||||
from awx.main.models import (AdHocCommand, Credential, CredentialType, Job, JobTemplate,
|
||||
Inventory, InventorySource, Project,
|
||||
WorkflowJobNode)
|
||||
from awx.main.utils import decrypt_field
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
@@ -12,6 +14,17 @@ EXAMPLE_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nxyz==\n-----END PRIVATE KEY-
|
||||
EXAMPLE_ENCRYPTED_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nProc-Type: 4,ENCRYPTED\nxyz==\n-----END PRIVATE KEY-----'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_idempotent_credential_type_setup():
|
||||
assert CredentialType.objects.count() == 0
|
||||
CredentialType.setup_tower_managed_defaults()
|
||||
total = CredentialType.objects.count()
|
||||
assert total > 0
|
||||
|
||||
CredentialType.setup_tower_managed_defaults()
|
||||
assert CredentialType.objects.count() == total
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('kind, total', [
|
||||
('ssh', 1), ('net', 0)
|
||||
@@ -575,7 +588,7 @@ def test_create_org_credential_as_admin(post, organization, org_admin, credentia
|
||||
params['name'] = 'Some name'
|
||||
params['organization'] = organization.id
|
||||
response = post(
|
||||
reverse('api:credential_list'),
|
||||
reverse('api:credential_list', kwargs={'version': version}),
|
||||
params,
|
||||
org_admin
|
||||
)
|
||||
@@ -591,7 +604,7 @@ def test_credential_detail(post, get, organization, org_admin, credentialtype_ss
|
||||
params['name'] = 'Some name'
|
||||
params['organization'] = organization.id
|
||||
response = post(
|
||||
reverse('api:credential_list'),
|
||||
reverse('api:credential_list', kwargs={'version': version}),
|
||||
params,
|
||||
org_admin
|
||||
)
|
||||
@@ -1410,7 +1423,17 @@ def test_field_removal(put, organization, admin, credentialtype_ssh, version, pa
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_credential_type_immutable_in_v2(patch, organization, admin, credentialtype_ssh, credentialtype_aws):
|
||||
@pytest.mark.parametrize('relation, related_obj', [
|
||||
['ad_hoc_commands', AdHocCommand()],
|
||||
['insights_inventories', Inventory()],
|
||||
['inventorysources', InventorySource()],
|
||||
['unifiedjobs', Job()],
|
||||
['unifiedjobtemplates', JobTemplate()],
|
||||
['projects', Project()],
|
||||
['workflowjobnodes', WorkflowJobNode()],
|
||||
])
|
||||
def test_credential_type_mutability(patch, organization, admin, credentialtype_ssh,
|
||||
credentialtype_aws, relation, related_obj):
|
||||
cred = Credential(
|
||||
credential_type=credentialtype_ssh,
|
||||
name='Best credential ever',
|
||||
@@ -1422,19 +1445,39 @@ def test_credential_type_immutable_in_v2(patch, organization, admin, credentialt
|
||||
)
|
||||
cred.save()
|
||||
|
||||
related_obj.save()
|
||||
getattr(cred, relation).add(related_obj)
|
||||
|
||||
def _change_credential_type():
|
||||
return patch(
|
||||
reverse('api:credential_detail', kwargs={'version': 'v2', 'pk': cred.pk}),
|
||||
{
|
||||
'credential_type': credentialtype_aws.pk,
|
||||
'inputs': {
|
||||
'username': u'jim',
|
||||
'password': u'pass'
|
||||
}
|
||||
},
|
||||
admin
|
||||
)
|
||||
|
||||
response = _change_credential_type()
|
||||
assert response.status_code == 400
|
||||
expected = ['You cannot change the credential type of the credential, '
|
||||
'as it may break the functionality of the resources using it.']
|
||||
assert response.data['credential_type'] == expected
|
||||
|
||||
response = patch(
|
||||
reverse('api:credential_detail', kwargs={'version': 'v2', 'pk': cred.pk}),
|
||||
{
|
||||
'credential_type': credentialtype_aws.pk,
|
||||
'inputs': {
|
||||
'username': u'jim',
|
||||
'password': u'pass'
|
||||
}
|
||||
},
|
||||
{'name': 'Worst credential ever'},
|
||||
admin
|
||||
)
|
||||
assert response.status_code == 400
|
||||
assert 'credential_type' in response.data
|
||||
assert response.status_code == 200
|
||||
assert Credential.objects.get(pk=cred.pk).name == 'Worst credential ever'
|
||||
|
||||
related_obj.delete()
|
||||
response = _change_credential_type()
|
||||
assert response.status_code == 200
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -60,3 +60,34 @@ def test_proxy_ip_whitelist(get, patch, admin):
|
||||
REMOTE_HOST='my.proxy.example.org',
|
||||
HTTP_X_FROM_THE_LOAD_BALANCER='some-actual-ip')
|
||||
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestDeleteViews:
|
||||
def test_sublist_delete_permission_check(self, inventory_source, host, rando, delete):
|
||||
inventory_source.hosts.add(host)
|
||||
inventory_source.inventory.read_role.members.add(rando)
|
||||
delete(
|
||||
reverse(
|
||||
'api:inventory_source_hosts_list',
|
||||
kwargs={'version': 'v2', 'pk': inventory_source.pk}
|
||||
), user=rando, expect=403
|
||||
)
|
||||
|
||||
def test_sublist_delete_functionality(self, inventory_source, host, rando, delete):
|
||||
inventory_source.hosts.add(host)
|
||||
inventory_source.inventory.admin_role.members.add(rando)
|
||||
delete(
|
||||
reverse(
|
||||
'api:inventory_source_hosts_list',
|
||||
kwargs={'version': 'v2', 'pk': inventory_source.pk}
|
||||
), user=rando, expect=204
|
||||
)
|
||||
assert inventory_source.hosts.count() == 0
|
||||
|
||||
def test_destroy_permission_check(self, job_factory, system_auditor, delete):
|
||||
job = job_factory()
|
||||
resp = delete(
|
||||
job.get_absolute_url(), user=system_auditor
|
||||
)
|
||||
assert resp.status_code == 403
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import pytest
|
||||
import mock
|
||||
|
||||
@@ -236,6 +237,51 @@ def test_create_inventory_smart_inventory_sources(post, get, inventory, admin_us
|
||||
assert jdata['count'] == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_urlencode_host_filter(post, admin_user, organization):
|
||||
"""
|
||||
Host filters saved on the model must correspond to the same result
|
||||
as when that host_filter is used in the URL as a querystring.
|
||||
That means that it must be url-encoded patterns like %22 for quotes
|
||||
must be escaped as the string is saved to the model.
|
||||
|
||||
Expected host filter in this test would match a host such as:
|
||||
inventory.hosts.create(
|
||||
ansible_facts={"ansible_distribution_version": "7.4"}
|
||||
)
|
||||
"""
|
||||
# Create smart inventory with host filter that corresponds to querystring
|
||||
post(
|
||||
reverse('api:inventory_list'),
|
||||
data={
|
||||
'name': 'smart inventory', 'kind': 'smart',
|
||||
'organization': organization.pk,
|
||||
'host_filter': 'ansible_facts__ansible_distribution_version=%227.4%22'
|
||||
},
|
||||
user=admin_user,
|
||||
expect=201
|
||||
)
|
||||
# Assert that the saved version of host filter has escaped ""
|
||||
si = Inventory.objects.get(name='smart inventory')
|
||||
assert si.host_filter == 'ansible_facts__ansible_distribution_version="7.4"'
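A minimal sketch of the decoding the docstring above describes; whichever layer AWX uses to unquote the querystring is not shown in this diff, this only illustrates the %22 -> " translation (Python 2, as used throughout this codebase):

from urllib import unquote

assert unquote('ansible_facts__ansible_distribution_version=%227.4%22') == \
    'ansible_facts__ansible_distribution_version="7.4"'
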
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_host_filter_unicode(post, admin_user, organization):
|
||||
post(
|
||||
reverse('api:inventory_list'),
|
||||
data={
|
||||
'name': 'smart inventory', 'kind': 'smart',
|
||||
'organization': organization.pk,
|
||||
'host_filter': u'ansible_facts__ansible_distribution=レッドハット'
|
||||
},
|
||||
user=admin_user,
|
||||
expect=201
|
||||
)
|
||||
si = Inventory.objects.get(name='smart inventory')
|
||||
assert si.host_filter == u'ansible_facts__ansible_distribution=レッドハット'
|
||||
|
||||
|
||||
@pytest.mark.parametrize("role_field,expected_status_code", [
|
||||
(None, 403),
|
||||
('admin_role', 201),
|
||||
|
||||
@@ -403,6 +403,22 @@ def test_job_launch_fails_with_missing_multivault_password(machine_credential, v
|
||||
|
||||
url = reverse('api:job_template_launch', kwargs={'pk': deploy_jobtemplate.pk})
|
||||
resp = get(url, rando, expect=200)
|
||||
|
||||
assert {
|
||||
'credential_type': vault_cred_first.credential_type_id,
|
||||
'passwords_needed': ['vault_password.abc'],
|
||||
'vault_id': u'abc',
|
||||
'name': u'Vault #1',
|
||||
'id': vault_cred_first.id
|
||||
} in resp.data['defaults']['credentials']
|
||||
assert {
|
||||
'credential_type': vault_cred_second.credential_type_id,
|
||||
'passwords_needed': ['vault_password.xyz'],
|
||||
'vault_id': u'xyz',
|
||||
'name': u'Vault #2',
|
||||
'id': vault_cred_second.id
|
||||
} in resp.data['defaults']['credentials']
|
||||
|
||||
assert resp.data['passwords_needed_to_start'] == ['vault_password.abc', 'vault_password.xyz']
|
||||
assert sum([
|
||||
cred['passwords_needed'] for cred in resp.data['defaults']['credentials']
|
||||
@@ -544,10 +560,11 @@ def test_callback_accept_prompted_extra_var(mocker, survey_spec_factory, job_tem
|
||||
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
|
||||
admin_user, expect=201, format='json')
|
||||
assert JobTemplate.create_unified_job.called
|
||||
assert JobTemplate.create_unified_job.call_args == ({'extra_vars': {'survey_var': 4,
|
||||
'job_launch_var': 3},
|
||||
'launch_type': 'callback',
|
||||
'limit': 'single-host'},)
|
||||
assert JobTemplate.create_unified_job.call_args == ({
|
||||
'extra_vars': {'survey_var': 4, 'job_launch_var': 3},
|
||||
'_eager_fields': {'launch_type': 'callback'},
|
||||
'limit': 'single-host'},
|
||||
)
|
||||
|
||||
mock_job.signal_start.assert_called_once()
|
||||
|
||||
@@ -569,8 +586,10 @@ def test_callback_ignore_unprompted_extra_var(mocker, survey_spec_factory, job_t
|
||||
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
|
||||
admin_user, expect=201, format='json')
|
||||
assert JobTemplate.create_unified_job.called
|
||||
assert JobTemplate.create_unified_job.call_args == ({'launch_type': 'callback',
|
||||
'limit': 'single-host'},)
|
||||
assert JobTemplate.create_unified_job.call_args == ({
|
||||
'_eager_fields': {'launch_type': 'callback'},
|
||||
'limit': 'single-host'},
|
||||
)
|
||||
|
||||
mock_job.signal_start.assert_called_once()
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
import os
|
||||
|
||||
from backports.tempfile import TemporaryDirectory
|
||||
import pytest
|
||||
|
||||
# AWX
|
||||
@@ -7,6 +10,7 @@ from awx.main.models.jobs import Job, JobTemplate
|
||||
from awx.main.migrations import _save_password_keys as save_password_keys
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.apps import apps
|
||||
|
||||
|
||||
@@ -570,3 +574,31 @@ def test_save_survey_passwords_on_migration(job_template_with_survey_passwords):
|
||||
save_password_keys.migrate_survey_passwords(apps, None)
|
||||
job = job_template_with_survey_passwords.jobs.all()[0]
|
||||
assert job.survey_passwords == {'SSN': '$encrypted$', 'secret_key': '$encrypted$'}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_template_custom_virtualenv(get, patch, organization_factory, job_template_factory):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
|
||||
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
|
||||
admin = objs.superusers.admin
|
||||
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
|
||||
url = reverse('api:job_template_detail', kwargs={'pk': jt.id})
|
||||
patch(url, {'custom_virtualenv': temp_dir}, user=admin, expect=200)
|
||||
assert get(url, user=admin).data['custom_virtualenv'] == os.path.join(temp_dir, '')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_template_invalid_custom_virtualenv(get, patch, organization_factory,
|
||||
job_template_factory):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
|
||||
url = reverse('api:job_template_detail', kwargs={'pk': jt.id})
|
||||
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=objs.superusers.admin, expect=400)
|
||||
assert resp.data['custom_virtualenv'] == [
|
||||
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
|
||||
]
|
||||
|
||||
@@ -2,15 +2,16 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import os
|
||||
|
||||
from backports.tempfile import TemporaryDirectory
|
||||
from django.conf import settings
|
||||
import pytest
|
||||
import mock
|
||||
|
||||
|
||||
# Django
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
# AWX
|
||||
from awx.main.models import * # noqa
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -188,3 +189,21 @@ def test_delete_organization_xfail1(delete, organization, alice):
|
||||
@mock.patch('awx.main.access.BaseAccess.check_license', lambda *a, **kw: True)
|
||||
def test_delete_organization_xfail2(delete, organization):
|
||||
delete(reverse('api:organization_detail', kwargs={'pk': organization.id}), user=None, expect=401)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_organization_custom_virtualenv(get, patch, organization, admin):
|
||||
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
|
||||
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
|
||||
url = reverse('api:organization_detail', kwargs={'pk': organization.id})
|
||||
patch(url, {'custom_virtualenv': temp_dir}, user=admin, expect=200)
|
||||
assert get(url, user=admin).data['custom_virtualenv'] == os.path.join(temp_dir, '')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_organization_invalid_custom_virtualenv(get, patch, organization, admin):
|
||||
url = reverse('api:organization_detail', kwargs={'pk': organization.id})
|
||||
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=admin, expect=400)
|
||||
assert resp.data['custom_virtualenv'] == [
|
||||
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
|
||||
]
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
import os
|
||||
|
||||
from backports.tempfile import TemporaryDirectory
|
||||
from django.conf import settings
|
||||
import pytest
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestInsightsCredential:
|
||||
@@ -13,3 +19,20 @@ class TestInsightsCredential:
|
||||
{'credential': scm_credential.id}, admin_user,
|
||||
expect=400)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_project_custom_virtualenv(get, patch, project, admin):
|
||||
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
|
||||
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
|
||||
url = reverse('api:project_detail', kwargs={'pk': project.id})
|
||||
patch(url, {'custom_virtualenv': temp_dir}, user=admin, expect=200)
|
||||
assert get(url, user=admin).data['custom_virtualenv'] == os.path.join(temp_dir, '')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_project_invalid_custom_virtualenv(get, patch, project, admin):
|
||||
url = reverse('api:project_detail', kwargs={'pk': project.id})
|
||||
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=admin, expect=400)
|
||||
assert resp.data['custom_virtualenv'] == [
|
||||
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
|
||||
]
|
||||
|
||||
@@ -8,6 +8,17 @@ from awx.main.models import JobTemplate
|
||||
RRULE_EXAMPLE = 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1'
|
||||
|
||||
|
||||
def get_rrule(tz=None):
|
||||
parts = ['DTSTART']
|
||||
if tz:
|
||||
parts.append(';TZID={}'.format(tz))
|
||||
parts.append(':20300308T050000')
|
||||
if tz is None:
|
||||
parts.append('Z')
|
||||
parts.append(' RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5')
|
||||
return ''.join(parts)
|
||||
|
||||
|
||||
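For reference, the strings the helper above produces (derived directly from the code, with and without a TZID):

get_rrule()
# 'DTSTART:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5'
get_rrule('America/New_York')
# 'DTSTART;TZID=America/New_York:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5'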
@pytest.mark.django_db
|
||||
def test_non_job_extra_vars_prohibited(post, project, admin_user):
|
||||
url = reverse('api:project_schedules_list', kwargs={'pk': project.id})
|
||||
@@ -32,3 +43,234 @@ def test_valid_survey_answer(post, admin_user, project, inventory, survey_spec_f
|
||||
url = reverse('api:job_template_schedules_list', kwargs={'pk': job_template.id})
|
||||
post(url, {'name': 'test sch', 'rrule': RRULE_EXAMPLE, 'extra_data': '{"var1": 54}'},
|
||||
admin_user, expect=201)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('rrule, error', [
|
||||
("", "This field may not be blank"),
|
||||
("DTSTART:NONSENSE", "Valid DTSTART required in rrule"),
|
||||
("DTSTART:20300308T050000Z DTSTART:20310308T050000", "Multiple DTSTART is not supported"),
|
||||
("DTSTART:20300308T050000Z", "RRULE required in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:NONSENSE", "INTERVAL required in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=SECONDLY;INTERVAL=5;COUNT=6", "SECONDLY is not supported"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=3,4", "Multiple BYMONTHDAYs not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYMONTH=1,2", "Multiple BYMONTHs not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO", "BYDAY with numeric prefix not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYYEARDAY=100", "BYYEARDAY not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYWEEKNO=20", "BYWEEKNO not supported"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000", "COUNT > 999 is unsupported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=REGULARLY;INTERVAL=1", "rrule parsing failed validation: invalid 'FREQ': REGULARLY"), # noqa
|
||||
("DTSTART;TZID=America/New_York:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1", "rrule parsing failed validation"),
|
||||
("DTSTART:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1", "DTSTART cannot be a naive datetime"),
|
||||
("DTSTART:19700101T000000Z RRULE:FREQ=MINUTELY;INTERVAL=1", "more than 1000 events are not allowed"), # noqa
|
||||
])
|
||||
def test_invalid_rrules(post, admin_user, project, inventory, rrule, error):
|
||||
job_template = JobTemplate.objects.create(
|
||||
name='test-jt',
|
||||
project=project,
|
||||
playbook='helloworld.yml',
|
||||
inventory=inventory
|
||||
)
|
||||
url = reverse('api:job_template_schedules_list', kwargs={'pk': job_template.id})
|
||||
resp = post(url, {
|
||||
'name': 'Some Schedule',
|
||||
'rrule': rrule,
|
||||
}, admin_user, expect=400)
|
||||
assert error in resp.content
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_utc_preview(post, admin_user):
|
||||
url = reverse('api:schedule_rrule')
|
||||
r = post(url, {'rrule': get_rrule()}, admin_user, expect=200)
|
||||
assert r.data['utc'] == r.data['local']
|
||||
assert map(str, r.data['utc']) == [
|
||||
'2030-03-08 05:00:00+00:00',
|
||||
'2030-03-09 05:00:00+00:00',
|
||||
'2030-03-10 05:00:00+00:00',
|
||||
'2030-03-11 05:00:00+00:00',
|
||||
'2030-03-12 05:00:00+00:00',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_nyc_with_dst(post, admin_user):
|
||||
url = reverse('api:schedule_rrule')
|
||||
r = post(url, {'rrule': get_rrule('America/New_York')}, admin_user, expect=200)
|
||||
|
||||
# March 10, 2030 is when DST takes effect in NYC
|
||||
assert map(str, r.data['local']) == [
|
||||
'2030-03-08 05:00:00-05:00',
|
||||
'2030-03-09 05:00:00-05:00',
|
||||
'2030-03-10 05:00:00-04:00',
|
||||
'2030-03-11 05:00:00-04:00',
|
||||
'2030-03-12 05:00:00-04:00',
|
||||
]
|
||||
assert map(str, r.data['utc']) == [
|
||||
'2030-03-08 10:00:00+00:00',
|
||||
'2030-03-09 10:00:00+00:00',
|
||||
'2030-03-10 09:00:00+00:00',
|
||||
'2030-03-11 09:00:00+00:00',
|
||||
'2030-03-12 09:00:00+00:00',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_phoenix_without_dst(post, admin_user):
|
||||
# The state of Arizona (aside from a few Native American territories) does
|
||||
# not observe DST
|
||||
url = reverse('api:schedule_rrule')
|
||||
r = post(url, {'rrule': get_rrule('America/Phoenix')}, admin_user, expect=200)
|
||||
|
||||
# March 10, 2030 is when DST takes effect in NYC
|
||||
assert map(str, r.data['local']) == [
|
||||
'2030-03-08 05:00:00-07:00',
|
||||
'2030-03-09 05:00:00-07:00',
|
||||
'2030-03-10 05:00:00-07:00',
|
||||
'2030-03-11 05:00:00-07:00',
|
||||
'2030-03-12 05:00:00-07:00',
|
||||
]
|
||||
assert map(str, r.data['utc']) == [
|
||||
'2030-03-08 12:00:00+00:00',
|
||||
'2030-03-09 12:00:00+00:00',
|
||||
'2030-03-10 12:00:00+00:00',
|
||||
'2030-03-11 12:00:00+00:00',
|
||||
'2030-03-12 12:00:00+00:00',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_interval_by_local_day(post, admin_user):
|
||||
url = reverse('api:schedule_rrule')
|
||||
rrule = 'DTSTART;TZID=America/New_York:20300112T210000 RRULE:FREQ=MONTHLY;INTERVAL=1;BYDAY=SA;BYSETPOS=1;COUNT=4'
|
||||
r = post(url, {'rrule': rrule}, admin_user, expect=200)
|
||||
|
||||
# March 10, 2030 is when DST takes effect in NYC
|
||||
assert map(str, r.data['local']) == [
|
||||
'2030-02-02 21:00:00-05:00',
|
||||
'2030-03-02 21:00:00-05:00',
|
||||
'2030-04-06 21:00:00-04:00',
|
||||
'2030-05-04 21:00:00-04:00',
|
||||
]
|
||||
|
||||
assert map(str, r.data['utc']) == [
|
||||
'2030-02-03 02:00:00+00:00',
|
||||
'2030-03-03 02:00:00+00:00',
|
||||
'2030-04-07 01:00:00+00:00',
|
||||
'2030-05-05 01:00:00+00:00',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_weekday_timezone_boundary(post, admin_user):
|
||||
url = reverse('api:schedule_rrule')
|
||||
rrule = 'DTSTART;TZID=America/New_York:20300101T210000 RRULE:FREQ=WEEKLY;BYDAY=TU;INTERVAL=1;COUNT=3'
|
||||
r = post(url, {'rrule': rrule}, admin_user, expect=200)
|
||||
|
||||
assert map(str, r.data['local']) == [
|
||||
'2030-01-01 21:00:00-05:00',
|
||||
'2030-01-08 21:00:00-05:00',
|
||||
'2030-01-15 21:00:00-05:00',
|
||||
]
|
||||
|
||||
assert map(str, r.data['utc']) == [
|
||||
'2030-01-02 02:00:00+00:00',
|
||||
'2030-01-09 02:00:00+00:00',
|
||||
'2030-01-16 02:00:00+00:00',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_first_monthly_weekday_timezone_boundary(post, admin_user):
|
||||
url = reverse('api:schedule_rrule')
|
||||
rrule = 'DTSTART;TZID=America/New_York:20300101T210000 RRULE:FREQ=MONTHLY;BYDAY=SU;BYSETPOS=1;INTERVAL=1;COUNT=3'
|
||||
r = post(url, {'rrule': rrule}, admin_user, expect=200)
|
||||
|
||||
assert map(str, r.data['local']) == [
|
||||
'2030-01-06 21:00:00-05:00',
|
||||
'2030-02-03 21:00:00-05:00',
|
||||
'2030-03-03 21:00:00-05:00',
|
||||
]
|
||||
|
||||
assert map(str, r.data['utc']) == [
|
||||
'2030-01-07 02:00:00+00:00',
|
||||
'2030-02-04 02:00:00+00:00',
|
||||
'2030-03-04 02:00:00+00:00',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_annual_timezone_boundary(post, admin_user):
|
||||
url = reverse('api:schedule_rrule')
|
||||
rrule = 'DTSTART;TZID=America/New_York:20301231T230000 RRULE:FREQ=YEARLY;INTERVAL=1;COUNT=3'
|
||||
r = post(url, {'rrule': rrule}, admin_user, expect=200)
|
||||
|
||||
assert map(str, r.data['local']) == [
|
||||
'2030-12-31 23:00:00-05:00',
|
||||
'2031-12-31 23:00:00-05:00',
|
||||
'2032-12-31 23:00:00-05:00',
|
||||
]
|
||||
|
||||
assert map(str, r.data['utc']) == [
|
||||
'2031-01-01 04:00:00+00:00',
|
||||
'2032-01-01 04:00:00+00:00',
|
||||
'2033-01-01 04:00:00+00:00',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
def test_dst_phantom_hour(post, admin_user):
|
||||
# The DST period in the United States begins at 02:00 (2 am) local time, so
|
||||
# the hour from 2:00:00 to 2:59:59 does not exist in the night of the
|
||||
# switch.
|
||||
|
||||
# Three Sundays, starting 2:30AM America/New_York, starting Mar 3, 2030,
|
||||
# should _not_ include Mar 10, 2030 @ 2:30AM (because it doesn't exist)
|
||||
url = reverse('api:schedule_rrule')
|
||||
rrule = 'DTSTART;TZID=America/New_York:20300303T023000 RRULE:FREQ=WEEKLY;BYDAY=SU;INTERVAL=1;COUNT=3'
|
||||
r = post(url, {'rrule': rrule}, admin_user, expect=200)
|
||||
|
||||
assert map(str, r.data['local']) == [
|
||||
'2030-03-03 02:30:00-05:00',
|
||||
'2030-03-17 02:30:00-04:00', # Skip 3/10 because 3/10 @ 2:30AM isn't a real date
|
||||
]
|
||||
|
||||
assert map(str, r.data['utc']) == [
|
||||
'2030-03-03 07:30:00+00:00',
|
||||
'2030-03-17 06:30:00+00:00', # Skip 3/10 because 3/10 @ 2:30AM isn't a real date
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_months_with_31_days(post, admin_user):
|
||||
url = reverse('api:schedule_rrule')
|
||||
rrule = 'DTSTART;TZID=America/New_York:20300101T000000 RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=31;COUNT=7'
|
||||
r = post(url, {'rrule': rrule}, admin_user, expect=200)
|
||||
|
||||
# 30 days have September, April, June, and November...
|
||||
assert map(str, r.data['local']) == [
|
||||
'2030-01-31 00:00:00-05:00',
|
||||
'2030-03-31 00:00:00-04:00',
|
||||
'2030-05-31 00:00:00-04:00',
|
||||
'2030-07-31 00:00:00-04:00',
|
||||
'2030-08-31 00:00:00-04:00',
|
||||
'2030-10-31 00:00:00-04:00',
|
||||
'2030-12-31 00:00:00-05:00',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
def test_dst_rollback_duplicates(post, admin_user):
|
||||
# From Nov 2 -> Nov 3, 2030, daylight savings ends and we "roll back" an hour.
|
||||
# Make sure we don't "double count" duplicate times in the "rolled back"
|
||||
# hour.
|
||||
|
||||
url = reverse('api:schedule_rrule')
|
||||
rrule = 'DTSTART;TZID=America/New_York:20301102T233000 RRULE:FREQ=HOURLY;INTERVAL=1;COUNT=5'
|
||||
r = post(url, {'rrule': rrule}, admin_user, expect=200)
|
||||
|
||||
assert map(str, r.data['local']) == [
|
||||
'2030-11-02 23:30:00-04:00',
|
||||
'2030-11-03 00:30:00-04:00',
|
||||
'2030-11-03 01:30:00-04:00',
|
||||
'2030-11-03 02:30:00-05:00',
|
||||
'2030-11-03 03:30:00-05:00',
|
||||
]
|
||||
|
||||
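The skipped 2:30 AM on the spring-forward date and the repeated hour on the fall-back date can be reproduced with pytz alone; a minimal illustration (the endpoint itself may compute occurrences differently under the hood):

from datetime import datetime
import pytz

nyc = pytz.timezone('America/New_York')

# Spring forward: 2030-03-10 02:30 never occurs in New York
try:
    nyc.localize(datetime(2030, 3, 10, 2, 30), is_dst=None)
except pytz.exceptions.NonExistentTimeError:
    print('2030-03-10 02:30 does not exist')

# Fall back: 2030-11-03 01:30 occurs twice (once EDT, once EST)
try:
    nyc.localize(datetime(2030, 11, 3, 1, 30), is_dst=None)
except pytz.exceptions.AmbiguousTimeError:
    print('2030-11-03 01:30 is ambiguous')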
@@ -304,3 +304,19 @@ def test_isolated_keys_readonly(get, patch, delete, admin, key, expected):
|
||||
|
||||
delete(url, user=admin)
|
||||
assert getattr(settings, key) == 'secret'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_isolated_key_flag_readonly(get, patch, delete, admin):
|
||||
settings.AWX_ISOLATED_KEY_GENERATION = True
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'jobs'})
|
||||
resp = get(url, user=admin)
|
||||
assert resp.data['AWX_ISOLATED_KEY_GENERATION'] is True
|
||||
|
||||
patch(url, user=admin, data={
|
||||
'AWX_ISOLATED_KEY_GENERATION': False
|
||||
})
|
||||
assert settings.AWX_ISOLATED_KEY_GENERATION is True
|
||||
|
||||
delete(url, user=admin)
|
||||
assert settings.AWX_ISOLATED_KEY_GENERATION is True
|
||||
|
||||
@@ -111,6 +111,241 @@ def test_survey_spec_sucessful_creation(survey_spec_factory, job_template, post,
|
||||
assert updated_jt.survey_spec == survey_input_data
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('with_default', [True, False])
|
||||
@pytest.mark.parametrize('value, status', [
|
||||
('SUPERSECRET', 201),
|
||||
(['some', 'invalid', 'list'], 400),
|
||||
({'some-invalid': 'dict'}, 400),
|
||||
(False, 400)
|
||||
])
|
||||
def test_survey_spec_passwords_are_encrypted_on_launch(job_template_factory, post, admin_user, with_default, value, status):
|
||||
objects = job_template_factory('jt', organization='org1', project='prj',
|
||||
inventory='inv', credential='cred')
|
||||
job_template = objects.job_template
|
||||
job_template.survey_enabled = True
|
||||
job_template.save()
|
||||
input_data = {
|
||||
'description': 'A survey',
|
||||
'spec': [{
|
||||
'index': 0,
|
||||
'question_name': 'What is your password?',
|
||||
'required': True,
|
||||
'variable': 'secret_value',
|
||||
'type': 'password'
|
||||
}],
|
||||
'name': 'my survey'
|
||||
}
|
||||
if with_default:
|
||||
input_data['spec'][0]['default'] = 'some-default'
|
||||
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
|
||||
data=input_data, user=admin_user, expect=200)
|
||||
resp = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
|
||||
dict(extra_vars=dict(secret_value=value)), admin_user, expect=status)
|
||||
|
||||
if status == 201:
|
||||
job = Job.objects.get(pk=resp.data['id'])
|
||||
assert json.loads(job.extra_vars)['secret_value'].startswith('$encrypted$')
|
||||
assert json.loads(job.decrypted_extra_vars()) == {
|
||||
'secret_value': value
|
||||
}
|
||||
else:
|
||||
assert "for 'secret_value' expected to be a string." in json.dumps(resp.data)
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@pytest.mark.django_db
|
||||
def test_survey_spec_passwords_with_empty_default(job_template_factory, post, admin_user):
|
||||
objects = job_template_factory('jt', organization='org1', project='prj',
|
||||
inventory='inv', credential='cred')
|
||||
job_template = objects.job_template
|
||||
job_template.survey_enabled = True
|
||||
job_template.save()
|
||||
input_data = {
|
||||
'description': 'A survey',
|
||||
'spec': [{
|
||||
'index': 0,
|
||||
'question_name': 'What is your password?',
|
||||
'required': False,
|
||||
'variable': 'secret_value',
|
||||
'type': 'password',
|
||||
'default': ''
|
||||
}],
|
||||
'name': 'my survey'
|
||||
}
|
||||
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
|
||||
data=input_data, user=admin_user, expect=200)
|
||||
|
||||
resp = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
|
||||
{}, admin_user, expect=201)
|
||||
job = Job.objects.get(pk=resp.data['id'])
|
||||
assert json.loads(job.extra_vars)['secret_value'] == ''
|
||||
assert json.loads(job.decrypted_extra_vars()) == {
|
||||
'secret_value': ''
|
||||
}
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('default, launch_value, expected_extra_vars, status', [
|
||||
['', '$encrypted$', {'secret_value': ''}, 201],
|
||||
['', 'y', {'secret_value': 'y'}, 201],
|
||||
['', 'y' * 100, None, 400],
|
||||
[None, '$encrypted$', {}, 201],
|
||||
[None, 'y', {'secret_value': 'y'}, 201],
|
||||
[None, 'y' * 100, {}, 400],
|
||||
['x', '$encrypted$', {'secret_value': 'x'}, 201],
|
||||
['x', 'y', {'secret_value': 'y'}, 201],
|
||||
['x', 'y' * 100, {}, 400],
|
||||
['x' * 100, '$encrypted$', {}, 201],
|
||||
['x' * 100, 'y', {'secret_value': 'y'}, 201],
|
||||
['x' * 100, 'y' * 100, {}, 400],
|
||||
])
|
||||
def test_survey_spec_passwords_with_default_optional(job_template_factory, post, admin_user,
|
||||
default, launch_value,
|
||||
expected_extra_vars, status):
|
||||
objects = job_template_factory('jt', organization='org1', project='prj',
|
||||
inventory='inv', credential='cred')
|
||||
job_template = objects.job_template
|
||||
job_template.survey_enabled = True
|
||||
job_template.save()
|
||||
input_data = {
|
||||
'description': 'A survey',
|
||||
'spec': [{
|
||||
'index': 0,
|
||||
'question_name': 'What is your password?',
|
||||
'required': False,
|
||||
'variable': 'secret_value',
|
||||
'type': 'password',
|
||||
'max': 3
|
||||
}],
|
||||
'name': 'my survey'
|
||||
}
|
||||
if default is not None:
|
||||
input_data['spec'][0]['default'] = default
|
||||
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
|
||||
data=input_data, user=admin_user, expect=200)
|
||||
|
||||
resp = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
|
||||
data={'extra_vars': {'secret_value': launch_value}}, user=admin_user, expect=status)
|
||||
|
||||
if status == 201:
|
||||
job = Job.objects.get(pk=resp.data['job'])
|
||||
assert json.loads(job.decrypted_extra_vars()) == expected_extra_vars
|
||||
if default:
|
||||
assert default not in json.loads(job.extra_vars).values()
|
||||
assert launch_value not in json.loads(job.extra_vars).values()
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('default, launch_value, expected_extra_vars, status', [
|
||||
['', '$encrypted$', {'secret_value': ''}, 201],
|
||||
[None, '$encrypted$', {}, 400],
|
||||
[None, 'y', {'secret_value': 'y'}, 201],
|
||||
])
|
||||
def test_survey_spec_passwords_with_default_required(job_template_factory, post, admin_user,
|
||||
default, launch_value,
|
||||
expected_extra_vars, status):
|
||||
objects = job_template_factory('jt', organization='org1', project='prj',
|
||||
inventory='inv', credential='cred')
|
||||
job_template = objects.job_template
|
||||
job_template.survey_enabled = True
|
||||
job_template.save()
|
||||
input_data = {
|
||||
'description': 'A survey',
|
||||
'spec': [{
|
||||
'index': 0,
|
||||
'question_name': 'What is your password?',
|
||||
'required': True,
|
||||
'variable': 'secret_value',
|
||||
'type': 'password',
|
||||
'max': 3
|
||||
}],
|
||||
'name': 'my survey'
|
||||
}
|
||||
if default is not None:
|
||||
input_data['spec'][0]['default'] = default
|
||||
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
|
||||
data=input_data, user=admin_user, expect=200)
|
||||
|
||||
resp = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
|
||||
data={'extra_vars': {'secret_value': launch_value}}, user=admin_user, expect=status)
|
||||
|
||||
if status == 201:
|
||||
job = Job.objects.get(pk=resp.data['job'])
|
||||
assert json.loads(job.decrypted_extra_vars()) == expected_extra_vars
|
||||
if default:
|
||||
assert default not in json.loads(job.extra_vars).values()
|
||||
assert launch_value not in json.loads(job.extra_vars).values()
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('default, status', [
|
||||
('SUPERSECRET', 200),
|
||||
(['some', 'invalid', 'list'], 400),
|
||||
({'some-invalid': 'dict'}, 400),
|
||||
(False, 400)
|
||||
])
|
||||
def test_survey_spec_default_passwords_are_encrypted(job_template, post, admin_user, default, status):
|
||||
job_template.survey_enabled = True
|
||||
job_template.save()
|
||||
input_data = {
|
||||
'description': 'A survey',
|
||||
'spec': [{
|
||||
'index': 0,
|
||||
'question_name': 'What is your password?',
|
||||
'required': True,
|
||||
'variable': 'secret_value',
|
||||
'default': default,
|
||||
'type': 'password'
|
||||
}],
|
||||
'name': 'my survey'
|
||||
}
|
||||
resp = post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
|
||||
data=input_data, user=admin_user, expect=status)
|
||||
|
||||
if status == 200:
|
||||
updated_jt = JobTemplate.objects.get(pk=job_template.pk)
|
||||
assert updated_jt.survey_spec['spec'][0]['default'].startswith('$encrypted$')
|
||||
|
||||
job = updated_jt.create_unified_job()
|
||||
assert json.loads(job.extra_vars)['secret_value'].startswith('$encrypted$')
|
||||
assert json.loads(job.decrypted_extra_vars()) == {
|
||||
'secret_value': default
|
||||
}
|
||||
else:
|
||||
assert "for 'secret_value' expected to be a string." in str(resp.data)
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@pytest.mark.django_db
|
||||
def test_survey_spec_default_passwords_encrypted_on_update(job_template, post, put, admin_user):
|
||||
input_data = {
|
||||
'description': 'A survey',
|
||||
'spec': [{
|
||||
'index': 0,
|
||||
'question_name': 'What is your password?',
|
||||
'required': True,
|
||||
'variable': 'secret_value',
|
||||
'default': 'SUPERSECRET',
|
||||
'type': 'password'
|
||||
}],
|
||||
'name': 'my survey'
|
||||
}
|
||||
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
|
||||
data=input_data, user=admin_user, expect=200)
|
||||
updated_jt = JobTemplate.objects.get(pk=job_template.pk)
|
||||
|
||||
# simulate a survey field edit where we're not changing the default value
|
||||
input_data['spec'][0]['default'] = '$encrypted$'
|
||||
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
|
||||
data=input_data, user=admin_user, expect=200)
|
||||
assert updated_jt.survey_spec == JobTemplate.objects.get(pk=job_template.pk).survey_spec
|
||||
|
||||
|
||||
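The '$encrypted$' placeholder acts as a sentinel: clients echo it back when editing a survey, so the stored default is kept instead of the placeholder being encrypted literally. A rough sketch of the merge behavior these assertions imply (names are illustrative, not the AWX code):

ENCRYPTED_SENTINEL = '$encrypted$'

def merge_survey_default(old_question, new_question):
    # Keep the previously stored (already encrypted) default when the client
    # resubmits the sentinel instead of a new plaintext value.
    if new_question.get('default') == ENCRYPTED_SENTINEL:
        new_question['default'] = old_question.get('default', '')
    return new_question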
# Tests related to survey content validation
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@pytest.mark.django_db
|
||||
|
||||
awx/main/tests/functional/api/test_unified_jobs_stdout.py (new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import base64
|
||||
import json
|
||||
import re
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
from django.conf import settings
|
||||
from django.db.backends.sqlite3.base import SQLiteCursorWrapper
|
||||
import pytest
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import (Job, JobEvent, AdHocCommand, AdHocCommandEvent,
|
||||
Project, ProjectUpdate, ProjectUpdateEvent,
|
||||
InventoryUpdate, InventorySource,
|
||||
InventoryUpdateEvent, SystemJob, SystemJobEvent)
|
||||
|
||||
|
||||
def _mk_project_update():
|
||||
project = Project()
|
||||
project.save()
|
||||
return ProjectUpdate(project=project)
|
||||
|
||||
|
||||
def _mk_inventory_update():
|
||||
source = InventorySource()
|
||||
source.save()
|
||||
iu = InventoryUpdate(inventory_source=source)
|
||||
return iu
|
||||
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def sqlite_copy_expert(request):
|
||||
# copy_expert is postgres-specific, and SQLite doesn't support it; mock its
|
||||
# behavior to test that it writes a file that contains stdout from events
|
||||
path = tempfile.mkdtemp(prefix='job-event-stdout')
|
||||
|
||||
def write_stdout(self, sql, fd):
|
||||
# simulate postgres copy_expert support with ORM code
|
||||
parts = sql.split(' ')
|
||||
tablename = parts[parts.index('from') + 1]
|
||||
for cls in (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
|
||||
InventoryUpdateEvent, SystemJobEvent):
|
||||
if cls._meta.db_table == tablename:
|
||||
for event in cls.objects.order_by('start_line').all():
|
||||
fd.write(event.stdout.encode('utf-8'))
|
||||
|
||||
setattr(SQLiteCursorWrapper, 'copy_expert', write_stdout)
|
||||
request.addfinalizer(lambda: shutil.rmtree(path))
|
||||
request.addfinalizer(lambda: delattr(SQLiteCursorWrapper, 'copy_expert'))
|
||||
return path
|
||||
|
||||
|
||||
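On a real PostgreSQL backend the stdout endpoints stream event output through the psycopg2 cursor's copy_expert(); a rough sketch of the kind of call the fixture above stands in for (the DSN and job id are illustrative; the table name follows the Django default exposed by cls._meta.db_table above):

import io
import psycopg2

conn = psycopg2.connect('dbname=awx')  # illustrative connection
with conn.cursor() as cursor:
    buf = io.BytesIO()
    # Stream one job's ordered event stdout straight out of Postgres.
    cursor.copy_expert(
        'COPY (SELECT stdout FROM main_jobevent WHERE job_id = 42 '
        'ORDER BY start_line) TO STDOUT',
        buf,
    )
    print(buf.getvalue())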
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('Parent, Child, relation, view', [
|
||||
[Job, JobEvent, 'job', 'api:job_stdout'],
|
||||
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
|
||||
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
|
||||
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
|
||||
])
|
||||
def test_text_stdout(sqlite_copy_expert, Parent, Child, relation, view, get, admin):
|
||||
job = Parent()
|
||||
job.save()
|
||||
for i in range(3):
|
||||
Child(**{relation: job, 'stdout': 'Testing {}\n'.format(i), 'start_line': i}).save()
|
||||
url = reverse(view, kwargs={'pk': job.pk}) + '?format=txt'
|
||||
|
||||
response = get(url, user=admin, expect=200)
|
||||
assert response.content.splitlines() == ['Testing %d' % i for i in range(3)]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('Parent, Child, relation, view', [
|
||||
[Job, JobEvent, 'job', 'api:job_stdout'],
|
||||
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
|
||||
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
|
||||
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
|
||||
])
|
||||
@pytest.mark.parametrize('download', [True, False])
|
||||
def test_ansi_stdout_filtering(sqlite_copy_expert, Parent, Child, relation,
|
||||
view, download, get, admin):
|
||||
job = Parent()
|
||||
job.save()
|
||||
for i in range(3):
|
||||
Child(**{
|
||||
relation: job,
|
||||
'stdout': '\x1B[0;36mTesting {}\x1B[0m\n'.format(i),
|
||||
'start_line': i
|
||||
}).save()
|
||||
url = reverse(view, kwargs={'pk': job.pk})
|
||||
|
||||
# ansi codes in ?format=txt should get filtered
|
||||
fmt = "?format={}".format("txt_download" if download else "txt")
|
||||
response = get(url + fmt, user=admin, expect=200)
|
||||
assert response.content.splitlines() == ['Testing %d' % i for i in range(3)]
|
||||
has_download_header = response.has_header('Content-Disposition')
|
||||
assert has_download_header if download else not has_download_header
|
||||
|
||||
# ask for ansi and you'll get it
|
||||
fmt = "?format={}".format("ansi_download" if download else "ansi")
|
||||
response = get(url + fmt, user=admin, expect=200)
|
||||
assert response.content.splitlines() == ['\x1B[0;36mTesting %d\x1B[0m' % i for i in range(3)]
|
||||
has_download_header = response.has_header('Content-Disposition')
|
||||
assert has_download_header if download else not has_download_header
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('Parent, Child, relation, view', [
|
||||
[Job, JobEvent, 'job', 'api:job_stdout'],
|
||||
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
|
||||
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
|
||||
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
|
||||
])
|
||||
def test_colorized_html_stdout(sqlite_copy_expert, Parent, Child, relation, view, get, admin):
|
||||
job = Parent()
|
||||
job.save()
|
||||
for i in range(3):
|
||||
Child(**{
|
||||
relation: job,
|
||||
'stdout': '\x1B[0;36mTesting {}\x1B[0m\n'.format(i),
|
||||
'start_line': i
|
||||
}).save()
|
||||
url = reverse(view, kwargs={'pk': job.pk}) + '?format=html'
|
||||
|
||||
response = get(url, user=admin, expect=200)
|
||||
assert '.ansi36 { color: #2dbaba; }' in response.content
|
||||
for i in range(3):
|
||||
assert '<span class="ansi36">Testing {}</span>'.format(i) in response.content
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('Parent, Child, relation, view', [
|
||||
[Job, JobEvent, 'job', 'api:job_stdout'],
|
||||
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
|
||||
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
|
||||
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
|
||||
])
|
||||
def test_stdout_line_range(sqlite_copy_expert, Parent, Child, relation, view, get, admin):
|
||||
job = Parent()
|
||||
job.save()
|
||||
for i in range(20):
|
||||
Child(**{relation: job, 'stdout': 'Testing {}\n'.format(i), 'start_line': i}).save()
|
||||
url = reverse(view, kwargs={'pk': job.pk}) + '?format=html&start_line=5&end_line=10'
|
||||
|
||||
response = get(url, user=admin, expect=200)
|
||||
assert re.findall('Testing [0-9]+', response.content) == ['Testing %d' % i for i in range(5, 10)]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_text_stdout_from_system_job_events(sqlite_copy_expert, get, admin):
|
||||
job = SystemJob()
|
||||
job.save()
|
||||
for i in range(3):
|
||||
SystemJobEvent(system_job=job, stdout='Testing {}\n'.format(i), start_line=i).save()
|
||||
url = reverse('api:system_job_detail', kwargs={'pk': job.pk})
|
||||
response = get(url, user=admin, expect=200)
|
||||
assert response.data['result_stdout'].splitlines() == ['Testing %d' % i for i in range(3)]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('Parent, Child, relation, view', [
|
||||
[Job, JobEvent, 'job', 'api:job_stdout'],
|
||||
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
|
||||
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
|
||||
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
|
||||
])
|
||||
@pytest.mark.parametrize('fmt', ['txt', 'ansi'])
|
||||
def test_max_bytes_display(sqlite_copy_expert, Parent, Child, relation, view, fmt, get, admin):
|
||||
job = Parent()
|
||||
job.save()
|
||||
total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
|
||||
large_stdout = 'X' * total_bytes
|
||||
Child(**{relation: job, 'stdout': large_stdout, 'start_line': 0}).save()
|
||||
url = reverse(view, kwargs={'pk': job.pk})
|
||||
|
||||
response = get(url + '?format={}'.format(fmt), user=admin, expect=200)
|
||||
assert response.content == (
|
||||
'Standard Output too large to display ({actual} bytes), only download '
|
||||
'supported for sizes over {max} bytes'.format(
|
||||
actual=total_bytes,
|
||||
max=settings.STDOUT_MAX_BYTES_DISPLAY
|
||||
)
|
||||
)
|
||||
|
||||
response = get(url + '?format={}_download'.format(fmt), user=admin, expect=200)
|
||||
assert response.content == large_stdout
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('Cls, view', [
|
||||
[_mk_project_update, 'api:project_update_stdout'],
|
||||
[_mk_inventory_update, 'api:inventory_update_stdout']
|
||||
])
|
||||
@pytest.mark.parametrize('fmt', ['txt', 'ansi', 'txt_download', 'ansi_download'])
|
||||
def test_legacy_result_stdout_text_fallback(Cls, view, fmt, get, admin):
|
||||
# older versions stored raw stdout in a raw text blob at
|
||||
# main_unifiedjob.result_stdout_text; this test ensures that fallback
|
||||
# works properly if no job events exist
|
||||
job = Cls()
|
||||
job.save()
|
||||
job.result_stdout_text = 'LEGACY STDOUT!'
|
||||
job.save()
|
||||
url = reverse(view, kwargs={'pk': job.pk})
|
||||
|
||||
response = get(url + '?format={}'.format(fmt), user=admin, expect=200)
|
||||
assert response.content == 'LEGACY STDOUT!'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('Cls, view', [
|
||||
[_mk_project_update, 'api:project_update_stdout'],
|
||||
[_mk_inventory_update, 'api:inventory_update_stdout']
|
||||
])
|
||||
@pytest.mark.parametrize('fmt', ['txt', 'ansi'])
|
||||
def test_legacy_result_stdout_with_max_bytes(Cls, view, fmt, get, admin):
|
||||
job = Cls()
|
||||
job.save()
|
||||
total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
|
||||
large_stdout = 'X' * total_bytes
|
||||
job.result_stdout_text = large_stdout
|
||||
job.save()
|
||||
url = reverse(view, kwargs={'pk': job.pk})
|
||||
|
||||
response = get(url + '?format={}'.format(fmt), user=admin, expect=200)
|
||||
assert response.content == (
|
||||
'Standard Output too large to display ({actual} bytes), only download '
|
||||
'supported for sizes over {max} bytes'.format(
|
||||
actual=total_bytes,
|
||||
max=settings.STDOUT_MAX_BYTES_DISPLAY
|
||||
)
|
||||
)
|
||||
|
||||
response = get(url + '?format={}'.format(fmt + '_download'), user=admin, expect=200)
|
||||
assert response.content == large_stdout
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('Parent, Child, relation, view', [
|
||||
[Job, JobEvent, 'job', 'api:job_stdout'],
|
||||
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
|
||||
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
|
||||
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
|
||||
])
|
||||
@pytest.mark.parametrize('fmt', ['txt', 'ansi', 'txt_download', 'ansi_download'])
|
||||
def test_text_with_unicode_stdout(sqlite_copy_expert, Parent, Child, relation,
|
||||
view, get, admin, fmt):
|
||||
job = Parent()
|
||||
job.save()
|
||||
for i in range(3):
|
||||
Child(**{relation: job, 'stdout': u'オ{}\n'.format(i), 'start_line': i}).save()
|
||||
url = reverse(view, kwargs={'pk': job.pk}) + '?format=' + fmt
|
||||
|
||||
response = get(url, user=admin, expect=200)
|
||||
assert response.content.splitlines() == ['オ%d' % i for i in range(3)]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_unicode_with_base64_ansi(sqlite_copy_expert, get, admin):
|
||||
job = Job()
|
||||
job.save()
|
||||
for i in range(3):
|
||||
JobEvent(job=job, stdout=u'オ{}\n'.format(i), start_line=i).save()
|
||||
url = reverse(
|
||||
'api:job_stdout',
|
||||
kwargs={'pk': job.pk}
|
||||
) + '?format=json&content_encoding=base64&content_format=ansi'
|
||||
|
||||
response = get(url, user=admin, expect=200)
|
||||
content = base64.b64decode(json.loads(response.content)['content'])
|
||||
assert content.splitlines() == ['オ%d' % i for i in range(3)]
|
||||
@@ -73,7 +73,8 @@ def user():
|
||||
try:
|
||||
user = User.objects.get(username=name)
|
||||
except User.DoesNotExist:
|
||||
-user = User(username=name, is_superuser=is_superuser, password=name)
+user = User(username=name, is_superuser=is_superuser)
+user.set_password(name)
|
||||
user.save()
|
||||
return user
|
||||
return u
|
||||
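The change above matters because Django's User(password=...) stores the given string verbatim, while set_password() stores a salted hash that check_password() can verify; a minimal sketch (assumes a configured Django environment):

from django.contrib.auth.models import User

u = User(username='alice', password='sekrit')  # raw value; check_password('sekrit') would fail
u.set_password('sekrit')                       # replaces it with a salted hash
assert u.check_password('sekrit')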
@@ -544,7 +545,8 @@ def _request(verb):
|
||||
response.data = data_copy
|
||||
print(response.data)
|
||||
assert response.status_code == expect
|
||||
-response.render()
+if hasattr(response, 'render'):
+    response.render()
|
||||
return response
|
||||
return rf
|
||||
|
||||
|
||||
awx/main/tests/functional/models/test_events.py (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
import mock
|
||||
import pytest
|
||||
|
||||
from awx.main.models import (Job, JobEvent, ProjectUpdate, ProjectUpdateEvent,
|
||||
AdHocCommand, AdHocCommandEvent, InventoryUpdate,
|
||||
InventorySource, InventoryUpdateEvent, SystemJob,
|
||||
SystemJobEvent)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.consumers.emit_channel_notification')
|
||||
def test_job_event_websocket_notifications(emit):
|
||||
j = Job(id=123)
|
||||
j.save()
|
||||
JobEvent.create_from_data(job_id=j.pk)
|
||||
assert len(emit.call_args_list) == 1
|
||||
topic, payload = emit.call_args_list[0][0]
|
||||
assert topic == 'job_events-123'
|
||||
assert payload['job'] == 123
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.consumers.emit_channel_notification')
|
||||
def test_ad_hoc_event_websocket_notifications(emit):
|
||||
ahc = AdHocCommand(id=123)
|
||||
ahc.save()
|
||||
AdHocCommandEvent.create_from_data(ad_hoc_command_id=ahc.pk)
|
||||
assert len(emit.call_args_list) == 1
|
||||
topic, payload = emit.call_args_list[0][0]
|
||||
assert topic == 'ad_hoc_command_events-123'
|
||||
assert payload['ad_hoc_command'] == 123
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.consumers.emit_channel_notification')
|
||||
def test_project_update_event_websocket_notifications(emit, project):
|
||||
pu = ProjectUpdate(id=123, project=project)
|
||||
pu.save()
|
||||
ProjectUpdateEvent.create_from_data(project_update_id=pu.pk)
|
||||
assert len(emit.call_args_list) == 1
|
||||
topic, payload = emit.call_args_list[0][0]
|
||||
assert topic == 'project_update_events-123'
|
||||
assert payload['project_update'] == 123
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.consumers.emit_channel_notification')
|
||||
def test_inventory_update_event_websocket_notifications(emit, inventory):
|
||||
source = InventorySource()
|
||||
source.save()
|
||||
iu = InventoryUpdate(id=123, inventory_source=source)
|
||||
iu.save()
|
||||
InventoryUpdateEvent.create_from_data(inventory_update_id=iu.pk)
|
||||
assert len(emit.call_args_list) == 1
|
||||
topic, payload = emit.call_args_list[0][0]
|
||||
assert topic == 'inventory_update_events-123'
|
||||
assert payload['inventory_update'] == 123
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.consumers.emit_channel_notification')
|
||||
def test_system_job_event_websocket_notifications(emit, inventory):
|
||||
j = SystemJob(id=123)
|
||||
j.save()
|
||||
SystemJobEvent.create_from_data(system_job_id=j.pk)
|
||||
assert len(emit.call_args_list) == 1
|
||||
topic, payload = emit.call_args_list[0][0]
|
||||
assert topic == 'system_job_events-123'
|
||||
assert payload['system_job'] == 123
|
||||
@@ -1,5 +1,8 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import pytest
|
||||
import mock
|
||||
import six
|
||||
|
||||
from django.core.exceptions import ValidationError
|
||||
|
||||
@@ -13,6 +16,52 @@ from awx.main.models import (
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestInventoryScript:
|
||||
|
||||
def test_hostvars(self, inventory):
|
||||
inventory.hosts.create(name='ahost', variables={"foo": "bar"})
|
||||
assert inventory.get_script_data(
|
||||
hostvars=True
|
||||
)['_meta']['hostvars']['ahost'] == {
|
||||
'foo': 'bar'
|
||||
}
|
||||
|
||||
def test_towervars(self, inventory):
|
||||
host = inventory.hosts.create(name='ahost')
|
||||
assert inventory.get_script_data(
|
||||
hostvars=True,
|
||||
towervars=True
|
||||
)['_meta']['hostvars']['ahost'] == {
|
||||
'remote_tower_enabled': 'true',
|
||||
'remote_tower_id': host.id
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestActiveCount:
|
||||
|
||||
def test_host_active_count(self, organization):
|
||||
inv1 = Inventory.objects.create(name='inv1', organization=organization)
|
||||
inv2 = Inventory.objects.create(name='inv2', organization=organization)
|
||||
assert Host.objects.active_count() == 0
|
||||
inv1.hosts.create(name='host1')
|
||||
inv2.hosts.create(name='host1')
|
||||
assert Host.objects.active_count() == 1
|
||||
inv1.hosts.create(name='host2')
|
||||
assert Host.objects.active_count() == 2
|
||||
|
||||
def test_active_count_minus_tower(self, inventory):
|
||||
inventory.hosts.create(name='locally-managed-host')
|
||||
source = inventory.inventory_sources.create(
|
||||
name='tower-source', source='tower'
|
||||
)
|
||||
source.hosts.create(
|
||||
name='remotely-managed-host', inventory=inventory
|
||||
)
|
||||
assert Host.objects.active_count() == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestSCMUpdateFeatures:
|
||||
|
||||
@@ -103,6 +152,30 @@ def setup_inventory_groups(inventory, group_factory):
|
||||
groupB.save()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_update_name(inventory, inventory_source):
|
||||
iu = inventory_source.update()
|
||||
assert inventory_source.name != inventory.name
|
||||
assert iu.name == inventory.name + ' - ' + inventory_source.name
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_name_with_unicode(inventory, inventory_source):
|
||||
inventory.name = six.u('オオオ')
|
||||
inventory.save()
|
||||
iu = inventory_source.update()
|
||||
assert iu.name.startswith(inventory.name)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_update_excessively_long_name(inventory, inventory_source):
|
||||
inventory.name = 'a' * 400 # field max length 512
|
||||
inventory_source.name = 'b' * 400
|
||||
iu = inventory_source.update()
|
||||
assert inventory_source.name != inventory.name
|
||||
assert iu.name.startswith(inventory.name)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestHostManager:
|
||||
def test_host_filter_not_smart(self, setup_ec2_gce, organization):
|
||||
|
||||
awx/main/tests/functional/models/test_job.py (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.models import JobTemplate, Job
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_awx_virtualenv_from_settings(inventory, project, machine_credential):
|
||||
jt = JobTemplate.objects.create(
|
||||
name='my-jt',
|
||||
inventory=inventory,
|
||||
project=project,
|
||||
playbook='helloworld.yml'
|
||||
)
|
||||
jt.credentials.add(machine_credential)
|
||||
job = jt.create_unified_job()
|
||||
assert job.ansible_virtualenv_path == '/venv/ansible'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_awx_custom_virtualenv(inventory, project, machine_credential):
|
||||
jt = JobTemplate.objects.create(
|
||||
name='my-jt',
|
||||
inventory=inventory,
|
||||
project=project,
|
||||
playbook='helloworld.yml'
|
||||
)
|
||||
jt.credentials.add(machine_credential)
|
||||
job = jt.create_unified_job()
|
||||
|
||||
job.project.organization.custom_virtualenv = '/venv/fancy-org'
|
||||
job.project.organization.save()
|
||||
assert job.ansible_virtualenv_path == '/venv/fancy-org'
|
||||
|
||||
job.project.custom_virtualenv = '/venv/fancy-proj'
|
||||
job.project.save()
|
||||
assert job.ansible_virtualenv_path == '/venv/fancy-proj'
|
||||
|
||||
job.job_template.custom_virtualenv = '/venv/fancy-jt'
|
||||
job.job_template.save()
|
||||
assert job.ansible_virtualenv_path == '/venv/fancy-jt'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_awx_custom_virtualenv_without_jt(project):
|
||||
project.custom_virtualenv = '/venv/fancy-proj'
|
||||
project.save()
|
||||
job = Job(project=project)
|
||||
job.save()
|
||||
|
||||
job = Job.objects.get(pk=job.id)
|
||||
assert job.ansible_virtualenv_path == '/venv/fancy-proj'
|
||||
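Taken together, these assertions pin down the lookup order for custom virtualenvs: job template first, then project, then organization, with /venv/ansible as the default. A rough sketch of that precedence (an illustrative helper, not the AWX implementation):

def resolve_virtualenv(job, default='/venv/ansible'):
    project = job.project
    organization = project.organization if project else None
    # First non-empty custom_virtualenv wins, most specific object first.
    for obj in (job.job_template, project, organization):
        if obj is not None and getattr(obj, 'custom_virtualenv', None):
            return obj.custom_virtualenv
    return default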
awx/main/tests/functional/models/test_schedule.py (new file, 205 lines)
@@ -0,0 +1,205 @@
|
||||
from datetime import datetime
|
||||
|
||||
import mock
|
||||
import pytest
|
||||
import pytz
|
||||
|
||||
from awx.main.models import JobTemplate, Schedule
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def job_template(inventory, project):
|
||||
# need related resources set for these tests
|
||||
return JobTemplate.objects.create(
|
||||
name='test-job_template',
|
||||
inventory=inventory,
|
||||
project=project
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_repeats_forever(job_template):
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule='DTSTART:20300112T210000Z RRULE:FREQ=DAILY;INTERVAL=1',
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
assert str(s.next_run) == str(s.dtstart) == '2030-01-12 21:00:00+00:00'
|
||||
assert s.dtend is None
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_no_recurrence_utc(job_template):
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule='DTSTART:20300112T210000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1',
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
assert str(s.next_run) == str(s.dtstart) == str(s.dtend) == '2030-01-12 21:00:00+00:00'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_no_recurrence_est(job_template):
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule='DTSTART;TZID=America/New_York:20300112T210000 RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1',
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
assert str(s.next_run) == str(s.dtstart) == str(s.dtend) == '2030-01-13 02:00:00+00:00'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_next_run_utc(job_template):
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule='DTSTART:20300112T210000Z RRULE:FREQ=MONTHLY;INTERVAL=1;BYDAY=SA;BYSETPOS=1;COUNT=4',
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
assert str(s.next_run) == '2030-02-02 21:00:00+00:00'
|
||||
assert str(s.next_run) == str(s.dtstart)
|
||||
assert str(s.dtend) == '2030-05-04 21:00:00+00:00'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_next_run_est(job_template):
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule='DTSTART;TZID=America/New_York:20300112T210000 RRULE:FREQ=MONTHLY;INTERVAL=1;BYDAY=SA;BYSETPOS=1;COUNT=4',
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
|
||||
assert str(s.next_run) == '2030-02-03 02:00:00+00:00'
|
||||
assert str(s.next_run) == str(s.dtstart)
|
||||
|
||||
# March 10, 2030 is when DST takes effect in NYC
|
||||
assert str(s.dtend) == '2030-05-05 01:00:00+00:00'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_year_boundary(job_template):
|
||||
rrule = 'DTSTART;TZID=America/New_York:20301231T230000 RRULE:FREQ=YEARLY;INTERVAL=1;BYMONTH=12;BYMONTHDAY=31;COUNT=4' # noqa
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule=rrule,
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
|
||||
assert str(s.next_run) == '2031-01-01 04:00:00+00:00' # UTC = +5 EST
|
||||
assert str(s.next_run) == str(s.dtstart)
|
||||
assert str(s.dtend) == '2034-01-01 04:00:00+00:00' # UTC = +5 EST
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_leap_year_day(job_template):
|
||||
rrule = 'DTSTART;TZID=America/New_York:20320229T050000 RRULE:FREQ=YEARLY;INTERVAL=1;BYMONTH=02;BYMONTHDAY=29;COUNT=2' # noqa
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule=rrule,
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
|
||||
assert str(s.next_run) == '2032-02-29 10:00:00+00:00' # UTC = +5 EST
|
||||
assert str(s.next_run) == str(s.dtstart)
|
||||
assert str(s.dtend) == '2036-02-29 10:00:00+00:00' # UTC = +5 EST
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('until, dtend', [
|
||||
['20180602T170000Z', '2018-06-02 12:00:00+00:00'],
|
||||
['20180602T000000Z', '2018-06-01 12:00:00+00:00'],
|
||||
])
|
||||
def test_utc_until(job_template, until, dtend):
|
||||
rrule = 'DTSTART:20180601T120000Z RRULE:FREQ=DAILY;INTERVAL=1;UNTIL={}'.format(until)
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule=rrule,
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
|
||||
assert str(s.next_run) == '2018-06-01 12:00:00+00:00'
|
||||
assert str(s.next_run) == str(s.dtstart)
|
||||
assert str(s.dtend) == dtend
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('dtstart, until', [
|
||||
['20180601T120000Z', '20180602T170000'],
|
||||
['TZID=America/New_York:20180601T120000', '20180602T170000'],
|
||||
])
|
||||
def test_tzinfo_naive_until(job_template, dtstart, until):
|
||||
rrule = 'DTSTART;{} RRULE:FREQ=DAILY;INTERVAL=1;UNTIL={}'.format(dtstart, until) # noqa
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule=rrule,
|
||||
unified_job_template=job_template
|
||||
)
|
||||
with pytest.raises(ValueError):
|
||||
s.save()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_until_must_be_utc(job_template):
|
||||
rrule = 'DTSTART;TZID=America/New_York:20180601T120000 RRULE:FREQ=DAILY;INTERVAL=1;UNTIL=20180602T000000' # noqa the Z is required
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule=rrule,
|
||||
unified_job_template=job_template
|
||||
)
|
||||
with pytest.raises(ValueError) as e:
|
||||
s.save()
|
||||
assert 'RRULE UNTIL values must be specified in UTC' in str(e)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_utc_until_in_the_past(job_template):
|
||||
rrule = 'DTSTART:20180601T120000Z RRULE:FREQ=DAILY;INTERVAL=1;UNTIL=20150101T100000Z'
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule=rrule,
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
|
||||
assert s.next_run is s.dtstart is s.dtend is None
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.models.schedules.now', lambda: datetime(2030, 3, 5, tzinfo=pytz.utc))
|
||||
def test_dst_phantom_hour(job_template):
|
||||
# The DST period in the United States begins at 02:00 (2 am) local time, so
|
||||
# the hour from 2:00:00 to 2:59:59 does not exist in the night of the
|
||||
# switch.
|
||||
|
||||
# Three Sundays, starting 2:30AM America/New_York, starting Mar 3, 2030,
# should _not_ include Mar 10, 2030 @ 2:30AM (which doesn't exist)
|
||||
rrule = 'DTSTART;TZID=America/New_York:20300303T023000 RRULE:FREQ=WEEKLY;BYDAY=SU;INTERVAL=1;COUNT=3'
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule=rrule,
|
||||
unified_job_template=job_template
|
||||
)
|
||||
s.save()
|
||||
|
||||
# 3/10/30 @ 2:30AM is skipped because it _doesn't exist_ <cue twilight zone music>
|
||||
assert str(s.next_run) == '2030-03-17 06:30:00+00:00'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_beginning_of_time(job_template):
|
||||
# ensure that really large generators don't have performance issues
|
||||
rrule = 'DTSTART:19700101T000000Z RRULE:FREQ=MINUTELY;INTERVAL=1'
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule=rrule,
|
||||
unified_job_template=job_template
|
||||
)
|
||||
with pytest.raises(ValueError):
|
||||
s.save()
|
||||
@@ -25,9 +25,11 @@ def test_default_cred_types():
|
||||
'insights',
|
||||
'net',
|
||||
'openstack',
|
||||
'rhv',
|
||||
'satellite6',
|
||||
'scm',
|
||||
'ssh',
|
||||
'tower',
|
||||
'vault',
|
||||
'vmware',
|
||||
]
|
||||
|
||||
@@ -30,13 +30,15 @@ def test_job_capacity_and_with_inactive_node():
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
-def test_job_notification_data(inventory):
+def test_job_notification_data(inventory, machine_credential, project):
|
||||
encrypted_str = "$encrypted$"
|
||||
job = Job.objects.create(
|
||||
job_template=None, inventory=inventory, name='hi world',
|
||||
extra_vars=json.dumps({"SSN": "123-45-6789"}),
|
||||
-survey_passwords={"SSN": encrypted_str}
+survey_passwords={"SSN": encrypted_str},
+project=project,
|
||||
)
|
||||
+job.credentials = [machine_credential]
|
||||
notification_data = job.notification_data(block=0)
|
||||
assert json.loads(notification_data['extra_vars'])['SSN'] == encrypted_str
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.