mirror of
https://github.com/ansible/awx.git
synced 2026-02-11 14:44:44 -03:30
Compare commits
467 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a28f8c43cb | ||
|
|
fbec6a60bf | ||
|
|
6a4f3c8758 | ||
|
|
04625f566b | ||
|
|
895a567ed1 | ||
|
|
e152b30fc1 | ||
|
|
a1fe60da78 | ||
|
|
92f0893764 | ||
|
|
479448ff09 | ||
|
|
62a36e3704 | ||
|
|
2d286c5f68 | ||
|
|
f435e577b2 | ||
|
|
236b332a8b | ||
|
|
a7028df828 | ||
|
|
a59017ceef | ||
|
|
affacb8ab5 | ||
|
|
37f9024940 | ||
|
|
f72fca5fcf | ||
|
|
21aeda0f45 | ||
|
|
65a0e5ed45 | ||
|
|
571e34bf79 | ||
|
|
6dc58af8e1 | ||
|
|
bbd3edba47 | ||
|
|
46d6dce738 | ||
|
|
475a701f78 | ||
|
|
dccd7f2e9d | ||
|
|
47711bc007 | ||
|
|
04eec61387 | ||
|
|
ef4a2cbebb | ||
|
|
c8d76dbe78 | ||
|
|
61a706274b | ||
|
|
20226f8984 | ||
|
|
7ff04dafd3 | ||
|
|
f9bdb1da15 | ||
|
|
dab678c5cc | ||
|
|
44ffcf86de | ||
|
|
8a18984be1 | ||
|
|
0b1776098b | ||
|
|
5da13683ce | ||
|
|
89c2038ea3 | ||
|
|
a1012b365c | ||
|
|
484ef1b6a8 | ||
|
|
7fc269b65a | ||
|
|
ddda6b3d21 | ||
|
|
80adbf9c03 | ||
|
|
264f35d259 | ||
|
|
e513f8fe31 | ||
|
|
b9f35e5b50 | ||
|
|
002f463ffd | ||
|
|
28512e042b | ||
|
|
482395eb6a | ||
|
|
e1d44d6d14 | ||
|
|
19030b9d5f | ||
|
|
3e4738d948 | ||
|
|
94083f55c7 | ||
|
|
6ecd18b2e2 | ||
|
|
4e9c705997 | ||
|
|
1803a76a4d | ||
|
|
86ca1875f1 | ||
|
|
bf5c259d92 | ||
|
|
9bca937fad | ||
|
|
526ca3ae42 | ||
|
|
695c7ade86 | ||
|
|
c133b35162 | ||
|
|
afb3c0e31e | ||
|
|
8965f1934e | ||
|
|
556040fb8b | ||
|
|
8b3e49cb24 | ||
|
|
331e272be0 | ||
|
|
9e7808f2c9 | ||
|
|
0bb2de24f3 | ||
|
|
72ce7b194f | ||
|
|
85958c51a8 | ||
|
|
1dd44df471 | ||
|
|
b132f855a0 | ||
|
|
a361b5da6e | ||
|
|
7df63830ed | ||
|
|
b3cf93256b | ||
|
|
c695ba2e10 | ||
|
|
c44160933d | ||
|
|
9ae3e1c40f | ||
|
|
c7c5a9d2f7 | ||
|
|
a56a231869 | ||
|
|
5087ca7f62 | ||
|
|
3b7336c570 | ||
|
|
9b413afb2e | ||
|
|
eec05eac3c | ||
|
|
41671b5868 | ||
|
|
0bbf1d7014 | ||
|
|
c5ce62e11d | ||
|
|
9316c9ea3e | ||
|
|
427b8bdabb | ||
|
|
cce470a5f8 | ||
|
|
92baea2ee6 | ||
|
|
3be9113d6b | ||
|
|
785c6fe846 | ||
|
|
0d29bbfdc6 | ||
|
|
ce8117ef19 | ||
|
|
bb921af146 | ||
|
|
5e0ecc7f43 | ||
|
|
73dc58e810 | ||
|
|
89344c2eee | ||
|
|
d61cd519d7 | ||
|
|
8057438c67 | ||
|
|
110671532d | ||
|
|
92ac3054c6 | ||
|
|
c95c2a4580 | ||
|
|
2c01476eca | ||
|
|
8adbc8a026 | ||
|
|
98b8d7fb69 | ||
|
|
477551325d | ||
|
|
fdedc472d1 | ||
|
|
88819ada6b | ||
|
|
f3ee93b67f | ||
|
|
b4549e5581 | ||
|
|
3afed6adb7 | ||
|
|
1bc78fd429 | ||
|
|
6ce1b50751 | ||
|
|
6c87b88e2c | ||
|
|
10f21b8817 | ||
|
|
0d1b25131d | ||
|
|
d2118b8d25 | ||
|
|
b852caaaa3 | ||
|
|
b0dd10b538 | ||
|
|
8f4aa5511b | ||
|
|
4b26ac06ba | ||
|
|
4dc6452dea | ||
|
|
5a17acb131 | ||
|
|
6cfd9dbfe4 | ||
|
|
110c5a8e84 | ||
|
|
b185c1e0a2 | ||
|
|
f1a4a62304 | ||
|
|
9f3e3bad54 | ||
|
|
4198227116 | ||
|
|
56525bc34f | ||
|
|
3f2068e74e | ||
|
|
6117f8297e | ||
|
|
8953d06905 | ||
|
|
bf39a2a747 | ||
|
|
f27ec8cd89 | ||
|
|
aec3244f52 | ||
|
|
07aaad53aa | ||
|
|
42a0192425 | ||
|
|
ac08033d3e | ||
|
|
6d0fed6d9a | ||
|
|
53ae05094e | ||
|
|
78c4d5005e | ||
|
|
79002ae563 | ||
|
|
6c868c7552 | ||
|
|
6e4f3efc4b | ||
|
|
ce9da4edb7 | ||
|
|
6ff1fe8548 | ||
|
|
140b85688f | ||
|
|
0477581dea | ||
|
|
d5c557c639 | ||
|
|
8e60cb1270 | ||
|
|
906eb98d8e | ||
|
|
119b9475ea | ||
|
|
12c8994faf | ||
|
|
f3e73bbed8 | ||
|
|
e2a1b7902c | ||
|
|
d65a3fa037 | ||
|
|
f6600887bc | ||
|
|
96c18fa311 | ||
|
|
9645e5bcd3 | ||
|
|
0a09d98fe8 | ||
|
|
1224e2c889 | ||
|
|
c8e6fa3bb3 | ||
|
|
00cae104b3 | ||
|
|
f27a34cd1c | ||
|
|
720a634702 | ||
|
|
c722e50595 | ||
|
|
1cecfd9771 | ||
|
|
011c8ae822 | ||
|
|
73f54b2237 | ||
|
|
0a964b2bf6 | ||
|
|
fa18b94725 | ||
|
|
5ab6255c67 | ||
|
|
ac80bc874a | ||
|
|
2e98446394 | ||
|
|
c4afbbc2ca | ||
|
|
517043e209 | ||
|
|
e7c52bc5e7 | ||
|
|
c25d208465 | ||
|
|
921231fe3d | ||
|
|
6721ea54e9 | ||
|
|
99a42e91fe | ||
|
|
9a580ba644 | ||
|
|
74fcdabc22 | ||
|
|
9bec7cf3b0 | ||
|
|
f9e402658b | ||
|
|
9570981c7f | ||
|
|
f79debac42 | ||
|
|
a9f3eeef05 | ||
|
|
6eb1feffcd | ||
|
|
6f55cde6d3 | ||
|
|
48511b6c33 | ||
|
|
771daefcfd | ||
|
|
1167361128 | ||
|
|
3a4cc0d464 | ||
|
|
78901ab48e | ||
|
|
938bf1b531 | ||
|
|
27da141889 | ||
|
|
2bf2412759 | ||
|
|
1e3c229460 | ||
|
|
cfa93b52b7 | ||
|
|
96ad2b2b28 | ||
|
|
f53a1fedf6 | ||
|
|
8fceaf8810 | ||
|
|
c39370dbd0 | ||
|
|
bdc7efb274 | ||
|
|
10c76e2337 | ||
|
|
ff1e8cc356 | ||
|
|
da74f1d01f | ||
|
|
8ad46436df | ||
|
|
be01bed34b | ||
|
|
8a763d6cf8 | ||
|
|
1165dcfa07 | ||
|
|
f9928eef70 | ||
|
|
ee1d5e43b9 | ||
|
|
e94e79d57a | ||
|
|
f87a09c46a | ||
|
|
535e16c6cf | ||
|
|
5ae19fd9c2 | ||
|
|
7d5f6aa49d | ||
|
|
c0fc3a74ee | ||
|
|
22c831ff31 | ||
|
|
17dc6bf5a1 | ||
|
|
e7fb82ffe7 | ||
|
|
70ae546dee | ||
|
|
5d22fc2bd7 | ||
|
|
b4919f9ebd | ||
|
|
9033b3f2a5 | ||
|
|
de60165a49 | ||
|
|
b02677a8d0 | ||
|
|
b8c1724880 | ||
|
|
6baa2a109d | ||
|
|
7a5cfd05a3 | ||
|
|
b9279ebd5e | ||
|
|
1b25dd0127 | ||
|
|
49396178ca | ||
|
|
a4dfd96a8d | ||
|
|
a2f4e36e47 | ||
|
|
ad566cc651 | ||
|
|
4d9523afa4 | ||
|
|
40602875e0 | ||
|
|
d0572cf170 | ||
|
|
1edede213e | ||
|
|
e0c7a7bece | ||
|
|
640f9474fc | ||
|
|
29b90b700e | ||
|
|
f7c5289195 | ||
|
|
ee11341430 | ||
|
|
56263a5fea | ||
|
|
89e41f7524 | ||
|
|
3a8bacb8ef | ||
|
|
f328f8cad4 | ||
|
|
7752446067 | ||
|
|
96bc0f1578 | ||
|
|
9f25fdd079 | ||
|
|
7249b21214 | ||
|
|
2d642b95ae | ||
|
|
b94d5c7f20 | ||
|
|
02c23fc1c6 | ||
|
|
b75f8ceca6 | ||
|
|
bfc74497b0 | ||
|
|
c8c982428d | ||
|
|
e6dbf71252 | ||
|
|
3701567ad7 | ||
|
|
86140dec08 | ||
|
|
50fe0392ed | ||
|
|
f18c965a8a | ||
|
|
f874e55051 | ||
|
|
1dcd2b1883 | ||
|
|
7684579464 | ||
|
|
16e89ed081 | ||
|
|
62e3b9e3b6 | ||
|
|
dc3f81920e | ||
|
|
8a66213dbe | ||
|
|
23d4122574 | ||
|
|
5900af726b | ||
|
|
9fc4c03e5b | ||
|
|
f8d2a32756 | ||
|
|
0bb1b0ed45 | ||
|
|
3b11219fff | ||
|
|
1b4c3f56fa | ||
|
|
6c5334c7d3 | ||
|
|
1371e394de | ||
|
|
ec67feef2f | ||
|
|
89e656b2a4 | ||
|
|
5910b8c562 | ||
|
|
580004b395 | ||
|
|
910663764f | ||
|
|
43aa0fc741 | ||
|
|
b9b9fc1934 | ||
|
|
093f453073 | ||
|
|
0e696d0515 | ||
|
|
c4a29ded1c | ||
|
|
f402ff0ee7 | ||
|
|
b982793a3a | ||
|
|
b3f2f7efe5 | ||
|
|
19f9a3f918 | ||
|
|
4b2e709e8d | ||
|
|
146590d0c2 | ||
|
|
247ee4ddac | ||
|
|
2f2294b65a | ||
|
|
ec873dd28c | ||
|
|
c2bd36e580 | ||
|
|
1f8736ce1d | ||
|
|
50a9f0be6b | ||
|
|
8f3c5be04e | ||
|
|
34ceaf4551 | ||
|
|
786e907e3b | ||
|
|
c5aa9ee12b | ||
|
|
24f8cb49b5 | ||
|
|
54d967af0d | ||
|
|
3c91370cab | ||
|
|
e22dc3dc7b | ||
|
|
9ae41dc3ba | ||
|
|
f175d6dfae | ||
|
|
34c659d8b6 | ||
|
|
6eb406ac39 | ||
|
|
cddceb0e06 | ||
|
|
a549bea815 | ||
|
|
860fbdad02 | ||
|
|
f639e46718 | ||
|
|
56dc08683e | ||
|
|
148daec49b | ||
|
|
2d03938451 | ||
|
|
bc7b586803 | ||
|
|
1408200927 | ||
|
|
3576e192f4 | ||
|
|
70d930e019 | ||
|
|
c69e41b261 | ||
|
|
2949efd6ec | ||
|
|
2592613bde | ||
|
|
aa717a2728 | ||
|
|
de158cb41d | ||
|
|
f7737e2f94 | ||
|
|
4e45b6ba6d | ||
|
|
5885654405 | ||
|
|
227960e3ea | ||
|
|
d6ba3e1fc2 | ||
|
|
2643a1b3d6 | ||
|
|
6afb47789a | ||
|
|
2acc488adf | ||
|
|
d0598e720d | ||
|
|
162ef08cef | ||
|
|
aa0d2cff5c | ||
|
|
d608402dc1 | ||
|
|
04dbc2fcc4 | ||
|
|
0bc9b1d431 | ||
|
|
138f8a45ae | ||
|
|
ee348b7169 | ||
|
|
38b9b47e6b | ||
|
|
2187655c68 | ||
|
|
13203af353 | ||
|
|
4781df62ec | ||
|
|
72372b3810 | ||
|
|
b742746e5d | ||
|
|
74fc0fef04 | ||
|
|
bb8025c1af | ||
|
|
d824508cfb | ||
|
|
077e541876 | ||
|
|
4561fd7270 | ||
|
|
50786f201f | ||
|
|
5561eb30f7 | ||
|
|
e2c4fd5ebb | ||
|
|
7226acb2b6 | ||
|
|
7ef8e147f4 | ||
|
|
45db305e69 | ||
|
|
52abb29091 | ||
|
|
d564a268fd | ||
|
|
8280aff612 | ||
|
|
b35d6b7425 | ||
|
|
7b692b0c31 | ||
|
|
a271837007 | ||
|
|
a3d0e10f51 | ||
|
|
5e8f7b76f1 | ||
|
|
c67e9143fb | ||
|
|
5abe045e6c | ||
|
|
4bc63cc37e | ||
|
|
5cdd947196 | ||
|
|
67d1267d98 | ||
|
|
66db615c0c | ||
|
|
598449c2ce | ||
|
|
4119c1dd0b | ||
|
|
2acf055f6a | ||
|
|
4eeb62766e | ||
|
|
d995068396 | ||
|
|
ee139b306c | ||
|
|
a36b0061fa | ||
|
|
eb0cf945cf | ||
|
|
2e7ab57645 | ||
|
|
d8f6c0aebc | ||
|
|
f8e5e38614 | ||
|
|
3f841180da | ||
|
|
9a85578925 | ||
|
|
f1e0c1e977 | ||
|
|
1b8cb45024 | ||
|
|
fb9e508b6b | ||
|
|
0868f97335 | ||
|
|
30fbeb43bb | ||
|
|
d2aea30d3d | ||
|
|
cdb347cba5 | ||
|
|
c95c7c8b18 | ||
|
|
14043f792a | ||
|
|
9632f3b69e | ||
|
|
da1da6f530 | ||
|
|
5b93007ba1 | ||
|
|
e87633f1d8 | ||
|
|
ca35eb39d2 | ||
|
|
5d84863237 | ||
|
|
f4728149d9 | ||
|
|
4c7c8b6db3 | ||
|
|
db8ee2810a | ||
|
|
5ba8bbb08b | ||
|
|
87adfe5889 | ||
|
|
07cb2aa9bb | ||
|
|
19c5564ec8 | ||
|
|
e05d071dab | ||
|
|
6ba1b170d2 | ||
|
|
63d7abc7e4 | ||
|
|
b318fa7814 | ||
|
|
5f6907ba83 | ||
|
|
cffa324762 | ||
|
|
b690e61576 | ||
|
|
ae207b5f33 | ||
|
|
5c23c63e6d | ||
|
|
3a133836dc | ||
|
|
ab7cc88caf | ||
|
|
3b997cdd3a | ||
|
|
59f246d297 | ||
|
|
14a8258835 | ||
|
|
897fb96f94 | ||
|
|
79a29ebcc8 | ||
|
|
950e4dab04 | ||
|
|
ab82cc3ba3 | ||
|
|
93a8a952f1 | ||
|
|
2b9954c373 | ||
|
|
21f0c1d1d7 | ||
|
|
379979511b | ||
|
|
2e6a7205e7 | ||
|
|
14685901aa | ||
|
|
15480a56db | ||
|
|
9f54ba069e | ||
|
|
42f01b7f05 | ||
|
|
6cf1fb3c10 | ||
|
|
03058cd1e8 | ||
|
|
a30c2fe227 | ||
|
|
d8e890b651 | ||
|
|
95735ee01a | ||
|
|
61931d0b6c | ||
|
|
516607551c | ||
|
|
5e974d84b6 | ||
|
|
91bc39be6b | ||
|
|
49222d5e72 | ||
|
|
686e5ac545 | ||
|
|
0c3d6e7c33 | ||
|
|
e1b7e7f6ce | ||
|
|
a294a6f06e | ||
|
|
97c8005d00 | ||
|
|
d2ab7bd54d | ||
|
|
e02e8994ad | ||
|
|
ada2d65547 | ||
|
|
0443bd3099 | ||
|
|
adaa164a19 |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,3 +1,4 @@
|
||||
|
||||
# Tags
|
||||
.tags
|
||||
.tags1
|
||||
@@ -52,6 +53,7 @@ __pycache__
|
||||
**/node_modules/**
|
||||
/tmp
|
||||
**/npm-debug.log*
|
||||
**/package-lock.json
|
||||
|
||||
# UI build flag files
|
||||
awx/ui/.deps_built
|
||||
@@ -112,7 +114,6 @@ local/
|
||||
*.mo
|
||||
requirements/vendor
|
||||
.i18n_built
|
||||
VERSION
|
||||
.idea/*
|
||||
|
||||
# AWX python libs populated by requirements.txt
|
||||
|
||||
@@ -2,11 +2,11 @@
|
||||
|
||||
Hi there! We're excited to have you as a contributor.
|
||||
|
||||
Have questions about this document or anything not covered here? Come chat with us at `#ansible-awx` on irc.freenode.net, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project) .
|
||||
Have questions about this document or anything not covered here? Come chat with us at `#ansible-awx` on irc.freenode.net, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
|
||||
|
||||
## Table of contents
|
||||
|
||||
* [Things to know prior to submitting code](#things-to-know-prior-to-contributing-code)
|
||||
* [Things to know prior to submitting code](#things-to-know-prior-to-submitting-code)
|
||||
* [Setting up your development environment](#setting-up-your-development-environment)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Docker](#docker)
|
||||
@@ -17,14 +17,14 @@ Have questions about this document or anything not covered here? Come chat with
|
||||
* [Create local settings](#create-local-settings)
|
||||
* [Build the base image](#build-the-base-image)
|
||||
* [Build the user interface](#build-the-user-interface)
|
||||
# [Running the environment](#running-the-environment)
|
||||
* [Running the environment](#running-the-environment)
|
||||
* [Start the containers](#start-the-containers)
|
||||
* [Start from the container shell](#start-from-the-container-shell)
|
||||
* [Post Build Steps](#post-build-steps)
|
||||
* [Start a shell](#start-the-shell)
|
||||
* [Create a superuser](#create-a-superuser)
|
||||
* [Load the data](#load-the-data)
|
||||
* [Building API Documentation](#build-documentation)
|
||||
* [Start a shell](#start-a-shell)
|
||||
* [Create a superuser](#create-a-superuser)
|
||||
* [Load the data](#load-the-data)
|
||||
* [Building API Documentation](#build-api-documentation)
|
||||
* [Accessing the AWX web interface](#accessing-the-awx-web-interface)
|
||||
* [Purging containers and images](#purging-containers-and-images)
|
||||
* [What should I work on?](#what-should-i-work-on)
|
||||
@@ -86,8 +86,8 @@ If you're not using Docker for Mac, or Docker for Windows, you may need, or choo
|
||||
|
||||
The AWX UI requires the following:
|
||||
|
||||
- Node 6.x LTS version
|
||||
- NPM 3.x LTS
|
||||
- Node 8.x LTS
|
||||
- NPM 6.x LTS
|
||||
|
||||
### Build the environment
|
||||
|
||||
@@ -145,7 +145,7 @@ Start the development containers by running the following:
|
||||
(host)$ make docker-compose
|
||||
```
|
||||
|
||||
The above utilizes the image built in the previous step, and will automatically start all required services and dependent containers. Once the containers launch, your session will be attached to the *awx* container, and you'll be able to watch log messages and events in real time. You will see messages from Django, celery, and the front end build process.
|
||||
The above utilizes the image built in the previous step, and will automatically start all required services and dependent containers. Once the containers launch, your session will be attached to the *awx* container, and you'll be able to watch log messages and events in real time. You will see messages from Django and the front end build process.
|
||||
|
||||
If you start a second terminal session, you can take a look at the running containers using the `docker ps` command. For example:
|
||||
|
||||
@@ -174,7 +174,7 @@ The first time you start the environment, database migrations need to run in ord
|
||||
```bash
|
||||
awx_1 | Operations to perform:
|
||||
awx_1 | Synchronize unmigrated apps: solo, api, staticfiles, debug_toolbar, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
awx_1 | Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
awx_1 | Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
awx_1 | Synchronizing apps without migrations:
|
||||
awx_1 | Creating tables...
|
||||
awx_1 | Running deferred SQL...
|
||||
@@ -329,7 +329,7 @@ We like to keep our commit history clean, and will require resubmission of pull
|
||||
|
||||
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient.
|
||||
|
||||
All submitted PRs will have the linter and unit tests run against them, and the status reported in the PR.
|
||||
All submitted PRs will have the linter and unit tests run against them via Zuul, and the status reported in the PR.
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ Export all objects
|
||||
|
||||
Clean up remnants of the old AWX install:
|
||||
|
||||
```docker rm -f $(ps -aq)``` # remove all old awx containers
|
||||
```docker rm -f $(docker ps -aq)``` # remove all old awx containers
|
||||
|
||||
```make clean-ui``` # clean up ui artifacts
|
||||
|
||||
|
||||
18
INSTALL.md
18
INSTALL.md
@@ -62,8 +62,8 @@ Before you can run a deployment, you'll need the following installed in your loc
|
||||
- [docker-py](https://github.com/docker/docker-py) Python module
|
||||
- [GNU Make](https://www.gnu.org/software/make/)
|
||||
- [Git](https://git-scm.com/) Requires Version 1.8.4+
|
||||
- [Node 6.x LTS version](https://nodejs.org/en/download/)
|
||||
- [NPM 3.x LTS](https://docs.npmjs.com/)
|
||||
- [Node 8.x LTS version](https://nodejs.org/en/download/)
|
||||
- [NPM 6.x LTS](https://docs.npmjs.com/)
|
||||
|
||||
### System Requirements
|
||||
|
||||
@@ -119,12 +119,12 @@ To complete a deployment to OpenShift, you will obviously need access to an Open
|
||||
|
||||
You will also need to have the `oc` command in your PATH. The `install.yml` playbook will call out to `oc` when logging into, and creating objects on the cluster.
|
||||
|
||||
The default resource requests per-pod requires:
|
||||
The default resource requests per-deployment requires:
|
||||
|
||||
> Memory: 6GB
|
||||
> CPU: 3 cores
|
||||
|
||||
This can be tuned by overriding the variables found in [/installer/openshift/defaults/main.yml](/installer/openshift/defaults/main.yml). Special care should be taken when doing this as undersized instances will experience crashes and resource exhaustion.
|
||||
This can be tuned by overriding the variables found in [/installer/roles/kubernetes/defaults/main.yml](/installer/roles/kubernetes/defaults/main.yml). Special care should be taken when doing this as undersized instances will experience crashes and resource exhaustion.
|
||||
|
||||
For more detail on how resource requests are formed see: [https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources](https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources)
|
||||
|
||||
@@ -236,7 +236,7 @@ Using /etc/ansible/ansible.cfg as config file
|
||||
}
|
||||
Operations to perform:
|
||||
Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Synchronizing apps without migrations:
|
||||
Creating tables...
|
||||
Running deferred SQL...
|
||||
@@ -426,7 +426,7 @@ If you choose to use the official images then the remote host will be the one to
|
||||
|
||||
> As mentioned above, in [Prerequisites](#prerequisites-1), the prerequisites are required on the remote host.
|
||||
|
||||
> When deploying to a remote host, the playook does not execute tasks with the `become` option. For this reason, make sure the user that connects to the remote host has privileges to run the `docker` command. This typically means that non-privileged users need to be part of the `docker` group.
|
||||
> When deploying to a remote host, the playbook does not execute tasks with the `become` option. For this reason, make sure the user that connects to the remote host has privileges to run the `docker` command. This typically means that non-privileged users need to be part of the `docker` group.
|
||||
|
||||
|
||||
#### Inventory variables
|
||||
@@ -449,6 +449,10 @@ Before starting the build process, review the [inventory](./installer/inventory)
|
||||
|
||||
When using docker-compose, the `docker-compose.yml` file will be created there (default `/var/lib/awx`).
|
||||
|
||||
*ca_trust_dir*
|
||||
|
||||
> If you're using a non trusted CA, provide a path where the untrusted Certs are stored on your Host.
|
||||
|
||||
#### Docker registry
|
||||
|
||||
If you wish to tag and push built images to a Docker registry, set the following variables in the inventory file:
|
||||
@@ -548,7 +552,7 @@ Using /etc/ansible/ansible.cfg as config file
|
||||
}
|
||||
Operations to perform:
|
||||
Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Synchronizing apps without migrations:
|
||||
Creating tables...
|
||||
Running deferred SQL...
|
||||
|
||||
77
Makefile
77
Makefile
@@ -12,10 +12,7 @@ MANAGEMENT_COMMAND ?= awx-manage
|
||||
IMAGE_REPOSITORY_AUTH ?=
|
||||
IMAGE_REPOSITORY_BASE ?= https://gcr.io
|
||||
|
||||
VERSION=$(shell git describe --long --first-parent)
|
||||
VERSION3=$(shell git describe --long --first-parent | sed 's/\-g.*//')
|
||||
VERSION3DOT=$(shell git describe --long --first-parent | sed 's/\-g.*//' | sed 's/\-/\./')
|
||||
RELEASE_VERSION=$(shell git describe --long --first-parent | sed 's@\([0-9.]\{1,\}\).*@\1@')
|
||||
VERSION := $(shell cat VERSION)
|
||||
|
||||
# NOTE: This defaults the container image version to the branch that's active
|
||||
COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
@@ -30,8 +27,6 @@ DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
|
||||
# Comma separated list
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
|
||||
|
||||
CURWD = $(shell pwd)
|
||||
|
||||
# Determine appropriate shasum command
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S),Linux)
|
||||
@@ -48,20 +43,9 @@ DATE := $(shell date -u +%Y%m%d%H%M)
|
||||
NAME ?= awx
|
||||
GIT_REMOTE_URL = $(shell git config --get remote.origin.url)
|
||||
|
||||
ifeq ($(OFFICIAL),yes)
|
||||
VERSION_TARGET ?= $(RELEASE_VERSION)
|
||||
else
|
||||
VERSION_TARGET ?= $(VERSION3DOT)
|
||||
endif
|
||||
|
||||
# TAR build parameters
|
||||
ifeq ($(OFFICIAL),yes)
|
||||
SDIST_TAR_NAME=$(NAME)-$(RELEASE_VERSION)
|
||||
WHEEL_NAME=$(NAME)-$(RELEASE_VERSION)
|
||||
else
|
||||
SDIST_TAR_NAME=$(NAME)-$(VERSION3DOT)
|
||||
WHEEL_NAME=$(NAME)-$(VERSION3DOT)
|
||||
endif
|
||||
SDIST_TAR_NAME=$(NAME)-$(VERSION)
|
||||
WHEEL_NAME=$(NAME)-$(VERSION)
|
||||
|
||||
SDIST_COMMAND ?= sdist
|
||||
WHEEL_COMMAND ?= bdist_wheel
|
||||
@@ -75,7 +59,7 @@ UI_RELEASE_FLAG_FILE = awx/ui/.release_built
|
||||
I18N_FLAG_FILE = .i18n_built
|
||||
|
||||
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
|
||||
develop refresh adduser migrate dbchange dbshell runserver celeryd \
|
||||
develop refresh adduser migrate dbchange dbshell runserver \
|
||||
receiver test test_unit test_ansible test_coverage coverage_html \
|
||||
dev_build release_build release_clean sdist \
|
||||
ui-docker-machine ui-docker ui-release ui-devel \
|
||||
@@ -112,7 +96,6 @@ clean: clean-ui clean-dist
|
||||
rm -rf requirements/vendor
|
||||
rm -rf tmp
|
||||
rm -rf $(I18N_FLAG_FILE)
|
||||
rm -f VERSION
|
||||
mkdir tmp
|
||||
rm -rf build $(NAME)-$(VERSION) *.egg-info
|
||||
find . -type f -regex ".*\.py[co]$$" -delete
|
||||
@@ -182,7 +165,7 @@ requirements_awx: virtualenv_awx
|
||||
else \
|
||||
cat requirements/requirements.txt requirements/requirements_git.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
|
||||
fi
|
||||
$(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt
|
||||
#$(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt
|
||||
|
||||
requirements_awx_dev:
|
||||
$(VENV_BASE)/awx/bin/pip install -r requirements/requirements_dev.txt
|
||||
@@ -250,7 +233,7 @@ server_noattach:
|
||||
tmux new-session -d -s awx 'exec make uwsgi'
|
||||
tmux rename-window 'AWX'
|
||||
tmux select-window -t awx:0
|
||||
tmux split-window -v 'exec make celeryd'
|
||||
tmux split-window -v 'exec make dispatcher'
|
||||
tmux new-window 'exec make daphne'
|
||||
tmux select-window -t awx:1
|
||||
tmux rename-window 'WebSockets'
|
||||
@@ -282,12 +265,6 @@ honcho:
|
||||
fi; \
|
||||
honcho start -f tools/docker-compose/Procfile
|
||||
|
||||
flower:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
celery flower --address=0.0.0.0 --port=5555 --broker=amqp://guest:guest@$(RABBITMQ_HOST):5672//
|
||||
|
||||
collectstatic:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
@@ -298,7 +275,7 @@ uwsgi: collectstatic
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1-once="exec:/bin/sh -c '[ -f /tmp/celery_pid ] && kill -1 `cat /tmp/celery_pid` || true'"
|
||||
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1-once="exec:awx-manage run_dispatcher --reload"
|
||||
|
||||
daphne:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -319,13 +296,13 @@ runserver:
|
||||
fi; \
|
||||
$(PYTHON) manage.py runserver
|
||||
|
||||
# Run to start the background celery worker for development.
|
||||
celeryd:
|
||||
rm -f /tmp/celery_pid
|
||||
# Run to start the background task dispatcher for development.
|
||||
dispatcher:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
celery worker -A awx -l DEBUG -B -Ofair --autoscale=100,4 --schedule=$(CELERY_SCHEDULE_FILE) -n celery@$(COMPOSE_HOST) --pidfile /tmp/celery_pid
|
||||
$(PYTHON) manage.py run_dispatcher
|
||||
|
||||
|
||||
# Run to start the zeromq callback receiver
|
||||
receiver:
|
||||
@@ -372,7 +349,7 @@ check: flake8 pep8 # pyflakes pylint
|
||||
awx-link:
|
||||
cp -R /tmp/awx.egg-info /awx_devel/ || true
|
||||
sed -i "s/placeholder/$(shell git describe --long | sed 's/\./\\./g')/" /awx_devel/awx.egg-info/PKG-INFO
|
||||
cp /tmp/awx.egg-link /venv/awx/lib/python2.7/site-packages/awx.egg-link
|
||||
cp -f /tmp/awx.egg-link /venv/awx/lib/python2.7/site-packages/awx.egg-link
|
||||
|
||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
|
||||
|
||||
@@ -381,7 +358,8 @@ test:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
py.test -n auto $(TEST_DIRS)
|
||||
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider -n auto $(TEST_DIRS)
|
||||
awx-manage check_migrations --dry-run --check -n 'vNNN_missing_migration_file'
|
||||
|
||||
test_combined: test_ansible test
|
||||
|
||||
@@ -482,7 +460,7 @@ $(I18N_FLAG_FILE): $(UI_DEPS_FLAG_FILE)
|
||||
ui-deps: $(UI_DEPS_FLAG_FILE)
|
||||
|
||||
$(UI_DEPS_FLAG_FILE):
|
||||
$(NPM_BIN) --unsafe-perm --prefix awx/ui install awx/ui
|
||||
$(NPM_BIN) --unsafe-perm --prefix awx/ui install --no-save awx/ui
|
||||
touch $(UI_DEPS_FLAG_FILE)
|
||||
|
||||
ui-docker-machine: $(UI_DEPS_FLAG_FILE)
|
||||
@@ -568,17 +546,27 @@ docker-isolated:
|
||||
else \
|
||||
docker exec "tools_isolated_1" bash -c "mkdir -p /root/.ssh && rm -f /root/.ssh/authorized_keys && echo $$(docker exec -t tools_awx_1 cat /root/.ssh/id_rsa.pub) >> /root/.ssh/authorized_keys"; \
|
||||
fi
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up
|
||||
|
||||
# Docker Compose Development environment
|
||||
docker-compose: docker-auth
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml up --no-recreate awx
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml up --no-recreate awx
|
||||
|
||||
docker-compose-cluster: docker-auth
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml up
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml up
|
||||
|
||||
docker-compose-test: docker-auth
|
||||
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash
|
||||
|
||||
docker-compose-runtest:
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh
|
||||
|
||||
docker-compose-build-swagger:
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh swagger
|
||||
|
||||
docker-compose-clean:
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm -w /awx_devel --service-ports awx make clean
|
||||
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose rm -sf
|
||||
|
||||
docker-compose-build: awx-devel-build
|
||||
|
||||
@@ -604,13 +592,13 @@ docker-refresh: docker-clean docker-compose
|
||||
|
||||
# Docker Development Environment with Elastic Stack Connected
|
||||
docker-compose-elk: docker-auth
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
docker-compose-cluster-elk: docker-auth
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
minishift-dev:
|
||||
ansible-playbook -i localhost, -e devtree_directory=$(CURWD) tools/clusterdevel/start_minishift_dev.yml
|
||||
ansible-playbook -i localhost, -e devtree_directory=$(CURDIR) tools/clusterdevel/start_minishift_dev.yml
|
||||
|
||||
|
||||
clean-elk:
|
||||
@@ -625,5 +613,4 @@ psql-container:
|
||||
docker run -it --net tools_default --rm postgres:9.6 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
|
||||
|
||||
VERSION:
|
||||
@echo $(VERSION_TARGET) > $@
|
||||
@echo "awx: $(VERSION_TARGET)"
|
||||
@echo "awx: $(VERSION)"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
[](https://app.shippable.com/github/ansible/awx)
|
||||
[](https://ansible.softwarefactory-project.io/zuul/status)
|
||||
|
||||
AWX
|
||||
===
|
||||
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
|
||||
|
||||
AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is the upstream project for [Tower](https://www.ansible.com/tower), a commercial derivative of AWX.
|
||||
|
||||
@@ -11,6 +10,8 @@ To learn more about using AWX, and Tower, view the [Tower docs site](http://docs
|
||||
|
||||
The AWX Project Frequently Asked Questions can be found [here](https://www.ansible.com/awx-project-faq).
|
||||
|
||||
The AWX logos and branding assets are covered by [our trademark guidelines](https://github.com/ansible/awx-logos/blob/master/TRADEMARKS.md).
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
|
||||
@@ -12,14 +12,6 @@ __version__ = get_distribution('awx').version
|
||||
__all__ = ['__version__']
|
||||
|
||||
|
||||
# Isolated nodes do not have celery installed
|
||||
try:
|
||||
from .celery import app as celery_app # noqa
|
||||
__all__.append('celery_app')
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
# Check for the presence/absence of "devonly" module to determine if running
|
||||
# from a source code checkout or release packaage.
|
||||
try:
|
||||
|
||||
@@ -97,7 +97,7 @@ class DeprecatedCredentialField(serializers.IntegerField):
|
||||
kwargs['allow_null'] = True
|
||||
kwargs['default'] = None
|
||||
kwargs['min_value'] = 1
|
||||
kwargs['help_text'] = 'This resource has been deprecated and will be removed in a future release'
|
||||
kwargs.setdefault('help_text', 'This resource has been deprecated and will be removed in a future release')
|
||||
super(DeprecatedCredentialField, self).__init__(**kwargs)
|
||||
|
||||
def to_internal_value(self, pk):
|
||||
|
||||
@@ -390,7 +390,6 @@ class GenericAPIView(generics.GenericAPIView, APIView):
|
||||
]:
|
||||
d[key] = self.metadata_class().get_serializer_info(serializer, method=method)
|
||||
d['settings'] = settings
|
||||
d['has_named_url'] = self.model in settings.NAMED_URL_GRAPH
|
||||
return d
|
||||
|
||||
|
||||
|
||||
@@ -63,12 +63,15 @@ class Metadata(metadata.SimpleMetadata):
|
||||
verbose_name = smart_text(opts.verbose_name)
|
||||
field_info['help_text'] = field_help_text[field.field_name].format(verbose_name)
|
||||
|
||||
for model_field in serializer.Meta.model._meta.fields:
|
||||
if field.field_name == model_field.name:
|
||||
field_info['filterable'] = True
|
||||
break
|
||||
if field.field_name == 'type':
|
||||
field_info['filterable'] = True
|
||||
else:
|
||||
field_info['filterable'] = False
|
||||
for model_field in serializer.Meta.model._meta.fields:
|
||||
if field.field_name == model_field.name:
|
||||
field_info['filterable'] = True
|
||||
break
|
||||
else:
|
||||
field_info['filterable'] = False
|
||||
|
||||
# Indicate if a field has a default value.
|
||||
# FIXME: Still isn't showing all default values?
|
||||
|
||||
@@ -1903,7 +1903,9 @@ class CustomInventoryScriptSerializer(BaseSerializer):
|
||||
|
||||
|
||||
class InventorySourceOptionsSerializer(BaseSerializer):
|
||||
credential = DeprecatedCredentialField()
|
||||
credential = DeprecatedCredentialField(
|
||||
help_text=_('Cloud credential to use for inventory updates.')
|
||||
)
|
||||
|
||||
class Meta:
|
||||
fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
|
||||
@@ -3009,7 +3011,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
|
||||
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
|
||||
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
|
||||
'allow_simultaneous', 'custom_virtualenv')
|
||||
'allow_simultaneous', 'custom_virtualenv', 'job_slice_count')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(JobTemplateSerializer, self).get_related(obj)
|
||||
@@ -3026,6 +3028,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
|
||||
object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
|
||||
instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
|
||||
slice_workflow_jobs = self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}),
|
||||
))
|
||||
if self.version > 1:
|
||||
res['copy'] = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk})
|
||||
@@ -3121,7 +3124,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
|
||||
'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch', 'ask_skip_tags_on_launch',
|
||||
'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
|
||||
'ask_credential_on_launch', 'allow_simultaneous', 'artifacts', 'scm_revision',
|
||||
'instance_group', 'diff_mode')
|
||||
'instance_group', 'diff_mode', 'job_slice_number', 'job_slice_count')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(JobSerializer, self).get_related(obj)
|
||||
@@ -3588,6 +3591,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
||||
class Meta:
|
||||
model = WorkflowJob
|
||||
fields = ('*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous',
|
||||
'job_template', 'is_sliced_job',
|
||||
'-execution_node', '-event_processing_finished', '-controller_node',)
|
||||
|
||||
def get_related(self, obj):
|
||||
@@ -3596,6 +3600,8 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
||||
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',
|
||||
kwargs={'pk': obj.workflow_job_template.pk})
|
||||
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
|
||||
if obj.job_template_id:
|
||||
res['job_template'] = self.reverse('api:job_template_detail', kwargs={'pk': obj.job_template_id})
|
||||
res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})
|
||||
res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})
|
||||
res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})
|
||||
@@ -4533,13 +4539,13 @@ class SchedulePreviewSerializer(BaseSerializer):
|
||||
# - COUNT > 999
|
||||
def validate_rrule(self, value):
|
||||
rrule_value = value
|
||||
multi_by_month_day = ".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
|
||||
multi_by_month = ".*?BYMONTH[\:\=][0-9]+,[0-9]+"
|
||||
by_day_with_numeric_prefix = ".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
|
||||
match_count = re.match(".*?(COUNT\=[0-9]+)", rrule_value)
|
||||
match_multiple_dtstart = re.findall(".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
|
||||
match_native_dtstart = re.findall(".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
|
||||
match_multiple_rrule = re.findall(".*?(RRULE\:)", rrule_value)
|
||||
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
|
||||
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
|
||||
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
|
||||
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
|
||||
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
|
||||
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
|
||||
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
|
||||
if not len(match_multiple_dtstart):
|
||||
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
|
||||
if len(match_native_dtstart):
|
||||
|
||||
@@ -54,8 +54,6 @@ within all designated text fields of a model.
|
||||
|
||||
?search=findme
|
||||
|
||||
_Added in AWX 1.4_
|
||||
|
||||
(_Added in Ansible Tower 3.1.0_) Search across related fields:
|
||||
|
||||
?related__search=findme
|
||||
@@ -84,7 +82,7 @@ To exclude results matching certain criteria, prefix the field parameter with
|
||||
|
||||
?not__field=value
|
||||
|
||||
(_Added in AWX 1.4_) By default, all query string filters are AND'ed together, so
|
||||
By default, all query string filters are AND'ed together, so
|
||||
only the results matching *all* filters will be returned. To combine results
|
||||
matching *any* one of multiple criteria, prefix each query string parameter
|
||||
with `or__`:
|
||||
|
||||
@@ -10,7 +10,7 @@ object containing groups, including the hosts, children and variables for each
|
||||
group. The response data is equivalent to that returned by passing the
|
||||
`--list` argument to an inventory script.
|
||||
|
||||
_(Added in AWX 1.3)_ Specify a query string of `?hostvars=1` to retrieve the JSON
|
||||
Specify a query string of `?hostvars=1` to retrieve the JSON
|
||||
object above including all host variables. The `['_meta']['hostvars']` object
|
||||
in the response contains an entry for each host with its variables. This
|
||||
response format can be used with Ansible 1.3 and later to avoid making a
|
||||
@@ -18,11 +18,19 @@ separate API request for each host. Refer to
|
||||
[Tuning the External Inventory Script](http://docs.ansible.com/developing_inventory.html#tuning-the-external-inventory-script)
|
||||
for more information on this feature.
|
||||
|
||||
_(Added in AWX 1.4)_ By default, the inventory script will only return hosts that
|
||||
By default, the inventory script will only return hosts that
|
||||
are enabled in the inventory. This feature allows disabled hosts to be skipped
|
||||
when running jobs without removing them from the inventory. Specify a query
|
||||
string of `?all=1` to return all hosts, including disabled ones.
|
||||
|
||||
Specify a query string of `?towervars=1` to add variables
|
||||
to the hostvars of each host that specifies its enabled state and database ID.
|
||||
|
||||
Specify a query string of `?subset=slice2of5` to produce an inventory that
|
||||
has a restricted number of hosts according to the rules of job slicing.
|
||||
|
||||
To apply multiple query strings, join them with the `&` character, like `?hostvars=1&all=1`.
|
||||
|
||||
## Host Response
|
||||
|
||||
Make a GET request to this resource with a query string similar to
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
{% if has_named_url %}
|
||||
### Note: starting from api v2, this resource object can be accessed via its named URL.
|
||||
{% endif %}
|
||||
|
||||
# Retrieve {{ model_verbose_name|title|anora }}:
|
||||
|
||||
Make GET request to this resource to retrieve a single {{ model_verbose_name }}
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
{% if has_named_url %}
|
||||
### Note: starting from api v2, this resource object can be accessed via its named URL.
|
||||
{% endif %}
|
||||
|
||||
{% ifmeth GET %}
|
||||
# Retrieve {{ model_verbose_name|title|anora }}:
|
||||
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
{% if has_named_url %}
|
||||
### Note: starting from api v2, this resource object can be accessed via its named URL.
|
||||
{% endif %}
|
||||
|
||||
{% ifmeth GET %}
|
||||
# Retrieve {{ model_verbose_name|title|anora }}:
|
||||
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
{% if has_named_url %}
|
||||
### Note: starting from api v2, this resource object can be accessed via its named URL.
|
||||
{% endif %}
|
||||
|
||||
{% ifmeth GET %}
|
||||
# Retrieve {{ model_verbose_name|title|anora }}:
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ from awx.api.views import (
|
||||
JobTemplateDetail,
|
||||
JobTemplateLaunch,
|
||||
JobTemplateJobsList,
|
||||
JobTemplateSliceWorkflowJobsList,
|
||||
JobTemplateCallback,
|
||||
JobTemplateSchedulesList,
|
||||
JobTemplateSurveySpec,
|
||||
@@ -28,6 +29,7 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/$', JobTemplateDetail.as_view(), name='job_template_detail'),
|
||||
url(r'^(?P<pk>[0-9]+)/launch/$', JobTemplateLaunch.as_view(), name='job_template_launch'),
|
||||
url(r'^(?P<pk>[0-9]+)/jobs/$', JobTemplateJobsList.as_view(), name='job_template_jobs_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/slice_workflow_jobs/$', JobTemplateSliceWorkflowJobsList.as_view(), name='job_template_slice_workflow_jobs_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/callback/$', JobTemplateCallback.as_view(), name='job_template_callback'),
|
||||
url(r'^(?P<pk>[0-9]+)/schedules/$', JobTemplateSchedulesList.as_view(), name='job_template_schedules_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/survey_spec/$', JobTemplateSurveySpec.as_view(), name='job_template_survey_spec'),
|
||||
|
||||
@@ -18,8 +18,8 @@ import six
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import FieldError, ObjectDoesNotExist
|
||||
from django.db.models import Q, Count, F
|
||||
from django.db import IntegrityError, transaction
|
||||
from django.db.models import Q, Count
|
||||
from django.db import IntegrityError, transaction, connection
|
||||
from django.shortcuts import get_object_or_404
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.safestring import mark_safe
|
||||
@@ -92,7 +92,15 @@ from awx.api.serializers import * # noqa
|
||||
from awx.api.metadata import RoleMetadata, JobTypeMetadata
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.scheduler.tasks import run_job_complete
|
||||
from awx.api.exceptions import ActiveJobConflict
|
||||
from awx.api.views.mixin import (
|
||||
ActivityStreamEnforcementMixin,
|
||||
SystemTrackingEnforcementMixin,
|
||||
WorkflowsEnforcementMixin,
|
||||
UnifiedJobDeletionMixin,
|
||||
InstanceGroupMembershipMixin,
|
||||
RelatedJobsPreventDeleteMixin,
|
||||
OrganizationCountsMixin,
|
||||
)
|
||||
|
||||
logger = logging.getLogger('awx.api.views')
|
||||
|
||||
@@ -110,154 +118,6 @@ def api_exception_handler(exc, context):
|
||||
return exception_handler(exc, context)
|
||||
|
||||
|
||||
class ActivityStreamEnforcementMixin(object):
|
||||
'''
|
||||
Mixin to check that license supports activity streams.
|
||||
'''
|
||||
def check_permissions(self, request):
|
||||
if not feature_enabled('activity_streams'):
|
||||
raise LicenseForbids(_('Your license does not allow use of the activity stream.'))
|
||||
return super(ActivityStreamEnforcementMixin, self).check_permissions(request)
|
||||
|
||||
|
||||
class SystemTrackingEnforcementMixin(object):
|
||||
'''
|
||||
Mixin to check that license supports system tracking.
|
||||
'''
|
||||
def check_permissions(self, request):
|
||||
if not feature_enabled('system_tracking'):
|
||||
raise LicenseForbids(_('Your license does not permit use of system tracking.'))
|
||||
return super(SystemTrackingEnforcementMixin, self).check_permissions(request)
|
||||
|
||||
|
||||
class WorkflowsEnforcementMixin(object):
|
||||
'''
|
||||
Mixin to check that license supports workflows.
|
||||
'''
|
||||
def check_permissions(self, request):
|
||||
if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS', 'DELETE'):
|
||||
raise LicenseForbids(_('Your license does not allow use of workflows.'))
|
||||
return super(WorkflowsEnforcementMixin, self).check_permissions(request)
|
||||
|
||||
|
||||
class UnifiedJobDeletionMixin(object):
|
||||
'''
|
||||
Special handling when deleting a running unified job object.
|
||||
'''
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
if not request.user.can_access(self.model, 'delete', obj):
|
||||
raise PermissionDenied()
|
||||
try:
|
||||
if obj.unified_job_node.workflow_job.status in ACTIVE_STATES:
|
||||
raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
|
||||
except self.model.unified_job_node.RelatedObjectDoesNotExist:
|
||||
pass
|
||||
# Still allow deletion of new status, because these can be manually created
|
||||
if obj.status in ACTIVE_STATES and obj.status != 'new':
|
||||
raise PermissionDenied(detail=_("Cannot delete running job resource."))
|
||||
elif not obj.event_processing_finished:
|
||||
# Prohibit deletion if job events are still coming in
|
||||
if obj.finished and now() < obj.finished + dateutil.relativedelta.relativedelta(minutes=1):
|
||||
# less than 1 minute has passed since job finished and events are not in
|
||||
return Response({"error": _("Job has not finished processing events.")},
|
||||
status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
# if it has been > 1 minute, events are probably lost
|
||||
logger.warning('Allowing deletion of {} through the API without all events '
|
||||
'processed.'.format(obj.log_format))
|
||||
obj.delete()
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
class InstanceGroupMembershipMixin(object):
|
||||
'''
|
||||
Manages signaling celery to reload its queue configuration on Instance Group membership changes
|
||||
'''
|
||||
def attach(self, request, *args, **kwargs):
|
||||
response = super(InstanceGroupMembershipMixin, self).attach(request, *args, **kwargs)
|
||||
sub_id, res = self.attach_validate(request)
|
||||
if status.is_success(response.status_code):
|
||||
if self.parent_model is Instance:
|
||||
inst_name = ig_obj.hostname
|
||||
else:
|
||||
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
|
||||
with transaction.atomic():
|
||||
ig_qs = InstanceGroup.objects.select_for_update()
|
||||
if self.parent_model is Instance:
|
||||
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
|
||||
else:
|
||||
# similar to get_parent_object, but selected for update
|
||||
parent_filter = {
|
||||
self.lookup_field: self.kwargs.get(self.lookup_field, None),
|
||||
}
|
||||
ig_obj = get_object_or_404(ig_qs, **parent_filter)
|
||||
if inst_name not in ig_obj.policy_instance_list:
|
||||
ig_obj.policy_instance_list.append(inst_name)
|
||||
ig_obj.save(update_fields=['policy_instance_list'])
|
||||
return response
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.is_isolated():
|
||||
return {'error': _('Isolated instances may not be added or removed from instances groups via the API.')}
|
||||
if self.parent_model is InstanceGroup:
|
||||
ig_obj = self.get_parent_object()
|
||||
if ig_obj.controller_id is not None:
|
||||
return {'error': _('Isolated instance group membership may not be managed via the API.')}
|
||||
return None
|
||||
|
||||
def unattach_validate(self, request):
|
||||
(sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
|
||||
if res:
|
||||
return (sub_id, res)
|
||||
sub = get_object_or_400(self.model, pk=sub_id)
|
||||
attach_errors = self.is_valid_relation(None, sub)
|
||||
if attach_errors:
|
||||
return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
|
||||
return (sub_id, res)
|
||||
|
||||
def unattach(self, request, *args, **kwargs):
|
||||
response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
|
||||
if status.is_success(response.status_code):
|
||||
sub_id = request.data.get('id', None)
|
||||
if self.parent_model is Instance:
|
||||
inst_name = self.get_parent_object().hostname
|
||||
else:
|
||||
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
|
||||
with transaction.atomic():
|
||||
ig_qs = InstanceGroup.objects.select_for_update()
|
||||
if self.parent_model is Instance:
|
||||
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
|
||||
else:
|
||||
# similar to get_parent_object, but selected for update
|
||||
parent_filter = {
|
||||
self.lookup_field: self.kwargs.get(self.lookup_field, None),
|
||||
}
|
||||
ig_obj = get_object_or_404(ig_qs, **parent_filter)
|
||||
if inst_name in ig_obj.policy_instance_list:
|
||||
ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
|
||||
ig_obj.save(update_fields=['policy_instance_list'])
|
||||
return response
|
||||
|
||||
|
||||
class RelatedJobsPreventDeleteMixin(object):
|
||||
def perform_destroy(self, obj):
|
||||
self.check_related_active_jobs(obj)
|
||||
return super(RelatedJobsPreventDeleteMixin, self).perform_destroy(obj)
|
||||
|
||||
def check_related_active_jobs(self, obj):
|
||||
active_jobs = obj.get_active_jobs()
|
||||
if len(active_jobs) > 0:
|
||||
raise ActiveJobConflict(active_jobs)
|
||||
time_cutoff = now() - dateutil.relativedelta.relativedelta(minutes=1)
|
||||
recent_jobs = obj._get_related_jobs().filter(finished__gte = time_cutoff)
|
||||
for unified_job in recent_jobs.get_real_instances():
|
||||
if not unified_job.event_processing_finished:
|
||||
raise PermissionDenied(_(
|
||||
'Related job {} is still processing events.'
|
||||
).format(unified_job.log_format))
|
||||
|
||||
|
||||
class ApiRootView(APIView):
|
||||
|
||||
permission_classes = (AllowAny,)
|
||||
@@ -442,9 +302,9 @@ class ApiV1ConfigView(APIView):
|
||||
data.update(dict(
|
||||
project_base_dir = settings.PROJECTS_ROOT,
|
||||
project_local_paths = Project.get_local_path_choices(),
|
||||
custom_virtualenvs = get_custom_venv_choices()
|
||||
))
|
||||
|
||||
if JobTemplate.accessible_objects(request.user, 'admin_role').exists():
|
||||
elif JobTemplate.accessible_objects(request.user, 'admin_role').exists():
|
||||
data['custom_virtualenvs'] = get_custom_venv_choices()
|
||||
|
||||
return Response(data)
|
||||
@@ -739,7 +599,7 @@ class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetac
|
||||
search_fields = ('hostname',)
|
||||
|
||||
|
||||
class ScheduleList(ListAPIView):
|
||||
class ScheduleList(ListCreateAPIView):
|
||||
|
||||
view_name = _("Schedules")
|
||||
model = Schedule
|
||||
@@ -884,92 +744,6 @@ class AuthView(APIView):
|
||||
return Response(data)
|
||||
|
||||
|
||||
|
||||
class OrganizationCountsMixin(object):
|
||||
|
||||
def get_serializer_context(self, *args, **kwargs):
|
||||
full_context = super(OrganizationCountsMixin, self).get_serializer_context(*args, **kwargs)
|
||||
|
||||
if self.request is None:
|
||||
return full_context
|
||||
|
||||
db_results = {}
|
||||
org_qs = self.model.accessible_objects(self.request.user, 'read_role')
|
||||
org_id_list = org_qs.values('id')
|
||||
if len(org_id_list) == 0:
|
||||
if self.request.method == 'POST':
|
||||
full_context['related_field_counts'] = {}
|
||||
return full_context
|
||||
|
||||
inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
|
||||
project_qs = Project.accessible_objects(self.request.user, 'read_role')
|
||||
|
||||
# Produce counts of Foreign Key relationships
|
||||
db_results['inventories'] = inv_qs\
|
||||
.values('organization').annotate(Count('organization')).order_by('organization')
|
||||
|
||||
db_results['teams'] = Team.accessible_objects(
|
||||
self.request.user, 'read_role').values('organization').annotate(
|
||||
Count('organization')).order_by('organization')
|
||||
|
||||
JT_project_reference = 'project__organization'
|
||||
JT_inventory_reference = 'inventory__organization'
|
||||
db_results['job_templates_project'] = JobTemplate.accessible_objects(
|
||||
self.request.user, 'read_role').exclude(
|
||||
project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate(
|
||||
Count(JT_project_reference)).order_by(JT_project_reference)
|
||||
|
||||
db_results['job_templates_inventory'] = JobTemplate.accessible_objects(
|
||||
self.request.user, 'read_role').values(JT_inventory_reference).annotate(
|
||||
Count(JT_inventory_reference)).order_by(JT_inventory_reference)
|
||||
|
||||
db_results['projects'] = project_qs\
|
||||
.values('organization').annotate(Count('organization')).order_by('organization')
|
||||
|
||||
# Other members and admins of organization are always viewable
|
||||
db_results['users'] = org_qs.annotate(
|
||||
users=Count('member_role__members', distinct=True),
|
||||
admins=Count('admin_role__members', distinct=True)
|
||||
).values('id', 'users', 'admins')
|
||||
|
||||
count_context = {}
|
||||
for org in org_id_list:
|
||||
org_id = org['id']
|
||||
count_context[org_id] = {
|
||||
'inventories': 0, 'teams': 0, 'users': 0, 'job_templates': 0,
|
||||
'admins': 0, 'projects': 0}
|
||||
|
||||
for res, count_qs in db_results.items():
|
||||
if res == 'job_templates_project':
|
||||
org_reference = JT_project_reference
|
||||
elif res == 'job_templates_inventory':
|
||||
org_reference = JT_inventory_reference
|
||||
elif res == 'users':
|
||||
org_reference = 'id'
|
||||
else:
|
||||
org_reference = 'organization'
|
||||
for entry in count_qs:
|
||||
org_id = entry[org_reference]
|
||||
if org_id in count_context:
|
||||
if res == 'users':
|
||||
count_context[org_id]['admins'] = entry['admins']
|
||||
count_context[org_id]['users'] = entry['users']
|
||||
continue
|
||||
count_context[org_id][res] = entry['%s__count' % org_reference]
|
||||
|
||||
# Combine the counts for job templates by project and inventory
|
||||
for org in org_id_list:
|
||||
org_id = org['id']
|
||||
count_context[org_id]['job_templates'] = 0
|
||||
for related_path in ['job_templates_project', 'job_templates_inventory']:
|
||||
if related_path in count_context[org_id]:
|
||||
count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path)
|
||||
|
||||
full_context['related_field_counts'] = count_context
|
||||
|
||||
return full_context
|
||||
|
||||
|
||||
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
|
||||
|
||||
model = Organization
|
||||
@@ -1517,7 +1291,7 @@ class ProjectUpdateNotificationsList(SubListAPIView):
|
||||
search_fields = ('subject', 'notification_type', 'body',)
|
||||
|
||||
|
||||
class ProjectUpdateScmInventoryUpdates(SubListCreateAPIView):
|
||||
class ProjectUpdateScmInventoryUpdates(SubListAPIView):
|
||||
|
||||
view_name = _("Project Update SCM Inventory Updates")
|
||||
model = InventoryUpdate
|
||||
@@ -1710,10 +1484,11 @@ class OAuth2TokenActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIVi
|
||||
search_fields = ('changes',)
|
||||
|
||||
|
||||
class UserTeamsList(ListAPIView):
|
||||
class UserTeamsList(SubListAPIView):
|
||||
|
||||
model = User
|
||||
model = Team
|
||||
serializer_class = TeamSerializer
|
||||
parent_model = User
|
||||
|
||||
def get_queryset(self):
|
||||
u = get_object_or_404(User, pk=self.kwargs['pk'])
|
||||
@@ -1891,7 +1666,7 @@ class CredentialTypeDetail(RetrieveUpdateDestroyAPIView):
|
||||
return super(CredentialTypeDetail, self).destroy(request, *args, **kwargs)
|
||||
|
||||
|
||||
class CredentialTypeCredentialList(SubListAPIView):
|
||||
class CredentialTypeCredentialList(SubListCreateAPIView):
|
||||
|
||||
model = Credential
|
||||
parent_model = CredentialType
|
||||
@@ -2677,6 +2452,16 @@ class InventoryScriptView(RetrieveAPIView):
|
||||
hostvars = bool(request.query_params.get('hostvars', ''))
|
||||
towervars = bool(request.query_params.get('towervars', ''))
|
||||
show_all = bool(request.query_params.get('all', ''))
|
||||
subset = request.query_params.get('subset', '')
|
||||
if subset:
|
||||
if not isinstance(subset, six.string_types):
|
||||
raise ParseError(_('Inventory subset argument must be a string.'))
|
||||
if subset.startswith('slice'):
|
||||
slice_number, slice_count = Inventory.parse_slice_params(subset)
|
||||
else:
|
||||
raise ParseError(_('Subset does not use any supported syntax.'))
|
||||
else:
|
||||
slice_number, slice_count = 1, 1
|
||||
if hostname:
|
||||
hosts_q = dict(name=hostname)
|
||||
if not show_all:
|
||||
@@ -2686,7 +2471,8 @@ class InventoryScriptView(RetrieveAPIView):
|
||||
return Response(obj.get_script_data(
|
||||
hostvars=hostvars,
|
||||
towervars=towervars,
|
||||
show_all=show_all
|
||||
show_all=show_all,
|
||||
slice_number=slice_number, slice_count=slice_count
|
||||
))
|
||||
|
||||
|
||||
@@ -2883,17 +2669,14 @@ class InventorySourceCredentialsList(SubListAttachDetachAPIView):
|
||||
relationship = 'credentials'
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
# Inventory source credentials are exclusive with all other credentials
|
||||
# subject to change for https://github.com/ansible/awx/issues/277
|
||||
# or https://github.com/ansible/awx/issues/223
|
||||
if parent.credentials.exists():
|
||||
return {'msg': _("Source already has credential assigned.")}
|
||||
error = InventorySource.cloud_credential_validation(parent.source, sub)
|
||||
if error:
|
||||
return {'msg': error}
|
||||
if sub.credential_type == 'vault':
|
||||
# TODO: support this
|
||||
return {"msg": _("Vault credentials are not yet supported for inventory sources.")}
|
||||
else:
|
||||
# Cloud credentials are exclusive with all other cloud credentials
|
||||
cloud_cred_qs = parent.credentials.exclude(credential_type__kind='vault')
|
||||
if cloud_cred_qs.exists():
|
||||
return {'msg': _("Source already has cloud credential assigned.")}
|
||||
return None
|
||||
|
||||
|
||||
@@ -3140,9 +2923,14 @@ class JobTemplateLaunch(RetrieveAPIView):
|
||||
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
data = OrderedDict()
|
||||
data['job'] = new_job.id
|
||||
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
|
||||
data.update(JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
|
||||
if isinstance(new_job, WorkflowJob):
|
||||
data['workflow_job'] = new_job.id
|
||||
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
|
||||
data.update(WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
|
||||
else:
|
||||
data['job'] = new_job.id
|
||||
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
|
||||
data.update(JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
|
||||
headers = {'Location': new_job.get_absolute_url(request)}
|
||||
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
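# Illustrative sketch (editor's example, not part of the upstream diff): with job
# slicing, a launch may now create a WorkflowJob instead of a Job, so callers
# should check for either key in the 201 payload.  Host, credentials and IDs below
# are hypothetical; the endpoint is the template's standard launch URL.
#
#     import requests
#     resp = requests.post(
#         'https://awx.example.com/api/v2/job_templates/42/launch/',
#         auth=('admin', 'password'), json={})
#     payload = resp.json()
#     new_id = payload.get('workflow_job', payload.get('job'))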
|
||||
|
||||
@@ -3188,6 +2976,16 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
obj_permission_type = 'admin'
|
||||
serializer_class = EmptySerializer
|
||||
|
||||
ALLOWED_TYPES = {
|
||||
'text': six.string_types,
|
||||
'textarea': six.string_types,
|
||||
'password': six.string_types,
|
||||
'multiplechoice': six.string_types,
|
||||
'multiselect': six.string_types,
|
||||
'integer': int,
|
||||
'float': float
|
||||
}
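# Illustrative sketch (editor's example, not part of the upstream diff): a survey
# spec that passes the validation in this hunk -- every question carries the
# 'type', 'question_name', 'variable' and 'required' keys, each default matches
# ALLOWED_TYPES, and the choice question supplies choices.  All values below are
# hypothetical.
#
#     example_spec = {
#         "name": "Deploy options",
#         "description": "Prompts shown at launch time",
#         "spec": [
#             {"type": "text", "question_name": "Release tag",
#              "variable": "release_tag", "required": True, "default": "v1.0"},
#             {"type": "integer", "question_name": "Batch size",
#              "variable": "batch_size", "required": False, "default": 5,
#              "min": 1, "max": 50},
#             {"type": "multiplechoice", "question_name": "Region",
#              "variable": "region", "required": True,
#              "choices": ["us-east", "us-west"], "default": "us-east"},
#         ],
#     }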
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
if not feature_enabled('surveys'):
|
||||
@@ -3214,7 +3012,8 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
obj.save(update_fields=['survey_spec'])
|
||||
return Response()
|
||||
|
||||
def _validate_spec_data(self, new_spec, old_spec):
|
||||
@staticmethod
|
||||
def _validate_spec_data(new_spec, old_spec):
|
||||
schema_errors = {}
|
||||
for field, expect_type, type_label in [
|
||||
('name', six.string_types, 'string'),
|
||||
@@ -3235,40 +3034,75 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
variable_set = set()
|
||||
old_spec_dict = JobTemplate.pivot_spec(old_spec)
|
||||
for idx, survey_item in enumerate(new_spec["spec"]):
|
||||
context = dict(
|
||||
idx=six.text_type(idx),
|
||||
survey_item=survey_item
|
||||
)
|
||||
# General element validation
|
||||
if not isinstance(survey_item, dict):
|
||||
return Response(dict(error=_("Survey question %s is not a json object.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
|
||||
if "type" not in survey_item:
|
||||
return Response(dict(error=_("'type' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
|
||||
if "question_name" not in survey_item:
|
||||
return Response(dict(error=_("'question_name' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
|
||||
if "variable" not in survey_item:
|
||||
return Response(dict(error=_("'variable' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
|
||||
for field_name in ['type', 'question_name', 'variable', 'required']:
|
||||
if field_name not in survey_item:
|
||||
return Response(dict(error=_("'{field_name}' missing from survey question {idx}").format(
|
||||
field_name=field_name, **context
|
||||
)), status=status.HTTP_400_BAD_REQUEST)
|
||||
val = survey_item[field_name]
|
||||
allow_types = six.string_types
|
||||
type_label = 'string'
|
||||
if field_name == 'required':
|
||||
allow_types = bool
|
||||
type_label = 'boolean'
|
||||
if not isinstance(val, allow_types):
|
||||
return Response(dict(error=_("'{field_name}' in survey question {idx} expected to be {type_label}.").format(
|
||||
field_name=field_name, type_label=type_label, **context
|
||||
)))
|
||||
if survey_item['variable'] in variable_set:
|
||||
return Response(dict(error=_("'variable' '%(item)s' duplicated in survey question %(survey)s.") % {
|
||||
'item': survey_item['variable'], 'survey': str(idx)}), status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
variable_set.add(survey_item['variable'])
|
||||
if "required" not in survey_item:
|
||||
return Response(dict(error=_("'required' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
if survey_item["type"] == "password" and "default" in survey_item:
|
||||
if not isinstance(survey_item['default'], six.string_types):
|
||||
# Type-specific validation
|
||||
# validate question type <-> default type
|
||||
qtype = survey_item["type"]
|
||||
if qtype not in JobTemplateSurveySpec.ALLOWED_TYPES:
|
||||
return Response(dict(error=_(
|
||||
"'{survey_item[type]}' in survey question {idx} is not one of '{allowed_types}' allowed question types."
|
||||
).format(
|
||||
allowed_types=', '.join(JobTemplateSurveySpec.ALLOWED_TYPES.keys()), **context
|
||||
)))
|
||||
if 'default' in survey_item:
|
||||
if not isinstance(survey_item['default'], JobTemplateSurveySpec.ALLOWED_TYPES[qtype]):
|
||||
type_label = 'string'
|
||||
if qtype in ['integer', 'float']:
|
||||
type_label = qtype
|
||||
return Response(dict(error=_(
|
||||
"Value {question_default} for '{variable_name}' expected to be a string."
|
||||
"Default value {survey_item[default]} in survey question {idx} expected to be {type_label}."
|
||||
).format(
|
||||
question_default=survey_item["default"], variable_name=survey_item["variable"])
|
||||
), status=status.HTTP_400_BAD_REQUEST)
|
||||
type_label=type_label, **context
|
||||
)), status=status.HTTP_400_BAD_REQUEST)
|
||||
# additional type-specific properties, the UI provides these even
|
||||
# if not applicable to the question, TODO: request that they not do this
|
||||
for key in ['min', 'max']:
|
||||
if key in survey_item:
|
||||
if survey_item[key] is not None and (not isinstance(survey_item[key], int)):
|
||||
return Response(dict(error=_(
|
||||
"The {min_or_max} limit in survey question {idx} expected to be integer."
|
||||
).format(min_or_max=key, **context)))
|
||||
if qtype in ['multiplechoice', 'multiselect'] and 'choices' not in survey_item:
|
||||
return Response(dict(error=_(
|
||||
"Survey question {idx} of type {survey_item[type]} must specify choices.".format(**context)
|
||||
)))
|
||||
|
||||
# Process encryption substitution
|
||||
if ("default" in survey_item and isinstance(survey_item['default'], six.string_types) and
|
||||
survey_item['default'].startswith('$encrypted$')):
|
||||
# Submission expects the existence of encrypted DB value to replace given default
|
||||
if survey_item["type"] != "password":
|
||||
if qtype != "password":
|
||||
return Response(dict(error=_(
|
||||
"$encrypted$ is a reserved keyword for password question defaults, "
|
||||
"survey question {question_position} is type {question_type}."
|
||||
).format(
|
||||
question_position=str(idx), question_type=survey_item["type"])
|
||||
), status=status.HTTP_400_BAD_REQUEST)
|
||||
"survey question {idx} is type {survey_item[type]}."
|
||||
).format(**context)), status=status.HTTP_400_BAD_REQUEST)
|
||||
old_element = old_spec_dict.get(survey_item['variable'], {})
|
||||
encryptedish_default_exists = False
|
||||
if 'default' in old_element:
|
||||
@@ -3280,10 +3114,10 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
encryptedish_default_exists = True
|
||||
if not encryptedish_default_exists:
|
||||
return Response(dict(error=_(
|
||||
"$encrypted$ is a reserved keyword, may not be used for new default in position {question_position}."
|
||||
).format(question_position=str(idx))), status=status.HTTP_400_BAD_REQUEST)
|
||||
"$encrypted$ is a reserved keyword, may not be used for new default in position {idx}."
|
||||
).format(**context)), status=status.HTTP_400_BAD_REQUEST)
|
||||
survey_item['default'] = old_element['default']
|
||||
elif survey_item["type"] == "password" and 'default' in survey_item:
|
||||
elif qtype == "password" and 'default' in survey_item:
|
||||
# Submission provides new encrypted default
|
||||
survey_item['default'] = encrypt_value(survey_item['default'])
|
||||
|
||||
@@ -3354,8 +3188,8 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
|
||||
return {"error": _("Cannot assign multiple {credential_type} credentials.".format(
|
||||
credential_type=sub.unique_hash(display=True)))}
|
||||
return {"error": _("Cannot assign multiple {credential_type} credentials.").format(
|
||||
credential_type=sub.unique_hash(display=True))}
|
||||
kind = sub.credential_type.kind
|
||||
if kind not in ('ssh', 'vault', 'cloud', 'net'):
|
||||
return {'error': _('Cannot assign a Credential of kind `{}`.').format(kind)}
|
||||
@@ -3544,10 +3378,11 @@ class JobTemplateCallback(GenericAPIView):
|
||||
if extra_vars is not None and job_template.ask_variables_on_launch:
|
||||
extra_vars_redacted, removed = extract_ansible_vars(extra_vars)
|
||||
kv['extra_vars'] = extra_vars_redacted
|
||||
kv['_prevent_slicing'] = True # will only run against 1 host, so no point
|
||||
with transaction.atomic():
|
||||
job = job_template.create_job(**kv)
|
||||
|
||||
# Send a signal to celery that the job should be started.
|
||||
# Send a signal to signify that the job should be started.
|
||||
result = job.signal_start(inventory_sources_already_updated=inventory_sources_already_updated)
|
||||
if not result:
|
||||
data = dict(msg=_('Error starting job!'))
|
||||
@@ -3575,6 +3410,15 @@ class JobTemplateJobsList(SubListCreateAPIView):
|
||||
return methods
|
||||
|
||||
|
||||
class JobTemplateSliceWorkflowJobsList(SubListCreateAPIView):
|
||||
|
||||
model = WorkflowJob
|
||||
serializer_class = WorkflowJobListSerializer
|
||||
parent_model = JobTemplate
|
||||
relationship = 'slice_workflow_jobs'
|
||||
parent_key = 'job_template'
|
||||
|
||||
|
||||
class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
|
||||
model = InstanceGroup
|
||||
@@ -3667,10 +3511,6 @@ class WorkflowJobTemplateNodeChildrenBaseList(WorkflowsEnforcementMixin, Enforce
|
||||
return getattr(parent, self.relationship).all()
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
mutex_list = ('success_nodes', 'failure_nodes') if self.relationship == 'always_nodes' else ('always_nodes',)
|
||||
for relation in mutex_list:
|
||||
if getattr(parent, relation).all().exists():
|
||||
return {'Error': _('Cannot associate {0} when {1} have been associated.').format(self.relationship, relation)}
|
||||
|
||||
if created:
|
||||
return None
|
||||
@@ -3871,6 +3711,8 @@ class WorkflowJobRelaunch(WorkflowsEnforcementMixin, GenericAPIView):
|
||||
|
||||
def post(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
if obj.is_sliced_job and not obj.job_template_id:
|
||||
raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.'))
|
||||
new_workflow_job = obj.create_relaunch_workflow_job()
|
||||
new_workflow_job.signal_start()
|
||||
|
||||
@@ -4960,7 +4802,7 @@ class NotificationTemplateTest(GenericAPIView):
|
||||
if not notification:
|
||||
return Response({}, status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
send_notifications.delay([notification.id])
|
||||
connection.on_commit(lambda: send_notifications.delay([notification.id]))
|
||||
data = OrderedDict()
|
||||
data['notification'] = notification.id
|
||||
data.update(NotificationSerializer(notification, context=self.get_serializer_context()).to_representation(notification))
|
||||
awx/api/views/mixin.py (new file, 275 lines)
@@ -0,0 +1,275 @@
|
||||
# Copyright (c) 2018 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import dateutil
|
||||
import logging
|
||||
|
||||
from django.db.models import (
|
||||
Count,
|
||||
F,
|
||||
)
|
||||
from django.db import transaction
|
||||
from django.shortcuts import get_object_or_404
|
||||
from django.utils.timezone import now
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.utils import get_object_or_400
|
||||
from awx.main.models.ha import (
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
)
|
||||
from awx.main.models.organization import Team
|
||||
from awx.main.models.projects import Project
|
||||
from awx.main.models.inventory import Inventory
|
||||
from awx.main.models.jobs import JobTemplate
|
||||
from awx.conf.license import (
|
||||
feature_enabled,
|
||||
LicenseForbids,
|
||||
)
|
||||
from awx.api.exceptions import ActiveJobConflict
|
||||
|
||||
logger = logging.getLogger('awx.api.views.mixin')
|
||||
|
||||
|
||||
class ActivityStreamEnforcementMixin(object):
    '''
    Mixin to check that license supports activity streams.
    '''
    def check_permissions(self, request):
        ret = super(ActivityStreamEnforcementMixin, self).check_permissions(request)
        if not feature_enabled('activity_streams'):
            raise LicenseForbids(_('Your license does not allow use of the activity stream.'))
        return ret


class SystemTrackingEnforcementMixin(object):
    '''
    Mixin to check that license supports system tracking.
    '''
    def check_permissions(self, request):
        ret = super(SystemTrackingEnforcementMixin, self).check_permissions(request)
        if not feature_enabled('system_tracking'):
            raise LicenseForbids(_('Your license does not permit use of system tracking.'))
        return ret


class WorkflowsEnforcementMixin(object):
    '''
    Mixin to check that license supports workflows.
    '''
    def check_permissions(self, request):
        ret = super(WorkflowsEnforcementMixin, self).check_permissions(request)
        if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS', 'DELETE'):
            raise LicenseForbids(_('Your license does not allow use of workflows.'))
        return ret
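

# Illustrative sketch (editor's example, not part of the new file): a view opts in
# to a license gate by listing the mixin ahead of the generic view class, so the
# feature check in check_permissions() runs before any handler code.  The view
# name below is hypothetical.
#
#     class SomeActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
#         model = ActivityStream
#         serializer_class = ActivityStreamSerializer

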
class UnifiedJobDeletionMixin(object):
|
||||
'''
|
||||
Special handling when deleting a running unified job object.
|
||||
'''
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
if not request.user.can_access(self.model, 'delete', obj):
|
||||
raise PermissionDenied()
|
||||
try:
|
||||
if obj.unified_job_node.workflow_job.status in ACTIVE_STATES:
|
||||
raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
|
||||
except self.model.unified_job_node.RelatedObjectDoesNotExist:
|
||||
pass
|
||||
# Still allow deletion of new status, because these can be manually created
|
||||
if obj.status in ACTIVE_STATES and obj.status != 'new':
|
||||
raise PermissionDenied(detail=_("Cannot delete running job resource."))
|
||||
elif not obj.event_processing_finished:
|
||||
# Prohibit deletion if job events are still coming in
|
||||
if obj.finished and now() < obj.finished + dateutil.relativedelta.relativedelta(minutes=1):
|
||||
# less than 1 minute has passed since job finished and events are not in
|
||||
return Response({"error": _("Job has not finished processing events.")},
|
||||
status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
# if it has been > 1 minute, events are probably lost
|
||||
logger.warning('Allowing deletion of {} through the API without all events '
|
||||
'processed.'.format(obj.log_format))
|
||||
obj.delete()
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
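# Illustrative sketch (editor's example, not part of the new file): a unified job
# detail view picks up this deletion policy simply by mixing the class in ahead of
# the generic destroy view; the composition below is hypothetical.
#
#     class SomeJobDetail(UnifiedJobDeletionMixin, RetrieveUpdateDestroyAPIView):
#         model = Job
#         serializer_class = JobSerializer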
|
||||
|
||||
|
||||
class InstanceGroupMembershipMixin(object):
|
||||
'''
|
||||
This mixin overloads attach/detach so that it calls InstanceGroup.save(),
|
||||
triggering a background recalculation of policy-based instance group
|
||||
membership.
|
||||
'''
|
||||
def attach(self, request, *args, **kwargs):
|
||||
response = super(InstanceGroupMembershipMixin, self).attach(request, *args, **kwargs)
|
||||
sub_id, res = self.attach_validate(request)
|
||||
if status.is_success(response.status_code):
|
||||
if self.parent_model is Instance:
|
||||
ig_obj = get_object_or_400(self.model, pk=sub_id)
|
||||
inst_name = ig_obj.hostname
|
||||
else:
|
||||
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
|
||||
with transaction.atomic():
|
||||
ig_qs = InstanceGroup.objects.select_for_update()
|
||||
if self.parent_model is Instance:
|
||||
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
|
||||
else:
|
||||
# similar to get_parent_object, but selected for update
|
||||
parent_filter = {
|
||||
self.lookup_field: self.kwargs.get(self.lookup_field, None),
|
||||
}
|
||||
ig_obj = get_object_or_404(ig_qs, **parent_filter)
|
||||
if inst_name not in ig_obj.policy_instance_list:
|
||||
ig_obj.policy_instance_list.append(inst_name)
|
||||
ig_obj.save(update_fields=['policy_instance_list'])
|
||||
return response
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.is_isolated():
|
||||
return {'error': _('Isolated instances may not be added or removed from instances groups via the API.')}
|
||||
if self.parent_model is InstanceGroup:
|
||||
ig_obj = self.get_parent_object()
|
||||
if ig_obj.controller_id is not None:
|
||||
return {'error': _('Isolated instance group membership may not be managed via the API.')}
|
||||
return None
|
||||
|
||||
def unattach_validate(self, request):
|
||||
(sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
|
||||
if res:
|
||||
return (sub_id, res)
|
||||
sub = get_object_or_400(self.model, pk=sub_id)
|
||||
attach_errors = self.is_valid_relation(None, sub)
|
||||
if attach_errors:
|
||||
return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
|
||||
return (sub_id, res)
|
||||
|
||||
def unattach(self, request, *args, **kwargs):
|
||||
response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
|
||||
if status.is_success(response.status_code):
|
||||
sub_id = request.data.get('id', None)
|
||||
if self.parent_model is Instance:
|
||||
inst_name = self.get_parent_object().hostname
|
||||
else:
|
||||
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
|
||||
with transaction.atomic():
|
||||
ig_qs = InstanceGroup.objects.select_for_update()
|
||||
if self.parent_model is Instance:
|
||||
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
|
||||
else:
|
||||
# similar to get_parent_object, but selected for update
|
||||
parent_filter = {
|
||||
self.lookup_field: self.kwargs.get(self.lookup_field, None),
|
||||
}
|
||||
ig_obj = get_object_or_404(ig_qs, **parent_filter)
|
||||
if inst_name in ig_obj.policy_instance_list:
|
||||
ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
|
||||
ig_obj.save(update_fields=['policy_instance_list'])
|
||||
return response
|
||||
|
||||
|
||||
class RelatedJobsPreventDeleteMixin(object):
|
||||
def perform_destroy(self, obj):
|
||||
self.check_related_active_jobs(obj)
|
||||
return super(RelatedJobsPreventDeleteMixin, self).perform_destroy(obj)
|
||||
|
||||
def check_related_active_jobs(self, obj):
|
||||
active_jobs = obj.get_active_jobs()
|
||||
if len(active_jobs) > 0:
|
||||
raise ActiveJobConflict(active_jobs)
|
||||
time_cutoff = now() - dateutil.relativedelta.relativedelta(minutes=1)
|
||||
recent_jobs = obj._get_related_jobs().filter(finished__gte = time_cutoff)
|
||||
for unified_job in recent_jobs.get_real_instances():
|
||||
if not unified_job.event_processing_finished:
|
||||
raise PermissionDenied(_(
|
||||
'Related job {} is still processing events.'
|
||||
).format(unified_job.log_format))
|
||||
|
||||
|
||||
class OrganizationCountsMixin(object):
|
||||
|
||||
def get_serializer_context(self, *args, **kwargs):
|
||||
full_context = super(OrganizationCountsMixin, self).get_serializer_context(*args, **kwargs)
|
||||
|
||||
if self.request is None:
|
||||
return full_context
|
||||
|
||||
db_results = {}
|
||||
org_qs = self.model.accessible_objects(self.request.user, 'read_role')
|
||||
org_id_list = org_qs.values('id')
|
||||
if len(org_id_list) == 0:
|
||||
if self.request.method == 'POST':
|
||||
full_context['related_field_counts'] = {}
|
||||
return full_context
|
||||
|
||||
inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
|
||||
project_qs = Project.accessible_objects(self.request.user, 'read_role')
|
||||
|
||||
# Produce counts of Foreign Key relationships
|
||||
db_results['inventories'] = inv_qs\
|
||||
.values('organization').annotate(Count('organization')).order_by('organization')
|
||||
|
||||
db_results['teams'] = Team.accessible_objects(
|
||||
self.request.user, 'read_role').values('organization').annotate(
|
||||
Count('organization')).order_by('organization')
|
||||
|
||||
JT_project_reference = 'project__organization'
|
||||
JT_inventory_reference = 'inventory__organization'
|
||||
db_results['job_templates_project'] = JobTemplate.accessible_objects(
|
||||
self.request.user, 'read_role').exclude(
|
||||
project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate(
|
||||
Count(JT_project_reference)).order_by(JT_project_reference)
|
||||
|
||||
db_results['job_templates_inventory'] = JobTemplate.accessible_objects(
|
||||
self.request.user, 'read_role').values(JT_inventory_reference).annotate(
|
||||
Count(JT_inventory_reference)).order_by(JT_inventory_reference)
|
||||
|
||||
db_results['projects'] = project_qs\
|
||||
.values('organization').annotate(Count('organization')).order_by('organization')
|
||||
|
||||
# Other members and admins of organization are always viewable
|
||||
db_results['users'] = org_qs.annotate(
|
||||
users=Count('member_role__members', distinct=True),
|
||||
admins=Count('admin_role__members', distinct=True)
|
||||
).values('id', 'users', 'admins')
|
||||
|
||||
count_context = {}
|
||||
for org in org_id_list:
|
||||
org_id = org['id']
|
||||
count_context[org_id] = {
|
||||
'inventories': 0, 'teams': 0, 'users': 0, 'job_templates': 0,
|
||||
'admins': 0, 'projects': 0}
|
||||
|
||||
for res, count_qs in db_results.items():
|
||||
if res == 'job_templates_project':
|
||||
org_reference = JT_project_reference
|
||||
elif res == 'job_templates_inventory':
|
||||
org_reference = JT_inventory_reference
|
||||
elif res == 'users':
|
||||
org_reference = 'id'
|
||||
else:
|
||||
org_reference = 'organization'
|
||||
for entry in count_qs:
|
||||
org_id = entry[org_reference]
|
||||
if org_id in count_context:
|
||||
if res == 'users':
|
||||
count_context[org_id]['admins'] = entry['admins']
|
||||
count_context[org_id]['users'] = entry['users']
|
||||
continue
|
||||
count_context[org_id][res] = entry['%s__count' % org_reference]
|
||||
|
||||
# Combine the counts for job templates by project and inventory
|
||||
for org in org_id_list:
|
||||
org_id = org['id']
|
||||
count_context[org_id]['job_templates'] = 0
|
||||
for related_path in ['job_templates_project', 'job_templates_inventory']:
|
||||
if related_path in count_context[org_id]:
|
||||
count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path)
|
||||
|
||||
full_context['related_field_counts'] = count_context
|
||||
|
||||
return full_context
|
||||
@@ -1,25 +0,0 @@
|
||||
|
||||
# Copyright (c) 2017 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
|
||||
import os
|
||||
from celery import Celery
|
||||
from django.conf import settings # noqa
|
||||
|
||||
|
||||
try:
|
||||
import awx.devonly # noqa
|
||||
MODE = 'development'
|
||||
except ImportError: # pragma: no cover
|
||||
MODE = 'production'
|
||||
|
||||
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings.%s' % MODE)
|
||||
|
||||
app = Celery('awx')
|
||||
app.config_from_object('django.conf:settings')
|
||||
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.start()
|
||||
@@ -2,11 +2,13 @@
|
||||
from collections import namedtuple
|
||||
import contextlib
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import StringIO
|
||||
import traceback
|
||||
import urllib
|
||||
|
||||
import six
|
||||
|
||||
@@ -60,6 +62,15 @@ SETTING_CACHE_DEFAULTS = True
|
||||
__all__ = ['SettingsWrapper', 'get_settings_to_cache', 'SETTING_CACHE_NOTSET']
|
||||
|
||||
|
||||
def normalize_broker_url(value):
    parts = value.rsplit('@', 1)
    match = re.search('(amqp://[^:]+:)(.*)', parts[0])
    if match:
        prefix, password = match.group(1), match.group(2)
        parts[0] = prefix + urllib.quote(password)
    return '@'.join(parts)
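# Illustrative worked example (editor's note, not part of the upstream diff): the
# final '@' still splits off the host, and everything after the 'amqp://user:'
# prefix is percent-encoded, so an unescaped password round-trips cleanly:
#
#     >>> normalize_broker_url('amqp://guest:a@ns:ibl3#@localhost:5672//')
#     'amqp://guest:a%40ns%3Aibl3%23@localhost:5672//'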
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _ctit_db_wrapper(trans_safe=False):
|
||||
'''
|
||||
@@ -115,7 +126,8 @@ def _ctit_db_wrapper(trans_safe=False):
|
||||
bottom_stack.close()
|
||||
# Log the combined stack
|
||||
if trans_safe:
|
||||
logger.warning('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
|
||||
if 'check_migrations' not in sys.argv:
|
||||
logger.warning('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
|
||||
else:
|
||||
logger.error('Error modifying something related to database settings.\n{}'.format(tb_string))
|
||||
finally:
|
||||
@@ -444,7 +456,16 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
value = self._get_local(name)
|
||||
if value is not empty:
|
||||
return value
|
||||
return self._get_default(name)
|
||||
value = self._get_default(name)
|
||||
# sometimes users specify RabbitMQ passwords that contain
|
||||
# unescaped : and @ characters that confused urlparse, e.g.,
|
||||
# amqp://guest:a@ns:ibl3#@localhost:5672//
|
||||
#
|
||||
# detect these scenarios, and automatically escape the user's
|
||||
# password so it just works
|
||||
if name == 'BROKER_URL':
|
||||
value = normalize_broker_url(value)
|
||||
return value
|
||||
|
||||
def _set_local(self, name, value):
|
||||
field = self.registry.get_setting_field(name)
|
||||
|
||||
@@ -24,7 +24,12 @@ import os
|
||||
import pwd
|
||||
|
||||
# PSUtil
|
||||
import psutil
|
||||
try:
|
||||
import psutil
|
||||
except ImportError:
|
||||
raise ImportError('psutil is missing; {}bin/pip install psutil'.format(
|
||||
os.environ['VIRTUAL_ENV']
|
||||
))
|
||||
|
||||
__all__ = []
|
||||
|
||||
|
||||
@@ -27,7 +27,13 @@ import os
|
||||
import stat
|
||||
import threading
|
||||
import uuid
|
||||
import memcache
|
||||
|
||||
try:
|
||||
import memcache
|
||||
except ImportError:
|
||||
raise ImportError('python-memcached is missing; {}bin/pip install python-memcached'.format(
|
||||
os.environ['VIRTUAL_ENV']
|
||||
))
|
||||
|
||||
from six.moves import xrange
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PACKAGE VERSION\n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"POT-Creation-Date: 2018-08-03 19:04+0000\n"
|
||||
"POT-Creation-Date: 2018-08-14 13:52+0000\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
@@ -125,15 +125,15 @@ msgid ""
|
||||
"our REST API, the Content-Type must be application/json"
|
||||
msgstr ""
|
||||
|
||||
#: awx/api/generics.py:629 awx/api/generics.py:691
|
||||
#: awx/api/generics.py:635 awx/api/generics.py:697
|
||||
msgid "\"id\" field must be an integer."
|
||||
msgstr ""
|
||||
|
||||
#: awx/api/generics.py:688
|
||||
#: awx/api/generics.py:694
|
||||
msgid "\"id\" is required to disassociate"
|
||||
msgstr ""
|
||||
|
||||
#: awx/api/generics.py:739
|
||||
#: awx/api/generics.py:745
|
||||
msgid "{} 'id' field is missing."
|
||||
msgstr ""
|
||||
|
||||
@@ -1642,86 +1642,86 @@ msgstr ""
|
||||
msgid "Bad data found in related field %s."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:304
|
||||
#: awx/main/access.py:302
|
||||
msgid "License is missing."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:306
|
||||
#: awx/main/access.py:304
|
||||
msgid "License has expired."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:314
|
||||
#: awx/main/access.py:312
|
||||
#, python-format
|
||||
msgid "License count of %s instances has been reached."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:316
|
||||
#: awx/main/access.py:314
|
||||
#, python-format
|
||||
msgid "License count of %s instances has been exceeded."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:318
|
||||
#: awx/main/access.py:316
|
||||
msgid "Host count exceeds available instances."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:322
|
||||
#: awx/main/access.py:320
|
||||
#, python-format
|
||||
msgid "Feature %s is not enabled in the active license."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:324
|
||||
#: awx/main/access.py:322
|
||||
msgid "Features not found in active license."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:837
|
||||
#: awx/main/access.py:835
|
||||
msgid "Unable to change inventory on a host."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:854 awx/main/access.py:899
|
||||
#: awx/main/access.py:852 awx/main/access.py:897
|
||||
msgid "Cannot associate two items from different inventories."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:887
|
||||
#: awx/main/access.py:885
|
||||
msgid "Unable to change inventory on a group."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1148
|
||||
#: awx/main/access.py:1146
|
||||
msgid "Unable to change organization on a team."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1165
|
||||
#: awx/main/access.py:1163
|
||||
msgid "The {} role cannot be assigned to a team"
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1167
|
||||
#: awx/main/access.py:1165
|
||||
msgid "The admin_role for a User cannot be assigned to a team"
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1533 awx/main/access.py:1967
|
||||
#: awx/main/access.py:1531 awx/main/access.py:1965
|
||||
msgid "Job was launched with prompts provided by another user."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1553
|
||||
#: awx/main/access.py:1551
|
||||
msgid "Job has been orphaned from its job template."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1555
|
||||
#: awx/main/access.py:1553
|
||||
msgid "Job was launched with unknown prompted fields."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1557
|
||||
#: awx/main/access.py:1555
|
||||
msgid "Job was launched with prompted fields."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1559
|
||||
#: awx/main/access.py:1557
|
||||
msgid " Organization level permissions required."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1561
|
||||
#: awx/main/access.py:1559
|
||||
msgid " You do not have permission to related resources."
|
||||
msgstr ""
|
||||
|
||||
#: awx/main/access.py:1981
|
||||
#: awx/main/access.py:1979
|
||||
msgid ""
|
||||
"You do not have permission to the workflow job resources required for "
|
||||
"relaunch."
|
||||
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -611,7 +611,8 @@ class OAuth2ApplicationAccess(BaseAccess):
|
||||
select_related = ('user',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(organization__in=self.user.organizations)
|
||||
org_access_qs = Organization.accessible_objects(self.user, 'member_role')
|
||||
return self.model.objects.filter(organization__in=org_access_qs)
|
||||
|
||||
def can_change(self, obj, data):
|
||||
return self.user.is_superuser or self.check_related('organization', Organization, data, obj=obj,
|
||||
@@ -1788,7 +1789,7 @@ class WorkflowJobNodeAccess(BaseAccess):
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
workflow_job__workflow_job_template__in=WorkflowJobTemplate.accessible_objects(
|
||||
workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(
|
||||
self.user, 'read_role'))
|
||||
|
||||
@check_superuser
|
||||
@@ -1914,7 +1915,7 @@ class WorkflowJobAccess(BaseAccess):
|
||||
|
||||
def filtered_queryset(self):
|
||||
return WorkflowJob.objects.filter(
|
||||
workflow_job_template__in=WorkflowJobTemplate.accessible_objects(
|
||||
unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(
|
||||
self.user, 'read_role'))
|
||||
|
||||
def can_add(self, data):
|
||||
@@ -1946,9 +1947,11 @@ class WorkflowJobAccess(BaseAccess):
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
|
||||
wfjt = obj.workflow_job_template
|
||||
template = obj.workflow_job_template
|
||||
if not template and obj.job_template_id:
|
||||
template = obj.job_template
|
||||
# only superusers can relaunch orphans
|
||||
if not wfjt:
|
||||
if not template:
|
||||
return False
|
||||
|
||||
# If job was launched by another user, it could have survey passwords
|
||||
@@ -1966,7 +1969,7 @@ class WorkflowJobAccess(BaseAccess):
|
||||
return False
|
||||
|
||||
# execute permission to WFJT is mandatory for any relaunch
|
||||
return (self.user in wfjt.execute_role)
|
||||
return (self.user in template.execute_role)
|
||||
|
||||
def can_recreate(self, obj):
|
||||
node_qs = obj.workflow_job_nodes.all().prefetch_related('inventory', 'credentials', 'unified_job_template')
|
||||
|
||||
@@ -29,3 +29,11 @@ STANDARD_INVENTORY_UPDATE_ENV = {
|
||||
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
|
||||
ACTIVE_STATES = CAN_CANCEL
|
||||
CENSOR_VALUE = '************'
|
||||
ENV_BLACKLIST = frozenset((
|
||||
'VIRTUAL_ENV', 'PATH', 'PYTHONPATH', 'PROOT_TMP_DIR', 'JOB_ID',
|
||||
'INVENTORY_ID', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID',
|
||||
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
|
||||
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
|
||||
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS', 'FACT_QUEUE',
|
||||
'AWX_HOST', 'PROJECT_REVISION'
|
||||
))
|
||||
|
||||
awx/main/dispatch/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
from django.conf import settings


def get_local_queuename():
    return settings.CLUSTER_HOST_ID.encode('utf-8')
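
# Illustrative note (editor's example, not part of the new file): the queue name is
# just the encoded cluster host id; it is what the dispatcher binds to and what the
# Control client below publishes to by default, e.g. (value is hypothetical):
#
#     >>> get_local_queuename()
#     'awx-node-1'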
awx/main/dispatch/control.py (new file, 58 lines)
@@ -0,0 +1,58 @@
import logging
import socket

from django.conf import settings

from awx.main.dispatch import get_local_queuename
from kombu import Connection, Queue, Exchange, Producer, Consumer

logger = logging.getLogger('awx.main.dispatch')


class Control(object):

    services = ('dispatcher', 'callback_receiver')
    result = None

    def __init__(self, service, host=None):
        if service not in self.services:
            raise RuntimeError('{} must be in {}'.format(service, self.services))
        self.service = service
        self.queuename = host or get_local_queuename()
        self.queue = Queue(self.queuename, Exchange(self.queuename), routing_key=self.queuename)

    def publish(self, msg, conn, **kwargs):
        producer = Producer(
            exchange=self.queue.exchange,
            channel=conn,
            routing_key=self.queuename
        )
        producer.publish(msg, expiration=5, **kwargs)

    def status(self, *args, **kwargs):
        return self.control_with_reply('status', *args, **kwargs)

    def running(self, *args, **kwargs):
        return self.control_with_reply('running', *args, **kwargs)

    def control_with_reply(self, command, timeout=5):
        logger.warn('checking {} {} for {}'.format(self.service, command, self.queuename))
        reply_queue = Queue(name="amq.rabbitmq.reply-to")
        self.result = None
        with Connection(settings.BROKER_URL) as conn:
            with Consumer(conn, reply_queue, callbacks=[self.process_message], no_ack=True):
                self.publish({'control': command}, conn, reply_to='amq.rabbitmq.reply-to')
                try:
                    conn.drain_events(timeout=timeout)
                except socket.timeout:
                    logger.error('{} did not reply within {}s'.format(self.service, timeout))
                    raise
        return self.result

    def control(self, msg, **kwargs):
        with Connection(settings.BROKER_URL) as conn:
            self.publish(msg, conn)

    def process_message(self, body, message):
        self.result = body
        message.ack()
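
# Illustrative sketch (editor's example, not part of the new file): a status query
# publishes {'control': 'status'} to the service's queue, waits up to `timeout`
# seconds on the RabbitMQ direct-reply queue, and returns whatever the service
# sends back (socket.timeout is raised otherwise).  The remote host name below is
# hypothetical.
#
#     print(Control('dispatcher').status())                        # local node
#     print(Control('callback_receiver', host='awx-node-2').running())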
awx/main/dispatch/pool.py (new file, 405 lines)
@@ -0,0 +1,405 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import random
|
||||
import traceback
|
||||
from uuid import uuid4
|
||||
|
||||
import collections
|
||||
from multiprocessing import Process
|
||||
from multiprocessing import Queue as MPQueue
|
||||
from Queue import Full as QueueFull, Empty as QueueEmpty
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import connection as django_connection, connections
|
||||
from django.core.cache import cache as django_cache
|
||||
from jinja2 import Template
|
||||
import psutil
|
||||
|
||||
from awx.main.models import UnifiedJob
|
||||
from awx.main.dispatch import reaper
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
class PoolWorker(object):
|
||||
'''
|
||||
Used to track a worker child process and its pending and finished messages.
|
||||
|
||||
This class makes use of two distinct multiprocessing.Queues to track state:
|
||||
|
||||
- self.queue: this is a queue which represents pending messages that should
|
||||
be handled by this worker process; as new AMQP messages come
|
||||
in, a pool will put() them into this queue; the child
|
||||
process that is forked will get() from this queue and handle
|
||||
received messages in an endless loop
|
||||
- self.finished: this is a queue which the worker process uses to signal
|
||||
that it has finished processing a message
|
||||
|
||||
When a message is put() onto this worker, it is tracked in
|
||||
self.managed_tasks.
|
||||
|
||||
Periodically, the worker will call .calculate_managed_tasks(), which will
|
||||
cause messages in self.finished to be removed from self.managed_tasks.
|
||||
|
||||
In this way, self.managed_tasks represents a view of the messages assigned
|
||||
to a specific process. The message at [0] is the least-recently inserted
|
||||
message, and it represents what the worker is running _right now_
|
||||
(self.current_task).
|
||||
|
||||
A worker is "busy" when it has at least one message in self.managed_tasks.
|
||||
It is "idle" when self.managed_tasks is empty.
|
||||
'''
|
||||
|
||||
def __init__(self, queue_size, target, args):
|
||||
self.messages_sent = 0
|
||||
self.messages_finished = 0
|
||||
self.managed_tasks = collections.OrderedDict()
|
||||
self.finished = MPQueue(queue_size)
|
||||
self.queue = MPQueue(queue_size)
|
||||
self.process = Process(target=target, args=(self.queue, self.finished) + args)
|
||||
self.process.daemon = True
|
||||
|
||||
def start(self):
|
||||
self.process.start()
|
||||
|
||||
def put(self, body):
|
||||
uuid = '?'
|
||||
if isinstance(body, dict):
|
||||
if not body.get('uuid'):
|
||||
body['uuid'] = str(uuid4())
|
||||
uuid = body['uuid']
|
||||
logger.debug('delivered {} to worker[{}] qsize {}'.format(
|
||||
uuid, self.pid, self.qsize
|
||||
))
|
||||
self.managed_tasks[uuid] = body
|
||||
self.queue.put(body, block=True, timeout=5)
|
||||
self.messages_sent += 1
|
||||
self.calculate_managed_tasks()
|
||||
|
||||
def quit(self):
|
||||
'''
|
||||
Send a special control message to the worker that tells it to exit
|
||||
gracefully.
|
||||
'''
|
||||
self.queue.put('QUIT')
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self.process.pid
|
||||
|
||||
@property
|
||||
def qsize(self):
|
||||
return self.queue.qsize()
|
||||
|
||||
@property
|
||||
def alive(self):
|
||||
return self.process.is_alive()
|
||||
|
||||
@property
|
||||
def mb(self):
|
||||
if self.alive:
|
||||
return '{:0.3f}'.format(
|
||||
psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
|
||||
)
|
||||
return '0'
|
||||
|
||||
@property
|
||||
def exitcode(self):
|
||||
return str(self.process.exitcode)
|
||||
|
||||
def calculate_managed_tasks(self):
|
||||
# look to see if any tasks were finished
|
||||
finished = []
|
||||
for _ in range(self.finished.qsize()):
|
||||
try:
|
||||
finished.append(self.finished.get(block=False))
|
||||
except QueueEmpty:
|
||||
break # qsize is not always _totally_ up to date
|
||||
|
||||
# if any tasks were finished, removed them from the managed tasks for
|
||||
# this worker
|
||||
for uuid in finished:
|
||||
self.messages_finished += 1
|
||||
del self.managed_tasks[uuid]
|
||||
|
||||
@property
|
||||
def current_task(self):
|
||||
self.calculate_managed_tasks()
|
||||
# the task at [0] is the one that's running right now (or is about to
|
||||
# be running)
|
||||
if len(self.managed_tasks):
|
||||
return self.managed_tasks[self.managed_tasks.keys()[0]]
|
||||
|
||||
return None
|
||||
|
||||
@property
|
||||
def orphaned_tasks(self):
|
||||
orphaned = []
|
||||
if not self.alive:
|
||||
# if this process had a running task that never finished,
|
||||
# requeue its error callbacks
|
||||
current_task = self.current_task
|
||||
if isinstance(current_task, dict):
|
||||
orphaned.extend(current_task.get('errbacks', []))
|
||||
|
||||
# if this process has any pending messages requeue them
|
||||
for _ in range(self.qsize):
|
||||
try:
|
||||
message = self.queue.get(block=False)
|
||||
if message != 'QUIT':
|
||||
orphaned.append(message)
|
||||
except QueueEmpty:
|
||||
break # qsize is not always _totally_ up to date
|
||||
if len(orphaned):
|
||||
logger.error(
|
||||
'requeuing {} messages from gone worker pid:{}'.format(
|
||||
len(orphaned), self.pid
|
||||
)
|
||||
)
|
||||
return orphaned
|
||||
|
||||
@property
|
||||
def busy(self):
|
||||
self.calculate_managed_tasks()
|
||||
return len(self.managed_tasks) > 0
|
||||
|
||||
@property
|
||||
def idle(self):
|
||||
return not self.busy
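# Illustrative sketch (editor's example, not part of the new file): the `target`
# handed to PoolWorker receives (queue, finished, idx, *args), pulls bodies off
# `queue`, and reports each finished uuid back on `finished` so that
# calculate_managed_tasks() can clear it.  The real implementation lives in
# awx.main.dispatch.worker.BaseWorker.work_loop; the function below is only a
# minimal stand-in.
#
#     def toy_work_loop(queue, finished, idx):
#         while True:
#             body = queue.get()
#             if body == 'QUIT':
#                 break
#             try:
#                 print('worker {} got {!r}'.format(idx, body))
#             finally:
#                 if isinstance(body, dict) and 'uuid' in body:
#                     finished.put(body['uuid'])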
|
||||
|
||||
|
||||
class WorkerPool(object):
|
||||
'''
|
||||
Creates a pool of forked PoolWorkers.
|
||||
|
||||
As WorkerPool.write(...) is called (generally, by a kombu consumer
|
||||
implementation when it receives an AMQP message), messages are passed to
|
||||
one of the multiprocessing Queues where some work can be done on them.
|
||||
|
||||
class MessagePrinter(awx.main.dispatch.worker.BaseWorker):
|
||||
|
||||
def perform_work(self, body):
|
||||
print body
|
||||
|
||||
pool = WorkerPool(min_workers=4) # spawn four worker processes
|
||||
pool.init_workers(MessagePrint().work_loop)
|
||||
pool.write(
|
||||
0, # preferred worker 0
|
||||
'Hello, World!'
|
||||
)
|
||||
'''
|
||||
|
||||
debug_meta = ''
|
||||
|
||||
def __init__(self, min_workers=None, queue_size=None):
|
||||
self.name = settings.CLUSTER_HOST_ID
|
||||
self.pid = os.getpid()
|
||||
self.min_workers = min_workers or settings.JOB_EVENT_WORKERS
|
||||
self.queue_size = queue_size or settings.JOB_EVENT_MAX_QUEUE_SIZE
|
||||
self.workers = []
|
||||
|
||||
def __len__(self):
|
||||
return len(self.workers)
|
||||
|
||||
def init_workers(self, target, *target_args):
|
||||
self.target = target
|
||||
self.target_args = target_args
|
||||
for idx in range(self.min_workers):
|
||||
self.up()
|
||||
|
||||
def up(self):
|
||||
idx = len(self.workers)
|
||||
# It's important to close these because we're _about_ to fork, and we
|
||||
# don't want the forked processes to inherit the open sockets
|
||||
# for the DB and memcached connections (that way lies race conditions)
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
worker = PoolWorker(self.queue_size, self.target, (idx,) + self.target_args)
|
||||
self.workers.append(worker)
|
||||
try:
|
||||
worker.start()
|
||||
except Exception:
|
||||
logger.exception('could not fork')
|
||||
else:
|
||||
logger.warn('scaling up worker pid:{}'.format(worker.pid))
|
||||
return idx, worker
|
||||
|
||||
def debug(self, *args, **kwargs):
|
||||
self.cleanup()
|
||||
tmpl = Template(
|
||||
'{{ pool.name }}[pid:{{ pool.pid }}] workers total={{ workers|length }} {{ meta }} \n'
|
||||
'{% for w in workers %}'
|
||||
'. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
|
||||
' sent={{ w.messages_sent }}'
|
||||
' finished={{ w.messages_finished }}'
|
||||
' qsize={{ w.managed_tasks|length }}'
|
||||
' rss={{ w.mb }}MB'
|
||||
'{% for task in w.managed_tasks.values() %}'
|
||||
'\n - {% if loop.index0 == 0 %}running {% else %}queued {% endif %}'
|
||||
'{{ task["uuid"] }} '
|
||||
'{% if "task" in task %}'
|
||||
'{{ task["task"].rsplit(".", 1)[-1] }}'
|
||||
# don't print kwargs, they often contain launch-time secrets
|
||||
'(*{{ task.get("args", []) }})'
|
||||
'{% endif %}'
|
||||
'{% endfor %}'
|
||||
'{% if not w.managed_tasks|length %}'
|
||||
' [IDLE]'
|
||||
'{% endif %}'
|
||||
'\n'
|
||||
'{% endfor %}'
|
||||
)
|
||||
return tmpl.render(pool=self, workers=self.workers, meta=self.debug_meta)
|
||||
|
||||
def write(self, preferred_queue, body):
|
||||
queue_order = sorted(range(len(self.workers)), cmp=lambda x, y: -1 if x==preferred_queue else 0)
|
||||
write_attempt_order = []
|
||||
for queue_actual in queue_order:
|
||||
try:
|
||||
self.workers[queue_actual].put(body)
|
||||
return queue_actual
|
||||
except QueueFull:
|
||||
pass
|
||||
except Exception:
|
||||
tb = traceback.format_exc()
|
||||
logger.warn("could not write to queue %s" % preferred_queue)
|
||||
logger.warn("detail: {}".format(tb))
|
||||
write_attempt_order.append(preferred_queue)
|
||||
logger.warn("could not write payload to any queue, attempted order: {}".format(write_attempt_order))
|
||||
return None
|
||||
|
||||
def stop(self, signum):
|
||||
try:
|
||||
for worker in self.workers:
|
||||
os.kill(worker.pid, signum)
|
||||
except Exception:
|
||||
logger.exception('could not kill {}'.format(worker.pid))
|
||||
|
||||
|
||||
class AutoscalePool(WorkerPool):
|
||||
'''
|
||||
An extended pool implementation that automatically scales workers up and
|
||||
down based on demand
|
||||
'''
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.max_workers = kwargs.pop('max_workers', None)
|
||||
super(AutoscalePool, self).__init__(*args, **kwargs)
|
||||
|
||||
if self.max_workers is None:
|
||||
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
|
||||
if settings_absmem is not None:
|
||||
total_memory_gb = int(settings_absmem)
|
||||
else:
|
||||
total_memory_gb = (psutil.virtual_memory().total >> 30) + 1 # noqa: round up
|
||||
# 5 workers per GB of total memory
|
||||
self.max_workers = (total_memory_gb * 5)
|
||||
|
||||
@property
|
||||
def should_grow(self):
|
||||
if len(self.workers) < self.min_workers:
|
||||
# If we don't have at least min_workers, add more
|
||||
return True
|
||||
# If every worker is busy doing something, add more
|
||||
return all([w.busy for w in self.workers])
|
||||
|
||||
@property
|
||||
def full(self):
|
||||
return len(self.workers) == self.max_workers
|
||||
|
||||
@property
|
||||
def debug_meta(self):
|
||||
return 'min={} max={}'.format(self.min_workers, self.max_workers)
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
Perform some internal account and cleanup. This is run on
|
||||
every cluster node heartbeat:
|
||||
|
||||
1. Discover worker processes that exited, and recover messages they
|
||||
were handling.
|
||||
2. Clean up unnecessary, idle workers.
|
||||
3. Check to see if the database says this node is running any tasks
|
||||
that aren't actually running. If so, reap them.
|
||||
"""
|
||||
orphaned = []
|
||||
for w in self.workers[::]:
|
||||
if not w.alive:
|
||||
# the worker process has exited
|
||||
# 1. take the task it was running and enqueue the error
|
||||
# callbacks
|
||||
# 2. take any pending tasks delivered to its queue and
|
||||
# send them to another worker
|
||||
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
|
||||
if w.current_task:
|
||||
if w.current_task != 'QUIT':
|
||||
try:
|
||||
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
|
||||
reaper.reap_job(j, 'failed')
|
||||
except Exception:
|
||||
logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
|
||||
orphaned.extend(w.orphaned_tasks)
|
||||
self.workers.remove(w)
|
||||
elif w.idle and len(self.workers) > self.min_workers:
|
||||
# the process has an empty queue (it's idle) and we have
|
||||
# more processes in the pool than we need (> min)
|
||||
# send this process a message so it will exit gracefully
|
||||
# at the next opportunity
|
||||
logger.warn('scaling down worker pid:{}'.format(w.pid))
|
||||
w.quit()
|
||||
self.workers.remove(w)
|
||||
|
||||
for m in orphaned:
|
||||
# if all the workers are dead, spawn at least one
|
||||
if not len(self.workers):
|
||||
self.up()
|
||||
idx = random.choice(range(len(self.workers)))
|
||||
self.write(idx, m)
|
||||
|
||||
# if the database says a job is running on this node, but it's *not*,
|
||||
# then reap it
|
||||
running_uuids = []
|
||||
for worker in self.workers:
|
||||
worker.calculate_managed_tasks()
|
||||
running_uuids.extend(worker.managed_tasks.keys())
|
||||
try:
|
||||
reaper.reap(excluded_uuids=running_uuids)
|
||||
except Exception:
|
||||
# we _probably_ failed here due to DB connectivity issues, so
|
||||
# don't use our logger (it accesses the database for configuration)
|
||||
_, _, tb = sys.exc_info()
|
||||
traceback.print_tb(tb)
|
||||
|
||||
def up(self):
|
||||
if self.full:
|
||||
# if we can't spawn more workers, just toss this message into a
|
||||
# random worker's backlog
|
||||
idx = random.choice(range(len(self.workers)))
|
||||
return idx, self.workers[idx]
|
||||
else:
|
||||
return super(AutoscalePool, self).up()
|
||||
|
||||
def write(self, preferred_queue, body):
|
||||
try:
|
||||
# when the cluster heartbeat occurs, clean up internally
|
||||
if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
|
||||
self.cleanup()
|
||||
if self.should_grow:
|
||||
self.up()
|
||||
# we don't care about "preferred queue" round robin distribution, just
|
||||
# find the first non-busy worker and claim it
|
||||
workers = self.workers[:]
|
||||
random.shuffle(workers)
|
||||
for w in workers:
|
||||
if not w.busy:
|
||||
w.put(body)
|
||||
break
|
||||
else:
|
||||
return super(AutoscalePool, self).write(preferred_queue, body)
|
||||
except Exception:
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
logger.exception('failed to write inbound message')
awx/main/dispatch/publish.py (new file, 128 lines)
@@ -0,0 +1,128 @@
|
||||
import inspect
import logging
import sys
from uuid import uuid4

from django.conf import settings
from kombu import Connection, Exchange, Producer


logger = logging.getLogger('awx.main.dispatch')


def serialize_task(f):
    return '.'.join([f.__module__, f.__name__])


class task:
    """
    Used to decorate a function or class so that it can be run asynchronously
    via the task dispatcher.  Tasks can be simple functions:

    @task()
    def add(a, b):
        return a + b

    ...or classes that define a `run` method:

    @task()
    class Adder:
        def run(self, a, b):
            return a + b

    # Tasks can be run synchronously...
    assert add(1, 1) == 2
    assert Adder().run(1, 1) == 2

    # ...or published to a queue:
    add.apply_async([1, 1])
    Adder.apply_async([1, 1])

    # Tasks can also define a specific target queue or exchange type:

    @task(queue='slow-tasks')
    def snooze():
        time.sleep(10)

    @task(queue='tower_broadcast', exchange_type='fanout')
    def announce():
        print "Run this everywhere!"
    """

    def __init__(self, queue=None, exchange_type=None):
        self.queue = queue
        self.exchange_type = exchange_type

    def __call__(self, fn=None):
        queue = self.queue
        exchange_type = self.exchange_type

        class PublisherMixin(object):

            queue = None

            @classmethod
            def delay(cls, *args, **kwargs):
                return cls.apply_async(args, kwargs)

            @classmethod
            def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
                task_id = uuid or str(uuid4())
                args = args or []
                kwargs = kwargs or {}
                queue = (
                    queue or
                    getattr(cls.queue, 'im_func', cls.queue) or
                    settings.CELERY_DEFAULT_QUEUE
                )
                obj = {
                    'uuid': task_id,
                    'args': args,
                    'kwargs': kwargs,
                    'task': cls.name
                }
                obj.update(**kw)
                if callable(queue):
                    queue = queue()
                if not settings.IS_TESTING(sys.argv):
                    with Connection(settings.BROKER_URL) as conn:
                        exchange = Exchange(queue, type=exchange_type or 'direct')
                        producer = Producer(conn)
                        logger.debug('publish {}({}, queue={})'.format(
                            cls.name,
                            task_id,
                            queue
                        ))
                        producer.publish(obj,
                                         serializer='json',
                                         compression='bzip2',
                                         exchange=exchange,
                                         declare=[exchange],
                                         delivery_mode="persistent",
                                         routing_key=queue)
                return (obj, queue)

        # If the object we're wrapping *is* a class (e.g., RunJob), return
        # a *new* class that inherits from the wrapped class *and* BaseTask
        # In this way, the new class returned by our decorator is the class
        # being decorated *plus* PublisherMixin so cls.apply_async() and
        # cls.delay() work
        bases = []
        ns = {'name': serialize_task(fn), 'queue': queue}
        if inspect.isclass(fn):
            bases = list(fn.__bases__)
            ns.update(fn.__dict__)
        cls = type(
            fn.__name__,
            tuple(bases + [PublisherMixin]),
            ns
        )
        if inspect.isclass(fn):
            return cls

        # if the object being decorated is *not* a class (it's a Python
        # function), make fn.apply_async and fn.delay proxy through to the
        # PublisherMixin we dynamically created above
        setattr(fn, 'name', cls.name)
        setattr(fn, 'apply_async', cls.apply_async)
        setattr(fn, 'delay', cls.delay)
        return fn
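Because the decorator either returns the original function (with apply_async/delay attached) or a dynamically built class mixing in PublisherMixin, usage looks the same in both cases. A rough sketch of how a caller might exercise it, assuming Django settings with a configured broker (the queue name below is illustrative, not an AWX queue; under test settings the publish step is skipped entirely):

from awx.main.dispatch.publish import task

@task(queue='example-queue')   # 'example-queue' is a made-up queue name
def add(a, b):
    return a + b

# synchronous call: still just a normal function
assert add(2, 3) == 5

# asynchronous publish: serializes {'uuid': ..., 'args': [2, 3], 'kwargs': {},
# 'task': '<module>.add'} and sends it to the 'example-queue' exchange/routing key
message, queue = add.apply_async([2, 3])

# delay() is shorthand for apply_async(args, kwargs)
add.delay(2, 3)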
46
awx/main/dispatch/reaper.py
Normal file
@@ -0,0 +1,46 @@
from datetime import timedelta
import logging

from django.db.models import Q
from django.utils.timezone import now as tz_now
from django.contrib.contenttypes.models import ContentType

from awx.main.models import Instance, UnifiedJob, WorkflowJob

logger = logging.getLogger('awx.main.dispatch')


def reap_job(j, status):
    j.status = status
    j.start_args = ''  # blank field to remove encrypted passwords
    j.job_explanation += ' '.join((
        'Task was marked as running in Tower but was not present in',
        'the job queue, so it has been marked as failed.',
    ))
    j.save(update_fields=['status', 'start_args', 'job_explanation'])
    if hasattr(j, 'send_notification_templates'):
        j.send_notification_templates('failed')
    j.websocket_emit_status(status)
    logger.error(
        '{} is no longer running; reaping'.format(j.log_format)
    )


def reap(instance=None, status='failed', excluded_uuids=[]):
    '''
    Reap all jobs in waiting|running for this instance.
    '''
    me = instance or Instance.objects.me()
    now = tz_now()
    workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
    jobs = UnifiedJob.objects.filter(
        (
            Q(status='running') |
            Q(status='waiting', modified__lte=now - timedelta(seconds=60))
        ) & (
            Q(execution_node=me.hostname) |
            Q(controller_node=me.hostname)
        ) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
    ).exclude(celery_task_id__in=excluded_uuids)
    for j in jobs:
        reap_job(j, status)
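reap() is effectively "mark anything this node thinks it is running, but which no dispatcher actually holds, as failed". A sketch of how a caller might use it, passing the UUIDs it does know about so those jobs are spared (the variable contents are illustrative):

from awx.main.dispatch import reaper

# UUIDs of tasks the local dispatcher is still actively managing;
# anything else in waiting/running on this node gets reaped as failed.
active_uuids = ['example-uuid-0001', 'example-uuid-0002']  # illustrative values
reaper.reap(status='failed', excluded_uuids=active_uuids)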
3
awx/main/dispatch/worker/__init__.py
Normal file
@@ -0,0 +1,3 @@
from .base import AWXConsumer, BaseWorker  # noqa
from .callback import CallbackBrokerWorker  # noqa
from .task import TaskWorker  # noqa
146
awx/main/dispatch/worker/base.py
Normal file
@@ -0,0 +1,146 @@
# Copyright (c) 2018 Ansible by Red Hat
# All Rights Reserved.

import os
import logging
import signal
from uuid import UUID
from Queue import Empty as QueueEmpty

from kombu import Producer
from kombu.mixins import ConsumerMixin

from awx.main.dispatch.pool import WorkerPool

logger = logging.getLogger('awx.main.dispatch')


def signame(sig):
    return dict(
        (k, v) for v, k in signal.__dict__.items()
        if v.startswith('SIG') and not v.startswith('SIG_')
    )[sig]


class WorkerSignalHandler:

    def __init__(self):
        self.kill_now = False
        signal.signal(signal.SIGINT, self.exit_gracefully)

    def exit_gracefully(self, *args, **kwargs):
        self.kill_now = True


class AWXConsumer(ConsumerMixin):

    def __init__(self, name, connection, worker, queues=[], pool=None):
        self.connection = connection
        self.total_messages = 0
        self.queues = queues
        self.worker = worker
        self.pool = pool
        if pool is None:
            self.pool = WorkerPool()
        self.pool.init_workers(self.worker.work_loop)

    def get_consumers(self, Consumer, channel):
        logger.debug(self.listening_on)
        return [Consumer(queues=self.queues, accept=['json'],
                         callbacks=[self.process_task])]

    @property
    def listening_on(self):
        return 'listening on {}'.format([
            '{} [{}]'.format(q.name, q.exchange.type) for q in self.queues
        ])

    def control(self, body, message):
        logger.warn(body)
        control = body.get('control')
        if control in ('status', 'running'):
            producer = Producer(
                channel=self.connection,
                routing_key=message.properties['reply_to']
            )
            if control == 'status':
                msg = '\n'.join([self.listening_on, self.pool.debug()])
            elif control == 'running':
                msg = []
                for worker in self.pool.workers:
                    worker.calculate_managed_tasks()
                    msg.extend(worker.managed_tasks.keys())
            producer.publish(msg)
        elif control == 'reload':
            for worker in self.pool.workers:
                worker.quit()
        else:
            logger.error('unrecognized control message: {}'.format(control))
        message.ack()

    def process_task(self, body, message):
        if 'control' in body:
            return self.control(body, message)
        if len(self.pool):
            if "uuid" in body and body['uuid']:
                try:
                    queue = UUID(body['uuid']).int % len(self.pool)
                except Exception:
                    queue = self.total_messages % len(self.pool)
            else:
                queue = self.total_messages % len(self.pool)
        else:
            queue = 0
        self.pool.write(queue, body)
        self.total_messages += 1
        message.ack()

    def run(self, *args, **kwargs):
        signal.signal(signal.SIGINT, self.stop)
        signal.signal(signal.SIGTERM, self.stop)
        self.worker.on_start()
        super(AWXConsumer, self).run(*args, **kwargs)

    def stop(self, signum, frame):
        self.should_stop = True  # this makes the kombu mixin stop consuming
        logger.debug('received {}, stopping'.format(signame(signum)))
        self.worker.on_stop()
        raise SystemExit()


class BaseWorker(object):

    def work_loop(self, queue, finished, idx, *args):
        ppid = os.getppid()
        signal_handler = WorkerSignalHandler()
        while not signal_handler.kill_now:
            # if the parent PID changes, this process has been orphaned
            # via e.g., segfault or sigkill, we should exit too
            if os.getppid() != ppid:
                break
            try:
                body = queue.get(block=True, timeout=1)
                if body == 'QUIT':
                    break
            except QueueEmpty:
                continue
            except Exception as e:
                logger.error("Exception on worker {}, restarting: ".format(idx) + str(e))
                continue
            try:
                self.perform_work(body, *args)
            finally:
                if 'uuid' in body:
                    uuid = body['uuid']
                    logger.debug('task {} is finished'.format(uuid))
                    finished.put(uuid)
        logger.warn('worker exiting gracefully pid:{}'.format(os.getpid()))

    def perform_work(self, body):
        raise NotImplementedError()

    def on_start(self):
        pass

    def on_stop(self):
        pass
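process_task() keeps all messages carrying the same UUID on the same pool worker by hashing the UUID into a worker index, which preserves per-job ordering while still spreading distinct jobs across the pool; messages without a usable UUID fall back to round-robin. A worked illustration of that bucketing arithmetic (pool size and UUIDs below are arbitrary examples, not AWX values):

from uuid import UUID, uuid4

pool_size = 4
total_messages = 0

def pick_queue(body):
    # same shape as AWXConsumer.process_task: stable bucket per UUID,
    # round-robin when there is no usable UUID
    global total_messages
    if body.get('uuid'):
        try:
            queue = UUID(body['uuid']).int % pool_size
        except Exception:
            queue = total_messages % pool_size
    else:
        queue = total_messages % pool_size
    total_messages += 1
    return queue

job_uuid = str(uuid4())
first = pick_queue({'uuid': job_uuid})
second = pick_queue({'uuid': job_uuid})
assert first == second   # messages for one job always land on one worker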
130
awx/main/dispatch/worker/callback.py
Normal file
@@ -0,0 +1,130 @@
import logging
import time
import os
import signal
import traceback

from django.conf import settings
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError

from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
                             InventoryUpdateEvent, SystemJobEvent, UnifiedJob)

from .base import BaseWorker

logger = logging.getLogger('awx.main.commands.run_callback_receiver')


class CallbackBrokerWorker(BaseWorker):
    '''
    A worker implementation that deserializes callback event data and persists
    it into the database.

    The code that *builds* these types of messages is found in the AWX display
    callback (`awx.lib.awx_display_callback`).
    '''

    MAX_RETRIES = 2

    def perform_work(self, body):
        try:
            event_map = {
                'job_id': JobEvent,
                'ad_hoc_command_id': AdHocCommandEvent,
                'project_update_id': ProjectUpdateEvent,
                'inventory_update_id': InventoryUpdateEvent,
                'system_job_id': SystemJobEvent,
            }

            if not any([key in body for key in event_map]):
                raise Exception('Payload does not have a job identifier')

            def _save_event_data():
                for key, cls in event_map.items():
                    if key in body:
                        cls.create_from_data(**body)

            job_identifier = 'unknown job'
            job_key = 'unknown'
            for key in event_map.keys():
                if key in body:
                    job_identifier = body[key]
                    job_key = key
                    break

            if settings.DEBUG:
                from pygments import highlight
                from pygments.lexers import PythonLexer
                from pygments.formatters import Terminal256Formatter
                from pprint import pformat
                if body.get('event') == 'EOF':
                    event_thing = 'EOF event'
                else:
                    event_thing = 'event {}'.format(body.get('counter', 'unknown'))
                logger.info('Callback worker received {} for {} {}'.format(
                    event_thing, job_key[:-len('_id')], job_identifier
                ))
                logger.debug('Body: {}'.format(
                    highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
                )[:1024 * 4])

            if body.get('event') == 'EOF':
                try:
                    final_counter = body.get('final_counter', 0)
                    logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
                    # EOF events are sent when stdout for the running task is
                    # closed. don't actually persist them to the database; we
                    # just use them to report `summary` websocket events as an
                    # approximation for when a job is "done"
                    emit_channel_notification(
                        'jobs-summary',
                        dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
                    )
                    # Additionally, when we've processed all events, we should
                    # have all the data we need to send out success/failure
                    # notification templates
                    uj = UnifiedJob.objects.get(pk=job_identifier)
                    if hasattr(uj, 'send_notification_templates'):
                        retries = 0
                        while retries < 5:
                            if uj.finished:
                                uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
                                break
                            else:
                                # wait a few seconds to avoid a race where the
                                # events are persisted _before_ the UJ.status
                                # changes from running -> successful
                                retries += 1
                                time.sleep(1)
                                uj = UnifiedJob.objects.get(pk=job_identifier)
                except Exception:
                    logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
                return

            retries = 0
            while retries <= self.MAX_RETRIES:
                try:
                    _save_event_data()
                    break
                except (OperationalError, InterfaceError, InternalError):
                    if retries >= self.MAX_RETRIES:
                        logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
                        os.kill(os.getppid(), signal.SIGINT)
                        return
                    delay = 60 * retries
                    logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
                        i=retries + 1,
                        delay=delay
                    ))
                    django_connection.close()
                    time.sleep(delay)
                    retries += 1
                except DatabaseError:
                    logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
                    break
        except Exception as exc:
            tb = traceback.format_exc()
            logger.error('Callback Task Processor Raised Exception: %r', exc)
            logger.error('Detail: {}'.format(tb))
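The save loop above retries transient database errors with a linear backoff (delay = 60 * retries, so the first retry happens immediately), reconnects between attempts, and finally gives up by sending SIGINT to its parent process (the callback receiver); plain DatabaseErrors are logged and dropped without retrying. The schedule is easier to see spelled out (MAX_RETRIES mirrors the class attribute above):

MAX_RETRIES = 2

for retries in range(MAX_RETRIES + 1):
    if retries >= MAX_RETRIES:
        print('attempt {} fails -> give up, SIGINT the parent process'.format(retries + 1))
    else:
        print('attempt {} fails -> close DB connection, sleep {}s, retry'.format(retries + 1, 60 * retries))

# attempt 1 fails -> close DB connection, sleep 0s, retry
# attempt 2 fails -> close DB connection, sleep 60s, retry
# attempt 3 fails -> give up, SIGINT the parent process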
118
awx/main/dispatch/worker/task.py
Normal file
@@ -0,0 +1,118 @@
import inspect
import logging
import importlib
import sys
import traceback

import six
from django import db

from awx.main.tasks import dispatch_startup, inform_cluster_of_shutdown

from .base import BaseWorker

logger = logging.getLogger('awx.main.dispatch')


class TaskWorker(BaseWorker):
    '''
    A worker implementation that deserializes task messages and runs native
    Python code.

    The code that *builds* these types of messages is found in
    `awx.main.dispatch.publish`.
    '''

    @classmethod
    def resolve_callable(cls, task):
        '''
        Transform a dotted notation task into an imported, callable function, e.g.,

        awx.main.tasks.delete_inventory
        awx.main.tasks.RunProjectUpdate
        '''
        module, target = task.rsplit('.', 1)
        module = importlib.import_module(module)
        _call = None
        if hasattr(module, target):
            _call = getattr(module, target, None)
        return _call

    def run_callable(self, body):
        '''
        Given some AMQP message, import the correct Python code and run it.
        '''
        task = body['task']
        uuid = body.get('uuid', '<unknown>')
        args = body.get('args', [])
        kwargs = body.get('kwargs', {})
        _call = TaskWorker.resolve_callable(task)
        if inspect.isclass(_call):
            # the callable is a class, e.g., RunJob; instantiate and
            # return its `run()` method
            _call = _call().run
        # don't print kwargs, they often contain launch-time secrets
        logger.debug('task {} starting {}(*{})'.format(uuid, task, args))
        return _call(*args, **kwargs)

    def perform_work(self, body):
        '''
        Import and run code for a task e.g.,

        body = {
            'args': [8],
            'callbacks': [{
                'args': [],
                'kwargs': {}
                'task': u'awx.main.tasks.handle_work_success'
            }],
            'errbacks': [{
                'args': [],
                'kwargs': {},
                'task': 'awx.main.tasks.handle_work_error'
            }],
            'kwargs': {},
            'task': u'awx.main.tasks.RunProjectUpdate'
        }
        '''
        for conn in db.connections.all():
            # If the database connection has a hiccup during a task, close it
            # so we can establish a new connection
            conn.close_if_unusable_or_obsolete()
        result = None
        try:
            result = self.run_callable(body)
        except Exception as exc:

            try:
                if getattr(exc, 'is_awx_task_error', False):
                    # Error caused by user / tracked in job output
                    logger.warning(six.text_type("{}").format(exc))
                else:
                    task = body['task']
                    args = body.get('args', [])
                    kwargs = body.get('kwargs', {})
                    logger.exception('Worker failed to run task {}(*{}, **{}'.format(
                        task, args, kwargs
                    ))
            except Exception:
                # It's fairly critical that this code _not_ raise exceptions on logging
                # If you configure external logging in a way that _it_ fails, there's
                # not a lot we can do here; sys.stderr.write is a final hail mary
                _, _, tb = sys.exc_info()
                traceback.print_tb(tb)

            for callback in body.get('errbacks', []) or []:
                callback['uuid'] = body['uuid']
                self.perform_work(callback)

        for callback in body.get('callbacks', []) or []:
            callback['uuid'] = body['uuid']
            self.perform_work(callback)
        return result

    def on_start(self):
        dispatch_startup()

    def on_stop(self):
        inform_cluster_of_shutdown()
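resolve_callable() is a plain dotted-path import: split on the last dot, import the module, and pull the attribute; run_callable() then binds .run when the target turns out to be a class. A minimal illustration of the same pattern against standard-library paths rather than AWX tasks:

import importlib
import inspect

def resolve(dotted):
    # same shape as TaskWorker.resolve_callable
    module_name, target = dotted.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, target, None)

fn = resolve('os.path.join')
assert callable(fn)

cls = resolve('collections.OrderedDict')
if inspect.isclass(cls):
    # AWX task classes expose their work through a run() method instead
    obj = cls()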
@@ -4,11 +4,6 @@
|
||||
import six
|
||||
|
||||
|
||||
# Celery does not respect exception type when using a serializer different than pickle;
|
||||
# and awx uses the json serializer
|
||||
# https://github.com/celery/celery/issues/3586
|
||||
|
||||
|
||||
class _AwxTaskError():
|
||||
def build_exception(self, task, message=None):
|
||||
if message is None:
|
||||
@@ -36,5 +31,3 @@ class _AwxTaskError():
|
||||
|
||||
|
||||
AwxTaskError = _AwxTaskError()
|
||||
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ class IsolatedManager(object):
|
||||
:param stdout_handle: a file-like object for capturing stdout
|
||||
:param ssh_key_path: a filepath where SSH key data can be read
|
||||
:param expect_passwords: a dict of regular expression password prompts
|
||||
to input values, i.e., {r'Password:\s*?$':
|
||||
to input values, i.e., {r'Password:*?$':
|
||||
'some_password'}
|
||||
:param cancelled_callback: a callable - which returns `True` or `False`
|
||||
- signifying if the job has been prematurely
|
||||
@@ -117,10 +117,10 @@ class IsolatedManager(object):
|
||||
|
||||
@classmethod
|
||||
def awx_playbook_path(cls):
|
||||
return os.path.join(
|
||||
return os.path.abspath(os.path.join(
|
||||
os.path.dirname(awx.__file__),
|
||||
'playbooks'
|
||||
)
|
||||
))
|
||||
|
||||
def path_to(self, *args):
|
||||
return os.path.join(self.private_data_dir, *args)
|
||||
|
||||
@@ -71,7 +71,7 @@ def run_pexpect(args, cwd, env, logfile,
|
||||
- signifying if the job has been prematurely
|
||||
cancelled
|
||||
:param expect_passwords: a dict of regular expression password prompts
|
||||
to input values, i.e., {r'Password:\s*?$':
|
||||
to input values, i.e., {r'Password:*?$':
|
||||
'some_password'}
|
||||
:param extra_update_fields: a dict used to specify DB fields which should
|
||||
be updated on the underlying model
|
||||
@@ -208,6 +208,12 @@ def run_isolated_job(private_data_dir, secrets, logfile=sys.stdout):
|
||||
env['AWX_ISOLATED_DATA_DIR'] = private_data_dir
|
||||
env['PYTHONPATH'] = env.get('PYTHONPATH', '') + callback_dir + ':'
|
||||
|
||||
venv_path = env.get('VIRTUAL_ENV')
|
||||
if venv_path and not os.path.exists(venv_path):
|
||||
raise RuntimeError(
|
||||
'a valid Python virtualenv does not exist at {}'.format(venv_path)
|
||||
)
|
||||
|
||||
return run_pexpect(args, cwd, env, logfile,
|
||||
expect_passwords=expect_passwords,
|
||||
idle_timeout=idle_timeout,
|
||||
|
||||
@@ -46,7 +46,7 @@ from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.encryption import encrypt_value, decrypt_value, get_encryption_key
|
||||
from awx.main.validators import validate_ssh_private_key
|
||||
from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
|
||||
from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS
|
||||
from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS, ENV_BLACKLIST
|
||||
from awx.main import utils
|
||||
|
||||
|
||||
@@ -573,10 +573,10 @@ class CredentialInputField(JSONSchemaField):
|
||||
# string)
|
||||
match = re.search(
|
||||
# 'foo' is a dependency of 'bar'
|
||||
"'" # apostrophe
|
||||
"([^']+)" # one or more non-apostrophes (first group)
|
||||
"'[\w ]+'" # one or more words/spaces
|
||||
"([^']+)", # second group
|
||||
r"'" # apostrophe
|
||||
r"([^']+)" # one or more non-apostrophes (first group)
|
||||
r"'[\w ]+'" # one or more words/spaces
|
||||
r"([^']+)", # second group
|
||||
error.message,
|
||||
)
|
||||
if match:
|
||||
@@ -755,7 +755,7 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
'file': {
|
||||
'type': 'object',
|
||||
'patternProperties': {
|
||||
'^template(\.[a-zA-Z_]+[a-zA-Z0-9_]*)?$': {'type': 'string'},
|
||||
r'^template(\.[a-zA-Z_]+[a-zA-Z0-9_]*)?$': {'type': 'string'},
|
||||
},
|
||||
'additionalProperties': False,
|
||||
},
|
||||
@@ -767,7 +767,12 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
# of underscores, digits, and alphabetics from the portable
|
||||
# character set. The first character of a name is not
|
||||
# a digit.
|
||||
'^[a-zA-Z_]+[a-zA-Z0-9_]*$': {'type': 'string'},
|
||||
'^[a-zA-Z_]+[a-zA-Z0-9_]*$': {
|
||||
'type': 'string',
|
||||
# The environment variable _value_ can be any ascii,
|
||||
# but pexpect will choke on any unicode
|
||||
'pattern': '^[\x00-\x7F]*$'
|
||||
},
|
||||
},
|
||||
'additionalProperties': False,
|
||||
},
|
||||
@@ -783,6 +788,19 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
'additionalProperties': False
|
||||
}
|
||||
|
||||
def validate_env_var_allowed(self, env_var):
|
||||
if env_var.startswith('ANSIBLE_'):
|
||||
raise django_exceptions.ValidationError(
|
||||
_('Environment variable {} may affect Ansible configuration so its '
|
||||
'use is not allowed in credentials.').format(env_var),
|
||||
code='invalid', params={'value': env_var},
|
||||
)
|
||||
if env_var in ENV_BLACKLIST:
|
||||
raise django_exceptions.ValidationError(
|
||||
_('Environment variable {} is blacklisted from use in credentials.').format(env_var),
|
||||
code='invalid', params={'value': env_var},
|
||||
)
|
||||
|
||||
def validate(self, value, model_instance):
|
||||
super(CredentialTypeInjectorField, self).validate(
|
||||
value, model_instance
|
||||
@@ -834,6 +852,9 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
setattr(valid_namespace['tower'].filename, template_name, 'EXAMPLE_FILENAME')
|
||||
|
||||
for type_, injector in value.items():
|
||||
if type_ == 'env':
|
||||
for key in injector.keys():
|
||||
self.validate_env_var_allowed(key)
|
||||
for key, tmpl in injector.items():
|
||||
try:
|
||||
Environment(
|
||||
|
||||
12
awx/main/management/commands/check_migrations.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from django.db import connections
|
||||
from django.db.backends.sqlite3.base import DatabaseWrapper
|
||||
from django.core.management.commands.makemigrations import Command as MakeMigrations
|
||||
|
||||
|
||||
class Command(MakeMigrations):
|
||||
|
||||
def execute(self, *args, **options):
|
||||
settings = connections['default'].settings_dict.copy()
|
||||
settings['ENGINE'] = 'sqlite3'
|
||||
connections['default'] = DatabaseWrapper(settings)
|
||||
return MakeMigrations().execute(*args, **options)
|
||||
@@ -15,6 +15,8 @@ class Command(BaseCommand):
|
||||
def handle(self, *args, **kwargs):
|
||||
# Sanity check: Is there already an organization in the system?
|
||||
if Organization.objects.count():
|
||||
print('An organization is already in the system, exiting.')
|
||||
print('(changed: False)')
|
||||
return
|
||||
|
||||
# Create a default organization as the first superuser found.
|
||||
@@ -54,3 +56,4 @@ class Command(BaseCommand):
|
||||
jt.credentials.add(c)
|
||||
print('Default organization added.')
|
||||
print('Demo Credential, Inventory, and Job Template added.')
|
||||
print('(changed: True)')
|
||||
|
||||
@@ -41,14 +41,18 @@ class Command(BaseCommand):
|
||||
with advisory_lock('instance_registration_%s' % hostname):
|
||||
instance = Instance.objects.filter(hostname=hostname)
|
||||
if instance.exists():
|
||||
isolated = instance.first().is_isolated()
|
||||
instance.delete()
|
||||
print("Instance Removed")
|
||||
result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(hostname), shell=True).wait()
|
||||
if result != 0:
|
||||
print("Node deprovisioning may have failed when attempting to "
|
||||
"remove the RabbitMQ instance {} from the cluster".format(hostname))
|
||||
else:
|
||||
if isolated:
|
||||
print('Successfully deprovisioned {}'.format(hostname))
|
||||
else:
|
||||
result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(hostname), shell=True).wait()
|
||||
if result != 0:
|
||||
print("Node deprovisioning may have failed when attempting to "
|
||||
"remove the RabbitMQ instance {} from the cluster".format(hostname))
|
||||
else:
|
||||
print('Successfully deprovisioned {}'.format(hostname))
|
||||
print('(changed: True)')
|
||||
else:
|
||||
print('No instance found matching name {}'.format(hostname))
|
||||
|
||||
@@ -491,7 +491,7 @@ class Command(BaseCommand):
|
||||
for host in hosts_qs.filter(pk__in=del_pks):
|
||||
host_name = host.name
|
||||
host.delete()
|
||||
logger.info('Deleted host "%s"', host_name)
|
||||
logger.debug('Deleted host "%s"', host_name)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('host deletions took %d queries for %d hosts',
|
||||
len(connection.queries) - queries_before,
|
||||
@@ -528,7 +528,7 @@ class Command(BaseCommand):
|
||||
group_name = group.name
|
||||
with ignore_inventory_computed_fields():
|
||||
group.delete()
|
||||
logger.info('Group "%s" deleted', group_name)
|
||||
logger.debug('Group "%s" deleted', group_name)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('group deletions took %d queries for %d groups',
|
||||
len(connection.queries) - queries_before,
|
||||
@@ -549,7 +549,7 @@ class Command(BaseCommand):
|
||||
db_groups = self.inventory_source.groups
|
||||
for db_group in db_groups.all():
|
||||
if self.inventory_source.deprecated_group_id == db_group.id: # TODO: remove in 3.3
|
||||
logger.info(
|
||||
logger.debug(
|
||||
'Group "%s" from v1 API child group/host connections preserved',
|
||||
db_group.name
|
||||
)
|
||||
@@ -566,8 +566,8 @@ class Command(BaseCommand):
|
||||
for db_child in db_children.filter(pk__in=child_group_pks):
|
||||
group_group_count += 1
|
||||
db_group.children.remove(db_child)
|
||||
logger.info('Group "%s" removed from group "%s"',
|
||||
db_child.name, db_group.name)
|
||||
logger.debug('Group "%s" removed from group "%s"',
|
||||
db_child.name, db_group.name)
|
||||
# FIXME: Inventory source group relationships
|
||||
# Delete group/host relationships not present in imported data.
|
||||
db_hosts = db_group.hosts
|
||||
@@ -594,8 +594,8 @@ class Command(BaseCommand):
|
||||
if db_host not in db_group.hosts.all():
|
||||
continue
|
||||
db_group.hosts.remove(db_host)
|
||||
logger.info('Host "%s" removed from group "%s"',
|
||||
db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" removed from group "%s"',
|
||||
db_host.name, db_group.name)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('group-group and group-host deletions took %d queries for %d relationships',
|
||||
len(connection.queries) - queries_before,
|
||||
@@ -614,9 +614,9 @@ class Command(BaseCommand):
|
||||
if db_variables != all_obj.variables_dict:
|
||||
all_obj.variables = json.dumps(db_variables)
|
||||
all_obj.save(update_fields=['variables'])
|
||||
logger.info('Inventory variables updated from "all" group')
|
||||
logger.debug('Inventory variables updated from "all" group')
|
||||
else:
|
||||
logger.info('Inventory variables unmodified')
|
||||
logger.debug('Inventory variables unmodified')
|
||||
|
||||
def _create_update_groups(self):
|
||||
'''
|
||||
@@ -648,11 +648,11 @@ class Command(BaseCommand):
|
||||
group.variables = json.dumps(db_variables)
|
||||
group.save(update_fields=['variables'])
|
||||
if self.overwrite_vars:
|
||||
logger.info('Group "%s" variables replaced', group.name)
|
||||
logger.debug('Group "%s" variables replaced', group.name)
|
||||
else:
|
||||
logger.info('Group "%s" variables updated', group.name)
|
||||
logger.debug('Group "%s" variables updated', group.name)
|
||||
else:
|
||||
logger.info('Group "%s" variables unmodified', group.name)
|
||||
logger.debug('Group "%s" variables unmodified', group.name)
|
||||
existing_group_names.add(group.name)
|
||||
self._batch_add_m2m(self.inventory_source.groups, group)
|
||||
for group_name in all_group_names:
|
||||
@@ -666,7 +666,7 @@ class Command(BaseCommand):
|
||||
'description':'imported'
|
||||
}
|
||||
)[0]
|
||||
logger.info('Group "%s" added', group.name)
|
||||
logger.debug('Group "%s" added', group.name)
|
||||
self._batch_add_m2m(self.inventory_source.groups, group)
|
||||
self._batch_add_m2m(self.inventory_source.groups, flush=True)
|
||||
if settings.SQL_DEBUG:
|
||||
@@ -705,24 +705,24 @@ class Command(BaseCommand):
|
||||
if update_fields:
|
||||
db_host.save(update_fields=update_fields)
|
||||
if 'name' in update_fields:
|
||||
logger.info('Host renamed from "%s" to "%s"', old_name, mem_host.name)
|
||||
logger.debug('Host renamed from "%s" to "%s"', old_name, mem_host.name)
|
||||
if 'instance_id' in update_fields:
|
||||
if old_instance_id:
|
||||
logger.info('Host "%s" instance_id updated', mem_host.name)
|
||||
logger.debug('Host "%s" instance_id updated', mem_host.name)
|
||||
else:
|
||||
logger.info('Host "%s" instance_id added', mem_host.name)
|
||||
logger.debug('Host "%s" instance_id added', mem_host.name)
|
||||
if 'variables' in update_fields:
|
||||
if self.overwrite_vars:
|
||||
logger.info('Host "%s" variables replaced', mem_host.name)
|
||||
logger.debug('Host "%s" variables replaced', mem_host.name)
|
||||
else:
|
||||
logger.info('Host "%s" variables updated', mem_host.name)
|
||||
logger.debug('Host "%s" variables updated', mem_host.name)
|
||||
else:
|
||||
logger.info('Host "%s" variables unmodified', mem_host.name)
|
||||
logger.debug('Host "%s" variables unmodified', mem_host.name)
|
||||
if 'enabled' in update_fields:
|
||||
if enabled:
|
||||
logger.info('Host "%s" is now enabled', mem_host.name)
|
||||
logger.debug('Host "%s" is now enabled', mem_host.name)
|
||||
else:
|
||||
logger.info('Host "%s" is now disabled', mem_host.name)
|
||||
logger.debug('Host "%s" is now disabled', mem_host.name)
|
||||
self._batch_add_m2m(self.inventory_source.hosts, db_host)
|
||||
|
||||
def _create_update_hosts(self):
|
||||
@@ -796,9 +796,9 @@ class Command(BaseCommand):
|
||||
host_attrs['instance_id'] = instance_id
|
||||
db_host = self.inventory.hosts.update_or_create(name=mem_host_name, defaults=host_attrs)[0]
|
||||
if enabled is False:
|
||||
logger.info('Host "%s" added (disabled)', mem_host_name)
|
||||
logger.debug('Host "%s" added (disabled)', mem_host_name)
|
||||
else:
|
||||
logger.info('Host "%s" added', mem_host_name)
|
||||
logger.debug('Host "%s" added', mem_host_name)
|
||||
self._batch_add_m2m(self.inventory_source.hosts, db_host)
|
||||
|
||||
self._batch_add_m2m(self.inventory_source.hosts, flush=True)
|
||||
@@ -827,10 +827,10 @@ class Command(BaseCommand):
|
||||
child_names = all_child_names[offset2:(offset2 + self._batch_size)]
|
||||
db_children_qs = self.inventory.groups.filter(name__in=child_names)
|
||||
for db_child in db_children_qs.filter(children__id=db_group.id):
|
||||
logger.info('Group "%s" already child of group "%s"', db_child.name, db_group.name)
|
||||
logger.debug('Group "%s" already child of group "%s"', db_child.name, db_group.name)
|
||||
for db_child in db_children_qs.exclude(children__id=db_group.id):
|
||||
self._batch_add_m2m(db_group.children, db_child)
|
||||
logger.info('Group "%s" added as child of "%s"', db_child.name, db_group.name)
|
||||
logger.debug('Group "%s" added as child of "%s"', db_child.name, db_group.name)
|
||||
self._batch_add_m2m(db_group.children, flush=True)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('Group-group updates took %d queries for %d group-group relationships',
|
||||
@@ -854,19 +854,19 @@ class Command(BaseCommand):
|
||||
host_names = all_host_names[offset2:(offset2 + self._batch_size)]
|
||||
db_hosts_qs = self.inventory.hosts.filter(name__in=host_names)
|
||||
for db_host in db_hosts_qs.filter(groups__id=db_group.id):
|
||||
logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" already in group "%s"', db_host.name, db_group.name)
|
||||
for db_host in db_hosts_qs.exclude(groups__id=db_group.id):
|
||||
self._batch_add_m2m(db_group.hosts, db_host)
|
||||
logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
all_instance_ids = sorted([h.instance_id for h in mem_group.hosts if h.instance_id])
|
||||
for offset2 in xrange(0, len(all_instance_ids), self._batch_size):
|
||||
instance_ids = all_instance_ids[offset2:(offset2 + self._batch_size)]
|
||||
db_hosts_qs = self.inventory.hosts.filter(instance_id__in=instance_ids)
|
||||
for db_host in db_hosts_qs.filter(groups__id=db_group.id):
|
||||
logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" already in group "%s"', db_host.name, db_group.name)
|
||||
for db_host in db_hosts_qs.exclude(groups__id=db_group.id):
|
||||
self._batch_add_m2m(db_group.hosts, db_host)
|
||||
logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
self._batch_add_m2m(db_group.hosts, flush=True)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('Group-host updates took %d queries for %d group-host relationships',
|
||||
@@ -938,7 +938,7 @@ class Command(BaseCommand):
|
||||
self.exclude_empty_groups = bool(options.get('exclude_empty_groups', False))
|
||||
self.instance_id_var = options.get('instance_id_var', None)
|
||||
|
||||
self.celery_invoked = False if os.getenv('INVENTORY_SOURCE_ID', None) is None else True
|
||||
self.invoked_from_dispatcher = False if os.getenv('INVENTORY_SOURCE_ID', None) is None else True
|
||||
|
||||
# Load inventory and related objects from database.
|
||||
if self.inventory_name and self.inventory_id:
|
||||
@@ -1062,7 +1062,7 @@ class Command(BaseCommand):
|
||||
exc = e
|
||||
transaction.rollback()
|
||||
|
||||
if self.celery_invoked is False:
|
||||
if self.invoked_from_dispatcher is False:
|
||||
with ignore_inventory_computed_fields():
|
||||
self.inventory_update = InventoryUpdate.objects.get(pk=self.inventory_update.pk)
|
||||
self.inventory_update.result_traceback = tb
|
||||
|
||||
@@ -6,6 +6,22 @@ from django.core.management.base import BaseCommand
|
||||
import six
|
||||
|
||||
|
||||
class Ungrouped(object):
|
||||
|
||||
name = 'ungrouped'
|
||||
policy_instance_percentage = None
|
||||
policy_instance_minimum = None
|
||||
controller = None
|
||||
|
||||
@property
|
||||
def instances(self):
|
||||
return Instance.objects.filter(rampart_groups__isnull=True)
|
||||
|
||||
@property
|
||||
def capacity(self):
|
||||
return sum([x.capacity for x in self.instances])
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""List instances from the Tower database
|
||||
"""
|
||||
@@ -13,12 +29,28 @@ class Command(BaseCommand):
|
||||
def handle(self, *args, **options):
|
||||
super(Command, self).__init__()
|
||||
|
||||
for instance in Instance.objects.all():
|
||||
print(six.text_type(
|
||||
"hostname: {0.hostname}; created: {0.created}; "
|
||||
"heartbeat: {0.modified}; capacity: {0.capacity}").format(instance))
|
||||
for instance_group in InstanceGroup.objects.all():
|
||||
print(six.text_type(
|
||||
"Instance Group: {0.name}; created: {0.created}; "
|
||||
"capacity: {0.capacity}; members: {1}").format(instance_group,
|
||||
[x.hostname for x in instance_group.instances.all()]))
|
||||
groups = list(InstanceGroup.objects.all())
|
||||
ungrouped = Ungrouped()
|
||||
if len(ungrouped.instances):
|
||||
groups.append(ungrouped)
|
||||
|
||||
for instance_group in groups:
|
||||
fmt = '[{0.name} capacity={0.capacity}'
|
||||
if instance_group.policy_instance_percentage:
|
||||
fmt += ' policy={0.policy_instance_percentage}%'
|
||||
if instance_group.policy_instance_minimum:
|
||||
fmt += ' policy>={0.policy_instance_minimum}'
|
||||
if instance_group.controller:
|
||||
fmt += ' controller={0.controller.name}'
|
||||
print(six.text_type(fmt + ']').format(instance_group))
|
||||
for x in instance_group.instances.all():
|
||||
color = '\033[92m'
|
||||
if x.capacity == 0 or x.enabled is False:
|
||||
color = '\033[91m'
|
||||
fmt = '\t' + color + '{0.hostname} capacity={0.capacity} version={1}'
|
||||
if x.last_isolated_check:
|
||||
fmt += ' last_isolated_check="{0.last_isolated_check:%Y-%m-%d %H:%M:%S}"'
|
||||
if x.capacity:
|
||||
fmt += ' heartbeat="{0.modified:%Y-%m-%d %H:%M:%S}"'
|
||||
print(six.text_type(fmt + '\033[0m').format(x, x.version or '?'))
|
||||
print('')
|
||||
|
||||
@@ -95,7 +95,7 @@ class ReplayJobEvents():
|
||||
raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job)))
|
||||
sys.exit(1)
|
||||
|
||||
def run(self, job_id, speed=1.0, verbosity=0, skip=0):
|
||||
def run(self, job_id, speed=1.0, verbosity=0, skip_range=[]):
|
||||
stats = {
|
||||
'events_ontime': {
|
||||
'total': 0,
|
||||
@@ -127,7 +127,7 @@ class ReplayJobEvents():
|
||||
|
||||
je_previous = None
|
||||
for n, je_current in enumerate(job_events):
|
||||
if n < skip:
|
||||
if je_current.counter in skip_range:
|
||||
continue
|
||||
|
||||
if not je_previous:
|
||||
@@ -193,19 +193,29 @@ class Command(BaseCommand):
|
||||
|
||||
help = 'Replay job events over websockets ordered by created on date.'
|
||||
|
||||
def _parse_slice_range(self, slice_arg):
|
||||
slice_arg = tuple([int(n) for n in slice_arg.split(':')])
|
||||
slice_obj = slice(*slice_arg)
|
||||
|
||||
start = slice_obj.start or 0
|
||||
stop = slice_obj.stop or -1
|
||||
step = slice_obj.step or 1
|
||||
|
||||
return range(start, stop, step)
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--job_id', dest='job_id', type=int, metavar='j',
|
||||
help='Id of the job to replay (job or adhoc)')
|
||||
parser.add_argument('--speed', dest='speed', type=int, metavar='s',
|
||||
help='Speedup factor.')
|
||||
parser.add_argument('--skip', dest='skip', type=int, metavar='k',
|
||||
help='Number of events to skip.')
|
||||
parser.add_argument('--skip-range', dest='skip_range', type=str, metavar='k',
|
||||
default='0:-1:1', help='Range of events to skip')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
job_id = options.get('job_id')
|
||||
speed = options.get('speed') or 1
|
||||
verbosity = options.get('verbosity') or 0
|
||||
skip = options.get('skip') or 0
|
||||
skip = self._parse_slice_range(options.get('skip_range'))
|
||||
|
||||
replayer = ReplayJobEvents()
|
||||
replayer.run(job_id, speed, verbosity, skip)
|
||||
|
||||
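The new --skip-range option replaces the simple --skip counter: the argument string is split on ':' into slice(start, stop, step) and converted into a range of event counters to skip. A quick worked example of what the parser produces; note that the default '0:-1:1' yields an empty range (range(0, -1, 1) has no members), so by default no events are skipped:

def parse_slice_range(slice_arg):
    # mirrors Command._parse_slice_range above
    parts = tuple(int(n) for n in slice_arg.split(':'))
    s = slice(*parts)
    return range(s.start or 0, s.stop or -1, s.step or 1)

print(list(parse_slice_range('5:11:2')))   # [5, 7, 9] -> skip event counters 5, 7, 9
print(list(parse_slice_range('0:-1:1')))   # []        -> the default skips nothing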
@@ -1,224 +1,11 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import time
|
||||
from uuid import UUID
|
||||
from multiprocessing import Process
|
||||
from multiprocessing import Queue as MPQueue
|
||||
from Queue import Empty as QueueEmpty
|
||||
from Queue import Full as QueueFull
|
||||
|
||||
from kombu import Connection, Exchange, Queue
|
||||
from kombu.mixins import ConsumerMixin
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection
|
||||
from django.db import DatabaseError, OperationalError
|
||||
from django.db.utils import InterfaceError, InternalError
|
||||
from django.core.cache import cache as django_cache
|
||||
from kombu import Connection, Exchange, Queue
|
||||
|
||||
# AWX
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
|
||||
|
||||
class WorkerSignalHandler:
|
||||
|
||||
def __init__(self):
|
||||
self.kill_now = False
|
||||
signal.signal(signal.SIGINT, self.exit_gracefully)
|
||||
signal.signal(signal.SIGTERM, self.exit_gracefully)
|
||||
|
||||
def exit_gracefully(self, *args, **kwargs):
|
||||
self.kill_now = True
|
||||
|
||||
|
||||
class CallbackBrokerWorker(ConsumerMixin):
|
||||
|
||||
MAX_RETRIES = 2
|
||||
|
||||
def __init__(self, connection, use_workers=True):
|
||||
self.connection = connection
|
||||
self.worker_queues = []
|
||||
self.total_messages = 0
|
||||
self.init_workers(use_workers)
|
||||
|
||||
def init_workers(self, use_workers=True):
|
||||
def shutdown_handler(active_workers):
|
||||
def _handler(signum, frame):
|
||||
try:
|
||||
for active_worker in active_workers:
|
||||
active_worker.terminate()
|
||||
signal.signal(signum, signal.SIG_DFL)
|
||||
os.kill(os.getpid(), signum) # Rethrow signal, this time without catching it
|
||||
except Exception:
|
||||
logger.exception('Error in shutdown_handler')
|
||||
return _handler
|
||||
|
||||
if use_workers:
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
for idx in range(settings.JOB_EVENT_WORKERS):
|
||||
queue_actual = MPQueue(settings.JOB_EVENT_MAX_QUEUE_SIZE)
|
||||
w = Process(target=self.callback_worker, args=(queue_actual, idx,))
|
||||
w.start()
|
||||
if settings.DEBUG:
|
||||
logger.info('Started worker %s' % str(idx))
|
||||
self.worker_queues.append([0, queue_actual, w])
|
||||
elif settings.DEBUG:
|
||||
logger.warn('Started callback receiver (no workers)')
|
||||
|
||||
signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in self.worker_queues]))
|
||||
signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in self.worker_queues]))
|
||||
|
||||
def get_consumers(self, Consumer, channel):
|
||||
return [Consumer(queues=[Queue(settings.CALLBACK_QUEUE,
|
||||
Exchange(settings.CALLBACK_QUEUE, type='direct'),
|
||||
routing_key=settings.CALLBACK_QUEUE)],
|
||||
accept=['json'],
|
||||
callbacks=[self.process_task])]
|
||||
|
||||
def process_task(self, body, message):
|
||||
if "uuid" in body and body['uuid']:
|
||||
try:
|
||||
queue = UUID(body['uuid']).int % settings.JOB_EVENT_WORKERS
|
||||
except Exception:
|
||||
queue = self.total_messages % settings.JOB_EVENT_WORKERS
|
||||
else:
|
||||
queue = self.total_messages % settings.JOB_EVENT_WORKERS
|
||||
self.write_queue_worker(queue, body)
|
||||
self.total_messages += 1
|
||||
message.ack()
|
||||
|
||||
def write_queue_worker(self, preferred_queue, body):
|
||||
queue_order = sorted(range(settings.JOB_EVENT_WORKERS), cmp=lambda x, y: -1 if x==preferred_queue else 0)
|
||||
write_attempt_order = []
|
||||
for queue_actual in queue_order:
|
||||
try:
|
||||
worker_actual = self.worker_queues[queue_actual]
|
||||
worker_actual[1].put(body, block=True, timeout=5)
|
||||
worker_actual[0] += 1
|
||||
return queue_actual
|
||||
except QueueFull:
|
||||
pass
|
||||
except Exception:
|
||||
import traceback
|
||||
tb = traceback.format_exc()
|
||||
logger.warn("Could not write to queue %s" % preferred_queue)
|
||||
logger.warn("Detail: {}".format(tb))
|
||||
write_attempt_order.append(preferred_queue)
|
||||
logger.warn("Could not write payload to any queue, attempted order: {}".format(write_attempt_order))
|
||||
return None
|
||||
|
||||
def callback_worker(self, queue_actual, idx):
|
||||
signal_handler = WorkerSignalHandler()
|
||||
while not signal_handler.kill_now:
|
||||
try:
|
||||
body = queue_actual.get(block=True, timeout=1)
|
||||
except QueueEmpty:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error("Exception on worker thread, restarting: " + str(e))
|
||||
continue
|
||||
try:
|
||||
|
||||
event_map = {
|
||||
'job_id': JobEvent,
|
||||
'ad_hoc_command_id': AdHocCommandEvent,
|
||||
'project_update_id': ProjectUpdateEvent,
|
||||
'inventory_update_id': InventoryUpdateEvent,
|
||||
'system_job_id': SystemJobEvent,
|
||||
}
|
||||
|
||||
if not any([key in body for key in event_map]):
|
||||
raise Exception('Payload does not have a job identifier')
|
||||
if settings.DEBUG:
|
||||
from pygments import highlight
|
||||
from pygments.lexers import PythonLexer
|
||||
from pygments.formatters import Terminal256Formatter
|
||||
from pprint import pformat
|
||||
logger.info('Body: {}'.format(
|
||||
highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
|
||||
)[:1024 * 4])
|
||||
|
||||
def _save_event_data():
|
||||
for key, cls in event_map.items():
|
||||
if key in body:
|
||||
cls.create_from_data(**body)
|
||||
|
||||
job_identifier = 'unknown job'
|
||||
for key in event_map.keys():
|
||||
if key in body:
|
||||
job_identifier = body[key]
|
||||
break
|
||||
|
||||
if body.get('event') == 'EOF':
|
||||
try:
|
||||
final_counter = body.get('final_counter', 0)
|
||||
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
|
||||
# EOF events are sent when stdout for the running task is
|
||||
# closed. don't actually persist them to the database; we
|
||||
# just use them to report `summary` websocket events as an
|
||||
# approximation for when a job is "done"
|
||||
emit_channel_notification(
|
||||
'jobs-summary',
|
||||
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
|
||||
)
|
||||
# Additionally, when we've processed all events, we should
|
||||
# have all the data we need to send out success/failure
|
||||
# notification templates
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
if hasattr(uj, 'send_notification_templates'):
|
||||
retries = 0
|
||||
while retries < 5:
|
||||
if uj.finished:
|
||||
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
|
||||
break
|
||||
else:
|
||||
# wait a few seconds to avoid a race where the
|
||||
# events are persisted _before_ the UJ.status
|
||||
# changes from running -> successful
|
||||
retries += 1
|
||||
time.sleep(1)
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
except Exception:
|
||||
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
|
||||
continue
|
||||
|
||||
retries = 0
|
||||
while retries <= self.MAX_RETRIES:
|
||||
try:
|
||||
_save_event_data()
|
||||
break
|
||||
except (OperationalError, InterfaceError, InternalError) as e:
|
||||
if retries >= self.MAX_RETRIES:
|
||||
logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
|
||||
os.kill(os.getppid(), signal.SIGINT)
|
||||
return
|
||||
delay = 60 * retries
|
||||
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
|
||||
i=retries + 1,
|
||||
delay=delay
|
||||
))
|
||||
django_connection.close()
|
||||
time.sleep(delay)
|
||||
retries += 1
|
||||
except DatabaseError as e:
|
||||
logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
|
||||
break
|
||||
except Exception as exc:
|
||||
import traceback
|
||||
tb = traceback.format_exc()
|
||||
logger.error('Callback Task Processor Raised Exception: %r', exc)
|
||||
logger.error('Detail: {}'.format(tb))
|
||||
from awx.main.dispatch.worker import AWXConsumer, CallbackBrokerWorker
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
@@ -231,8 +18,22 @@ class Command(BaseCommand):
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
consumer = None
|
||||
try:
|
||||
worker = CallbackBrokerWorker(conn)
|
||||
worker.run()
|
||||
consumer = AWXConsumer(
|
||||
'callback_receiver',
|
||||
conn,
|
||||
CallbackBrokerWorker(),
|
||||
[
|
||||
Queue(
|
||||
settings.CALLBACK_QUEUE,
|
||||
Exchange(settings.CALLBACK_QUEUE, type='direct'),
|
||||
routing_key=settings.CALLBACK_QUEUE
|
||||
)
|
||||
]
|
||||
)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
print('Terminating Callback Receiver')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
|
||||
128
awx/main/management/commands/run_dispatcher.py
Normal file
@@ -0,0 +1,128 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
import os
|
||||
import logging
|
||||
from multiprocessing import Process
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection, connections
|
||||
from kombu import Connection, Exchange, Queue
|
||||
|
||||
from awx.main.dispatch import get_local_queuename, reaper
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
def construct_bcast_queue_name(common_name):
|
||||
return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Launch the task dispatcher'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--status', dest='status', action='store_true',
|
||||
help='print the internal state of any running dispatchers')
|
||||
parser.add_argument('--running', dest='running', action='store_true',
|
||||
help='print the UUIDs of any tasked managed by this dispatcher')
|
||||
parser.add_argument('--reload', dest='reload', action='store_true',
|
||||
help=('cause the dispatcher to recycle all of its worker processes;'
|
||||
'running jobs will run to completion first'))
|
||||
|
||||
def beat(self):
|
||||
from celery import Celery
|
||||
from celery.beat import PersistentScheduler
|
||||
from celery.apps import beat
|
||||
|
||||
class AWXScheduler(PersistentScheduler):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.ppid = os.getppid()
|
||||
super(AWXScheduler, self).__init__(*args, **kwargs)
|
||||
|
||||
def setup_schedule(self):
|
||||
super(AWXScheduler, self).setup_schedule()
|
||||
self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
|
||||
|
||||
def tick(self, *args, **kwargs):
|
||||
if os.getppid() != self.ppid:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
raise SystemExit()
|
||||
return super(AWXScheduler, self).tick(*args, **kwargs)
|
||||
|
||||
def apply_async(self, entry, producer=None, advance=True, **kwargs):
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
task = TaskWorker.resolve_callable(entry.task)
|
||||
result, queue = task.apply_async()
|
||||
|
||||
class TaskResult(object):
|
||||
id = result['uuid']
|
||||
|
||||
return TaskResult()
|
||||
|
||||
app = Celery()
|
||||
app.conf.BROKER_URL = settings.BROKER_URL
|
||||
app.conf.CELERY_TASK_RESULT_EXPIRES = False
|
||||
beat.Beat(
|
||||
30,
|
||||
app,
|
||||
schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler
|
||||
).run()
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
if options.get('status'):
|
||||
print Control('dispatcher').status()
|
||||
return
|
||||
if options.get('running'):
|
||||
print Control('dispatcher').running()
|
||||
return
|
||||
if options.get('reload'):
|
||||
return Control('dispatcher').control({'control': 'reload'})
|
||||
|
||||
# It's important to close these because we're _about_ to fork, and we
|
||||
# don't want the forked processes to inherit the open sockets
|
||||
# for the DB and memcached connections (that way lies race conditions)
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
beat = Process(target=self.beat)
|
||||
beat.daemon = True
|
||||
beat.start()
|
||||
|
||||
reaper.reap()
|
||||
consumer = None
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
try:
|
||||
bcast = 'tower_broadcast_all'
|
||||
queues = [
|
||||
Queue(q, Exchange(q), routing_key=q)
|
||||
for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
|
||||
]
|
||||
queues.append(
|
||||
Queue(
|
||||
construct_bcast_queue_name(bcast),
|
||||
exchange=Exchange(bcast, type='fanout'),
|
||||
routing_key=bcast,
|
||||
reply=True
|
||||
)
|
||||
)
|
||||
consumer = AWXConsumer(
|
||||
'dispatcher',
|
||||
conn,
|
||||
TaskWorker(),
|
||||
queues,
|
||||
AutoscalePool(min_workers=4)
|
||||
)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
logger.debug('Terminating Task Dispatcher')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
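The new management command also acts as a lightweight control client: --status, --running, and --reload publish a control message over AMQP, and AWXConsumer.control() replies with the pool/queue state or the managed task UUIDs. Typical invocations might look like this (assuming awx-manage is on PATH; the shell forms are shown as comments next to the equivalent Python calls used by the command itself):

# $ awx-manage run_dispatcher --status    # print pool/queue state
# $ awx-manage run_dispatcher --running   # print UUIDs of in-flight tasks
# $ awx-manage run_dispatcher --reload    # ask workers to quit and be respawned

from awx.main.dispatch.control import Control

print(Control('dispatcher').status())                  # same as --status
print(Control('dispatcher').running())                 # same as --running
Control('dispatcher').control({'control': 'reload'})   # same as --reload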
@@ -1,66 +0,0 @@
|
||||
import datetime
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
from celery import Celery
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""Watch local celery workers"""
|
||||
help=("Sends a periodic ping to the local celery process over AMQP to ensure "
|
||||
"it's responsive; this command is only intended to run in an environment "
|
||||
"where celeryd is running")
|
||||
|
||||
#
|
||||
# Just because celery is _running_ doesn't mean it's _working_; it's
|
||||
# imperative that celery workers are _actually_ handling AMQP messages on
|
||||
# their appropriate queues for awx to function. Unfortunately, we've been
|
||||
# plagued by a variety of bugs in celery that cause it to hang and become
|
||||
# an unresponsive zombie, such as:
|
||||
#
|
||||
# https://github.com/celery/celery/issues/4185
|
||||
# https://github.com/celery/celery/issues/4457
|
||||
#
|
||||
# The goal of this code is periodically send a broadcast AMQP message to
|
||||
# the celery process on the local host via celery.app.control.ping;
|
||||
# If that _fails_, we attempt to determine the pid of the celery process
|
||||
# and send SIGHUP (which tends to resolve these sorts of issues for us).
|
||||
#
|
||||
|
||||
INTERVAL = 60
|
||||
|
||||
def _log(self, msg):
|
||||
sys.stderr.write(datetime.datetime.utcnow().isoformat())
|
||||
sys.stderr.write(' ')
|
||||
sys.stderr.write(msg)
|
||||
sys.stderr.write('\n')
|
||||
|
||||
def handle(self, **options):
|
||||
app = Celery('awx')
|
||||
app.config_from_object('django.conf:settings')
|
||||
while True:
|
||||
try:
|
||||
pongs = app.control.ping(['celery@{}'.format(settings.CLUSTER_HOST_ID)], timeout=30)
|
||||
except Exception:
|
||||
pongs = []
|
||||
if not pongs:
|
||||
self._log('celery is not responsive to ping over local AMQP')
|
||||
pid = self.getpid()
|
||||
if pid:
|
||||
self._log('sending SIGHUP to {}'.format(pid))
|
||||
os.kill(pid, signal.SIGHUP)
|
||||
time.sleep(self.INTERVAL)
|
||||
|
||||
def getpid(self):
|
||||
cmd = 'supervisorctl pid tower-processes:awx-celeryd'
|
||||
if os.path.exists('/supervisor_task.conf'):
|
||||
cmd = 'supervisorctl -c /supervisor_task.conf pid tower-processes:celery'
|
||||
try:
|
||||
return int(subprocess.check_output(cmd, shell=True))
|
||||
except Exception:
|
||||
self._log('could not detect celery pid')
|
||||
@@ -1,9 +1,9 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import uuid
|
||||
import logging
|
||||
import threading
|
||||
import uuid
|
||||
import six
|
||||
import time
|
||||
import cProfile
|
||||
@@ -128,8 +128,9 @@ class SessionTimeoutMiddleware(object):
|
||||
def process_response(self, request, response):
|
||||
req_session = getattr(request, 'session', None)
|
||||
if req_session and not req_session.is_empty():
|
||||
request.session.set_expiry(request.session.get_expiry_age())
|
||||
response['Session-Timeout'] = int(settings.SESSION_COOKIE_AGE)
|
||||
expiry = int(settings.SESSION_COOKIE_AGE)
|
||||
request.session.set_expiry(expiry)
|
||||
response['Session-Timeout'] = expiry
|
||||
return response
|
||||
|
||||
|
||||
|
||||
@@ -157,7 +157,7 @@ class Migration(migrations.Migration):
|
||||
('status', models.CharField(default=b'pending', max_length=20, editable=False, choices=[(b'pending', 'Pending'), (b'successful', 'Successful'), (b'failed', 'Failed')])),
|
||||
('error', models.TextField(default=b'', editable=False, blank=True)),
|
||||
('notifications_sent', models.IntegerField(default=0, editable=False)),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'irc', 'IRC')])),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'rocketchat', 'Rocket.Chat'), (b'irc', 'IRC')])),
|
||||
('recipients', models.TextField(default=b'', editable=False, blank=True)),
|
||||
('subject', models.TextField(default=b'', editable=False, blank=True)),
|
||||
('body', jsonfield.fields.JSONField(default=dict, blank=True)),
|
||||
@@ -174,7 +174,7 @@ class Migration(migrations.Migration):
|
||||
('modified', models.DateTimeField(default=None, editable=False)),
|
||||
('description', models.TextField(default=b'', blank=True)),
|
||||
('name', models.CharField(unique=True, max_length=512)),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'irc', 'IRC')])),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'rocketchat', 'Rocket.Chat'), (b'irc', 'IRC')])),
|
||||
('notification_configuration', jsonfield.fields.JSONField(default=dict)),
|
||||
('created_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_created+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
|
||||
('modified_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_modified+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
|
||||
|
||||
@@ -27,7 +27,7 @@ class Migration(migrations.Migration):
|
||||
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('start_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('end_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('inventory_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.InventoryUpdate')),
|
||||
('inventory_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_update_events', to='main.InventoryUpdate')),
|
||||
],
|
||||
options={
|
||||
'ordering': ('-pk',),
|
||||
@@ -53,7 +53,7 @@ class Migration(migrations.Migration):
|
||||
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('start_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('end_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('project_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.ProjectUpdate')),
|
||||
('project_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='project_update_events', to='main.ProjectUpdate')),
|
||||
],
|
||||
options={
|
||||
'ordering': ('pk',),
|
||||
@@ -72,12 +72,24 @@ class Migration(migrations.Migration):
|
||||
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('start_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('end_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('system_job', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.SystemJob')),
|
||||
('system_job', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='system_job_events', to='main.SystemJob')),
|
||||
],
|
||||
options={
|
||||
'ordering': ('-pk',),
|
||||
},
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='inventoryupdateevent',
|
||||
index_together=set([('inventory_update', 'start_line'), ('inventory_update', 'uuid'), ('inventory_update', 'end_line')]),
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='projectupdateevent',
|
||||
index_together=set([('project_update', 'event'), ('project_update', 'end_line'), ('project_update', 'start_line'), ('project_update', 'uuid')]),
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='systemjobevent',
|
||||
index_together=set([('system_job', 'end_line'), ('system_job', 'uuid'), ('system_job', 'start_line')]),
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='unifiedjob',
|
||||
name='result_stdout_file',
|
||||
|
||||
@@ -64,12 +64,12 @@ class Migration(migrations.Migration):
|
||||
migrations.AddField(
|
||||
model_name='activitystream',
|
||||
name='o_auth2_access_token',
|
||||
field=models.ManyToManyField(to='main.OAuth2AccessToken', blank=True, related_name='main_o_auth2_accesstoken'),
|
||||
field=models.ManyToManyField(to='main.OAuth2AccessToken', blank=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='activitystream',
|
||||
name='o_auth2_application',
|
||||
field=models.ManyToManyField(to='main.OAuth2Application', blank=True, related_name='main_o_auth2_application'),
|
||||
field=models.ManyToManyField(to='main.OAuth2Application', blank=True),
|
||||
),
|
||||
|
||||
]
|
||||
|
||||
@@ -16,6 +16,6 @@ class Migration(migrations.Migration):
|
||||
migrations.AlterField(
|
||||
model_name='oauth2accesstoken',
|
||||
name='scope',
|
||||
field=models.TextField(blank=True, help_text="Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']."),
|
||||
field=models.TextField(blank=True, default=b'write', help_text="Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']."),
|
||||
),
|
||||
]
|
||||
|
||||
@@ -15,6 +15,6 @@ class Migration(migrations.Migration):
|
||||
migrations.AddField(
|
||||
model_name='oauth2accesstoken',
|
||||
name='modified',
|
||||
field=models.DateTimeField(editable=False),
|
||||
field=models.DateTimeField(editable=False, auto_now=True),
|
||||
),
|
||||
]
|
||||
|
||||
@@ -0,0 +1,147 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.11 on 2018-08-16 16:46
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0047_v330_activitystream_instance'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='credential',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'credential', u'model_name': 'credential'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='credential',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'credential', u'model_name': 'credential'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='credentialtype',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'credentialtype', u'model_name': 'credentialtype'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='credentialtype',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'credentialtype', u'model_name': 'credentialtype'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='custominventoryscript',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'custominventoryscript', u'model_name': 'custominventoryscript'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='custominventoryscript',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'custominventoryscript', u'model_name': 'custominventoryscript'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='group',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'group', u'model_name': 'group'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='group',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'group', u'model_name': 'group'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='host',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'host', u'model_name': 'host'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='host',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'host', u'model_name': 'host'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventory',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'inventory', u'model_name': 'inventory'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventory',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'inventory', u'model_name': 'inventory'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='label',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'label', u'model_name': 'label'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='label',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'label', u'model_name': 'label'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='notificationtemplate',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'notificationtemplate', u'model_name': 'notificationtemplate'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='notificationtemplate',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'notificationtemplate', u'model_name': 'notificationtemplate'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='organization',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'organization', u'model_name': 'organization'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='organization',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'organization', u'model_name': 'organization'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='schedule',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'schedule', u'model_name': 'schedule'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='schedule',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'schedule', u'model_name': 'schedule'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='team',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'team', u'model_name': 'team'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='team',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'team', u'model_name': 'team'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'unifiedjob', u'model_name': 'unifiedjob'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'unifiedjob', u'model_name': 'unifiedjob'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'unifiedjobtemplate', u'model_name': 'unifiedjobtemplate'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'unifiedjobtemplate', u'model_name': 'unifiedjobtemplate'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,22 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.11 on 2018-08-17 16:13
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from decimal import Decimal
|
||||
import django.core.validators
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0048_v330_django_created_modified_by_model_name'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='instance',
|
||||
name='capacity_adjustment',
|
||||
field=models.DecimalField(decimal_places=2, default=Decimal('1'), max_digits=3, validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
]
|
||||
21
awx/main/migrations/0050_v340_drop_celery_tables.py
Normal file
@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0049_v330_validate_instance_capacity_adjustment'),
    ]

    operations = [
        migrations.RunSQL([
            ("DROP TABLE IF EXISTS {} CASCADE;".format(table))
        ])
        for table in ('celery_taskmeta', 'celery_tasksetmeta', 'djcelery_crontabschedule',
                      'djcelery_intervalschedule', 'djcelery_periodictask',
                      'djcelery_periodictasks', 'djcelery_taskstate', 'djcelery_workerstate',
                      'djkombu_message', 'djkombu_queue')
    ]
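# For reference (derived from the list comprehension above, not new SQL): the
# first generated operation simply runs
#   DROP TABLE IF EXISTS celery_taskmeta CASCADE;
# and an analogous statement is emitted for each of the remaining nine
# celery/djcelery/djkombu tables.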
47
awx/main/migrations/0051_v340_job_slicing.py
Normal file
@@ -0,0 +1,47 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.11 on 2018-10-15 16:21
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import awx.main.utils.polymorphic
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0050_v340_drop_celery_tables'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='job',
|
||||
name='job_slice_count',
|
||||
field=models.PositiveIntegerField(blank=True, default=1, help_text='If ran as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job.'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='job',
|
||||
name='job_slice_number',
|
||||
field=models.PositiveIntegerField(blank=True, default=0, help_text='If part of a sliced job, the ID of the inventory slice operated on. If not part of sliced job, parameter is not used.'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='jobtemplate',
|
||||
name='job_slice_count',
|
||||
field=models.PositiveIntegerField(blank=True, default=1, help_text='The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1.'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjob',
|
||||
name='is_sliced_job',
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjob',
|
||||
name='job_template',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='If automatically created for a sliced job run, the job template the workflow job was created from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='slice_workflow_jobs', to='main.JobTemplate'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='unified_job_template',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjob_unified_jobs', to='main.UnifiedJobTemplate'),
|
||||
),
|
||||
]
|
||||
File diff suppressed because it is too large
@@ -136,8 +136,7 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
|
||||
else:
|
||||
return []
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return ''
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -439,15 +439,6 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
|
||||
defaults = OrderedDict()
|
||||
|
||||
ENV_BLACKLIST = set((
|
||||
'VIRTUAL_ENV', 'PATH', 'PYTHONPATH', 'PROOT_TMP_DIR', 'JOB_ID',
|
||||
'INVENTORY_ID', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID',
|
||||
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
|
||||
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
|
||||
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS', 'FACT_QUEUE',
|
||||
'AWX_HOST', 'PROJECT_REVISION'
|
||||
))
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('kind', 'name')
|
||||
@@ -648,8 +639,14 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
file_label = file_label.split('.')[1]
|
||||
setattr(tower_namespace.filename, file_label, path)
|
||||
|
||||
injector_field = self._meta.get_field('injectors')
|
||||
for env_var, tmpl in self.injectors.get('env', {}).items():
|
||||
if env_var.startswith('ANSIBLE_') or env_var in self.ENV_BLACKLIST:
|
||||
try:
|
||||
injector_field.validate_env_var_allowed(env_var)
|
||||
except ValidationError as e:
|
||||
logger.error(six.text_type(
|
||||
'Ignoring prohibited env var {}, reason: {}'
|
||||
).format(env_var, e))
|
||||
continue
|
||||
env[env_var] = Template(tmpl).render(**namespace)
|
||||
safe_env[env_var] = Template(tmpl).render(**safe_namespace)
|
||||
|
||||
@@ -35,9 +35,9 @@ def sanitize_event_keys(kwargs, valid_keys):
|
||||
for key in [
|
||||
'play', 'role', 'task', 'playbook'
|
||||
]:
|
||||
if isinstance(kwargs.get(key), six.string_types):
|
||||
if len(kwargs[key]) > 1024:
|
||||
kwargs[key] = Truncator(kwargs[key]).chars(1024)
|
||||
if isinstance(kwargs.get('event_data', {}).get(key), six.string_types):
|
||||
if len(kwargs['event_data'][key]) > 1024:
|
||||
kwargs['event_data'][key] = Truncator(kwargs['event_data'][key]).chars(1024)
|
||||
|
||||
|
||||
def create_host_status_counts(event_data):
|
||||
|
||||
@@ -6,6 +6,7 @@ import random
|
||||
from decimal import Decimal
|
||||
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.core.validators import MinValueValidator
|
||||
from django.db import models, connection
|
||||
from django.db.models.signals import post_save, post_delete
|
||||
from django.dispatch import receiver
|
||||
@@ -31,7 +32,7 @@ __all__ = ('Instance', 'InstanceGroup', 'JobOrigin', 'TowerScheduleState',)
|
||||
|
||||
|
||||
def validate_queuename(v):
|
||||
# celery and kombu don't play nice with unicode in queue names
|
||||
# kombu doesn't play nice with unicode in queue names
|
||||
if v:
|
||||
try:
|
||||
'{}'.format(v.decode('utf-8'))
|
||||
@@ -81,6 +82,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
default=Decimal(1.0),
|
||||
max_digits=3,
|
||||
decimal_places=2,
|
||||
validators=[MinValueValidator(0)]
|
||||
)
|
||||
enabled = models.BooleanField(
|
||||
default=True
|
||||
|
||||
@@ -9,7 +9,6 @@ import copy
|
||||
from urlparse import urljoin
|
||||
import os.path
|
||||
import six
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -20,6 +19,9 @@ from django.core.exceptions import ValidationError
|
||||
from django.utils.timezone import now
|
||||
from django.db.models import Q
|
||||
|
||||
# REST Framework
|
||||
from rest_framework.exceptions import ParseError
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.constants import CLOUD_PROVIDERS
|
||||
@@ -42,7 +44,7 @@ from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
JobNotificationMixin,
|
||||
)
|
||||
from awx.main.utils import _inventory_updates, get_ansible_version, region_sorting
|
||||
from awx.main.utils import _inventory_updates, region_sorting
|
||||
|
||||
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate',
|
||||
@@ -218,67 +220,87 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
group_children.add(from_group_id)
|
||||
return group_children_map
|
||||
|
||||
def get_script_data(self, hostvars=False, towervars=False, show_all=False):
|
||||
if show_all:
|
||||
hosts_q = dict()
|
||||
else:
|
||||
hosts_q = dict(enabled=True)
|
||||
    @staticmethod
    def parse_slice_params(slice_str):
        m = re.match(r"slice(?P<number>\d+)of(?P<step>\d+)", slice_str)
        if not m:
            raise ParseError(_('Could not parse subset as slice specification.'))
        number = int(m.group('number'))
        step = int(m.group('step'))
        if number > step:
            raise ParseError(_('Slice number must be less than total number of slices.'))
        elif number < 1:
            raise ParseError(_('Slice number must be 1 or higher.'))
        return (number, step)

    def get_script_data(self, hostvars=False, towervars=False, show_all=False, slice_number=1, slice_count=1):
        hosts_kw = dict()
        if not show_all:
            hosts_kw['enabled'] = True
        fetch_fields = ['name', 'id', 'variables']
        if towervars:
            fetch_fields.append('enabled')
        hosts = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields)
        if slice_count > 1:
            offset = slice_number - 1
            hosts = hosts[offset::slice_count]
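        # A small worked example (values made up, not from the diff) of how the
        # slice parameters are parsed and then applied as a stride over the
        # ordered hosts above:
        #
        #   >>> Inventory.parse_slice_params('slice2of3')
        #   (2, 3)
        #   >>> names = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        #   >>> names[2 - 1::3]          # offset = slice_number - 1
        #   ['b', 'e']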
data = dict()
|
||||
all_group = data.setdefault('all', dict())
|
||||
all_hostnames = set(host.name for host in hosts)
|
||||
|
||||
if self.variables_dict:
|
||||
all_group = data.setdefault('all', dict())
|
||||
all_group['vars'] = self.variables_dict
|
||||
|
||||
if self.kind == 'smart':
|
||||
if len(self.hosts.all()) == 0:
|
||||
return {}
|
||||
else:
|
||||
all_group = data.setdefault('all', dict())
|
||||
smart_hosts_qs = self.hosts.filter(**hosts_q).all()
|
||||
smart_hosts = list(smart_hosts_qs.values_list('name', flat=True))
|
||||
all_group['hosts'] = smart_hosts
|
||||
all_group['hosts'] = [host.name for host in hosts]
|
||||
else:
|
||||
# Add hosts without a group to the all group.
|
||||
groupless_hosts_qs = self.hosts.filter(groups__isnull=True, **hosts_q)
|
||||
groupless_hosts = list(groupless_hosts_qs.values_list('name', flat=True))
|
||||
if groupless_hosts:
|
||||
all_group = data.setdefault('all', dict())
|
||||
all_group['hosts'] = groupless_hosts
|
||||
# Keep track of hosts that are members of a group
|
||||
grouped_hosts = set([])
|
||||
|
||||
# Build in-memory mapping of groups and their hosts.
|
||||
group_hosts_kw = dict(group__inventory_id=self.id, host__inventory_id=self.id)
|
||||
if 'enabled' in hosts_q:
|
||||
group_hosts_kw['host__enabled'] = hosts_q['enabled']
|
||||
group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw)
|
||||
group_hosts_qs = group_hosts_qs.values_list('group_id', 'host_id', 'host__name')
|
||||
group_hosts_qs = Group.hosts.through.objects.filter(
|
||||
group__inventory_id=self.id,
|
||||
host__inventory_id=self.id
|
||||
).values_list('group_id', 'host_id', 'host__name')
|
||||
group_hosts_map = {}
|
||||
for group_id, host_id, host_name in group_hosts_qs:
|
||||
if host_name not in all_hostnames:
|
||||
continue # host might not be in current shard
|
||||
group_hostnames = group_hosts_map.setdefault(group_id, [])
|
||||
group_hostnames.append(host_name)
|
||||
grouped_hosts.add(host_name)
|
||||
|
||||
# Build in-memory mapping of groups and their children.
|
||||
group_parents_qs = Group.parents.through.objects.filter(
|
||||
from_group__inventory_id=self.id,
|
||||
to_group__inventory_id=self.id,
|
||||
)
|
||||
group_parents_qs = group_parents_qs.values_list('from_group_id', 'from_group__name',
|
||||
'to_group_id')
|
||||
).values_list('from_group_id', 'from_group__name', 'to_group_id')
|
||||
group_children_map = {}
|
||||
for from_group_id, from_group_name, to_group_id in group_parents_qs:
|
||||
group_children = group_children_map.setdefault(to_group_id, [])
|
||||
group_children.append(from_group_name)
|
||||
|
||||
# Now use in-memory maps to build up group info.
|
||||
for group in self.groups.all():
|
||||
for group in self.groups.only('name', 'id', 'variables'):
|
||||
group_info = dict()
|
||||
group_info['hosts'] = group_hosts_map.get(group.id, [])
|
||||
group_info['children'] = group_children_map.get(group.id, [])
|
||||
group_info['vars'] = group.variables_dict
|
||||
data[group.name] = group_info
|
||||
|
||||
# Add ungrouped hosts to all group
|
||||
all_group['hosts'] = [host.name for host in hosts if host.name not in grouped_hosts]
|
||||
|
||||
# Remove any empty groups
|
||||
for group_name in list(data.keys()):
|
||||
if not data.get(group_name, {}).get('hosts', []):
|
||||
data.pop(group_name)
|
||||
|
||||
if hostvars:
|
||||
data.setdefault('_meta', dict())
|
||||
data['_meta'].setdefault('hostvars', dict())
|
||||
for host in self.hosts.filter(**hosts_q):
|
||||
for host in hosts:
|
||||
data['_meta']['hostvars'][host.name] = host.variables_dict
|
||||
if towervars:
|
||||
tower_dict = dict(remote_tower_enabled=str(host.enabled).lower(),
|
||||
@@ -1262,6 +1284,11 @@ class InventorySourceOptions(BaseModel):
|
||||
'Credentials of type machine, source control, insights and vault are '
|
||||
'disallowed for custom inventory sources.'
|
||||
)
|
||||
elif source == 'scm' and cred and cred.credential_type.kind in ('insights', 'vault'):
|
||||
return _(
|
||||
'Credentials of type insights and vault are '
|
||||
'disallowed for scm inventory sources.'
|
||||
)
|
||||
return None
|
||||
|
||||
def get_inventory_plugin_name(self):
|
||||
@@ -1573,12 +1600,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, RelatedJobsMix
|
||||
"Instead, configure the corresponding source project to update on launch."))
|
||||
return self.update_on_launch
|
||||
|
||||
def clean_overwrite_vars(self): # TODO: remove when Ansible 2.4 becomes unsupported, obviously
|
||||
if self.source == 'scm' and not self.overwrite_vars:
|
||||
if get_ansible_version() < LooseVersion('2.5'):
|
||||
raise ValidationError(_("SCM type sources must set `overwrite_vars` to `true` until Ansible 2.5."))
|
||||
return self.overwrite_vars
|
||||
|
||||
def clean_source_path(self):
|
||||
if self.source != 'scm' and self.source_path:
|
||||
raise ValidationError(_("Cannot set source_path if not SCM type."))
|
||||
@@ -1626,8 +1647,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
null=True
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return 'inventory_source'
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -238,11 +238,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
app_label = 'main'
|
||||
ordering = ('name',)
|
||||
|
||||
host_config_key = models.CharField(
|
||||
host_config_key = prevent_search(models.CharField(
|
||||
max_length=1024,
|
||||
blank=True,
|
||||
default='',
|
||||
)
|
||||
))
|
||||
ask_diff_mode_on_launch = AskForField(
|
||||
blank=True,
|
||||
default=False,
|
||||
@@ -277,6 +277,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
default=False,
|
||||
allows_field='credentials'
|
||||
)
|
||||
job_slice_count = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=1,
|
||||
help_text=_("The number of jobs to slice into at runtime. "
|
||||
"Will cause the Job Template to launch a workflow if value is greater than 1."),
|
||||
)
|
||||
|
||||
admin_role = ImplicitRoleField(
|
||||
parent_role=['project.organization.job_template_admin_role', 'inventory.organization.job_template_admin_role']
|
||||
)
|
||||
@@ -295,7 +302,8 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return set(f.name for f in JobOptions._meta.fields) | set(
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials']
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials',
|
||||
'job_slice_number', 'job_slice_count']
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -320,6 +328,31 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
'''
|
||||
return self.create_unified_job(**kwargs)
|
||||
|
||||
def create_unified_job(self, **kwargs):
|
||||
prevent_slicing = kwargs.pop('_prevent_slicing', False)
|
||||
slice_event = bool(self.job_slice_count > 1 and (not prevent_slicing))
|
||||
if slice_event:
|
||||
# A Slice Job Template will generate a WorkflowJob rather than a Job
|
||||
from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobNode
|
||||
kwargs['_unified_job_class'] = WorkflowJobTemplate._get_unified_job_class()
|
||||
kwargs['_parent_field_name'] = "job_template"
|
||||
kwargs.setdefault('_eager_fields', {})
|
||||
kwargs['_eager_fields']['is_sliced_job'] = True
|
||||
job = super(JobTemplate, self).create_unified_job(**kwargs)
|
||||
if slice_event:
|
||||
try:
|
||||
wj_config = job.launch_config
|
||||
except JobLaunchConfig.DoesNotExist:
|
||||
wj_config = JobLaunchConfig()
|
||||
actual_inventory = wj_config.inventory if wj_config.inventory else self.inventory
|
||||
for idx in xrange(min(self.job_slice_count,
|
||||
actual_inventory.hosts.count())):
|
||||
create_kwargs = dict(workflow_job=job,
|
||||
unified_job_template=self,
|
||||
ancestor_artifacts=dict(job_slice=idx + 1))
|
||||
WorkflowJobNode.objects.create(**create_kwargs)
|
||||
return job
|
||||
|
||||
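# To make the branch above concrete (numbers are illustrative, not from the
# diff): a JobTemplate with job_slice_count=5 launched against an inventory
# that only contains 3 hosts spawns a WorkflowJob (is_sliced_job=True) with
# min(5, 3) == 3 WorkflowJobNodes, carrying ancestor_artifacts of
# {'job_slice': 1}, {'job_slice': 2} and {'job_slice': 3} respectively.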
def get_absolute_url(self, request=None):
|
||||
return reverse('api:job_template_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
@@ -452,7 +485,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
RelatedJobsMixin
|
||||
'''
|
||||
def _get_related_jobs(self):
|
||||
return Job.objects.filter(job_template=self)
|
||||
return UnifiedJob.objects.filter(unified_job_template=self)
|
||||
|
||||
|
||||
class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskManagerJobMixin):
|
||||
@@ -501,10 +534,21 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
on_delete=models.SET_NULL,
|
||||
help_text=_('The SCM Refresh task used to make sure the playbooks were available for the job run'),
|
||||
)
|
||||
job_slice_number = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=0,
|
||||
help_text=_("If part of a sliced job, the ID of the inventory slice operated on. "
|
||||
"If not part of sliced job, parameter is not used."),
|
||||
)
|
||||
job_slice_count = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=1,
|
||||
help_text=_("If ran as part of sliced jobs, the total number of slices. "
|
||||
"If 1, job is not part of a sliced job."),
|
||||
)
|
||||
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return 'job_template'
|
||||
|
||||
@classmethod
|
||||
@@ -545,6 +589,15 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
def event_class(self):
|
||||
return JobEvent
|
||||
|
||||
def copy_unified_job(self, **new_prompts):
|
||||
# Needed for job slice relaunch consistency: do not re-spawn a workflow job;
# target the same slice as the original job
|
||||
new_prompts['_prevent_slicing'] = True
|
||||
new_prompts.setdefault('_eager_fields', {})
|
||||
new_prompts['_eager_fields']['job_slice_number'] = self.job_slice_number
|
||||
new_prompts['_eager_fields']['job_slice_count'] = self.job_slice_count
|
||||
return super(Job, self).copy_unified_job(**new_prompts)
|
||||
|
||||
@property
|
||||
def ask_diff_mode_on_launch(self):
|
||||
if self.job_template is not None:
|
||||
@@ -638,6 +691,9 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
            count_hosts = 2
        else:
            count_hosts = Host.objects.filter(inventory__jobs__pk=self.pk).count()
            if self.job_slice_count > 1:
                # Integer division intentional
                count_hosts = (count_hosts + self.job_slice_count - self.job_slice_number) / self.job_slice_count
        return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1
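        # Worked example (numbers assumed for illustration): with 10 matching
        # hosts and job_slice_count=3, the per-slice estimate above is
        #   slice 1: (10 + 3 - 1) // 3 == 4
        #   slice 2: (10 + 3 - 2) // 3 == 3
        #   slice 3: (10 + 3 - 3) // 3 == 3
        # which adds back up to the 10 hosts spread across the sliced workflow.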
|
||||
@property
|
||||
|
||||
@@ -466,6 +466,14 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
models.Q(ProjectUpdate___project=self)
|
||||
)
|
||||
|
||||
def delete(self, *args, **kwargs):
|
||||
path_to_delete = self.get_project_path(check_if_exists=False)
|
||||
r = super(Project, self).delete(*args, **kwargs)
|
||||
if self.scm_type and path_to_delete: # non-manual, concrete path
|
||||
from awx.main.tasks import delete_project_files
|
||||
delete_project_files.delay(path_to_delete)
|
||||
return r
|
||||
|
||||
|
||||
class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManagerProjectUpdateMixin):
|
||||
'''
|
||||
@@ -488,8 +496,7 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
|
||||
default='check',
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return 'project'
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -153,7 +153,7 @@ class Schedule(CommonModel, LaunchTimeConfig):
|
||||
if 'until=' in rrule.lower():
|
||||
# if DTSTART;TZID= is used, coerce "naive" UNTIL values
|
||||
# to the proper UTC date
|
||||
match_until = re.match(".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rrule)
|
||||
match_until = re.match(r".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rrule)
|
||||
if not len(match_until.group('utcflag')):
|
||||
# rrule = DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
|
||||
|
||||
@@ -7,9 +7,11 @@ import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import subprocess
|
||||
import tempfile
|
||||
from collections import OrderedDict
|
||||
import six
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -27,11 +29,9 @@ from rest_framework.exceptions import ParseError
|
||||
# Django-Polymorphic
|
||||
from polymorphic.models import PolymorphicModel
|
||||
|
||||
# Django-Celery
|
||||
from djcelery.models import TaskMeta
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import * # noqa
|
||||
from awx.main.dispatch.control import Control as ControlDispatcher
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
|
||||
from awx.main.utils import (
|
||||
encrypt_dict, decrypt_field, _inventory_updates,
|
||||
@@ -309,13 +309,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
'''
|
||||
raise NotImplementedError # Implement in subclass.
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
'''
|
||||
Return field names that should be copied from template to new job.
|
||||
'''
|
||||
raise NotImplementedError # Implement in subclass.
|
||||
|
||||
@property
|
||||
def notification_templates(self):
|
||||
'''
|
||||
@@ -338,19 +331,33 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
|
||||
unified_job_class = self._get_unified_job_class()
|
||||
fields = self._get_unified_job_field_names()
|
||||
unallowed_fields = set(kwargs.keys()) - set(fields)
|
||||
if unallowed_fields:
|
||||
logger.warn('Fields {} are not allowed as overrides.'.format(unallowed_fields))
|
||||
map(kwargs.pop, unallowed_fields)
|
||||
parent_field_name = None
|
||||
if "_unified_job_class" in kwargs:
|
||||
# Special case where spawned job is different type than usual
|
||||
# Only used for slice jobs
|
||||
unified_job_class = kwargs.pop("_unified_job_class")
|
||||
fields = unified_job_class._get_unified_job_field_names() & fields
|
||||
parent_field_name = kwargs.pop('_parent_field_name')
|
||||
|
||||
unified_job = copy_model_by_class(self, unified_job_class, fields, kwargs)
|
||||
unallowed_fields = set(kwargs.keys()) - set(fields)
|
||||
validated_kwargs = kwargs.copy()
|
||||
if unallowed_fields:
|
||||
if parent_field_name is None:
|
||||
logger.warn(six.text_type('Fields {} are not allowed as overrides to spawn {} from {}.').format(
|
||||
six.text_type(', ').join(unallowed_fields), unified_job, self
|
||||
))
|
||||
map(validated_kwargs.pop, unallowed_fields)
|
||||
|
||||
unified_job = copy_model_by_class(self, unified_job_class, fields, validated_kwargs)
|
||||
|
||||
if eager_fields:
|
||||
for fd, val in eager_fields.items():
|
||||
setattr(unified_job, fd, val)
|
||||
|
||||
# Set the unified job template back-link on the job
|
||||
parent_field_name = unified_job_class._get_parent_field_name()
|
||||
# NOTE: a slice workflow job's _get_parent_field_name method
# is not correct until this is set
|
||||
if not parent_field_name:
|
||||
parent_field_name = unified_job._get_parent_field_name()
|
||||
setattr(unified_job, parent_field_name, self)
|
||||
|
||||
# For JobTemplate-based jobs with surveys, add passwords to list for perma-redaction
|
||||
@@ -364,24 +371,25 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
unified_job.save()
|
||||
|
||||
# Labels and credentials copied here
|
||||
if kwargs.get('credentials'):
|
||||
if validated_kwargs.get('credentials'):
|
||||
Credential = UnifiedJob._meta.get_field('credentials').related_model
|
||||
cred_dict = Credential.unique_dict(self.credentials.all())
|
||||
prompted_dict = Credential.unique_dict(kwargs['credentials'])
|
||||
prompted_dict = Credential.unique_dict(validated_kwargs['credentials'])
|
||||
# combine prompted credentials with JT
|
||||
cred_dict.update(prompted_dict)
|
||||
kwargs['credentials'] = [cred for cred in cred_dict.values()]
|
||||
validated_kwargs['credentials'] = [cred for cred in cred_dict.values()]
|
||||
kwargs['credentials'] = validated_kwargs['credentials']
|
||||
|
||||
from awx.main.signals import disable_activity_stream
|
||||
with disable_activity_stream():
|
||||
copy_m2m_relationships(self, unified_job, fields, kwargs=kwargs)
|
||||
copy_m2m_relationships(self, unified_job, fields, kwargs=validated_kwargs)
|
||||
|
||||
if 'extra_vars' in kwargs:
|
||||
unified_job.handle_extra_data(kwargs['extra_vars'])
|
||||
if 'extra_vars' in validated_kwargs:
|
||||
unified_job.handle_extra_data(validated_kwargs['extra_vars'])
|
||||
|
||||
if not getattr(self, '_deprecated_credential_launch', False):
|
||||
# Create record of provided prompts for relaunch and rescheduling
|
||||
unified_job.create_config_from_prompts(kwargs)
|
||||
unified_job.create_config_from_prompts(kwargs, parent=self)
|
||||
|
||||
return unified_job
|
||||
|
||||
@@ -546,7 +554,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
default=None,
|
||||
editable=False,
|
||||
related_name='%(class)s_unified_jobs',
|
||||
on_delete=models.SET_NULL,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
)
|
||||
launch_type = models.CharField(
|
||||
max_length=20,
|
||||
@@ -694,8 +702,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
def supports_isolation(cls):
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return 'unified_job_template' # Override in subclasses.
|
||||
|
||||
@classmethod
|
||||
@@ -828,7 +835,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
'''
|
||||
unified_job_class = self.__class__
|
||||
unified_jt_class = self._get_unified_job_template_class()
|
||||
parent_field_name = unified_job_class._get_parent_field_name()
|
||||
parent_field_name = self._get_parent_field_name()
|
||||
fields = unified_jt_class._get_unified_job_field_names() | set([parent_field_name])
|
||||
|
||||
create_data = {}
|
||||
@@ -847,10 +854,10 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
setattr(unified_job, fd, val)
|
||||
unified_job.save()
|
||||
|
||||
# Labels copied here
|
||||
from awx.main.signals import disable_activity_stream
|
||||
with disable_activity_stream():
|
||||
copy_m2m_relationships(self, unified_job, fields)
|
||||
# Labels copied here
|
||||
from awx.main.signals import disable_activity_stream
|
||||
with disable_activity_stream():
|
||||
copy_m2m_relationships(self, unified_job, fields)
|
||||
|
||||
return unified_job
|
||||
|
||||
@@ -866,16 +873,18 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
except JobLaunchConfig.DoesNotExist:
|
||||
return None
|
||||
|
||||
def create_config_from_prompts(self, kwargs):
|
||||
def create_config_from_prompts(self, kwargs, parent=None):
|
||||
'''
|
||||
Create a launch configuration entry for this job, given prompts
|
||||
returns None if it can not be created
|
||||
'''
|
||||
if self.unified_job_template is None:
|
||||
return None
|
||||
JobLaunchConfig = self._meta.get_field('launch_config').related_model
|
||||
config = JobLaunchConfig(job=self)
|
||||
valid_fields = self.unified_job_template.get_ask_mapping().keys()
|
||||
if parent is None:
|
||||
parent = getattr(self, self._get_parent_field_name())
|
||||
if parent is None:
|
||||
return
|
||||
valid_fields = parent.get_ask_mapping().keys()
|
||||
# Special cases allowed for workflows
|
||||
if hasattr(self, 'extra_vars'):
|
||||
valid_fields.extend(['survey_passwords', 'extra_vars'])
|
||||
@@ -892,8 +901,9 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
setattr(config, key, value)
|
||||
config.save()
|
||||
|
||||
job_creds = (set(kwargs.get('credentials', [])) -
|
||||
set(self.unified_job_template.credentials.all()))
|
||||
job_creds = set(kwargs.get('credentials', []))
|
||||
if 'credentials' in [field.name for field in parent._meta.get_fields()]:
|
||||
job_creds = job_creds - set(parent.credentials.all())
|
||||
if job_creds:
|
||||
config.credentials.add(*job_creds)
|
||||
return config
|
||||
@@ -1112,14 +1122,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
pass
|
||||
return None
|
||||
|
||||
@property
|
||||
def celery_task(self):
|
||||
try:
|
||||
if self.celery_task_id:
|
||||
return TaskMeta.objects.get(task_id=self.celery_task_id)
|
||||
except TaskMeta.DoesNotExist:
|
||||
pass
|
||||
|
||||
def get_passwords_needed_to_start(self):
|
||||
return []
|
||||
|
||||
@@ -1224,29 +1226,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
|
||||
return (True, opts)
|
||||
|
||||
def start_celery_task(self, opts, error_callback, success_callback, queue):
|
||||
kwargs = {
|
||||
'link_error': error_callback,
|
||||
'link': success_callback,
|
||||
'queue': None,
|
||||
'task_id': None,
|
||||
}
|
||||
if not self.celery_task_id:
|
||||
raise RuntimeError("Expected celery_task_id to be set on model.")
|
||||
kwargs['task_id'] = self.celery_task_id
|
||||
task_class = self._get_task_class()
|
||||
kwargs['queue'] = queue
|
||||
task_class().apply_async([self.pk], opts, **kwargs)
|
||||
|
||||
def start(self, error_callback, success_callback, **kwargs):
|
||||
'''
|
||||
Start the task running via Celery.
|
||||
'''
|
||||
(res, opts) = self.pre_start(**kwargs)
|
||||
if res:
|
||||
self.start_celery_task(opts, error_callback, success_callback)
|
||||
return res
|
||||
|
||||
def signal_start(self, **kwargs):
|
||||
"""Notify the task runner system to begin work on this task."""
|
||||
|
||||
@@ -1282,46 +1261,35 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
# Done!
|
||||
return True
|
||||
|
||||
|
||||
@property
|
||||
def actually_running(self):
|
||||
# returns True if the job is running in the appropriate dispatcher process
|
||||
running = False
|
||||
if all([
|
||||
self.status == 'running',
|
||||
self.celery_task_id,
|
||||
self.execution_node
|
||||
]):
|
||||
# If the job is marked as running, but the dispatcher
|
||||
# doesn't know about it (or the dispatcher doesn't reply),
|
||||
# then cancel the job
|
||||
timeout = 5
|
||||
try:
|
||||
running = self.celery_task_id in ControlDispatcher(
|
||||
'dispatcher', self.execution_node
|
||||
).running(timeout=timeout)
|
||||
except socket.timeout:
|
||||
logger.error(six.text_type(
|
||||
'could not reach dispatcher on {} within {}s'
|
||||
).format(self.execution_node, timeout))
|
||||
running = False
|
||||
return running
|
||||
|
||||
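# In rough terms (a paraphrase of the property above, not new behaviour): the
# dispatcher on the job's execution_node is asked which task ids it is
# currently running; if this job's celery_task_id is not among them, or the
# node does not answer within 5 seconds, the job is treated as not actually
# running, which lets cancel() below flip a stuck 'running' job to 'canceled'.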
@property
|
||||
def can_cancel(self):
|
||||
return bool(self.status in CAN_CANCEL)
|
||||
|
||||
def _force_cancel(self):
|
||||
# Update the status to 'canceled' if we can detect that the job
|
||||
# really isn't running (i.e. celery has crashed or forcefully
|
||||
# killed the worker).
|
||||
task_statuses = ('STARTED', 'SUCCESS', 'FAILED', 'RETRY', 'REVOKED')
|
||||
try:
|
||||
taskmeta = self.celery_task
|
||||
if not taskmeta or taskmeta.status not in task_statuses:
|
||||
return
|
||||
from celery import current_app
|
||||
i = current_app.control.inspect()
|
||||
for v in (i.active() or {}).values():
|
||||
if taskmeta.task_id in [x['id'] for x in v]:
|
||||
return
|
||||
for v in (i.reserved() or {}).values():
|
||||
if taskmeta.task_id in [x['id'] for x in v]:
|
||||
return
|
||||
for v in (i.revoked() or {}).values():
|
||||
if taskmeta.task_id in [x['id'] for x in v]:
|
||||
return
|
||||
for v in (i.scheduled() or {}).values():
|
||||
if taskmeta.task_id in [x['id'] for x in v]:
|
||||
return
|
||||
instance = self.__class__.objects.get(pk=self.pk)
|
||||
if instance.can_cancel:
|
||||
instance.status = 'canceled'
|
||||
update_fields = ['status']
|
||||
if not instance.job_explanation:
|
||||
instance.job_explanation = 'Forced cancel'
|
||||
update_fields.append('job_explanation')
|
||||
instance.save(update_fields=update_fields)
|
||||
self.websocket_emit_status("canceled")
|
||||
except Exception: # FIXME: Log this exception!
|
||||
if settings.DEBUG:
|
||||
raise
|
||||
|
||||
def _build_job_explanation(self):
|
||||
if not self.job_explanation:
|
||||
return 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
|
||||
@@ -1340,13 +1308,14 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
if self.status in ('pending', 'waiting', 'new'):
|
||||
self.status = 'canceled'
|
||||
cancel_fields.append('status')
|
||||
if self.status == 'running' and not self.actually_running:
|
||||
self.status = 'canceled'
|
||||
cancel_fields.append('status')
|
||||
if job_explanation is not None:
|
||||
self.job_explanation = job_explanation
|
||||
cancel_fields.append('job_explanation')
|
||||
self.save(update_fields=cancel_fields)
|
||||
self.websocket_emit_status("canceled")
|
||||
if settings.BROKER_URL.startswith('amqp://'):
|
||||
self._force_cancel()
|
||||
return self.cancel_flag
|
||||
|
||||
@property
|
||||
@@ -1402,7 +1371,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
r['{}_user_last_name'.format(name)] = created_by.last_name
|
||||
return r
|
||||
|
||||
def get_celery_queue_name(self):
|
||||
def get_queue_name(self):
|
||||
return self.controller_node or self.execution_node or settings.CELERY_DEFAULT_QUEUE
|
||||
|
||||
def is_isolated(self):
|
||||
|
||||
@@ -9,6 +9,7 @@ import logging
|
||||
from django.db import models
|
||||
from django.conf import settings
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
#from django import settings as tower_settings
|
||||
|
||||
# AWX
|
||||
@@ -206,13 +207,24 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
workflow_pk=self.pk,
|
||||
error_text=errors))
|
||||
data.update(accepted_fields) # missing fields are handled in the scheduler
|
||||
try:
|
||||
# config saved on the workflow job itself
|
||||
wj_config = self.workflow_job.launch_config
|
||||
except ObjectDoesNotExist:
|
||||
wj_config = None
|
||||
if wj_config:
|
||||
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**wj_config.prompts_dict())
|
||||
accepted_fields.pop('extra_vars', None) # merge handled with other extra_vars later
|
||||
data.update(accepted_fields)
|
||||
# build ancestor artifacts, save them to node model for later
|
||||
aa_dict = {}
|
||||
is_root_node = True
|
||||
for parent_node in self.get_parent_nodes():
|
||||
is_root_node = False
|
||||
aa_dict.update(parent_node.ancestor_artifacts)
|
||||
if parent_node.job and hasattr(parent_node.job, 'artifacts'):
|
||||
aa_dict.update(parent_node.job.artifacts)
|
||||
if aa_dict:
|
||||
if aa_dict and not is_root_node:
|
||||
self.ancestor_artifacts = aa_dict
|
||||
self.save(update_fields=['ancestor_artifacts'])
|
||||
# process password list
|
||||
@@ -240,6 +252,12 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
data['extra_vars'] = extra_vars
|
||||
# ensure that unified jobs created by WorkflowJobs are marked
|
||||
data['_eager_fields'] = {'launch_type': 'workflow'}
|
||||
# Extra processing in the case that this is a slice job
|
||||
if 'job_slice' in self.ancestor_artifacts and is_root_node:
|
||||
data['_eager_fields']['allow_simultaneous'] = True
|
||||
data['_eager_fields']['job_slice_number'] = self.ancestor_artifacts['job_slice']
|
||||
data['_eager_fields']['job_slice_count'] = self.workflow_job.workflow_job_nodes.count()
|
||||
data['_prevent_slicing'] = True
|
||||
return data
|
||||
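# Concretely (illustrative values, not from the diff): for the second node of
# a three-node sliced workflow, ancestor_artifacts == {'job_slice': 2}, so the
# spawned job is created with _eager_fields of allow_simultaneous=True,
# job_slice_number=2 and job_slice_count=3, and _prevent_slicing=True keeps it
# from fanning out into yet another workflow.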
|
||||
|
||||
@@ -261,6 +279,12 @@ class WorkflowJobOptions(BaseModel):
|
||||
def workflow_nodes(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return set(f.name for f in WorkflowJobOptions._meta.fields) | set(
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels']
|
||||
)
|
||||
|
||||
def _create_workflow_nodes(self, old_node_list, user=None):
|
||||
node_links = {}
|
||||
for old_node in old_node_list:
|
||||
@@ -288,7 +312,7 @@ class WorkflowJobOptions(BaseModel):
|
||||
|
||||
def create_relaunch_workflow_job(self):
|
||||
new_workflow_job = self.copy_unified_job()
|
||||
if self.workflow_job_template is None:
|
||||
if self.unified_job_template_id is None:
|
||||
new_workflow_job.copy_nodes_from_original(original=self)
|
||||
return new_workflow_job
|
||||
|
||||
@@ -331,12 +355,6 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
|
||||
def _get_unified_job_class(cls):
|
||||
return WorkflowJob
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return set(f.name for f in WorkflowJobOptions._meta.fields) | set(
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels']
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _get_unified_jt_copy_names(cls):
|
||||
base_list = super(WorkflowJobTemplate, cls)._get_unified_jt_copy_names()
|
||||
@@ -433,13 +451,28 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
)
|
||||
job_template = models.ForeignKey(
|
||||
'JobTemplate',
|
||||
related_name='slice_workflow_jobs',
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
help_text=_("If automatically created for a sliced job run, the job template "
|
||||
"the workflow job was created from."),
|
||||
)
|
||||
is_sliced_job = models.BooleanField(
|
||||
default=False
|
||||
)
|
||||
|
||||
@property
|
||||
def workflow_nodes(self):
|
||||
return self.workflow_job_nodes
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
if self.job_template_id:
|
||||
# This is a workflow job which is a container for slice jobs
|
||||
return 'job_template'
|
||||
return 'workflow_job_template'
|
||||
|
||||
@classmethod
|
||||
@@ -482,8 +515,8 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
def preferred_instance_groups(self):
|
||||
return []
|
||||
|
||||
'''
|
||||
A WorkflowJob is a virtual job. It doesn't result in a celery task.
|
||||
'''
|
||||
def start_celery_task(self, opts, error_callback, success_callback, queue):
|
||||
return None
|
||||
@property
|
||||
def actually_running(self):
|
||||
# WorkflowJobs don't _actually_ run anything in the dispatcher, so
|
||||
# there's no point in asking the dispatcher if it knows about this task
|
||||
return self.status == 'running'
|
||||
|
||||
@@ -37,6 +37,7 @@ class SlackBackend(AWXBaseEmailBackend):
|
||||
if self.color:
|
||||
ret = connection.api_call("chat.postMessage",
|
||||
channel=r,
|
||||
as_user=True,
|
||||
attachments=[{
|
||||
"color": self.color,
|
||||
"text": m.subject
|
||||
|
||||
@@ -50,6 +50,7 @@ class WorkflowDAG(SimpleDAG):
|
||||
return [n['node_object'] for n in nodes_found]
|
||||
|
||||
def cancel_node_jobs(self):
|
||||
cancel_finished = True
|
||||
for n in self.nodes:
|
||||
obj = n['node_object']
|
||||
job = obj.job
|
||||
@@ -57,7 +58,9 @@ class WorkflowDAG(SimpleDAG):
|
||||
if not job:
|
||||
continue
|
||||
elif job.can_cancel:
|
||||
cancel_finished = False
|
||||
job.cancel()
|
||||
return cancel_finished
|
||||
|
||||
def is_workflow_done(self):
|
||||
root_nodes = self.get_root_nodes()
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# All Rights Reserved
|
||||
|
||||
# Python
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import timedelta
|
||||
import logging
|
||||
import uuid
|
||||
import json
|
||||
@@ -11,18 +11,13 @@ import random
|
||||
from sets import Set
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.db import transaction, connection, DatabaseError
|
||||
from django.db import transaction, connection
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.timezone import now as tz_now, utc
|
||||
from django.db.models import Q
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.utils.timezone import now as tz_now
|
||||
|
||||
# AWX
|
||||
from awx.main.models import (
|
||||
AdHocCommand,
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
@@ -30,21 +25,15 @@ from awx.main.models import (
|
||||
Project,
|
||||
ProjectUpdate,
|
||||
SystemJob,
|
||||
UnifiedJob,
|
||||
WorkflowJob,
|
||||
)
|
||||
from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.utils import get_type_for_model
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main.utils import decrypt_field
|
||||
|
||||
# Celery
|
||||
from celery import Celery
|
||||
from celery.app.control import Inspect
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
@@ -76,7 +65,8 @@ class TaskManager():
|
||||
inventory_updates_qs = InventoryUpdate.objects.filter(
|
||||
status__in=status_list).exclude(source='file').prefetch_related('inventory_source', 'instance_group')
|
||||
inventory_updates = [i for i in inventory_updates_qs]
|
||||
project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
# Notice the job_type='check': we want to prevent implicit project updates from blocking our jobs.
|
||||
project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list, job_type='check').prefetch_related('instance_group')]
|
||||
system_jobs = [s for s in SystemJob.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
ad_hoc_commands = [a for a in AdHocCommand.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
workflow_jobs = [w for w in WorkflowJob.objects.filter(status__in=status_list)]
|
||||
@@ -84,79 +74,6 @@ class TaskManager():
|
||||
key=lambda task: task.created)
|
||||
return all_tasks
|
||||
|
||||
'''
|
||||
Tasks that are running and SHOULD have a celery task.
|
||||
{
|
||||
'execution_node': [j1, j2,...],
|
||||
'execution_node': [j3],
|
||||
...
|
||||
}
|
||||
'''
|
||||
def get_running_tasks(self):
|
||||
execution_nodes = {}
|
||||
waiting_jobs = []
|
||||
now = tz_now()
|
||||
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
|
||||
jobs = UnifiedJob.objects.filter((Q(status='running') |
|
||||
Q(status='waiting', modified__lte=now - timedelta(seconds=60))) &
|
||||
~Q(polymorphic_ctype_id=workflow_ctype_id))
|
||||
for j in jobs:
|
||||
if j.execution_node:
|
||||
execution_nodes.setdefault(j.execution_node, []).append(j)
|
||||
else:
|
||||
waiting_jobs.append(j)
|
||||
return (execution_nodes, waiting_jobs)
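# A short usage sketch for the (execution_nodes, waiting_jobs) pair documented
# above; `tm` is assumed to be a TaskManager instance and the printing is
# purely illustrative.
execution_nodes, waiting_jobs = tm.get_running_tasks()
for hostname, jobs in execution_nodes.items():
    print('%s is running %d job(s)' % (hostname, len(jobs)))
print('%d waiting job(s) not yet assigned an execution node' % len(waiting_jobs))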
'''
|
||||
Tasks that are currently running in celery
|
||||
|
||||
Transform:
|
||||
{
|
||||
"celery@ec2-54-204-222-62.compute-1.amazonaws.com": [],
|
||||
"celery@ec2-54-163-144-168.compute-1.amazonaws.com": [{
|
||||
...
|
||||
"id": "5238466a-f8c7-43b3-9180-5b78e9da8304",
|
||||
...
|
||||
}, {
|
||||
...,
|
||||
}, ...]
|
||||
}
|
||||
|
||||
to:
|
||||
{
|
||||
"ec2-54-204-222-62.compute-1.amazonaws.com": [
|
||||
"5238466a-f8c7-43b3-9180-5b78e9da8304",
|
||||
"5238466a-f8c7-43b3-9180-5b78e9da8306",
|
||||
...
|
||||
]
|
||||
}
|
||||
'''
|
||||
def get_active_tasks(self):
|
||||
if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'):
|
||||
app = Celery('awx')
|
||||
app.config_from_object('django.conf:settings')
|
||||
inspector = Inspect(app=app)
|
||||
active_task_queues = inspector.active()
|
||||
else:
|
||||
logger.warn("Ignoring celery task inspector")
|
||||
active_task_queues = None
|
||||
|
||||
queues = None
|
||||
|
||||
if active_task_queues is not None:
|
||||
queues = {}
|
||||
for queue in active_task_queues:
|
||||
active_tasks = set()
|
||||
map(lambda at: active_tasks.add(at['id']), active_task_queues[queue])
|
||||
|
||||
# celery worker name is of the form celery@myhost.com
|
||||
queue_name = queue.split('@')
|
||||
queue_name = queue_name[1 if len(queue_name) > 1 else 0]
|
||||
queues[queue_name] = active_tasks
|
||||
else:
|
||||
return (None, None)
|
||||
|
||||
return (active_task_queues, queues)
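# The queue-name transformation described in the docstring above, pulled out
# as a self-contained helper for clarity. `inspector_output` mirrors the shape
# returned by celery's Inspect().active(): worker name -> list of task dicts.
def active_task_ids_by_host(inspector_output):
    queues = {}
    for worker_name, tasks in inspector_output.items():
        # celery worker names look like "celery@myhost.com"
        hostname = worker_name.split('@', 1)[-1]
        queues[hostname] = set(t['id'] for t in tasks)
    return queues

sample = {'celery@ec2-54-204-222-62.compute-1.amazonaws.com':
          [{'id': '5238466a-f8c7-43b3-9180-5b78e9da8304'}]}
assert active_task_ids_by_host(sample) == {
    'ec2-54-204-222-62.compute-1.amazonaws.com': set(['5238466a-f8c7-43b3-9180-5b78e9da8304'])}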
def get_latest_project_update_tasks(self, all_sorted_tasks):
|
||||
project_ids = Set()
|
||||
@@ -186,8 +103,15 @@ class TaskManager():
|
||||
|
||||
def spawn_workflow_graph_jobs(self, workflow_jobs):
|
||||
for workflow_job in workflow_jobs:
|
||||
if workflow_job.cancel_flag:
|
||||
logger.debug('Not spawning jobs for %s because it is pending cancelation.', workflow_job.log_format)
|
||||
continue
|
||||
dag = WorkflowDAG(workflow_job)
|
||||
spawn_nodes = dag.bfs_nodes_to_run()
|
||||
if spawn_nodes:
|
||||
logger.info('Spawning jobs for %s', workflow_job.log_format)
|
||||
else:
|
||||
logger.debug('No nodes to spawn for %s', workflow_job.log_format)
|
||||
for spawn_node in spawn_nodes:
|
||||
if spawn_node.unified_job_template is None:
|
||||
continue
|
||||
@@ -195,8 +119,13 @@ class TaskManager():
|
||||
job = spawn_node.unified_job_template.create_unified_job(**kv)
|
||||
spawn_node.job = job
|
||||
spawn_node.save()
|
||||
logger.info('Spawned %s in %s for node %s', job.log_format, workflow_job.log_format, spawn_node.pk)
|
||||
if job._resources_sufficient_for_launch():
|
||||
can_start = job.signal_start()
|
||||
if workflow_job.start_args:
|
||||
start_args = json.loads(decrypt_field(workflow_job, 'start_args'))
|
||||
else:
|
||||
start_args = {}
|
||||
can_start = job.signal_start(**start_args)
|
||||
if not can_start:
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was not in the right state or required manual credentials")
@@ -212,23 +141,30 @@ class TaskManager():
|
||||
# TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
|
||||
#emit_websocket_notification('/socket.io/jobs', '', dict(id=))
|
||||
|
||||
# See comment in tasks.py::RunWorkflowJob::run()
|
||||
def process_finished_workflow_jobs(self, workflow_jobs):
|
||||
result = []
|
||||
for workflow_job in workflow_jobs:
|
||||
dag = WorkflowDAG(workflow_job)
|
||||
if workflow_job.cancel_flag:
|
||||
workflow_job.status = 'canceled'
|
||||
workflow_job.save()
|
||||
dag.cancel_node_jobs()
|
||||
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
|
||||
logger.debug('Canceling spawned jobs of %s due to cancel flag.', workflow_job.log_format)
|
||||
cancel_finished = dag.cancel_node_jobs()
|
||||
if cancel_finished:
|
||||
logger.info('Marking %s as canceled, all spawned jobs have concluded.', workflow_job.log_format)
|
||||
workflow_job.status = 'canceled'
|
||||
workflow_job.start_args = '' # blank field to remove encrypted passwords
|
||||
workflow_job.save(update_fields=['status', 'start_args'])
|
||||
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
|
||||
else:
|
||||
is_done, has_failed = dag.is_workflow_done()
|
||||
if not is_done:
|
||||
continue
|
||||
logger.info('Marking %s as %s.', workflow_job.log_format, 'failed' if has_failed else 'successful')
|
||||
result.append(workflow_job.id)
|
||||
workflow_job.status = 'failed' if has_failed else 'successful'
|
||||
workflow_job.save()
|
||||
new_status = 'failed' if has_failed else 'successful'
|
||||
logger.debug(six.text_type("Transitioning {} to {} status.").format(workflow_job.log_format, new_status))
|
||||
workflow_job.status = new_status
|
||||
workflow_job.start_args = '' # blank field to remove encrypted passwords
|
||||
workflow_job.save(update_fields=['status', 'start_args'])
|
||||
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
|
||||
return result
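# The cancellation handling above is deliberately two-phase: ask the DAG to
# cancel spawned jobs first, and only mark the workflow itself canceled once
# cancel_node_jobs() reports that every spawned job has concluded. A condensed
# restatement of that decision (websocket emission and logging omitted):
def conclude_canceled_workflow(workflow_job, dag):
    if not dag.cancel_node_jobs():
        return False                    # re-check on the next scheduler cycle
    workflow_job.status = 'canceled'
    workflow_job.start_args = ''        # blank field to remove encrypted passwords
    workflow_job.save(update_fields=['status', 'start_args'])
    return True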
@@ -255,9 +191,6 @@ class TaskManager():
|
||||
rampart_group.name, task.log_format))
|
||||
return
|
||||
|
||||
error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies)
|
||||
success_handler = handle_work_success.s(task_actual=task_actual)
|
||||
|
||||
task.status = 'waiting'
|
||||
|
||||
(start_status, opts) = task.pre_start()
|
||||
@@ -299,11 +232,23 @@ class TaskManager():
|
||||
|
||||
def post_commit():
|
||||
task.websocket_emit_status(task.status)
|
||||
if task.status != 'failed':
|
||||
task.start_celery_task(opts,
|
||||
error_callback=error_handler,
|
||||
success_callback=success_handler,
|
||||
queue=task.get_celery_queue_name())
|
||||
if task.status != 'failed' and type(task) is not WorkflowJob:
|
||||
task_cls = task._get_task_class()
|
||||
task_cls.apply_async(
|
||||
[task.pk],
|
||||
opts,
|
||||
queue=task.get_queue_name(),
|
||||
uuid=task.celery_task_id,
|
||||
callbacks=[{
|
||||
'task': handle_work_success.name,
|
||||
'kwargs': {'task_actual': task_actual}
|
||||
}],
|
||||
errbacks=[{
|
||||
'task': handle_work_error.name,
|
||||
'args': [task.celery_task_id],
|
||||
'kwargs': {'subtasks': [task_actual] + dependencies}
|
||||
}],
|
||||
)
|
||||
|
||||
connection.on_commit(post_commit)
|
||||
|
||||
@@ -482,7 +427,7 @@ class TaskManager():
|
||||
logger.debug(six.text_type("Dependent {} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))
|
||||
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
running_workflow_templates = set([wf.workflow_job_template_id for wf in self.get_running_workflow_jobs()])
|
||||
running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
|
||||
for task in pending_tasks:
|
||||
self.process_dependencies(task, self.generate_dependencies(task))
|
||||
if self.is_job_blocked(task):
|
||||
@@ -492,12 +437,12 @@ class TaskManager():
|
||||
found_acceptable_queue = False
|
||||
idle_instance_that_fits = None
|
||||
if isinstance(task, WorkflowJob):
|
||||
if task.workflow_job_template_id in running_workflow_templates:
|
||||
if task.unified_job_template_id in running_workflow_templates:
|
||||
if not task.allow_simultaneous:
|
||||
logger.debug(six.text_type("{} is blocked from running, workflow already running").format(task.log_format))
|
||||
continue
|
||||
else:
|
||||
running_workflow_templates.add(task.workflow_job_template_id)
|
||||
running_workflow_templates.add(task.unified_job_template_id)
|
||||
self.start_task(task, None, task.get_jobs_fail_chain(), None)
|
||||
continue
|
||||
for rampart_group in preferred_instance_groups:
|
||||
@@ -528,105 +473,6 @@ class TaskManager():
|
||||
if not found_acceptable_queue:
|
||||
logger.debug(six.text_type("{} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))
|
||||
|
||||
def fail_jobs_if_not_in_celery(self, node_jobs, active_tasks, celery_task_start_time,
|
||||
isolated=False):
|
||||
for task in node_jobs:
|
||||
if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
|
||||
if isinstance(task, WorkflowJob):
|
||||
continue
|
||||
if task.modified > celery_task_start_time:
|
||||
continue
|
||||
new_status = 'failed'
|
||||
if isolated:
|
||||
new_status = 'error'
|
||||
task.status = new_status
|
||||
task.start_args = '' # blank field to remove encrypted passwords
|
||||
if isolated:
|
||||
# TODO: cancel and reap artifacts of lost jobs from heartbeat
|
||||
task.job_explanation += ' '.join((
|
||||
'Task was marked as running in Tower but its ',
|
||||
'controller management daemon was not present in',
|
||||
'the job queue, so it has been marked as failed.',
|
||||
'Task may still be running, but contactability is unknown.'
|
||||
))
|
||||
else:
|
||||
task.job_explanation += ' '.join((
|
||||
'Task was marked as running in Tower but was not present in',
|
||||
'the job queue, so it has been marked as failed.',
|
||||
))
|
||||
try:
|
||||
task.save(update_fields=['status', 'start_args', 'job_explanation'])
|
||||
except DatabaseError:
|
||||
logger.error("Task {} DB error in marking failed. Job possibly deleted.".format(task.log_format))
|
||||
continue
|
||||
if hasattr(task, 'send_notification_templates'):
|
||||
task.send_notification_templates('failed')
|
||||
task.websocket_emit_status(new_status)
|
||||
logger.error("{}Task {} has no record in celery. Marking as failed".format(
|
||||
'Isolated ' if isolated else '', task.log_format))
|
||||
|
||||
def cleanup_inconsistent_celery_tasks(self):
|
||||
'''
|
||||
Rectify tower db <-> celery inconsistent view of jobs state
|
||||
'''
|
||||
last_cleanup = cache.get('last_celery_task_cleanup') or datetime.min.replace(tzinfo=utc)
|
||||
if (tz_now() - last_cleanup).seconds < settings.AWX_INCONSISTENT_TASK_INTERVAL:
|
||||
return
|
||||
|
||||
logger.debug("Failing inconsistent running jobs.")
|
||||
celery_task_start_time = tz_now()
|
||||
active_task_queues, active_queues = self.get_active_tasks()
|
||||
cache.set('last_celery_task_cleanup', tz_now())
|
||||
|
||||
if active_queues is None:
|
||||
logger.error('Failed to retrieve active tasks from celery')
|
||||
return None
|
||||
|
||||
'''
|
||||
Only consider failing tasks on instances for which we obtained a task
|
||||
list from celery.
|
||||
'''
|
||||
running_tasks, waiting_tasks = self.get_running_tasks()
|
||||
all_celery_task_ids = []
|
||||
for node, node_jobs in active_queues.iteritems():
|
||||
all_celery_task_ids.extend(node_jobs)
|
||||
|
||||
self.fail_jobs_if_not_in_celery(waiting_tasks, all_celery_task_ids, celery_task_start_time)
|
||||
|
||||
for node, node_jobs in running_tasks.iteritems():
|
||||
isolated = False
|
||||
if node in active_queues:
|
||||
active_tasks = active_queues[node]
|
||||
else:
|
||||
'''
|
||||
Node task list not found in celery. We may branch into cases:
|
||||
- instance is unknown to tower, system is improperly configured
|
||||
- instance is reported as down, then fail all jobs on the node
|
||||
- instance is an isolated node, then check running tasks
|
||||
among all allowed controller nodes for management process
|
||||
- valid healthy instance not included in celery task list
|
||||
probably a netsplit case, leave it alone
|
||||
'''
|
||||
instance = Instance.objects.filter(hostname=node).first()
|
||||
|
||||
if instance is None:
|
||||
logger.error("Execution node Instance {} not found in database. "
|
||||
"The node is currently executing jobs {}".format(
|
||||
node, [j.log_format for j in node_jobs]))
|
||||
active_tasks = []
|
||||
elif instance.capacity == 0:
|
||||
active_tasks = []
|
||||
elif instance.rampart_groups.filter(controller__isnull=False).exists():
|
||||
active_tasks = all_celery_task_ids
|
||||
isolated = True
|
||||
else:
|
||||
continue
|
||||
|
||||
self.fail_jobs_if_not_in_celery(
|
||||
node_jobs, active_tasks, celery_task_start_time,
|
||||
isolated=isolated
|
||||
)
|
||||
|
||||
def calculate_capacity_consumed(self, tasks):
|
||||
self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)
|
||||
|
||||
@@ -672,21 +518,28 @@ class TaskManager():
|
||||
running_workflow_tasks = self.get_running_workflow_jobs()
|
||||
finished_wfjs = self.process_finished_workflow_jobs(running_workflow_tasks)
|
||||
|
||||
previously_running_workflow_tasks = running_workflow_tasks
|
||||
running_workflow_tasks = []
|
||||
for workflow_job in previously_running_workflow_tasks:
|
||||
if workflow_job.status == 'running':
|
||||
running_workflow_tasks.append(workflow_job)
|
||||
else:
|
||||
logger.debug('Removed %s from job spawning consideration.', workflow_job.log_format)
|
||||
|
||||
self.spawn_workflow_graph_jobs(running_workflow_tasks)
|
||||
|
||||
self.process_tasks(all_sorted_tasks)
|
||||
return finished_wfjs
|
||||
|
||||
def schedule(self):
|
||||
with transaction.atomic():
|
||||
# Lock
|
||||
with advisory_lock('task_manager_lock', wait=False) as acquired:
|
||||
# Lock
|
||||
with advisory_lock('task_manager_lock', wait=False) as acquired:
|
||||
with transaction.atomic():
|
||||
if acquired is False:
|
||||
logger.debug("Not running scheduler, another task holds lock")
|
||||
return
|
||||
logger.debug("Starting Scheduler")
|
||||
|
||||
self.cleanup_inconsistent_celery_tasks()
|
||||
finished_wfjs = self._schedule()
|
||||
|
||||
# Operations whose queries rely on modifications made during the atomic scheduling session
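# The reordering earlier in this hunk takes the pg advisory lock *before*
# opening the atomic block, so a second scheduler instance bails out without
# ever holding an open transaction. A minimal sketch of that pattern using the
# same helpers imported in this file; `fn` is whatever work needs exclusivity.
from django.db import transaction
from awx.main.utils.pglock import advisory_lock

def run_exclusively(lock_name, fn):
    with advisory_lock(lock_name, wait=False) as acquired:
        if acquired is False:
            return None        # another process holds the lock; skip this cycle
        with transaction.atomic():
            return fn()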
@@ -2,30 +2,24 @@
|
||||
# Python
|
||||
import logging
|
||||
|
||||
# Celery
|
||||
from celery import shared_task
|
||||
|
||||
# AWX
|
||||
from awx.main.scheduler import TaskManager
|
||||
from awx.main.dispatch.publish import task
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
# TODO: move logic to UnifiedJob model and use bind=True feature of celery.
|
||||
# Would we need the request loop then? I think so. Even if we get the in-memory
|
||||
# updated model, the call to schedule() may get stale data.
|
||||
|
||||
|
||||
@shared_task()
|
||||
@task()
|
||||
def run_job_launch(job_id):
|
||||
TaskManager().schedule()
|
||||
|
||||
|
||||
@shared_task()
|
||||
@task()
|
||||
def run_job_complete(job_id):
|
||||
TaskManager().schedule()
|
||||
|
||||
|
||||
@shared_task()
|
||||
@task()
|
||||
def run_task_manager():
|
||||
logger.debug("Running Tower task manager.")
|
||||
TaskManager().schedule()
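# The module above swaps celery's @shared_task for the dispatcher's @task
# decorator. A hedged sketch of the new forms, limited to decorator options
# that actually appear in this changeset; the function bodies are placeholders.
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.publish import task

@task()                                    # default queue, as in run_task_manager above
def example_default_queue_task():
    pass

@task(queue=get_local_queuename)           # pin work to this node's own queue
def example_local_node_task():
    pass

@task(queue='tower_broadcast_all', exchange_type='fanout')   # fan out to every node
def example_broadcast_task():
    pass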
@@ -13,12 +13,11 @@ import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import six
|
||||
import stat
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
import traceback
|
||||
import six
|
||||
import urlparse
|
||||
from distutils.version import LooseVersion as Version
|
||||
import yaml
|
||||
@@ -28,12 +27,6 @@ try:
|
||||
except Exception:
|
||||
psutil = None
|
||||
|
||||
# Celery
|
||||
from kombu import Queue, Exchange
|
||||
from kombu.common import Broadcast
|
||||
from celery import Task, shared_task
|
||||
from celery.signals import celeryd_init, worker_shutdown, celeryd_after_setup
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import transaction, DatabaseError, IntegrityError
|
||||
@@ -58,10 +51,12 @@ from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.exceptions import AwxTaskError
|
||||
from awx.main.queue import CallbackQueueDispatcher
|
||||
from awx.main.expect import run, isolated_manager
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename, reaper
|
||||
from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url,
|
||||
check_proot_installed, build_proot_temp_dir, get_licenser,
|
||||
wrap_args_with_proot, OutputEventFilter, OutputVerboseFilter, ignore_inventory_computed_fields,
|
||||
ignore_inventory_group_removal, get_type_for_model, extract_ansible_vars)
|
||||
ignore_inventory_group_removal, extract_ansible_vars)
|
||||
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
|
||||
from awx.main.utils.reload import stop_local_services
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
@@ -87,27 +82,7 @@ Try upgrading OpenSSH or providing your private key in an different format. \
|
||||
logger = logging.getLogger('awx.main.tasks')
|
||||
|
||||
|
||||
def log_celery_failure(self, exc, task_id, args, kwargs, einfo):
|
||||
try:
|
||||
if getattr(exc, 'is_awx_task_error', False):
|
||||
# Error caused by user / tracked in job output
|
||||
logger.warning(six.text_type("{}").format(exc))
|
||||
elif isinstance(self, BaseTask):
|
||||
logger.exception(six.text_type(
|
||||
'{!s} {!s} execution encountered exception.')
|
||||
.format(get_type_for_model(self.model), args[0]))
|
||||
else:
|
||||
logger.exception(six.text_type('Task {} encountered exception.').format(self.name), exc_info=exc)
|
||||
except Exception:
|
||||
# It's fairly critical that this code _not_ raise exceptions on logging
|
||||
# If you configure external logging in a way that _it_ fails, there's
|
||||
# not a lot we can do here; sys.stderr.write is a final hail mary
|
||||
_, _, tb = sys.exc_info()
|
||||
traceback.print_tb(tb)
|
||||
|
||||
|
||||
@celeryd_init.connect
|
||||
def celery_startup(conf=None, **kwargs):
|
||||
def dispatch_startup():
|
||||
startup_logger = logging.getLogger('awx.main.tasks')
|
||||
startup_logger.info("Syncing Schedules")
|
||||
for sch in Schedule.objects.all():
|
||||
@@ -119,37 +94,55 @@ def celery_startup(conf=None, **kwargs):
|
||||
except Exception:
|
||||
logger.exception(six.text_type("Failed to rebuild schedule {}.").format(sch))
|
||||
|
||||
# set the queues we want to bind to dynamically at startup
|
||||
queues = []
|
||||
me = Instance.objects.me()
|
||||
for q in [me.hostname] + settings.AWX_CELERY_QUEUES_STATIC:
|
||||
q = q.encode('utf-8')
|
||||
queues.append(Queue(q, Exchange(q), routing_key=q))
|
||||
for q in settings.AWX_CELERY_BCAST_QUEUES_STATIC:
|
||||
queues.append(Broadcast(q.encode('utf-8')))
|
||||
conf.CELERY_QUEUES = list(set(queues))
|
||||
|
||||
# Expedite the first heartbeat run so a node comes online quickly.
|
||||
cluster_node_heartbeat.apply([])
|
||||
#
|
||||
# When the dispatcher starts, if the instance cannot be found in the database,
|
||||
# automatically register it. This is mostly useful for openshift-based
|
||||
# deployments where:
|
||||
#
|
||||
# 2 Instances come online
|
||||
# Instance B encounters a network blip, Instance A notices, and
|
||||
# deprovisions it
|
||||
# Instance B's connectivity is restored, the dispatcher starts, and it
|
||||
# re-registers itself
|
||||
#
|
||||
# In traditional container-less deployments, instances don't get
|
||||
# deprovisioned when they miss their heartbeat, so this code is mostly a
|
||||
# no-op.
|
||||
#
|
||||
apply_cluster_membership_policies()
|
||||
cluster_node_heartbeat()
|
||||
if Instance.objects.me().is_controller():
|
||||
awx_isolated_heartbeat()
|
||||
|
||||
|
||||
@worker_shutdown.connect
|
||||
def inform_cluster_of_shutdown(*args, **kwargs):
|
||||
def inform_cluster_of_shutdown():
|
||||
try:
|
||||
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
|
||||
this_inst.capacity = 0 # No thank you to new jobs while shut down
|
||||
this_inst.save(update_fields=['capacity', 'modified'])
|
||||
try:
|
||||
reaper.reap(this_inst)
|
||||
except Exception:
|
||||
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
|
||||
logger.warning(six.text_type('Normal shutdown signal for instance {}, '
|
||||
'removed self from capacity pool.').format(this_inst.hostname))
|
||||
except Exception:
|
||||
logger.exception('Encountered problem with normal shutdown signal.')
|
||||
|
||||
|
||||
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
|
||||
def apply_cluster_membership_policies(self):
|
||||
@task()
|
||||
def apply_cluster_membership_policies():
|
||||
started_waiting = time.time()
|
||||
with advisory_lock('cluster_policy_lock', wait=True):
|
||||
lock_time = time.time() - started_waiting
|
||||
if lock_time > 1.0:
|
||||
to_log = logger.info
|
||||
else:
|
||||
to_log = logger.debug
|
||||
to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
|
||||
started_compute = time.time()
|
||||
all_instances = list(Instance.objects.order_by('id'))
|
||||
all_groups = list(InstanceGroup.objects.all())
|
||||
all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
|
||||
iso_hostnames = set([])
|
||||
for ig in all_groups:
|
||||
if ig.controller_id is not None:
|
||||
@@ -159,28 +152,32 @@ def apply_cluster_membership_policies(self):
|
||||
total_instances = len(considered_instances)
|
||||
actual_groups = []
|
||||
actual_instances = []
|
||||
Group = namedtuple('Group', ['obj', 'instances'])
|
||||
Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
|
||||
Node = namedtuple('Instance', ['obj', 'groups'])
|
||||
|
||||
# Process policy instance list first, these will represent manually managed memberships
|
||||
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
|
||||
for ig in all_groups:
|
||||
group_actual = Group(obj=ig, instances=[])
|
||||
group_actual = Group(obj=ig, instances=[], prior_instances=[
|
||||
instance.pk for instance in ig.instances.all() # obtained in prefetch
|
||||
])
|
||||
for hostname in ig.policy_instance_list:
|
||||
if hostname not in instance_hostnames_map:
|
||||
logger.info(six.text_type("Unknown instance {} in {} policy list").format(hostname, ig.name))
|
||||
continue
|
||||
inst = instance_hostnames_map[hostname]
|
||||
logger.info(six.text_type("Policy List, adding Instance {} to Group {}").format(inst.hostname, ig.name))
|
||||
group_actual.instances.append(inst.id)
|
||||
# NOTE: arguable behavior: policy-list-group is not added to
|
||||
# instance's group count for consideration in minimum-policy rules
|
||||
if group_actual.instances:
|
||||
logger.info(six.text_type("Policy List, adding Instances {} to Group {}").format(group_actual.instances, ig.name))
|
||||
|
||||
if ig.controller_id is None:
|
||||
actual_groups.append(group_actual)
|
||||
else:
|
||||
# For isolated groups, _only_ apply the policy_instance_list
|
||||
# do not add to in-memory list, so minimum rules not applied
|
||||
logger.info('Committing instances {} to isolated group {}'.format(group_actual.instances, ig.name))
|
||||
logger.info('Committing instances to isolated group {}'.format(ig.name))
|
||||
ig.instances.set(group_actual.instances)
|
||||
|
||||
# Process Instance minimum policies next, since it represents a concrete lower bound to the
|
||||
@@ -189,6 +186,7 @@ def apply_cluster_membership_policies(self):
|
||||
logger.info("Total non-isolated instances:{} available for policy: {}".format(
|
||||
total_instances, len(actual_instances)))
|
||||
for g in sorted(actual_groups, cmp=lambda x,y: len(x.instances) - len(y.instances)):
|
||||
policy_min_added = []
|
||||
for i in sorted(actual_instances, cmp=lambda x,y: len(x.groups) - len(y.groups)):
|
||||
if len(g.instances) >= g.obj.policy_instance_minimum:
|
||||
break
|
||||
@@ -196,12 +194,15 @@ def apply_cluster_membership_policies(self):
|
||||
# If the instance is already _in_ the group, it was
|
||||
# applied earlier via the policy list
|
||||
continue
|
||||
logger.info(six.text_type("Policy minimum, adding Instance {} to Group {}").format(i.obj.hostname, g.obj.name))
|
||||
g.instances.append(i.obj.id)
|
||||
i.groups.append(g.obj.id)
|
||||
policy_min_added.append(i.obj.id)
|
||||
if policy_min_added:
|
||||
logger.info(six.text_type("Policy minimum, adding Instances {} to Group {}").format(policy_min_added, g.obj.name))
|
||||
|
||||
# Finally, process instance policy percentages
|
||||
for g in sorted(actual_groups, cmp=lambda x,y: len(x.instances) - len(y.instances)):
|
||||
policy_per_added = []
|
||||
for i in sorted(actual_instances, cmp=lambda x,y: len(x.groups) - len(y.groups)):
|
||||
if i.obj.id in g.instances:
|
||||
# If the instance is already _in_ the group, it was
|
||||
@@ -209,59 +210,66 @@ def apply_cluster_membership_policies(self):
|
||||
continue
|
||||
if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
|
||||
break
|
||||
logger.info(six.text_type("Policy percentage, adding Instance {} to Group {}").format(i.obj.hostname, g.obj.name))
|
||||
g.instances.append(i.obj.id)
|
||||
i.groups.append(g.obj.id)
|
||||
policy_per_added.append(i.obj.id)
|
||||
if policy_per_added:
|
||||
logger.info(six.text_type("Policy percentage, adding Instances {} to Group {}").format(policy_per_added, g.obj.name))
|
||||
|
||||
# Determine if any changes need to be made
|
||||
needs_change = False
|
||||
for g in actual_groups:
|
||||
if set(g.instances) != set(g.prior_instances):
|
||||
needs_change = True
|
||||
break
|
||||
if not needs_change:
|
||||
logger.info('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
|
||||
return
|
||||
|
||||
# On a differential basis, apply instances to non-isolated groups
|
||||
with transaction.atomic():
|
||||
for g in actual_groups:
|
||||
logger.info('Committing instances {} to group {}'.format(g.instances, g.obj.name))
|
||||
g.obj.instances.set(g.instances)
|
||||
instances_to_add = set(g.instances) - set(g.prior_instances)
|
||||
instances_to_remove = set(g.prior_instances) - set(g.instances)
|
||||
if instances_to_add:
|
||||
logger.info('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
|
||||
g.obj.instances.add(*instances_to_add)
|
||||
if instances_to_remove:
|
||||
logger.info('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
|
||||
g.obj.instances.remove(*instances_to_remove)
|
||||
logger.info('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
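# The differential update above reduces to plain set arithmetic between the
# prefetched prior membership and the newly computed one. Self-contained
# restatement (integers stand in for Instance primary keys):
def membership_delta(prior_instances, new_instances):
    prior, new = set(prior_instances), set(new_instances)
    return sorted(new - prior), sorted(prior - new)   # (to_add, to_remove)

to_add, to_remove = membership_delta([1, 2, 3], [2, 3, 4])
assert (to_add, to_remove) == ([4], [1])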
@shared_task(exchange='tower_broadcast_all', bind=True)
|
||||
def handle_setting_changes(self, setting_keys):
|
||||
@task(queue='tower_broadcast_all', exchange_type='fanout')
|
||||
def handle_setting_changes(setting_keys):
|
||||
orig_len = len(setting_keys)
|
||||
for i in range(orig_len):
|
||||
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
|
||||
setting_keys.append(dependent_key)
|
||||
logger.warn('Processing cache changes, task args: {0.args!r} kwargs: {0.kwargs!r}'.format(
|
||||
self.request))
|
||||
cache_keys = set(setting_keys)
|
||||
logger.debug('cache delete_many(%r)', cache_keys)
|
||||
cache.delete_many(cache_keys)
|
||||
|
||||
|
||||
@celeryd_after_setup.connect
|
||||
def auto_register_ha_instance(sender, instance, **kwargs):
|
||||
#
|
||||
# When celeryd starts, if the instance cannot be found in the database,
|
||||
# automatically register it. This is mostly useful for openshift-based
|
||||
# deployments where:
|
||||
#
|
||||
# 2 Instances come online
|
||||
# Instance B encounters a network blip, Instance A notices, and
|
||||
# deprovisions it
|
||||
# Instance B's connectivity is restored, celeryd starts, and it
|
||||
# re-registers itself
|
||||
#
|
||||
# In traditional container-less deployments, instances don't get
|
||||
# deprovisioned when they miss their heartbeat, so this code is mostly a
|
||||
# no-op.
|
||||
#
|
||||
if instance.hostname != 'celery@{}'.format(settings.CLUSTER_HOST_ID):
|
||||
error = six.text_type('celery -n {} does not match settings.CLUSTER_HOST_ID={}').format(
|
||||
instance.hostname, settings.CLUSTER_HOST_ID
|
||||
)
|
||||
logger.error(error)
|
||||
raise RuntimeError(error)
|
||||
(changed, tower_instance) = Instance.objects.get_or_register()
|
||||
if changed:
|
||||
logger.info(six.text_type("Registered tower node '{}'").format(tower_instance.hostname))
|
||||
@task(queue='tower_broadcast_all', exchange_type='fanout')
|
||||
def delete_project_files(project_path):
|
||||
# TODO: possibly implement some retry logic
|
||||
lock_file = project_path + '.lock'
|
||||
if os.path.exists(project_path):
|
||||
try:
|
||||
shutil.rmtree(project_path)
|
||||
logger.info(six.text_type('Success removing project files {}').format(project_path))
|
||||
except Exception:
|
||||
logger.exception(six.text_type('Could not remove project directory {}').format(project_path))
|
||||
if os.path.exists(lock_file):
|
||||
try:
|
||||
os.remove(lock_file)
|
||||
logger.debug(six.text_type('Success removing {}').format(lock_file))
|
||||
except Exception:
|
||||
logger.exception(six.text_type('Could not remove lock file {}').format(lock_file))
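# Hypothetical usage of the broadcast task above; the project path is invented
# for illustration. Because the task is published with
# exchange_type='fanout', every node removes its local checkout and .lock file.
delete_project_files.delay('/var/lib/awx/projects/_42__example_project')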
@shared_task(queue=settings.CELERY_DEFAULT_QUEUE)
|
||||
@task()
|
||||
def send_notifications(notification_list, job_id=None):
|
||||
if not isinstance(notification_list, list):
|
||||
raise TypeError("notification_list should be of type list")
|
||||
@@ -290,8 +298,8 @@ def send_notifications(notification_list, job_id=None):
|
||||
logger.exception(six.text_type('Error saving notification {} result.').format(notification.id))
|
||||
|
||||
|
||||
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
|
||||
def run_administrative_checks(self):
|
||||
@task()
|
||||
def run_administrative_checks():
|
||||
logger.warn("Running administrative checks.")
|
||||
if not settings.TOWER_ADMIN_ALERTS:
|
||||
return
|
||||
@@ -312,8 +320,8 @@ def run_administrative_checks(self):
|
||||
fail_silently=True)
|
||||
|
||||
|
||||
@shared_task(bind=True)
|
||||
def purge_old_stdout_files(self):
|
||||
@task(queue=get_local_queuename)
|
||||
def purge_old_stdout_files():
|
||||
nowtime = time.time()
|
||||
for f in os.listdir(settings.JOBOUTPUT_ROOT):
|
||||
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT,f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
|
||||
@@ -321,8 +329,8 @@ def purge_old_stdout_files(self):
|
||||
logger.info(six.text_type("Removing {}").format(os.path.join(settings.JOBOUTPUT_ROOT,f)))
|
||||
|
||||
|
||||
@shared_task(bind=True)
|
||||
def cluster_node_heartbeat(self):
|
||||
@task(queue=get_local_queuename)
|
||||
def cluster_node_heartbeat():
|
||||
logger.debug("Cluster node heartbeat task.")
|
||||
nowtime = now()
|
||||
instance_list = list(Instance.objects.all_non_isolated())
|
||||
@@ -365,9 +373,13 @@ def cluster_node_heartbeat(self):
|
||||
this_inst.version))
|
||||
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
|
||||
# The heartbeat task will reset the capacity to the system capacity after upgrade.
|
||||
stop_local_services(['uwsgi', 'celery', 'beat', 'callback'], communicate=False)
|
||||
stop_local_services(communicate=False)
|
||||
raise RuntimeError("Shutting down.")
|
||||
for other_inst in lost_instances:
|
||||
try:
|
||||
reaper.reap(other_inst)
|
||||
except Exception:
|
||||
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
|
||||
try:
|
||||
# Capacity could already be 0 because:
|
||||
# * It's a new node and it never had a heartbeat
|
||||
@@ -392,8 +404,8 @@ def cluster_node_heartbeat(self):
|
||||
logger.exception(six.text_type('Error marking {} as lost').format(other_inst.hostname))
|
||||
|
||||
|
||||
@shared_task(bind=True)
|
||||
def awx_isolated_heartbeat(self):
|
||||
@task(queue=get_local_queuename)
|
||||
def awx_isolated_heartbeat():
|
||||
local_hostname = settings.CLUSTER_HOST_ID
|
||||
logger.debug("Controlling node checking for any isolated management tasks.")
|
||||
poll_interval = settings.AWX_ISOLATED_PERIODIC_CHECK
|
||||
@@ -420,8 +432,8 @@ def awx_isolated_heartbeat(self):
|
||||
isolated_manager.IsolatedManager.health_check(isolated_instance_qs, awx_application_version)
|
||||
|
||||
|
||||
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
|
||||
def awx_periodic_scheduler(self):
|
||||
@task()
|
||||
def awx_periodic_scheduler():
|
||||
run_now = now()
|
||||
state = TowerScheduleState.get_solo()
|
||||
last_run = state.schedule_last_run
|
||||
@@ -471,8 +483,8 @@ def awx_periodic_scheduler(self):
|
||||
state.save()
|
||||
|
||||
|
||||
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
|
||||
def handle_work_success(self, result, task_actual):
|
||||
@task()
|
||||
def handle_work_success(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
except ObjectDoesNotExist:
|
||||
@@ -485,7 +497,7 @@ def handle_work_success(self, result, task_actual):
|
||||
run_job_complete.delay(instance.id)
|
||||
|
||||
|
||||
@shared_task(queue=settings.CELERY_DEFAULT_QUEUE)
|
||||
@task()
|
||||
def handle_work_error(task_id, *args, **kwargs):
|
||||
subtasks = kwargs.get('subtasks', None)
|
||||
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
|
||||
@@ -526,7 +538,7 @@ def handle_work_error(task_id, *args, **kwargs):
|
||||
pass
|
||||
|
||||
|
||||
@shared_task(queue=settings.CELERY_DEFAULT_QUEUE)
|
||||
@task()
|
||||
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
|
||||
'''
|
||||
Signal handler and wrapper around inventory.update_computed_fields to
|
||||
@@ -546,7 +558,7 @@ def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
|
||||
raise
|
||||
|
||||
|
||||
@shared_task(queue=settings.CELERY_DEFAULT_QUEUE)
|
||||
@task()
|
||||
def update_host_smart_inventory_memberships():
|
||||
try:
|
||||
with transaction.atomic():
|
||||
@@ -571,8 +583,8 @@ def update_host_smart_inventory_memberships():
|
||||
smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
|
||||
|
||||
|
||||
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE, max_retries=5)
|
||||
def delete_inventory(self, inventory_id, user_id):
|
||||
@task()
|
||||
def delete_inventory(inventory_id, user_id, retries=5):
|
||||
# Delete inventory as user
|
||||
if user_id is None:
|
||||
user = None
|
||||
@@ -597,7 +609,9 @@ def delete_inventory(self, inventory_id, user_id):
|
||||
return
|
||||
except DatabaseError:
|
||||
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
|
||||
self.retry(countdown=10)
|
||||
if retries > 0:
|
||||
time.sleep(10)
|
||||
delete_inventory(inventory_id, user_id, retries=retries - 1)
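# The change above replaces celery's self.retry(countdown=10) with a plain
# sleep-and-recurse retry. A generic sketch of the same pattern, assuming the
# wrapped operation raises DatabaseError on transient failures:
import time
from django.db import DatabaseError

def call_with_retries(fn, retries=5, countdown=10):
    try:
        return fn()
    except DatabaseError:
        if retries <= 0:
            raise
        time.sleep(countdown)
        return call_with_retries(fn, retries=retries - 1, countdown=countdown)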
def with_path_cleanup(f):
|
||||
@@ -618,8 +632,7 @@ def with_path_cleanup(f):
|
||||
return _wrapped
|
||||
|
||||
|
||||
class BaseTask(Task):
|
||||
name = None
|
||||
class BaseTask(object):
|
||||
model = None
|
||||
event_model = None
|
||||
abstract = True
|
||||
@@ -761,12 +774,12 @@ class BaseTask(Task):
|
||||
os.chmod(path, stat.S_IRUSR)
|
||||
return path
|
||||
|
||||
def add_ansible_venv(self, venv_path, env, add_awx_lib=True):
|
||||
def add_ansible_venv(self, venv_path, env, add_awx_lib=True, **kwargs):
|
||||
env['VIRTUAL_ENV'] = venv_path
|
||||
env['PATH'] = os.path.join(venv_path, "bin") + ":" + env['PATH']
|
||||
venv_libdir = os.path.join(venv_path, "lib")
|
||||
|
||||
if not os.path.exists(venv_libdir):
|
||||
if not kwargs.get('isolated', False) and not os.path.exists(venv_libdir):
|
||||
raise RuntimeError(
|
||||
'a valid Python virtualenv does not exist at {}'.format(venv_path)
|
||||
)
|
||||
@@ -812,7 +825,12 @@ class BaseTask(Task):
|
||||
return False
|
||||
|
||||
def build_inventory(self, instance, **kwargs):
|
||||
json_data = json.dumps(instance.inventory.get_script_data(hostvars=True))
|
||||
script_params = dict(hostvars=True)
|
||||
if hasattr(instance, 'job_slice_number'):
|
||||
script_params['slice_number'] = instance.job_slice_number
|
||||
script_params['slice_count'] = instance.job_slice_count
|
||||
script_data = instance.inventory.get_script_data(**script_params)
|
||||
json_data = json.dumps(script_data)
|
||||
handle, path = tempfile.mkstemp(dir=kwargs.get('private_data_dir', None))
|
||||
f = os.fdopen(handle, 'w')
|
||||
f.write('#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nprint %r\n' % json_data)
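# How slice_number/slice_count translate into host selection is decided inside
# Inventory.get_script_data(); the stride-based split below is only an
# illustrative assumption (1-indexed slice_number), not the documented
# slicing algorithm.
def hypothetical_slice(hostnames, slice_number, slice_count):
    return hostnames[slice_number - 1::slice_count]

hosts = ['h1', 'h2', 'h3', 'h4', 'h5']
assert hypothetical_slice(hosts, 1, 2) == ['h1', 'h3', 'h5']
assert hypothetical_slice(hosts, 2, 2) == ['h2', 'h4']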
@@ -913,14 +931,11 @@ class BaseTask(Task):
|
||||
if instance.cancel_flag:
|
||||
instance = self.update_model(instance.pk, status='canceled')
|
||||
if instance.status != 'running':
|
||||
if hasattr(settings, 'CELERY_UNIT_TEST'):
|
||||
return
|
||||
else:
|
||||
# Stop the task chain and prevent starting the job if it has
|
||||
# already been canceled.
|
||||
instance = self.update_model(pk)
|
||||
status = instance.status
|
||||
raise RuntimeError('not starting %s task' % instance.status)
|
||||
# Stop the task chain and prevent starting the job if it has
|
||||
# already been canceled.
|
||||
instance = self.update_model(pk)
|
||||
status = instance.status
|
||||
raise RuntimeError('not starting %s task' % instance.status)
|
||||
|
||||
if not os.path.exists(settings.AWX_PROOT_BASE_PATH):
|
||||
raise RuntimeError('AWX_PROOT_BASE_PATH=%s does not exist' % settings.AWX_PROOT_BASE_PATH)
|
||||
@@ -1053,8 +1068,6 @@ class BaseTask(Task):
|
||||
logger.exception(six.text_type('{} Final run hook errored.').format(instance.log_format))
|
||||
instance.websocket_emit_status(status)
|
||||
if status != 'successful':
|
||||
# Raising an exception will mark the job as 'failed' in celery
|
||||
# and will stop a task chain from continuing to execute
|
||||
if status == 'canceled':
|
||||
raise AwxTaskError.TaskCancel(instance, rc)
|
||||
else:
|
||||
@@ -1077,12 +1090,12 @@ class BaseTask(Task):
|
||||
return ''
|
||||
|
||||
|
||||
@task()
|
||||
class RunJob(BaseTask):
|
||||
'''
|
||||
Celery task to run a job using ansible-playbook.
|
||||
Run a job using ansible-playbook.
|
||||
'''
|
||||
|
||||
name = 'awx.main.tasks.run_job'
|
||||
model = Job
|
||||
event_model = JobEvent
|
||||
event_data_key = 'job_id'
|
||||
@@ -1179,7 +1192,7 @@ class RunJob(BaseTask):
|
||||
plugin_dirs.extend(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
|
||||
plugin_path = ':'.join(plugin_dirs)
|
||||
env = super(RunJob, self).build_env(job, **kwargs)
|
||||
env = self.add_ansible_venv(job.ansible_virtualenv_path, env, add_awx_lib=kwargs.get('isolated', False))
|
||||
env = self.add_ansible_venv(job.ansible_virtualenv_path, env, add_awx_lib=kwargs.get('isolated', False), **kwargs)
|
||||
# Set environment variables needed for inventory and job event
|
||||
# callbacks to work.
|
||||
env['JOB_ID'] = str(job.pk)
|
||||
@@ -1372,7 +1385,6 @@ class RunJob(BaseTask):
|
||||
self.update_model(job.pk, status='failed', job_explanation=error)
|
||||
raise RuntimeError(error)
|
||||
if job.project and job.project.scm_type:
|
||||
job_request_id = '' if self.request.id is None else self.request.id
|
||||
pu_ig = job.instance_group
|
||||
pu_en = job.execution_node
|
||||
if job.is_isolated() is True:
|
||||
@@ -1385,16 +1397,14 @@ class RunJob(BaseTask):
|
||||
status='running',
|
||||
instance_group = pu_ig,
|
||||
execution_node=pu_en,
|
||||
celery_task_id=job_request_id))
|
||||
celery_task_id=job.celery_task_id))
|
||||
# save the associated job before calling run() so that a
|
||||
# cancel() call on the job can cancel the project update
|
||||
job = self.update_model(job.pk, project_update=local_project_sync)
|
||||
|
||||
project_update_task = local_project_sync._get_task_class()
|
||||
try:
|
||||
task_instance = project_update_task()
|
||||
task_instance.request.id = job_request_id
|
||||
task_instance.run(local_project_sync.id)
|
||||
project_update_task().run(local_project_sync.id)
|
||||
job = self.update_model(job.pk, scm_revision=job.project.scm_revision)
|
||||
except Exception:
|
||||
local_project_sync.refresh_from_db()
|
||||
@@ -1404,7 +1414,6 @@ class RunJob(BaseTask):
|
||||
('project_update', local_project_sync.name, local_project_sync.id)))
|
||||
raise
|
||||
|
||||
|
||||
def final_run_hook(self, job, status, **kwargs):
|
||||
super(RunJob, self).final_run_hook(job, status, **kwargs)
|
||||
if job.use_fact_cache:
|
||||
@@ -1435,9 +1444,9 @@ class RunJob(BaseTask):
|
||||
update_inventory_computed_fields.delay(inventory.id, True)
|
||||
|
||||
|
||||
@task()
|
||||
class RunProjectUpdate(BaseTask):
|
||||
|
||||
name = 'awx.main.tasks.run_project_update'
|
||||
model = ProjectUpdate
|
||||
event_model = ProjectUpdateEvent
|
||||
event_data_key = 'project_update_id'
|
||||
@@ -1638,7 +1647,6 @@ class RunProjectUpdate(BaseTask):
|
||||
return getattr(settings, 'PROJECT_UPDATE_IDLE_TIMEOUT', None)
|
||||
|
||||
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
|
||||
project_request_id = '' if self.request.id is None else self.request.id
|
||||
scm_revision = project_update.project.scm_revision
|
||||
inv_update_class = InventoryUpdate._get_task_class()
|
||||
for inv_src in dependent_inventory_sources:
|
||||
@@ -1661,13 +1669,10 @@ class RunProjectUpdate(BaseTask):
|
||||
status='running',
|
||||
instance_group=project_update.instance_group,
|
||||
execution_node=project_update.execution_node,
|
||||
celery_task_id=str(project_request_id),
|
||||
source_project_update=project_update))
|
||||
source_project_update=project_update,
|
||||
celery_task_id=project_update.celery_task_id))
|
||||
try:
|
||||
task_instance = inv_update_class()
|
||||
# Runs in the same Celery task as project update
|
||||
task_instance.request.id = project_request_id
|
||||
task_instance.run(local_inv_update.id)
|
||||
inv_update_class().run(local_inv_update.id)
|
||||
except Exception:
|
||||
logger.exception(six.text_type('{} Unhandled exception updating dependent SCM inventory sources.')
|
||||
.format(project_update.log_format))
|
||||
@@ -1772,9 +1777,9 @@ class RunProjectUpdate(BaseTask):
|
||||
return getattr(settings, 'AWX_PROOT_ENABLED', False)
|
||||
|
||||
|
||||
@task()
|
||||
class RunInventoryUpdate(BaseTask):
|
||||
|
||||
name = 'awx.main.tasks.run_inventory_update'
|
||||
model = InventoryUpdate
|
||||
event_model = InventoryUpdateEvent
|
||||
event_data_key = 'inventory_update_id'
|
||||
@@ -1992,8 +1997,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
This dictionary is used by `build_env`, below.
|
||||
"""
|
||||
# Run the superclass implementation.
|
||||
super_ = super(RunInventoryUpdate, self).build_passwords
|
||||
passwords = super_(inventory_update, **kwargs)
|
||||
passwords = super(RunInventoryUpdate, self).build_passwords(inventory_update, **kwargs)
|
||||
|
||||
# Take key fields from the credential in use and add them to the
|
||||
# passwords dictionary.
|
||||
@@ -2129,8 +2133,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
elif src == 'scm':
|
||||
args.append(inventory_update.get_actual_source_path())
|
||||
elif src == 'custom':
|
||||
runpath = tempfile.mkdtemp(prefix='awx_inventory_', dir=settings.AWX_PROOT_BASE_PATH)
|
||||
handle, path = tempfile.mkstemp(dir=runpath)
|
||||
handle, path = tempfile.mkstemp(dir=kwargs['private_data_dir'])
|
||||
f = os.fdopen(handle, 'w')
|
||||
if inventory_update.source_script is None:
|
||||
raise RuntimeError('Inventory Script does not exist')
|
||||
@@ -2139,7 +2142,6 @@ class RunInventoryUpdate(BaseTask):
|
||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||
args.append(path)
|
||||
args.append("--custom")
|
||||
self.cleanup_paths.append(runpath)
|
||||
args.append('-v%d' % inventory_update.verbosity)
|
||||
if settings.DEBUG:
|
||||
args.append('--traceback')
|
||||
@@ -2158,7 +2160,6 @@ class RunInventoryUpdate(BaseTask):
|
||||
if inventory_update.inventory_source:
|
||||
source_project = inventory_update.inventory_source.source_project
|
||||
if (inventory_update.source=='scm' and inventory_update.launch_type!='scm' and source_project):
|
||||
request_id = '' if self.request.id is None else self.request.id
|
||||
local_project_sync = source_project.create_project_update(
|
||||
_eager_fields=dict(
|
||||
launch_type="sync",
|
||||
@@ -2166,16 +2167,14 @@ class RunInventoryUpdate(BaseTask):
|
||||
status='running',
|
||||
execution_node=inventory_update.execution_node,
|
||||
instance_group = inventory_update.instance_group,
|
||||
celery_task_id=request_id))
|
||||
celery_task_id=inventory_update.celery_task_id))
|
||||
# associate the inventory update before calling run() so that a
|
||||
# cancel() call on the inventory update can cancel the project update
|
||||
local_project_sync.scm_inventory_updates.add(inventory_update)
|
||||
|
||||
project_update_task = local_project_sync._get_task_class()
|
||||
try:
|
||||
task_instance = project_update_task()
|
||||
task_instance.request.id = request_id
|
||||
task_instance.run(local_project_sync.id)
|
||||
project_update_task().run(local_project_sync.id)
|
||||
inventory_update.inventory_source.scm_last_revision = local_project_sync.project.scm_revision
|
||||
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
|
||||
except Exception:
|
||||
@@ -2186,12 +2185,12 @@ class RunInventoryUpdate(BaseTask):
|
||||
raise
|
||||
|
||||
|
||||
@task()
|
||||
class RunAdHocCommand(BaseTask):
|
||||
'''
|
||||
Celery task to run an ad hoc command using ansible.
|
||||
Run an ad hoc command using ansible.
|
||||
'''
|
||||
|
||||
name = 'awx.main.tasks.run_ad_hoc_command'
|
||||
model = AdHocCommand
|
||||
event_model = AdHocCommandEvent
|
||||
event_data_key = 'ad_hoc_command_id'
|
||||
@@ -2352,9 +2351,9 @@ class RunAdHocCommand(BaseTask):
|
||||
return getattr(settings, 'AWX_PROOT_ENABLED', False)
|
||||
|
||||
|
||||
@task()
|
||||
class RunSystemJob(BaseTask):
|
||||
|
||||
name = 'awx.main.tasks.run_system_job'
|
||||
model = SystemJob
|
||||
event_model = SystemJobEvent
|
||||
event_data_key = 'system_job_id'
|
||||
@@ -2409,9 +2408,9 @@ def _reconstruct_relationships(copy_mapping):
|
||||
new_obj.save()
|
||||
|
||||
|
||||
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
|
||||
@task()
|
||||
def deep_copy_model_obj(
|
||||
self, model_module, model_name, obj_pk, new_obj_pk,
|
||||
model_module, model_name, obj_pk, new_obj_pk,
|
||||
user_pk, sub_obj_list, permission_check_func=None
|
||||
):
|
||||
logger.info(six.text_type('Deep copy {} from {} to {}.').format(model_name, obj_pk, new_obj_pk))
|
||||
|
||||
@@ -119,7 +119,7 @@ class TestSwaggerGeneration():
|
||||
def test_autogen_response_examples(self, swagger_autogen):
|
||||
for pattern, node in TestSwaggerGeneration.JSON['paths'].items():
|
||||
pattern = pattern.replace('{id}', '[0-9]+')
|
||||
pattern = pattern.replace('{category_slug}', '[a-zA-Z0-9\-]+')
|
||||
pattern = pattern.replace(r'{category_slug}', r'[a-zA-Z0-9\-]+')
|
||||
for path, result in swagger_autogen.items():
|
||||
if re.match('^{}$'.format(pattern), path):
|
||||
for key, value in result.items():
|
||||
|
||||
@@ -359,18 +359,17 @@ def test_create_with_valid_injectors(get, post, admin):
|
||||
},
|
||||
'injectors': {
|
||||
'env': {
|
||||
'ANSIBLE_MY_CLOUD_TOKEN': '{{api_token}}'
|
||||
'AWX_MY_CLOUD_TOKEN': '{{api_token}}'
|
||||
}
|
||||
}
|
||||
}, admin)
|
||||
assert response.status_code == 201
|
||||
}, admin, expect=201)
|
||||
|
||||
response = get(reverse('api:credential_type_list'), admin)
|
||||
assert response.data['count'] == 1
|
||||
injectors = response.data['results'][0]['injectors']
|
||||
assert len(injectors) == 1
|
||||
assert injectors['env'] == {
|
||||
'ANSIBLE_MY_CLOUD_TOKEN': '{{api_token}}'
|
||||
'AWX_MY_CLOUD_TOKEN': '{{api_token}}'
|
||||
}
|
||||
|
||||
|
||||
@@ -388,7 +387,7 @@ def test_create_with_undefined_template_variable_xfail(post, admin):
|
||||
}]
|
||||
},
|
||||
'injectors': {
|
||||
'env': {'ANSIBLE_MY_CLOUD_TOKEN': '{{api_tolkien}}'}
|
||||
'env': {'AWX_MY_CLOUD_TOKEN': '{{api_tolkien}}'}
|
||||
}
|
||||
}, admin)
|
||||
assert response.status_code == 400
|
||||
|
||||
@@ -59,7 +59,7 @@ def check_system_tracking_feature_forbidden(response):
|
||||
assert 'Your license does not permit use of system tracking.' == response.data['detail']
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled)
|
||||
@mock.patch('awx.api.views.mixin.feature_enabled', new=mock_feature_disabled)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.license_feature
|
||||
def test_system_tracking_license_get(hosts, get, user):
|
||||
@@ -70,7 +70,7 @@ def test_system_tracking_license_get(hosts, get, user):
|
||||
check_system_tracking_feature_forbidden(response)
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled)
|
||||
@mock.patch('awx.api.views.mixin.feature_enabled', new=mock_feature_disabled)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.license_feature
|
||||
def test_system_tracking_license_options(hosts, options, user):
|
||||
|
||||
@@ -41,7 +41,7 @@ def check_system_tracking_feature_forbidden(response):
|
||||
assert 'Your license does not permit use of system tracking.' == response.data['detail']
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled)
|
||||
@mock.patch('awx.api.views.mixin.feature_enabled', new=mock_feature_disabled)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.license_feature
|
||||
def test_system_tracking_license_get(hosts, get, user):
|
||||
@@ -52,7 +52,7 @@ def test_system_tracking_license_get(hosts, get, user):
|
||||
check_system_tracking_feature_forbidden(response)
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled)
|
||||
@mock.patch('awx.api.views.mixin.feature_enabled', new=mock_feature_disabled)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.license_feature
|
||||
def test_system_tracking_license_options(hosts, options, user):
|
||||
|
||||
@@ -365,6 +365,116 @@ def test_inventory_source_vars_prohibition(post, inventory, admin_user):
|
||||
assert 'FOOBAR' in r.data['source_vars'][0]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestInventorySourceCredential:
|
||||
def test_need_cloud_credential(self, inventory, admin_user, post):
|
||||
"""Test that a cloud-based source requires credential"""
|
||||
r = post(
|
||||
url=reverse('api:inventory_source_list'),
|
||||
data={'inventory': inventory.pk, 'name': 'foo', 'source': 'openstack'},
|
||||
expect=400,
|
||||
user=admin_user
|
||||
)
|
||||
assert 'Credential is required for a cloud source' in r.data['credential'][0]
|
||||
|
||||
def test_ec2_no_credential(self, inventory, admin_user, post):
|
||||
"""Test that an ec2 inventory source can be added with no credential"""
|
||||
post(
|
||||
url=reverse('api:inventory_source_list'),
|
||||
data={'inventory': inventory.pk, 'name': 'fobar', 'source': 'ec2'},
|
||||
expect=201,
|
||||
user=admin_user
|
||||
)
|
||||
|
||||
def test_validating_credential_type(self, organization, inventory, admin_user, post):
|
||||
"""Test that cloud sources must use their respective credential type"""
|
||||
from awx.main.models.credential import Credential, CredentialType
|
||||
openstack = CredentialType.defaults['openstack']()
|
||||
openstack.save()
|
||||
os_cred = Credential.objects.create(
|
||||
credential_type=openstack, name='bar', organization=organization)
|
||||
r = post(
|
||||
url=reverse('api:inventory_source_list'),
|
||||
data={
|
||||
'inventory': inventory.pk, 'name': 'fobar', 'source': 'ec2',
|
||||
'credential': os_cred.pk
|
||||
},
|
||||
expect=400,
|
||||
user=admin_user
|
||||
)
|
||||
assert 'Cloud-based inventory sources (such as ec2)' in r.data['credential'][0]
|
||||
assert 'require credentials for the matching cloud service' in r.data['credential'][0]
|
||||
|
||||
def test_vault_credential_not_allowed(self, project, inventory, vault_credential, admin_user, post):
|
||||
"""Vault credentials cannot be associated via the deprecated field"""
|
||||
# TODO: when feature is added, add tests to use the related credentials
|
||||
# endpoint for multi-vault attachment
|
||||
r = post(
|
||||
url=reverse('api:inventory_source_list'),
|
||||
data={
|
||||
'inventory': inventory.pk, 'name': 'fobar', 'source': 'scm',
|
||||
'source_project': project.pk, 'source_path': '',
|
||||
'credential': vault_credential.pk
|
||||
},
|
||||
expect=400,
|
||||
user=admin_user
|
||||
)
|
||||
assert 'Credentials of type insights and vault' in r.data['credential'][0]
|
||||
assert 'disallowed for scm inventory sources' in r.data['credential'][0]
|
||||
|
||||
def test_vault_credential_not_allowed_via_related(
|
||||
self, project, inventory, vault_credential, admin_user, post):
|
||||
"""Vault credentials cannot be associated via related endpoint"""
|
||||
inv_src = InventorySource.objects.create(
|
||||
inventory=inventory, name='foobar', source='scm',
|
||||
source_project=project, source_path=''
|
||||
)
|
||||
r = post(
|
||||
url=reverse('api:inventory_source_credentials_list', kwargs={'pk': inv_src.pk}),
|
||||
data={
|
||||
'id': vault_credential.pk
|
||||
},
|
||||
expect=400,
|
||||
user=admin_user
|
||||
)
|
||||
assert 'Credentials of type insights and vault' in r.data['msg']
|
||||
assert 'disallowed for scm inventory sources' in r.data['msg']
|
||||
|
||||
def test_credentials_relationship_mapping(self, project, inventory, organization, admin_user, post, patch):
|
||||
"""The credentials relationship is used to manage the cloud credential
|
||||
this test checks that replacement works"""
|
||||
from awx.main.models.credential import Credential, CredentialType
|
||||
openstack = CredentialType.defaults['openstack']()
|
||||
openstack.save()
|
||||
os_cred = Credential.objects.create(
|
||||
credential_type=openstack, name='bar', organization=organization)
|
||||
r = post(
|
||||
url=reverse('api:inventory_source_list'),
|
||||
data={
|
||||
'inventory': inventory.pk, 'name': 'fobar', 'source': 'scm',
|
||||
'source_project': project.pk, 'source_path': '',
|
||||
'credential': os_cred.pk
|
||||
},
|
||||
expect=201,
|
||||
user=admin_user
|
||||
)
|
||||
aws = CredentialType.defaults['aws']()
|
||||
aws.save()
|
||||
aws_cred = Credential.objects.create(
|
||||
credential_type=aws, name='bar2', organization=organization)
|
||||
inv_src = InventorySource.objects.get(pk=r.data['id'])
|
||||
assert list(inv_src.credentials.values_list('id', flat=True)) == [os_cred.pk]
|
||||
patch(
|
||||
url=inv_src.get_absolute_url(),
|
||||
data={
|
||||
'credential': aws_cred.pk
|
||||
},
|
||||
expect=200,
|
||||
user=admin_user
|
||||
)
|
||||
assert list(inv_src.credentials.values_list('id', flat=True)) == [aws_cred.pk]
|
||||
|
||||
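# Illustrative summary (inferred only from the assertions above): a cloud
# source such as 'openstack' requires a credential of the matching cloud
# type, an 'ec2' source may be created with no credential at all, and
# 'insights' or 'vault' credentials are rejected for 'scm' sources both on
# the deprecated 'credential' field and on the related credentials endpoint.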
|
||||
@pytest.mark.django_db
|
||||
class TestControlledBySCM:
|
||||
'''
|
||||
|
||||
@@ -122,6 +122,22 @@ def test_job_relaunch_on_failed_hosts(post, inventory, project, machine_credenti
|
||||
assert r.data.get('limit') == hosts
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_slice_jt_recent_jobs(slice_job_factory, admin_user, get):
|
||||
workflow_job = slice_job_factory(3, spawn=True)
|
||||
slice_jt = workflow_job.job_template
|
||||
r = get(
|
||||
url=slice_jt.get_absolute_url(),
|
||||
user=admin_user,
|
||||
expect=200
|
||||
)
|
||||
job_ids = [entry['id'] for entry in r.data['summary_fields']['recent_jobs']]
|
||||
assert workflow_job.pk not in job_ids
|
||||
for node in workflow_job.workflow_nodes.all():
|
||||
job = node.job
|
||||
assert job.pk in job_ids
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_block_unprocessed_events(delete, admin_user, mocker):
|
||||
time_of_finish = parse("Thu Feb 28 09:10:20 2013 -0500")
|
||||
@@ -141,7 +157,7 @@ def test_block_unprocessed_events(delete, admin_user, mocker):
|
||||
view = MockView()
|
||||
|
||||
time_of_request = time_of_finish + relativedelta(seconds=2)
|
||||
with mock.patch('awx.api.views.now', lambda: time_of_request):
|
||||
with mock.patch('awx.api.views.mixin.now', lambda: time_of_request):
|
||||
r = view.destroy(request)
|
||||
assert r.status_code == 400
|
||||
|
||||
@@ -162,7 +178,7 @@ def test_block_related_unprocessed_events(mocker, organization, project, delete,
|
||||
)
|
||||
view = RelatedJobsPreventDeleteMixin()
|
||||
time_of_request = time_of_finish + relativedelta(seconds=2)
|
||||
with mock.patch('awx.api.views.now', lambda: time_of_request):
|
||||
with mock.patch('awx.api.views.mixin.now', lambda: time_of_request):
|
||||
with pytest.raises(PermissionDenied):
|
||||
view.perform_destroy(organization)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import json
|
||||
from awx.api.serializers import JobLaunchSerializer
|
||||
from awx.main.models.credential import Credential
|
||||
from awx.main.models.inventory import Inventory, Host
|
||||
from awx.main.models.jobs import Job, JobTemplate
|
||||
from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
@@ -553,15 +553,15 @@ def test_callback_accept_prompted_extra_var(mocker, survey_spec_factory, job_tem
|
||||
|
||||
with mocker.patch('awx.main.access.BaseAccess.check_license'):
|
||||
mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4})
|
||||
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
|
||||
with mocker.patch.object(UnifiedJobTemplate, 'create_unified_job', return_value=mock_job):
|
||||
with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}):
|
||||
with mocker.patch('awx.api.views.JobTemplateCallback.find_matching_hosts', return_value=[host]):
|
||||
post(
|
||||
reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
|
||||
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
|
||||
admin_user, expect=201, format='json')
|
||||
assert JobTemplate.create_unified_job.called
|
||||
assert JobTemplate.create_unified_job.call_args == ({
|
||||
assert UnifiedJobTemplate.create_unified_job.called
|
||||
assert UnifiedJobTemplate.create_unified_job.call_args == ({
|
||||
'extra_vars': {'survey_var': 4, 'job_launch_var': 3},
|
||||
'_eager_fields': {'launch_type': 'callback'},
|
||||
'limit': 'single-host'},
|
||||
@@ -579,15 +579,15 @@ def test_callback_ignore_unprompted_extra_var(mocker, survey_spec_factory, job_t
|
||||
|
||||
with mocker.patch('awx.main.access.BaseAccess.check_license'):
|
||||
mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4})
|
||||
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
|
||||
with mocker.patch.object(UnifiedJobTemplate, 'create_unified_job', return_value=mock_job):
|
||||
with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}):
|
||||
with mocker.patch('awx.api.views.JobTemplateCallback.find_matching_hosts', return_value=[host]):
|
||||
post(
|
||||
reverse('api:job_template_callback', kwargs={'pk':job_template.pk}),
|
||||
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
|
||||
admin_user, expect=201, format='json')
|
||||
assert JobTemplate.create_unified_job.called
|
||||
assert JobTemplate.create_unified_job.call_args == ({
|
||||
assert UnifiedJobTemplate.create_unified_job.called
|
||||
assert UnifiedJobTemplate.create_unified_job.call_args == ({
|
||||
'_eager_fields': {'launch_type': 'callback'},
|
||||
'limit': 'single-host'},
|
||||
)
|
||||
|
||||
@@ -260,36 +260,6 @@ def test_oauth_list_user_tokens(oauth_application, post, get, admin, alice):
|
||||
post(url, {'scope': 'read'}, user, expect=201)
|
||||
response = get(url, admin, expect=200)
|
||||
assert response.data['count'] == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_refresh_accesstoken(oauth_application, post, get, delete, admin):
|
||||
response = post(
|
||||
reverse('api:o_auth2_application_token_list', kwargs={'pk': oauth_application.pk}),
|
||||
{'scope': 'read'}, admin, expect=201
|
||||
)
|
||||
token = AccessToken.objects.get(token=response.data['token'])
|
||||
refresh_token = RefreshToken.objects.get(token=response.data['refresh_token'])
|
||||
assert AccessToken.objects.count() == 1
|
||||
assert RefreshToken.objects.count() == 1
|
||||
|
||||
refresh_url = drf_reverse('api:oauth_authorization_root_view') + 'token/'
|
||||
response = post(
|
||||
refresh_url,
|
||||
data='grant_type=refresh_token&refresh_token=' + refresh_token.token,
|
||||
content_type='application/x-www-form-urlencoded',
|
||||
HTTP_AUTHORIZATION='Basic ' + base64.b64encode(':'.join([
|
||||
oauth_application.client_id, oauth_application.client_secret
|
||||
]))
|
||||
)
|
||||
|
||||
new_token = json.loads(response._container[0])['access_token']
|
||||
new_refresh_token = json.loads(response._container[0])['refresh_token']
|
||||
assert token not in AccessToken.objects.all()
|
||||
assert AccessToken.objects.get(token=new_token) != 0
|
||||
assert RefreshToken.objects.get(token=new_refresh_token) != 0
|
||||
refresh_token = RefreshToken.objects.get(token=refresh_token)
|
||||
assert refresh_token.revoked
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -314,3 +284,77 @@ def test_implicit_authorization(oauth_application, admin):
|
||||
assert 'http://test.com' in response.url and 'access_token' in response.url
|
||||
# Make sure no refresh token is created for app with implicit grant type.
|
||||
assert refresh_token_count == RefreshToken.objects.count()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_refresh_accesstoken(oauth_application, post, get, delete, admin):
|
||||
response = post(
|
||||
reverse('api:o_auth2_application_token_list', kwargs={'pk': oauth_application.pk}),
|
||||
{'scope': 'read'}, admin, expect=201
|
||||
)
|
||||
assert AccessToken.objects.count() == 1
|
||||
assert RefreshToken.objects.count() == 1
|
||||
token = AccessToken.objects.get(token=response.data['token'])
|
||||
refresh_token = RefreshToken.objects.get(token=response.data['refresh_token'])
|
||||
|
||||
refresh_url = drf_reverse('api:oauth_authorization_root_view') + 'token/'
|
||||
response = post(
|
||||
refresh_url,
|
||||
data='grant_type=refresh_token&refresh_token=' + refresh_token.token,
|
||||
content_type='application/x-www-form-urlencoded',
|
||||
HTTP_AUTHORIZATION='Basic ' + base64.b64encode(':'.join([
|
||||
oauth_application.client_id, oauth_application.client_secret
|
||||
]))
|
||||
)
|
||||
assert RefreshToken.objects.filter(token=refresh_token).exists()
|
||||
original_refresh_token = RefreshToken.objects.get(token=refresh_token)
|
||||
assert token not in AccessToken.objects.all()
|
||||
assert AccessToken.objects.count() == 1
|
||||
# the original RefreshToken is kept (marked revoked) and a new one is issued
|
||||
assert RefreshToken.objects.count() == 2
|
||||
new_token = json.loads(response._container[0])['access_token']
|
||||
new_refresh_token = json.loads(response._container[0])['refresh_token']
|
||||
assert AccessToken.objects.filter(token=new_token).count() == 1
|
||||
# checks that RefreshTokens are rotated (new RefreshToken issued)
|
||||
assert RefreshToken.objects.filter(token=new_refresh_token).count() == 1
|
||||
assert original_refresh_token.revoked # is not None
|
||||
|
||||
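# A minimal client-side sketch of the refresh-token grant exercised above.
# This is an illustration, not part of the test suite; the endpoint path and
# the use of the `requests` library are assumptions.
import requests

def refresh_access_token(api_root, client_id, client_secret, refresh_token):
    # POST the refresh_token grant with HTTP Basic auth for the application;
    # the old token pair is revoked and a new pair is returned.
    resp = requests.post(
        api_root + '/api/o/token/',
        data={'grant_type': 'refresh_token', 'refresh_token': refresh_token},
        auth=(client_id, client_secret),
    )
    resp.raise_for_status()
    return resp.json()  # expected to contain 'access_token' and 'refresh_token'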
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_revoke_access_then_refreshtoken(oauth_application, post, get, delete, admin):
|
||||
response = post(
|
||||
reverse('api:o_auth2_application_token_list', kwargs={'pk': oauth_application.pk}),
|
||||
{'scope': 'read'}, admin, expect=201
|
||||
)
|
||||
token = AccessToken.objects.get(token=response.data['token'])
|
||||
refresh_token = RefreshToken.objects.get(token=response.data['refresh_token'])
|
||||
assert AccessToken.objects.count() == 1
|
||||
assert RefreshToken.objects.count() == 1
|
||||
|
||||
token.revoke()
|
||||
assert AccessToken.objects.count() == 0
|
||||
assert RefreshToken.objects.count() == 1
|
||||
assert not refresh_token.revoked
|
||||
|
||||
refresh_token.revoke()
|
||||
assert AccessToken.objects.count() == 0
|
||||
assert RefreshToken.objects.count() == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_revoke_refreshtoken(oauth_application, post, get, delete, admin):
|
||||
response = post(
|
||||
reverse('api:o_auth2_application_token_list', kwargs={'pk': oauth_application.pk}),
|
||||
{'scope': 'read'}, admin, expect=201
|
||||
)
|
||||
refresh_token = RefreshToken.objects.get(token=response.data['refresh_token'])
|
||||
assert AccessToken.objects.count() == 1
|
||||
assert RefreshToken.objects.count() == 1
|
||||
|
||||
refresh_token.revoke()
|
||||
assert AccessToken.objects.count() == 0
|
||||
# the same RefreshToken is recycled
|
||||
new_refresh_token = RefreshToken.objects.all().first()
|
||||
assert refresh_token == new_refresh_token
|
||||
assert new_refresh_token.revoked
|
||||
|
||||
@@ -7,6 +7,7 @@ import pytest
|
||||
import os
|
||||
|
||||
from django.conf import settings
|
||||
from kombu.utils.url import parse_url
|
||||
|
||||
# Mock
|
||||
import mock
|
||||
@@ -358,3 +359,25 @@ def test_isolated_key_flag_readonly(get, patch, delete, admin):
|
||||
|
||||
delete(url, user=admin)
|
||||
assert settings.AWX_ISOLATED_KEY_GENERATION is True
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_default_broker_url():
|
||||
url = parse_url(settings.BROKER_URL)
|
||||
assert url['transport'] == 'amqp'
|
||||
assert url['hostname'] == 'rabbitmq'
|
||||
assert url['userid'] == 'guest'
|
||||
assert url['password'] == 'guest'
|
||||
assert url['virtual_host'] == '/'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_broker_url_with_special_characters():
|
||||
settings.BROKER_URL = 'amqp://guest:a@ns:ibl3#@rabbitmq:5672//'
|
||||
url = parse_url(settings.BROKER_URL)
|
||||
assert url['transport'] == 'amqp'
|
||||
assert url['hostname'] == 'rabbitmq'
|
||||
assert url['port'] == 5672
|
||||
assert url['userid'] == 'guest'
|
||||
assert url['password'] == 'a@ns:ibl3#'
|
||||
assert url['virtual_host'] == '/'
|
||||
|
||||
@@ -285,9 +285,7 @@ def test_survey_spec_passwords_with_default_required(job_template_factory, post,
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('default, status', [
|
||||
('SUPERSECRET', 200),
|
||||
(['some', 'invalid', 'list'], 400),
|
||||
({'some-invalid': 'dict'}, 400),
|
||||
(False, 400)
|
||||
])
|
||||
def test_survey_spec_default_passwords_are_encrypted(job_template, post, admin_user, default, status):
|
||||
job_template.survey_enabled = True
|
||||
@@ -317,7 +315,7 @@ def test_survey_spec_default_passwords_are_encrypted(job_template, post, admin_u
|
||||
'secret_value': default
|
||||
}
|
||||
else:
|
||||
assert "for 'secret_value' expected to be a string." in str(resp.data)
|
||||
assert "expected to be string." in str(resp.data)
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@@ -346,37 +344,6 @@ def test_survey_spec_default_passwords_encrypted_on_update(job_template, post, p
|
||||
assert updated_jt.survey_spec == JobTemplate.objects.get(pk=job_template.pk).survey_spec
|
||||
|
||||
|
||||
# Tests related to survey content validation
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.survey
|
||||
def test_survey_spec_non_dict_error(deploy_jobtemplate, post, admin_user):
|
||||
"""When a question doesn't follow the standard format, verify error thrown."""
|
||||
response = post(
|
||||
url=reverse('api:job_template_survey_spec', kwargs={'pk': deploy_jobtemplate.id}),
|
||||
data={
|
||||
"description": "Email of the submitter",
|
||||
"spec": ["What is your email?"], "name": "Email survey"
|
||||
},
|
||||
user=admin_user,
|
||||
expect=400
|
||||
)
|
||||
assert response.data['error'] == "Survey question 0 is not a json object."
|
||||
|
||||
|
||||
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.survey
|
||||
def test_survey_spec_dual_names_error(survey_spec_factory, deploy_jobtemplate, post, user):
|
||||
response = post(
|
||||
url=reverse('api:job_template_survey_spec', kwargs={'pk': deploy_jobtemplate.id}),
|
||||
data=survey_spec_factory(['submitter_email', 'submitter_email']),
|
||||
user=user('admin', True),
|
||||
expect=400
|
||||
)
|
||||
assert response.data['error'] == "'variable' 'submitter_email' duplicated in survey question 1."
|
||||
|
||||
|
||||
# Test actions that should be allowed with non-survey license
|
||||
@mock.patch('awx.main.access.BaseAccess.check_license', new=mock_no_surveys)
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -8,13 +8,13 @@ import tempfile
|
||||
import shutil
|
||||
from datetime import timedelta
|
||||
from six.moves import xrange
|
||||
from mock import PropertyMock
|
||||
|
||||
# Django
|
||||
from django.core.urlresolvers import resolve
|
||||
from django.utils.six.moves.urllib.parse import urlparse
|
||||
from django.utils import timezone
|
||||
from django.contrib.auth.models import User
|
||||
from django.conf import settings
|
||||
from django.core.serializers.json import DjangoJSONEncoder
|
||||
from django.db.backends.sqlite3.base import SQLiteCursorWrapper
|
||||
from jsonbfield.fields import JSONField
|
||||
@@ -66,17 +66,6 @@ def swagger_autogen(requests=__SWAGGER_REQUESTS__):
|
||||
return requests
|
||||
|
||||
|
||||
@pytest.fixture(scope="session", autouse=True)
|
||||
def celery_memory_broker():
|
||||
'''
|
||||
FIXME: Not sure how "far" just setting the BROKER_URL will get us.
|
||||
We may need to influence CELERY's configuration like we do in the old unit tests (see base.py)
|
||||
|
||||
Allows django signal code to execute without the need for redis
|
||||
'''
|
||||
settings.BROKER_URL='memory://localhost/'
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def user():
|
||||
def u(name, is_superuser=False):
|
||||
@@ -682,6 +671,24 @@ def job_template_labels(organization, job_template):
|
||||
return job_template
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def jt_linked(job_template_factory, credential, net_credential, vault_credential):
|
||||
'''
|
||||
A job template with a reasonably complete set of related objects to
|
||||
test RBAC and other functionality affected by related objects
|
||||
'''
|
||||
objects = job_template_factory(
|
||||
'testJT', organization='org1', project='proj1', inventory='inventory1',
|
||||
credential='cred1')
|
||||
jt = objects.job_template
|
||||
jt.credentials.add(vault_credential)
|
||||
jt.save()
|
||||
# Add AWS cloud credential and network credential
|
||||
jt.credentials.add(credential)
|
||||
jt.credentials.add(net_credential)
|
||||
return jt
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_job_template(organization):
|
||||
wjt = WorkflowJobTemplate(name='test-workflow_job_template', organization=organization)
|
||||
@@ -764,3 +771,42 @@ def sqlite_copy_expert(request):
|
||||
request.addfinalizer(lambda: delattr(SQLiteCursorWrapper, 'copy_expert'))
|
||||
return path
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def disable_database_settings(mocker):
|
||||
m = mocker.patch('awx.conf.settings.SettingsWrapper.all_supported_settings', new_callable=PropertyMock)
|
||||
m.return_value = []
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def slice_jt_factory(inventory):
|
||||
def r(N, jt_kwargs=None):
|
||||
for i in range(N):
|
||||
inventory.hosts.create(name='foo{}'.format(i))
|
||||
if not jt_kwargs:
|
||||
jt_kwargs = {}
|
||||
return JobTemplate.objects.create(
|
||||
name='slice-jt-from-factory',
|
||||
job_slice_count=N,
|
||||
inventory=inventory,
|
||||
**jt_kwargs
|
||||
)
|
||||
return r
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def slice_job_factory(slice_jt_factory):
|
||||
def r(N, jt_kwargs=None, prompts=None, spawn=False):
|
||||
slice_jt = slice_jt_factory(N, jt_kwargs=jt_kwargs)
|
||||
if not prompts:
|
||||
prompts = {}
|
||||
slice_job = slice_jt.create_unified_job(**prompts)
|
||||
if spawn:
|
||||
for node in slice_job.workflow_nodes.all():
|
||||
# does what the task manager does for spawning workflow jobs
|
||||
kv = node.get_job_kwargs()
|
||||
job = node.unified_job_template.create_unified_job(**kv)
|
||||
node.job = job
|
||||
node.save()
|
||||
return slice_job
|
||||
return r
|
||||
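# Illustrative (hypothetical) use of the fixtures above: slice_jt_factory(N)
# builds a job template with N hosts and job_slice_count=N, and
# slice_job_factory(N, spawn=True) launches it and spawns one job per slice node.
@pytest.mark.django_db
def test_example_sliced_launch(slice_job_factory):
    workflow_job = slice_job_factory(2, spawn=True)
    assert workflow_job.workflow_nodes.count() == 2
    for node in workflow_job.workflow_nodes.all():
        assert node.job is not None  # each slice node received its own job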
|
||||
@@ -38,6 +38,33 @@ class TestInventoryScript:
|
||||
'remote_tower_id': host.id
|
||||
}
|
||||
|
||||
def test_slice_subset(self, inventory):
|
||||
for i in range(3):
|
||||
inventory.hosts.create(name='host{}'.format(i))
|
||||
for i in range(3):
|
||||
assert inventory.get_script_data(slice_number=i + 1, slice_count=3) == {
|
||||
'all': {'hosts': ['host{}'.format(i)]}
|
||||
}
|
||||
|
||||
def test_slice_subset_with_groups(self, inventory):
|
||||
hosts = []
|
||||
for i in range(3):
|
||||
host = inventory.hosts.create(name='host{}'.format(i))
|
||||
hosts.append(host)
|
||||
g1 = inventory.groups.create(name='contains_all_hosts')
|
||||
for host in hosts:
|
||||
g1.hosts.add(host)
|
||||
g2 = inventory.groups.create(name='contains_two_hosts')
|
||||
for host in hosts[:2]:
|
||||
g2.hosts.add(host)
|
||||
for i in range(3):
|
||||
expected_data = {
|
||||
'contains_all_hosts': {'hosts': ['host{}'.format(i)], 'children': [], 'vars': {}},
|
||||
}
|
||||
if i < 2:
|
||||
expected_data['contains_two_hosts'] = {'hosts': ['host{}'.format(i)], 'children': [], 'vars': {}}
|
||||
assert inventory.get_script_data(slice_number=i + 1, slice_count=3) == expected_data
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestActiveCount:
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import pytest
|
||||
import six
|
||||
|
||||
from awx.main.models import JobTemplate, Job, JobHostSummary
|
||||
from awx.main.models import JobTemplate, Job, JobHostSummary, WorkflowJob
|
||||
from crum import impersonate
|
||||
|
||||
|
||||
@@ -81,3 +81,23 @@ def test_job_host_summary_representation(host):
|
||||
jhs = JobHostSummary.objects.get(pk=jhs.id)
|
||||
host.delete()
|
||||
assert 'N/A changed=1 dark=2 failures=3 ok=4 processed=5 skipped=6' == six.text_type(jhs)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestSlicingModels:
|
||||
|
||||
def test_slice_workflow_spawn(self, slice_jt_factory):
|
||||
slice_jt = slice_jt_factory(3)
|
||||
job = slice_jt.create_unified_job()
|
||||
assert isinstance(job, WorkflowJob)
|
||||
assert job.job_template == slice_jt
|
||||
assert job.unified_job_template == slice_jt
|
||||
assert job.workflow_nodes.count() == 3
|
||||
|
||||
def test_slices_with_JT_and_prompts(self, slice_job_factory):
|
||||
job = slice_job_factory(3, jt_kwargs={'ask_limit_on_launch': True}, prompts={'limit': 'foobar'}, spawn=True)
|
||||
assert job.launch_config.prompts_dict() == {'limit': 'foobar'}
|
||||
for node in job.workflow_nodes.all():
|
||||
assert node.limit is None # data not saved in node prompts
|
||||
job = node.job
|
||||
assert job.limit == 'foobar'
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
import itertools
|
||||
import pytest
|
||||
import mock
|
||||
import six
|
||||
|
||||
# Django
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
|
||||
# AWX
|
||||
from awx.main.models import UnifiedJobTemplate, Job, JobTemplate, WorkflowJobTemplate, Project, WorkflowJob, Schedule
|
||||
from awx.main.models.ha import InstanceGroup
|
||||
from awx.main.models import (
|
||||
UnifiedJobTemplate, Job, JobTemplate, WorkflowJobTemplate,
|
||||
Project, WorkflowJob, Schedule,
|
||||
Credential
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -55,9 +58,7 @@ class TestCreateUnifiedJob:
|
||||
job_with_links.save()
|
||||
job_with_links.credentials.add(machine_credential)
|
||||
job_with_links.credentials.add(net_credential)
|
||||
with mocker.patch('awx.main.models.unified_jobs.UnifiedJobTemplate._get_unified_job_field_names',
|
||||
return_value=['inventory', 'credential', 'limit']):
|
||||
second_job = job_with_links.copy_unified_job()
|
||||
second_job = job_with_links.copy_unified_job()
|
||||
|
||||
# Check that job data matches the original variables
|
||||
assert second_job.credential == job_with_links.credential
|
||||
@@ -65,47 +66,20 @@ class TestCreateUnifiedJob:
|
||||
assert second_job.limit == 'my_server'
|
||||
assert net_credential in second_job.credentials.all()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestIsolatedRuns:
|
||||
|
||||
def test_low_capacity_isolated_instance_selected(self):
|
||||
ig = InstanceGroup.objects.create(name='tower')
|
||||
iso_ig = InstanceGroup.objects.create(name='thepentagon', controller=ig)
|
||||
iso_ig.instances.create(hostname='iso1', capacity=50)
|
||||
i2 = iso_ig.instances.create(hostname='iso2', capacity=200)
|
||||
job = Job.objects.create(
|
||||
instance_group=iso_ig,
|
||||
celery_task_id='something',
|
||||
)
|
||||
|
||||
mock_async = mock.MagicMock()
|
||||
success_callback = mock.MagicMock()
|
||||
error_callback = mock.MagicMock()
|
||||
|
||||
class MockTaskClass:
|
||||
apply_async = mock_async
|
||||
|
||||
with mock.patch.object(job, '_get_task_class') as task_class:
|
||||
task_class.return_value = MockTaskClass
|
||||
job.start_celery_task([], error_callback, success_callback, 'thepentagon')
|
||||
mock_async.assert_called_with([job.id], [],
|
||||
link_error=error_callback,
|
||||
link=success_callback,
|
||||
queue='thepentagon',
|
||||
task_id='something')
|
||||
|
||||
i2.capacity = 20
|
||||
i2.save()
|
||||
|
||||
with mock.patch.object(job, '_get_task_class') as task_class:
|
||||
task_class.return_value = MockTaskClass
|
||||
job.start_celery_task([], error_callback, success_callback, 'thepentagon')
|
||||
mock_async.assert_called_with([job.id], [],
|
||||
link_error=error_callback,
|
||||
link=success_callback,
|
||||
queue='thepentagon',
|
||||
task_id='something')
|
||||
def test_job_relaunch_modifed_jt(self, jt_linked):
|
||||
# Replace all credentials with a new one of same type
|
||||
new_creds = []
|
||||
for cred in jt_linked.credentials.all():
|
||||
new_creds.append(Credential.objects.create(
|
||||
name=six.text_type(cred.name) + six.text_type('_new'),
|
||||
credential_type=cred.credential_type,
|
||||
inputs=cred.inputs
|
||||
))
|
||||
job = jt_linked.create_unified_job()
|
||||
jt_linked.credentials.clear()
|
||||
jt_linked.credentials.add(*new_creds)
|
||||
relaunched_job = job.copy_unified_job()
|
||||
assert set(relaunched_job.credentials.all()) == set(new_creds)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -178,3 +152,50 @@ def test_event_processing_not_finished():
|
||||
def test_event_model_undefined():
|
||||
wj = WorkflowJob.objects.create(name='foobar', status='finished')
|
||||
assert wj.event_processing_finished
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestTaskImpact:
|
||||
@pytest.fixture
|
||||
def job_host_limit(self, job_template, inventory):
|
||||
def r(hosts, forks):
|
||||
for i in range(hosts):
|
||||
inventory.hosts.create(name='foo' + str(i))
|
||||
job = Job.objects.create(
|
||||
name='fake-job',
|
||||
launch_type='workflow',
|
||||
job_template=job_template,
|
||||
inventory=inventory,
|
||||
forks=forks
|
||||
)
|
||||
return job
|
||||
return r
|
||||
|
||||
def test_limit_task_impact(self, job_host_limit):
|
||||
job = job_host_limit(5, 2)
|
||||
assert job.task_impact == 2 + 1  # forks are the constraint
|
||||
|
||||
def test_host_task_impact(self, job_host_limit):
|
||||
job = job_host_limit(3, 5)
|
||||
assert job.task_impact == 3 + 1  # hosts are the constraint
|
||||
|
||||
def test_shard_task_impact(self, slice_job_factory):
|
||||
# factory creates one host per slice
|
||||
workflow_job = slice_job_factory(3, jt_kwargs={'forks': 50}, spawn=True)
|
||||
# arrange the jobs by their number
|
||||
jobs = [None for i in range(3)]
|
||||
for node in workflow_job.workflow_nodes.all():
|
||||
jobs[node.job.job_slice_number - 1] = node.job
|
||||
# Even distribution - all jobs run on 1 host
|
||||
assert [
|
||||
len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
|
||||
for i in range(3)
|
||||
] == [1, 1, 1]
|
||||
assert [job.task_impact for job in jobs] == [2, 2, 2] # plus one base task impact
|
||||
# Uneven distribution - first job takes the extra host
|
||||
jobs[0].inventory.hosts.create(name='remainder_foo')
|
||||
assert [
|
||||
len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
|
||||
for i in range(3)
|
||||
] == [2, 1, 1]
|
||||
assert [job.task_impact for job in jobs] == [3, 2, 2]
|
||||
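# The assertions above are consistent with task_impact being computed as
# min(hosts targeted by the job, forks) + 1, where the extra 1 is the base
# cost of the job itself (assumed formula, inferred from these tests only).
def expected_task_impact(host_count, forks):
    return min(host_count, forks) + 1

assert expected_task_impact(5, 2) == 3  # forks are the constraint
assert expected_task_impact(3, 5) == 4  # hosts are the constraint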
|
||||
@@ -7,6 +7,7 @@ from awx.main.models.workflow import WorkflowJob, WorkflowJobNode, WorkflowJobTe
|
||||
from awx.main.models.jobs import JobTemplate, Job
|
||||
from awx.main.models.projects import ProjectUpdate
|
||||
from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
# Django
|
||||
from django.test import TransactionTestCase
|
||||
@@ -196,9 +197,15 @@ class TestWorkflowJobTemplate:
|
||||
assert test_view.is_valid_relation(node_assoc, nodes[1]) == {'Error': 'Multiple parent relationship not allowed.'}
|
||||
# test mutex validation
|
||||
test_view.relationship = 'failure_nodes'
|
||||
node_assoc_1 = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt)
|
||||
assert (test_view.is_valid_relation(nodes[2], node_assoc_1) ==
|
||||
{'Error': 'Cannot associate failure_nodes when always_nodes have been associated.'})
|
||||
|
||||
def test_always_success_failure_creation(self, wfjt, admin, get):
|
||||
wfjt_node = wfjt.workflow_job_template_nodes.all()[1]
|
||||
node = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt)
|
||||
wfjt_node.always_nodes.add(node)
|
||||
assert len(node.get_parent_nodes()) == 1
|
||||
url = reverse('api:workflow_job_template_node_list') + str(wfjt_node.id) + '/'
|
||||
resp = get(url, admin)
|
||||
assert node.id in resp.data['always_nodes']
|
||||
|
||||
def test_wfjt_unique_together_with_org(self, organization):
|
||||
wfjt1 = WorkflowJobTemplate(name='foo', organization=organization)
|
||||
|
||||
@@ -1,19 +1,10 @@
|
||||
import pytest
|
||||
import mock
|
||||
import json
|
||||
from datetime import timedelta, datetime
|
||||
|
||||
from django.core.cache import cache
|
||||
from django.utils.timezone import now as tz_now
|
||||
from datetime import timedelta
|
||||
|
||||
from awx.main.scheduler import TaskManager
|
||||
from awx.main.utils import encrypt_field
|
||||
from awx.main.models import (
|
||||
Job,
|
||||
Instance,
|
||||
WorkflowJob,
|
||||
)
|
||||
from awx.main.models.notifications import JobNotificationMixin
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -245,140 +236,3 @@ def test_shared_dependencies_launch(default_instance_group, job_template_factory
|
||||
iu = [x for x in ii.inventory_updates.all()]
|
||||
assert len(pu) == 1
|
||||
assert len(iu) == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_cleanup_interval(mock_cache):
|
||||
with mock.patch.multiple('awx.main.scheduler.task_manager.cache', get=mock_cache.get, set=mock_cache.set):
|
||||
assert mock_cache.get('last_celery_task_cleanup') is None
|
||||
|
||||
TaskManager().cleanup_inconsistent_celery_tasks()
|
||||
last_cleanup = mock_cache.get('last_celery_task_cleanup')
|
||||
assert isinstance(last_cleanup, datetime)
|
||||
|
||||
TaskManager().cleanup_inconsistent_celery_tasks()
|
||||
assert cache.get('last_celery_task_cleanup') == last_cleanup
|
||||
|
||||
|
||||
class TestReaper():
|
||||
@pytest.fixture
|
||||
def all_jobs(self, mocker):
|
||||
now = tz_now()
|
||||
|
||||
Instance.objects.create(hostname='host1', capacity=100)
|
||||
Instance.objects.create(hostname='host2', capacity=100)
|
||||
Instance.objects.create(hostname='host3_split', capacity=100)
|
||||
Instance.objects.create(hostname='host4_offline', capacity=0)
|
||||
|
||||
j1 = Job.objects.create(status='pending', execution_node='host1')
|
||||
j2 = Job.objects.create(status='waiting', celery_task_id='considered_j2')
|
||||
j3 = Job.objects.create(status='waiting', celery_task_id='considered_j3')
|
||||
j3.modified = now - timedelta(seconds=60)
|
||||
j3.save(update_fields=['modified'])
|
||||
j4 = Job.objects.create(status='running', celery_task_id='considered_j4', execution_node='host1')
|
||||
j5 = Job.objects.create(status='waiting', celery_task_id='reapable_j5')
|
||||
j5.modified = now - timedelta(seconds=60)
|
||||
j5.save(update_fields=['modified'])
|
||||
j6 = Job.objects.create(status='waiting', celery_task_id='considered_j6')
|
||||
j6.modified = now - timedelta(seconds=60)
|
||||
j6.save(update_fields=['modified'])
|
||||
j7 = Job.objects.create(status='running', celery_task_id='considered_j7', execution_node='host2')
|
||||
j8 = Job.objects.create(status='running', celery_task_id='reapable_j7', execution_node='host2')
|
||||
j9 = Job.objects.create(status='waiting', celery_task_id='reapable_j8')
|
||||
j9.modified = now - timedelta(seconds=60)
|
||||
j9.save(update_fields=['modified'])
|
||||
j10 = Job.objects.create(status='running', celery_task_id='host3_j10', execution_node='host3_split')
|
||||
|
||||
j11 = Job.objects.create(status='running', celery_task_id='host4_j11', execution_node='host4_offline')
|
||||
|
||||
j12 = WorkflowJob.objects.create(status='running', celery_task_id='workflow_job', execution_node='host1')
|
||||
|
||||
js = [j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12]
|
||||
for j in js:
|
||||
j.save = mocker.Mock(wraps=j.save)
|
||||
j.websocket_emit_status = mocker.Mock()
|
||||
return js
|
||||
|
||||
@pytest.fixture
|
||||
def considered_jobs(self, all_jobs):
|
||||
return all_jobs[2:7] + [all_jobs[10]]
|
||||
|
||||
@pytest.fixture
|
||||
def running_tasks(self, all_jobs):
|
||||
return {
|
||||
'host1': [all_jobs[3]],
|
||||
'host2': [all_jobs[7], all_jobs[8]],
|
||||
'host3_split': [all_jobs[9]],
|
||||
'host4_offline': [all_jobs[10]],
|
||||
}
|
||||
|
||||
@pytest.fixture
|
||||
def waiting_tasks(self, all_jobs):
|
||||
return [all_jobs[2], all_jobs[4], all_jobs[5], all_jobs[8]]
|
||||
|
||||
@pytest.fixture
|
||||
def reapable_jobs(self, all_jobs):
|
||||
return [all_jobs[4], all_jobs[7], all_jobs[10]]
|
||||
|
||||
@pytest.fixture
|
||||
def unconsidered_jobs(self, all_jobs):
|
||||
return all_jobs[0:1] + all_jobs[5:7]
|
||||
|
||||
@pytest.fixture
|
||||
def active_tasks(self):
|
||||
return ([], {
|
||||
'host1': ['considered_j2', 'considered_j3', 'considered_j4',],
|
||||
'host2': ['considered_j6', 'considered_j7'],
|
||||
})
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch.object(JobNotificationMixin, 'send_notification_templates')
|
||||
@mock.patch.object(TaskManager, 'get_active_tasks', lambda self: ([], []))
|
||||
def test_cleanup_inconsistent_task(self, notify, active_tasks, considered_jobs, reapable_jobs, running_tasks, waiting_tasks, mocker, settings):
|
||||
settings.AWX_INCONSISTENT_TASK_INTERVAL = 0
|
||||
tm = TaskManager()
|
||||
|
||||
tm.get_running_tasks = mocker.Mock(return_value=(running_tasks, waiting_tasks))
|
||||
tm.get_active_tasks = mocker.Mock(return_value=active_tasks)
|
||||
|
||||
tm.cleanup_inconsistent_celery_tasks()
|
||||
|
||||
for j in considered_jobs:
|
||||
if j not in reapable_jobs:
|
||||
j.save.assert_not_called()
|
||||
|
||||
assert notify.call_count == 4
|
||||
notify.assert_has_calls([mock.call('failed') for j in reapable_jobs], any_order=True)
|
||||
|
||||
for j in reapable_jobs:
|
||||
j.websocket_emit_status.assert_called_once_with('failed')
|
||||
assert j.status == 'failed'
|
||||
assert j.job_explanation == (
|
||||
'Task was marked as running in Tower but was not present in the job queue, so it has been marked as failed.'
|
||||
)
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_get_running_tasks(self, all_jobs):
|
||||
tm = TaskManager()
|
||||
|
||||
# Ensure the query grabs the expected jobs
|
||||
execution_nodes_jobs, waiting_jobs = tm.get_running_tasks()
|
||||
assert 'host1' in execution_nodes_jobs
|
||||
assert 'host2' in execution_nodes_jobs
|
||||
assert 'host3_split' in execution_nodes_jobs
|
||||
|
||||
assert all_jobs[3] in execution_nodes_jobs['host1']
|
||||
|
||||
assert all_jobs[6] in execution_nodes_jobs['host2']
|
||||
assert all_jobs[7] in execution_nodes_jobs['host2']
|
||||
|
||||
assert all_jobs[9] in execution_nodes_jobs['host3_split']
|
||||
|
||||
assert all_jobs[10] in execution_nodes_jobs['host4_offline']
|
||||
|
||||
assert all_jobs[11] not in execution_nodes_jobs['host1']
|
||||
|
||||
assert all_jobs[2] in waiting_jobs
|
||||
assert all_jobs[4] in waiting_jobs
|
||||
assert all_jobs[5] in waiting_jobs
|
||||
assert all_jobs[8] in waiting_jobs
|
||||
|
||||
awx/main/tests/functional/test_dispatch.py (new file, 387 lines)
@@ -0,0 +1,387 @@
|
||||
import datetime
|
||||
import multiprocessing
|
||||
import random
|
||||
import signal
|
||||
import time
|
||||
|
||||
from django.utils.timezone import now as tz_now
|
||||
import pytest
|
||||
|
||||
from awx.main.models import Job, WorkflowJob, Instance
|
||||
from awx.main.dispatch import reaper
|
||||
from awx.main.dispatch.pool import PoolWorker, WorkerPool, AutoscalePool
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch.worker import BaseWorker, TaskWorker
|
||||
|
||||
|
||||
@task()
|
||||
def add(a, b):
|
||||
return a + b
|
||||
|
||||
|
||||
class BaseTask(object):
|
||||
|
||||
def add(self, a, b):
|
||||
return add(a, b)
|
||||
|
||||
|
||||
@task()
|
||||
class Adder(BaseTask):
|
||||
def run(self, a, b):
|
||||
return super(Adder, self).add(a, b)
|
||||
|
||||
|
||||
@task(queue='hard-math')
|
||||
def multiply(a, b):
|
||||
return a * b
|
||||
|
||||
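# Illustrative note on the helpers above: @task() registers a plain function
# or class with the dispatcher, and apply_async() only builds the message and
# resolves the target queue rather than executing the task, e.g. (as the
# TestTaskPublisher tests below assert):
#   message, queue = add.apply_async([2, 2])       # queue == 'awx_private_queue'
#   message, queue = multiply.apply_async([2, 2])  # queue == 'hard-math'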
|
||||
class SimpleWorker(BaseWorker):
|
||||
|
||||
def perform_work(self, body, *args):
|
||||
pass
|
||||
|
||||
|
||||
class ResultWriter(BaseWorker):
|
||||
|
||||
def perform_work(self, body, result_queue):
|
||||
result_queue.put(body + '!!!')
|
||||
|
||||
|
||||
class SlowResultWriter(BaseWorker):
|
||||
|
||||
def perform_work(self, body, result_queue):
|
||||
time.sleep(3)
|
||||
super(SlowResultWriter, self).perform_work(body, result_queue)
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("disable_database_settings")
|
||||
class TestPoolWorker:
|
||||
|
||||
def setup_method(self, test_method):
|
||||
self.worker = PoolWorker(1000, self.tick, tuple())
|
||||
|
||||
def tick(self):
|
||||
self.worker.finished.put(self.worker.queue.get()['uuid'])
|
||||
time.sleep(.5)
|
||||
|
||||
def test_qsize(self):
|
||||
assert self.worker.qsize == 0
|
||||
for i in range(3):
|
||||
self.worker.put({'task': 'abc123'})
|
||||
assert self.worker.qsize == 3
|
||||
|
||||
def test_put(self):
|
||||
assert len(self.worker.managed_tasks) == 0
|
||||
assert self.worker.messages_finished == 0
|
||||
self.worker.put({'task': 'abc123'})
|
||||
|
||||
assert len(self.worker.managed_tasks) == 1
|
||||
assert self.worker.messages_sent == 1
|
||||
|
||||
def test_managed_tasks(self):
|
||||
self.worker.put({'task': 'abc123'})
|
||||
self.worker.calculate_managed_tasks()
|
||||
assert len(self.worker.managed_tasks) == 1
|
||||
|
||||
self.tick()
|
||||
self.worker.calculate_managed_tasks()
|
||||
assert len(self.worker.managed_tasks) == 0
|
||||
|
||||
def test_current_task(self):
|
||||
self.worker.put({'task': 'abc123'})
|
||||
assert self.worker.current_task['task'] == 'abc123'
|
||||
|
||||
def test_quit(self):
|
||||
self.worker.quit()
|
||||
assert self.worker.queue.get() == 'QUIT'
|
||||
|
||||
def test_idle_busy(self):
|
||||
assert self.worker.idle is True
|
||||
assert self.worker.busy is False
|
||||
self.worker.put({'task': 'abc123'})
|
||||
assert self.worker.busy is True
|
||||
assert self.worker.idle is False
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestWorkerPool:
|
||||
|
||||
def setup_method(self, test_method):
|
||||
self.pool = WorkerPool(min_workers=3)
|
||||
|
||||
def teardown_method(self, test_method):
|
||||
self.pool.stop(signal.SIGTERM)
|
||||
|
||||
def test_worker(self):
|
||||
self.pool.init_workers(SimpleWorker().work_loop)
|
||||
assert len(self.pool) == 3
|
||||
for worker in self.pool.workers:
|
||||
assert worker.messages_sent == 0
|
||||
assert worker.alive is True
|
||||
|
||||
def test_single_task(self):
|
||||
self.pool.init_workers(SimpleWorker().work_loop)
|
||||
self.pool.write(0, 'xyz')
|
||||
assert self.pool.workers[0].messages_sent == 1 # worker at index 0 handled one task
|
||||
assert self.pool.workers[1].messages_sent == 0
|
||||
assert self.pool.workers[2].messages_sent == 0
|
||||
|
||||
def test_queue_preference(self):
|
||||
self.pool.init_workers(SimpleWorker().work_loop)
|
||||
self.pool.write(2, 'xyz')
|
||||
assert self.pool.workers[0].messages_sent == 0
|
||||
assert self.pool.workers[1].messages_sent == 0
|
||||
assert self.pool.workers[2].messages_sent == 1 # worker at index 2 handled one task
|
||||
|
||||
def test_worker_processing(self):
|
||||
result_queue = multiprocessing.Queue()
|
||||
self.pool.init_workers(ResultWriter().work_loop, result_queue)
|
||||
for i in range(10):
|
||||
self.pool.write(
|
||||
random.choice(range(len(self.pool))),
|
||||
'Hello, Worker {}'.format(i)
|
||||
)
|
||||
all_messages = [result_queue.get(timeout=1) for i in range(10)]
|
||||
all_messages.sort()
|
||||
assert all_messages == [
|
||||
'Hello, Worker {}!!!'.format(i)
|
||||
for i in range(10)
|
||||
]
|
||||
|
||||
total_handled = sum([worker.messages_sent for worker in self.pool.workers])
|
||||
assert total_handled == 10
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestAutoScaling:
|
||||
|
||||
def setup_method(self, test_method):
|
||||
self.pool = AutoscalePool(min_workers=2, max_workers=10)
|
||||
|
||||
def teardown_method(self, test_method):
|
||||
self.pool.stop(signal.SIGTERM)
|
||||
|
||||
def test_scale_up(self):
|
||||
result_queue = multiprocessing.Queue()
|
||||
self.pool.init_workers(SlowResultWriter().work_loop, result_queue)
|
||||
|
||||
# start with two workers, write an event to each worker and make it busy
|
||||
assert len(self.pool) == 2
|
||||
for i, w in enumerate(self.pool.workers):
|
||||
w.put('Hello, Worker {}'.format(0))
|
||||
assert len(self.pool) == 2
|
||||
|
||||
# wait for the subprocesses to start working on their tasks and be marked busy
|
||||
time.sleep(1)
|
||||
assert self.pool.should_grow
|
||||
|
||||
# write a third message, expect a new worker to spawn because all
|
||||
# workers are busy
|
||||
self.pool.write(0, 'Hello, Worker {}'.format(2))
|
||||
assert len(self.pool) == 3
|
||||
|
||||
def test_scale_down(self):
|
||||
self.pool.init_workers(ResultWriter().work_loop, multiprocessing.Queue())
|
||||
|
||||
# start with two workers, and scale up to 10 workers
|
||||
assert len(self.pool) == 2
|
||||
for i in range(8):
|
||||
self.pool.up()
|
||||
assert len(self.pool) == 10
|
||||
|
||||
# cleanup should scale back down to the 2 minimum workers
|
||||
self.pool.cleanup()
|
||||
assert len(self.pool) == 2
|
||||
|
||||
def test_max_scale_up(self):
|
||||
self.pool.init_workers(ResultWriter().work_loop, multiprocessing.Queue())
|
||||
|
||||
assert len(self.pool) == 2
|
||||
for i in range(25):
|
||||
self.pool.up()
|
||||
assert self.pool.max_workers == 10
|
||||
assert self.pool.full is True
|
||||
assert len(self.pool) == 10
|
||||
|
||||
def test_equal_worker_distribution(self):
|
||||
# if all workers are busy, spawn new workers *before* adding messages
|
||||
# to an existing queue
|
||||
self.pool.init_workers(SlowResultWriter().work_loop, multiprocessing.Queue)
|
||||
|
||||
# start with two workers, write an event to each worker and make it busy
|
||||
assert len(self.pool) == 2
|
||||
for i in range(10):
|
||||
self.pool.write(0, 'Hello, World!')
|
||||
assert len(self.pool) == 10
|
||||
for w in self.pool.workers:
|
||||
assert w.busy
|
||||
assert len(w.managed_tasks) == 1
|
||||
|
||||
# the queue is full at 10, the _next_ write should put the message into
|
||||
# a worker's backlog
|
||||
assert len(self.pool) == 10
|
||||
for w in self.pool.workers:
|
||||
assert w.messages_sent == 1
|
||||
self.pool.write(0, 'Hello, World!')
|
||||
assert len(self.pool) == 10
|
||||
assert self.pool.workers[0].messages_sent == 2
|
||||
|
||||
def test_lost_worker_autoscale(self):
|
||||
# if a worker exits, it should be replaced automatically up to min_workers
|
||||
self.pool.init_workers(ResultWriter().work_loop, multiprocessing.Queue())
|
||||
|
||||
# start with two workers, kill one of them
|
||||
assert len(self.pool) == 2
|
||||
assert not self.pool.should_grow
|
||||
alive_pid = self.pool.workers[1].pid
|
||||
self.pool.workers[0].process.terminate()
|
||||
time.sleep(1) # wait a moment for sigterm
|
||||
|
||||
# clean up the dead worker
|
||||
self.pool.cleanup()
|
||||
assert len(self.pool) == 1
|
||||
assert self.pool.workers[0].pid == alive_pid
|
||||
|
||||
# the next queue write should replace the lost worker
|
||||
self.pool.write(0, 'Hello, Worker')
|
||||
assert len(self.pool) == 2
|
||||
|
||||
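# Illustrative summary of the autoscaling behavior exercised above (inferred
# from these tests): the pool starts at min_workers, grows on write() when
# every existing worker is busy (capped at max_workers), and cleanup() culls
# idle or dead workers back toward the minimum.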
|
||||
@pytest.mark.usefixtures("disable_database_settings")
|
||||
class TestTaskDispatcher:
|
||||
|
||||
@property
|
||||
def tm(self):
|
||||
return TaskWorker()
|
||||
|
||||
def test_function_dispatch(self):
|
||||
result = self.tm.perform_work({
|
||||
'task': 'awx.main.tests.functional.test_dispatch.add',
|
||||
'args': [2, 2]
|
||||
})
|
||||
assert result == 4
|
||||
|
||||
def test_method_dispatch(self):
|
||||
result = self.tm.perform_work({
|
||||
'task': 'awx.main.tests.functional.test_dispatch.Adder',
|
||||
'args': [2, 2]
|
||||
})
|
||||
assert result == 4
|
||||
|
||||
|
||||
class TestTaskPublisher:
|
||||
|
||||
def test_function_callable(self):
|
||||
assert add(2, 2) == 4
|
||||
|
||||
def test_method_callable(self):
|
||||
assert Adder().run(2, 2) == 4
|
||||
|
||||
def test_function_apply_async(self):
|
||||
message, queue = add.apply_async([2, 2])
|
||||
assert message['args'] == [2, 2]
|
||||
assert message['kwargs'] == {}
|
||||
assert message['task'] == 'awx.main.tests.functional.test_dispatch.add'
|
||||
assert queue == 'awx_private_queue'
|
||||
|
||||
def test_method_apply_async(self):
|
||||
message, queue = Adder.apply_async([2, 2])
|
||||
assert message['args'] == [2, 2]
|
||||
assert message['kwargs'] == {}
|
||||
assert message['task'] == 'awx.main.tests.functional.test_dispatch.Adder'
|
||||
assert queue == 'awx_private_queue'
|
||||
|
||||
def test_apply_with_queue(self):
|
||||
message, queue = add.apply_async([2, 2], queue='abc123')
|
||||
assert queue == 'abc123'
|
||||
|
||||
def test_queue_defined_in_task_decorator(self):
|
||||
message, queue = multiply.apply_async([2, 2])
|
||||
assert queue == 'hard-math'
|
||||
|
||||
def test_queue_overridden_from_task_decorator(self):
|
||||
message, queue = multiply.apply_async([2, 2], queue='not-so-hard')
|
||||
assert queue == 'not-so-hard'
|
||||
|
||||
def test_apply_with_callable_queuename(self):
|
||||
message, queue = add.apply_async([2, 2], queue=lambda: 'called')
|
||||
assert queue == 'called'
|
||||
|
||||
|
||||
yesterday = tz_now() - datetime.timedelta(days=1)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestJobReaper(object):
|
||||
|
||||
@pytest.mark.parametrize('status, execution_node, controller_node, modified, fail', [
|
||||
('running', '', '', None, False), # running, not assigned to the instance
|
||||
('running', 'awx', '', None, True), # running, has the instance as its execution_node
|
||||
('running', '', 'awx', None, True), # running, has the instance as its controller_node
|
||||
('waiting', '', '', None, False), # waiting, not assigned to the instance
|
||||
('waiting', 'awx', '', None, False), # waiting, was edited less than a minute ago
|
||||
('waiting', '', 'awx', None, False), # waiting, was edited less than a minute ago
|
||||
('waiting', 'awx', '', yesterday, True), # waiting, assigned to the execution_node, stale
|
||||
('waiting', '', 'awx', yesterday, True), # waiting, assigned to the controller_node, stale
|
||||
])
|
||||
def test_should_reap(self, status, fail, execution_node, controller_node, modified):
|
||||
i = Instance(hostname='awx')
|
||||
i.save()
|
||||
j = Job(
|
||||
status=status,
|
||||
execution_node=execution_node,
|
||||
controller_node=controller_node,
|
||||
start_args='SENSITIVE',
|
||||
)
|
||||
j.save()
|
||||
if modified:
|
||||
# we have to edit the modification time _without_ calling save()
|
||||
# (because .save() overwrites it to _now_)
|
||||
Job.objects.filter(id=j.id).update(modified=modified)
|
||||
reaper.reap(i)
|
||||
job = Job.objects.first()
|
||||
if fail:
|
||||
assert job.status == 'failed'
|
||||
assert 'marked as failed' in job.job_explanation
|
||||
assert job.start_args == ''
|
||||
else:
|
||||
assert job.status == status
|
||||
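# Illustrative restatement of the reap rule encoded in the parametrization
# above (inferred from these cases only): reaper.reap(instance) fails a job
# that is 'running' with the instance as its execution_node or controller_node,
# or 'waiting' on the instance with a stale modified timestamp; reaped jobs
# also have start_args scrubbed.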
|
||||
@pytest.mark.parametrize('excluded_uuids, fail', [
|
||||
(['abc123'], False),
|
||||
([], True),
|
||||
])
|
||||
def test_do_not_reap_excluded_uuids(self, excluded_uuids, fail):
|
||||
i = Instance(hostname='awx')
|
||||
i.save()
|
||||
j = Job(
|
||||
status='running',
|
||||
execution_node='awx',
|
||||
controller_node='',
|
||||
start_args='SENSITIVE',
|
||||
celery_task_id='abc123',
|
||||
)
|
||||
j.save()
|
||||
|
||||
# if the UUID is excluded, don't reap it
|
||||
reaper.reap(i, excluded_uuids=excluded_uuids)
|
||||
job = Job.objects.first()
|
||||
if fail:
|
||||
assert job.status == 'failed'
|
||||
assert 'marked as failed' in job.job_explanation
|
||||
assert job.start_args == ''
|
||||
else:
|
||||
assert job.status == 'running'
|
||||
|
||||
def test_workflow_does_not_reap(self):
|
||||
i = Instance(hostname='awx')
|
||||
i.save()
|
||||
j = WorkflowJob(
|
||||
status='running',
|
||||
execution_node='awx'
|
||||
)
|
||||
j.save()
|
||||
reaper.reap(i)
|
||||
|
||||
assert WorkflowJob.objects.first().status == 'running'
|
||||
@@ -3,6 +3,11 @@ import pytest
|
||||
from awx.main.models.inventory import Inventory
|
||||
from awx.main.models.credential import Credential
|
||||
from awx.main.models.jobs import JobTemplate, Job
|
||||
from awx.main.access import (
|
||||
UnifiedJobAccess,
|
||||
WorkflowJobAccess, WorkflowJobNodeAccess,
|
||||
JobAccess
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -43,6 +48,31 @@ def test_inventory_use_access(inventory, user):
|
||||
assert common_user.can_access(Inventory, 'use', inventory)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_slice_job(slice_job_factory, rando):
|
||||
workflow_job = slice_job_factory(2, jt_kwargs={'created_by': rando}, spawn=True)
|
||||
workflow_job.job_template.execute_role.members.add(rando)
|
||||
|
||||
# Abilities of user with execute_role for slice workflow job container
|
||||
assert WorkflowJobAccess(rando).can_start(workflow_job) # relaunch allowed
|
||||
for access_cls in (UnifiedJobAccess, WorkflowJobAccess):
|
||||
access = access_cls(rando)
|
||||
assert access.can_read(workflow_job)
|
||||
assert workflow_job in access.get_queryset()
|
||||
|
||||
# Abilities of user with execute_role for all the slice of the job
|
||||
for node in workflow_job.workflow_nodes.all():
|
||||
access = WorkflowJobNodeAccess(rando)
|
||||
assert access.can_read(node)
|
||||
assert node in access.get_queryset()
|
||||
job = node.job
|
||||
assert JobAccess(rando).can_start(job) # relaunch allowed
|
||||
for access_cls in (UnifiedJobAccess, JobAccess):
|
||||
access = access_cls(rando)
|
||||
assert access.can_read(job)
|
||||
assert job in access.get_queryset()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestJobRelaunchAccess:
|
||||
@pytest.fixture
|
||||
|
||||
@@ -15,24 +15,6 @@ from awx.main.models.organization import Organization
|
||||
from awx.main.models.schedules import Schedule
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def jt_linked(job_template_factory, credential, net_credential, vault_credential):
|
||||
'''
|
||||
A job template with a reasonably complete set of related objects to
|
||||
test RBAC and other functionality affected by related objects
|
||||
'''
|
||||
objects = job_template_factory(
|
||||
'testJT', organization='org1', project='proj1', inventory='inventory1',
|
||||
credential='cred1')
|
||||
jt = objects.job_template
|
||||
jt.credentials.add(vault_credential)
|
||||
jt.save()
|
||||
# Add AWS cloud credential and network credential
|
||||
jt.credentials.add(credential)
|
||||
jt.credentials.add(net_credential)
|
||||
return jt
|
||||
|
||||
|
||||
@mock.patch.object(BaseAccess, 'check_license', return_value=None)
|
||||
@pytest.mark.django_db
|
||||
def test_job_template_access_superuser(check_license, user, deploy_jobtemplate):
|
||||
|
||||
@@ -34,8 +34,17 @@ class TestOAuth2Application:
|
||||
client_type='confidential', authorization_grant_type='password', organization=organization
|
||||
)
|
||||
assert access.can_read(app) is can_access
|
||||
|
||||
|
||||
|
||||
def test_admin_only_can_read(self, user, organization):
|
||||
user = user('org-admin', False)
|
||||
organization.admin_role.members.add(user)
|
||||
access = OAuth2ApplicationAccess(user)
|
||||
app = Application.objects.create(
|
||||
name='test app for {}'.format(user.username), user=user,
|
||||
client_type='confidential', authorization_grant_type='password', organization=organization
|
||||
)
|
||||
assert access.can_read(app) is True
|
||||
|
||||
def test_app_activity_stream(self, org_admin, alice, organization):
|
||||
app = Application.objects.create(
|
||||
name='test app for {}'.format(org_admin.username), user=org_admin,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.