Mirror of https://github.com/ansible/awx.git (synced 2026-02-07 04:28:23 -03:30)

Compare commits: 404 commits
@@ -83,12 +83,10 @@ If you're not using Docker for Mac, or Docker for Windows, you may need, or choo
(host)$ pip install docker-compose
```

#### Node and npm
#### Frontend Development

The AWX UI requires the following:
See [the ui development documentation](awx/ui/README.md).

- Node 8.x LTS
- NPM 6.x LTS

### Build the environment

INSTALL.md (21 changed lines)
@@ -27,7 +27,7 @@ This document provides a guide for installing AWX.
- [Start the build](#start-the-build-1)
- [Accessing AWX](#accessing-awx-1)
- [SSL Termination](#ssl-termination)
- [Docker or Docker Compose](#docker-or-docker-compose)
- [Docker Compose](#docker-compose)
- [Prerequisites](#prerequisites-3)
- [Pre-build steps](#pre-build-steps-2)
- [Deploying to a remote host](#deploying-to-a-remote-host)

@@ -73,7 +73,7 @@ The system that runs the AWX service will need to satisfy the following requirem
- At least 2 cpu cores
- At least 20GB of space
- Running Docker, Openshift, or Kubernetes
- If you choose to use an external PostgreSQL database, please note that the minimum version is 9.4.
- If you choose to use an external PostgreSQL database, please note that the minimum version is 9.6+.

### AWX Tunables

@@ -81,14 +81,14 @@ The system that runs the AWX service will need to satisfy the following requirem

### Choose a deployment platform

We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster, docker-compose or a standalone Docker daemon. The remainder of this document will walk you through the process of building the images, and deploying them to either platform.
We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster or docker-compose. The remainder of this document will walk you through the process of building the images, and deploying them to either platform.

The [installer](./installer) directory contains an [inventory](./installer/inventory) file, and a playbook, [install.yml](./installer/install.yml). You'll begin by setting variables in the inventory file according to the platform you wish to use, and then you'll start the image build and deployment process by running the playbook.

In the sections below, you'll find deployment details and instructions for each platform:
- [OpenShift](#openshift)
- [Kubernetes](#kubernetes)
- [Docker or Docker Compose](#docker-or-docker-compose).
- [Docker Compose](#docker-compose).

### Official vs Building Images

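As a concrete illustration of the inventory-then-playbook flow described in the hunk above, the installer is driven with `ansible-playbook`. A minimal sketch (the file paths come from the text itself; the `-e` override is just one optional way to set a documented variable):

```bash
cd installer
# Edit `inventory` for your target platform, then build and deploy:
ansible-playbook -i inventory install.yml
# Individual inventory variables can also be overridden at run time:
ansible-playbook -i inventory -e docker_compose_dir=/tmp/awxcompose install.yml
```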
@@ -391,14 +391,13 @@ If your provider is able to allocate an IP Address from the Ingress controller t
Unlike Openshift's `Route` the Kubernetes `Ingress` doesn't yet handle SSL termination. As such the default configuration will only expose AWX through HTTP on port 80. You are responsible for configuring SSL support until support is added (either to Kubernetes or AWX itself).


## Docker or Docker-Compose
## Docker-Compose

### Prerequisites

- [Docker](https://docs.docker.com/engine/installation/) on the host where AWX will be deployed. After installing Docker, the Docker service must be started (depending on your OS, you may have to add the local user that uses Docker to the ``docker`` group, refer to the documentation for details)
- [docker-py](https://github.com/docker/docker-py) Python module.

If you're installing using Docker Compose, you'll need [Docker Compose](https://docs.docker.com/compose/install/).
- [Docker Compose](https://docs.docker.com/compose/install/).

### Pre-build steps

@@ -441,13 +440,13 @@ Before starting the build process, review the [inventory](./installer/inventory)

> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. Defaults to *80*.

*use_docker_compose*
*ssl_certificate*

> Switch to ``true`` to use Docker Compose instead of the standalone Docker install.
> Optionally, provide the path to a file that contains a certificate and its private key.

*docker_compose_dir*

When using docker-compose, the `docker-compose.yml` file will be created there (default `/var/lib/awx`).
When using docker-compose, the `docker-compose.yml` file will be created there (default `/tmp/awxcompose`).

*ca_trust_dir*

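For concreteness, the variables documented above might be set in the installer inventory like this. A minimal sketch with illustrative values (only the `/tmp/awxcompose` default comes from the text itself):

```
# installer/inventory (excerpt; values illustrative)
host_port=80
# Optional: file containing both the certificate and its private key.
ssl_certificate=/etc/awx/awx-web.pem
# Directory where the generated docker-compose.yml is written.
docker_compose_dir=/tmp/awxcompose
```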
@@ -527,7 +526,7 @@ After the playbook run completes, Docker will report up to 5 running containers.
```bash
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                                NAMES
e240ed8209cd        awx_task:1.0.0.8    "/tini -- /bin/sh ..."   2 minutes ago       Up About a minute   8052/tcp                             awx_task
1cfd02601690        awx_web:1.0.0.8     "/tini -- /bin/sh ..."   2 minutes ago       Up About a minute   0.0.0.0:80->8052/tcp                 awx_web
1cfd02601690        awx_web:1.0.0.8     "/tini -- /bin/sh ..."   2 minutes ago       Up About a minute   0.0.0.0:443->8052/tcp                awx_web
55a552142bcd        memcached:alpine    "docker-entrypoint..."   2 minutes ago       Up 2 minutes        11211/tcp                            memcached
84011c072aad        rabbitmq:3          "docker-entrypoint..."   2 minutes ago       Up 2 minutes        4369/tcp, 5671-5672/tcp, 25672/tcp   rabbitmq
97e196120ab3        postgres:9.6        "docker-entrypoint..."   2 minutes ago       Up 2 minutes        5432/tcp                             postgres

Makefile (26 changed lines)
@@ -134,8 +134,8 @@ virtualenv_ansible_py3:
    if [ ! -d "$(VENV_BASE)" ]; then \
        mkdir $(VENV_BASE); \
    fi; \
    if [ ! -d "$(VENV_BASE)/ansible3" ]; then \
        python3 -m venv --system-site-packages $(VENV_BASE)/ansible3; \
    if [ ! -d "$(VENV_BASE)/ansible" ]; then \
        $(PYTHON) -m venv --system-site-packages $(VENV_BASE)/ansible; \
    fi; \
    fi

@@ -145,7 +145,8 @@ virtualenv_awx:
        mkdir $(VENV_BASE); \
    fi; \
    if [ ! -d "$(VENV_BASE)/awx" ]; then \
        $(PYTHON) -m venv $(VENV_BASE)/awx; \
        $(PYTHON) -m venv --system-site-packages $(VENV_BASE)/awx; \
        $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --ignore-installed docutils==0.14; \
    fi; \
    fi

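Outside of `make`, the virtualenv step above can be reproduced by hand. A minimal sketch, assuming `$(VENV_BASE)` resolves to `/var/lib/awx/venv` (that value is an assumption, not taken from this diff):

```bash
# Create the awx virtualenv with access to system site packages,
# then pin docutils exactly as the Makefile recipe does.
python3 -m venv --system-site-packages /var/lib/awx/venv/awx
/var/lib/awx/venv/awx/bin/pip install --ignore-installed docutils==0.14
```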
@@ -158,22 +159,18 @@ requirements_ansible: virtualenv_ansible
    $(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt

requirements_ansible_py3: virtualenv_ansible_py3
    cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible3/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin
    $(VENV_BASE)/ansible3/bin/pip3 install ansible # can't inherit from system ansible, it's py2
    $(VENV_BASE)/ansible3/bin/pip3 uninstall --yes -r requirements/requirements_ansible_uninstall.txt
    if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \
    else \
        cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
    fi
    $(VENV_BASE)/ansible/bin/pip3 uninstall --yes -r requirements/requirements_ansible_uninstall.txt

requirements_ansible_dev:
    if [ "$(VENV_BASE)" ]; then \
        $(VENV_BASE)/ansible/bin/pip install pytest mock; \
    fi

requirements_isolated:
    if [ ! -d "$(VENV_BASE)/awx" ]; then \
        $(PYTHON) -m venv $(VENV_BASE)/awx; \
    fi;
    echo "include-system-site-packages = true" >> $(VENV_BASE)/awx/lib/python$(PYTHON_VERSION)/pyvenv.cfg
    $(VENV_BASE)/awx/bin/pip install -r requirements/requirements_isolated.txt

# Install third-party requirements needed for AWX's environment.
requirements_awx: virtualenv_awx
    if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \

@@ -189,7 +186,7 @@ requirements_awx_dev:

requirements: requirements_ansible requirements_awx

requirements_dev: requirements requirements_ansible_py3 requirements_awx_dev requirements_ansible_dev
requirements_dev: requirements requirements_awx_dev requirements_ansible_dev

requirements_test: requirements

@@ -569,7 +566,6 @@ docker-isolated:
    TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml create
    docker start tools_awx_1
    docker start tools_isolated_1
    echo "__version__ = '`git describe --long | cut -d - -f 1-1`'" | docker exec -i tools_isolated_1 /bin/bash -c "cat > /venv/awx/lib/python$(PYTHON_VERSION)/site-packages/awx.py"
    CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up

# Docker Compose Development environment

@@ -101,6 +101,10 @@ class DeprecatedCredentialField(serializers.IntegerField):
        super(DeprecatedCredentialField, self).__init__(**kwargs)

    def to_internal_value(self, pk):
        try:
            pk = int(pk)
        except ValueError:
            self.fail('invalid')
        try:
            Credential.objects.get(pk=pk)
        except ObjectDoesNotExist:

@@ -33,9 +33,17 @@ from rest_framework.negotiation import DefaultContentNegotiation

# AWX
from awx.api.filters import FieldLookupBackend
from awx.main.models import * # noqa
from awx.main.models import (
    UnifiedJob, UnifiedJobTemplate, User, Role
)
from awx.main.access import access_registry
from awx.main.utils import * # noqa
from awx.main.utils import (
    camelcase_to_underscore,
    get_search_fields,
    getattrd,
    get_object_or_400,
    decrypt_field
)
from awx.main.utils.db import get_all_field_names
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
from awx.api.versioning import URLPathVersioning, get_request_version

@@ -92,6 +100,8 @@ class LoggedLoginView(auth_views.LoginView):

            return ret
        else:
            if 'username' in self.request.POST:
                logger.warn(smart_text(u"Login failed for user {} from {}".format(self.request.POST.get('username'),request.META.get('REMOTE_ADDR', None))))
            ret.status_code = 401
            return ret

@@ -238,12 +238,10 @@ class JobTypeMetadata(Metadata):
        res = super(JobTypeMetadata, self).get_field_info(field)

        if field.field_name == 'job_type':
            index = 0
            for choice in res['choices']:
                if choice[0] == 'scan':
                    res['choices'].pop(index)
                    break
                index += 1
            res['choices'] = [
                choice for choice in res['choices']
                if choice[0] != 'scan'
            ]
        return res


@@ -253,7 +251,7 @@ class SublistAttachDetatchMetadata(Metadata):
        actions = super(SublistAttachDetatchMetadata, self).determine_actions(request, view)
        method = 'POST'
        if method in actions:
            for field in actions[method]:
            for field in list(actions[method].keys()):
                if field == 'id':
                    continue
                actions[method].pop(field)

@@ -9,8 +9,8 @@ from rest_framework.exceptions import MethodNotAllowed, PermissionDenied
from rest_framework import permissions

# AWX
from awx.main.access import * # noqa
from awx.main.models import * # noqa
from awx.main.access import check_user_access
from awx.main.models import Inventory, UnifiedJob
from awx.main.utils import get_object_or_400

logger = logging.getLogger('awx.api.permissions')

@@ -45,8 +45,22 @@ from awx.main.constants import (
    ACTIVE_STATES,
    CENSOR_VALUE,
)
from awx.main.models import * # noqa
from awx.main.models.base import NEW_JOB_TYPE_CHOICES
from awx.main.models import (
    ActivityStream, AdHocCommand, AdHocCommandEvent, Credential,
    CredentialType, CustomInventoryScript, Fact, Group, Host, Instance,
    InstanceGroup, Inventory, InventorySource, InventoryUpdate,
    InventoryUpdateEvent, Job, JobEvent, JobHostSummary, JobLaunchConfig,
    JobTemplate, Label, Notification, NotificationTemplate, OAuth2AccessToken,
    OAuth2Application, Organization, Project, ProjectUpdate,
    ProjectUpdateEvent, RefreshToken, Role, Schedule, SystemJob,
    SystemJobEvent, SystemJobTemplate, Team, UnifiedJob, UnifiedJobTemplate,
    UserSessionMembership, V1Credential, WorkflowJob, WorkflowJobNode,
    WorkflowJobTemplate, WorkflowJobTemplateNode, StdoutMaxBytesExceeded
)
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
from awx.main.models.rbac import (
    get_roles_on_resource, role_summary_fields_generator
)
from awx.main.fields import ImplicitRoleField, JSONBField
from awx.main.utils import (
    get_type_for_model, get_model_for_type, timestamp_apiformat,

@@ -879,7 +893,7 @@ class UserSerializer(BaseSerializer):
        model = User
        fields = ('*', '-name', '-description', '-modified',
                  'username', 'first_name', 'last_name',
                  'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'external_account')
                  'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'last_login', 'external_account')

    def to_representation(self, obj):  # TODO: Remove in 3.3
        ret = super(UserSerializer, self).to_representation(obj)

@@ -1215,7 +1229,7 @@ class OrganizationSerializer(BaseSerializer):

    class Meta:
        model = Organization
        fields = ('*', 'custom_virtualenv',)
        fields = ('*', 'max_hosts', 'custom_virtualenv',)

    def get_related(self, obj):
        res = super(OrganizationSerializer, self).get_related(obj)

@@ -1251,6 +1265,20 @@ class OrganizationSerializer(BaseSerializer):
            summary_dict['related_field_counts'] = counts_dict[obj.id]
        return summary_dict

    def validate(self, attrs):
        obj = self.instance
        view = self.context['view']

        obj_limit = getattr(obj, 'max_hosts', None)
        api_limit = attrs.get('max_hosts')

        if not view.request.user.is_superuser:
            if api_limit is not None and api_limit != obj_limit:
                # Only allow superusers to edit the max_hosts field
                raise serializers.ValidationError(_('Cannot change max_hosts.'))

        return super(OrganizationSerializer, self).validate(attrs)


class ProjectOptionsSerializer(BaseSerializer):

@@ -2188,8 +2216,8 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri

    class Meta:
        model = InventoryUpdate
        fields = ('*', 'inventory', 'inventory_source', 'license_error', 'source_project_update',
                  'custom_virtualenv', '-controller_node',)
        fields = ('*', 'inventory', 'inventory_source', 'license_error', 'org_host_limit_error',
                  'source_project_update', 'custom_virtualenv', '-controller_node',)

    def get_related(self, obj):
        res = super(InventoryUpdateSerializer, self).get_related(obj)

@@ -3993,7 +4021,7 @@ class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer)
    Influence the api browser sample data to not include workflow_job_template
    when editing a WorkflowNode.

    Note: I was not able to accomplish this trough the use of extra_kwargs.
    Note: I was not able to accomplish this through the use of extra_kwargs.
    Maybe something to do with workflow_job_template being a relational field?
    '''
    def build_relational_field(self, field_name, relation_info):

@@ -4024,7 +4052,8 @@ class JobHostSummarySerializer(BaseSerializer):
    class Meta:
        model = JobHostSummary
        fields = ('*', '-name', '-description', 'job', 'host', 'host_name', 'changed',
                  'dark', 'failures', 'ok', 'processed', 'skipped', 'failed')
                  'dark', 'failures', 'ok', 'processed', 'skipped', 'failed',
                  'ignored', 'rescued')

    def get_related(self, obj):
        res = super(JobHostSummarySerializer, self).get_related(obj)

@@ -4481,8 +4510,8 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
        fields = ('ask_inventory_on_launch', 'can_start_without_user_input', 'defaults', 'extra_vars',
                  'inventory', 'survey_enabled', 'variables_needed_to_start',
                  'node_templates_missing', 'node_prompts_rejected',
                  'workflow_job_template_data', 'survey_enabled')
        read_only_fields = ('ask_inventory_on_launch',)
                  'workflow_job_template_data', 'survey_enabled', 'ask_variables_on_launch')
        read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')

    def get_survey_enabled(self, obj):
        if obj:

@@ -4589,12 +4618,15 @@ class NotificationTemplateSerializer(BaseSerializer):
            object_actual = self.context['view'].get_object()
        else:
            object_actual = None
        for field in notification_class.init_parameters:
        for field, params in notification_class.init_parameters.items():
            if field not in attrs['notification_configuration']:
                missing_fields.append(field)
                continue
                if 'default' in params:
                    attrs['notification_configuration'][field] = params['default']
                else:
                    missing_fields.append(field)
                    continue
            field_val = attrs['notification_configuration'][field]
            field_type = notification_class.init_parameters[field]['type']
            field_type = params['type']
            expected_types = self.type_map[field_type]
            if not type(field_val) in expected_types:
                incorrect_type_fields.append((field, field_type))

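The rewritten validation loop above relies on `notification_class.init_parameters` mapping each field name to a metadata dict with a `type` key and an optional `default`. A hypothetical illustration of that shape (the field names and the `label` key are invented for the example, not taken from this diff):

```python
init_parameters = {
    'url': {'label': 'Target URL', 'type': 'string'},
    'disable_ssl_verification': {'label': 'Disable SSL verification', 'type': 'bool', 'default': False},
}
```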
@@ -5041,6 +5073,17 @@ class ActivityStreamSerializer(BaseSerializer):
                        if fval is not None:
                            job_template_item[field] = fval
                    summary_fields['job_template'].append(job_template_item)
                if fk == 'workflow_job_template_node':
                    summary_fields['workflow_job_template'] = []
                    workflow_job_template_item = {}
                    workflow_job_template_fields = SUMMARIZABLE_FK_FIELDS['workflow_job_template']
                    workflow_job_template = getattr(thisItem, 'workflow_job_template', None)
                    if workflow_job_template is not None:
                        for field in workflow_job_template_fields:
                            fval = getattr(workflow_job_template, field, None)
                            if fval is not None:
                                workflow_job_template_item[field] = fval
                        summary_fields['workflow_job_template'].append(workflow_job_template_item)
                if fk == 'schedule':
                    unified_job_template = getattr(thisItem, 'unified_job_template', None)
                    if unified_job_template is not None:

@@ -13,6 +13,17 @@ from rest_framework.views import APIView
from rest_framework_swagger import renderers


class SuperUserSchemaGenerator(SchemaGenerator):

    def has_view_permissions(self, path, method, view):
        #
        # Generate the Swagger schema as if you were a superuser and
        # permissions didn't matter; this short-circuits the schema path
        # discovery to include _all_ potential paths in the API.
        #
        return True


class AutoSchema(DRFAuthSchema):

    def get_link(self, path, method, base_url):

@@ -59,7 +70,7 @@ class SwaggerSchemaView(APIView):
    ]

    def get(self, request):
        generator = SchemaGenerator(
        generator = SuperUserSchemaGenerator(
            title='Ansible Tower API',
            patterns=None,
            urlconf=None

@@ -56,6 +56,7 @@ For example:

```bash
curl -X POST \
    -H "Content-Type: application/x-www-form-urlencoded" \
    -d "grant_type=password&username=<username>&password=<password>&scope=read" \
    -u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \

@@ -85,6 +86,7 @@ format:
The `/api/o/token/` endpoint is used for refreshing access token:
```bash
curl -X POST \
    -H "Content-Type: application/x-www-form-urlencoded" \
    -d "grant_type=refresh_token&refresh_token=AL0NK9TTpv0qp54dGbC4VUZtsZ9r8z" \
    -u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
    http://localhost:8013/api/o/token/ -i

@@ -114,6 +116,7 @@ Revoking is done by POSTing to `/api/o/revoke_token/` with the token to revoke a

```bash
curl -X POST -d "token=rQONsve372fQwuc2pn76k3IHDCYpi7" \
    -H "Content-Type: application/x-www-form-urlencoded" \
    -u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
    http://localhost:8013/api/o/revoke_token/ -i
```

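Once issued, an access token is presented as a standard OAuth 2.0 bearer token on subsequent API requests. A minimal usage sketch (the token value is illustrative, and `/api/v2/me/` is assumed here as a convenient authenticated endpoint):

```bash
curl -H "Authorization: Bearer rQONsve372fQwuc2pn76k3IHDCYpi7" \
     http://localhost:8013/api/v2/me/
```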
File diff suppressed because it is too large
@@ -1,4 +1,5 @@
# Python
import os
import logging
import urllib.parse as urlparse
from collections import OrderedDict

@@ -8,7 +9,10 @@ from django.core.validators import URLValidator
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.fields import * # noqa
from rest_framework.fields import ( # noqa
    BooleanField, CharField, ChoiceField, DictField, EmailField, IntegerField,
    ListField, NullBooleanField
)

logger = logging.getLogger('awx.conf.fields')

@@ -93,6 +97,26 @@ class StringListBooleanField(ListField):
        self.fail('type_error', input_type=type(data))


class StringListPathField(StringListField):

    default_error_messages = {
        'type_error': _('Expected list of strings but got {input_type} instead.'),
        'path_error': _('{path} is not a valid path choice.'),
    }

    def to_internal_value(self, paths):
        if isinstance(paths, (list, tuple)):
            for p in paths:
                if not isinstance(p, str):
                    self.fail('type_error', input_type=type(p))
                if not os.path.exists(p):
                    self.fail('path_error', path=p)

            return super(StringListPathField, self).to_internal_value(sorted({os.path.normpath(path) for path in paths}))
        else:
            self.fail('type_error', input_type=type(paths))


class URLField(CharField):

    def __init__(self, **kwargs):

@@ -1,7 +1,7 @@
import pytest

from rest_framework.fields import ValidationError
from awx.conf.fields import StringListBooleanField, ListTuplesField
from awx.conf.fields import StringListBooleanField, StringListPathField, ListTuplesField


class TestStringListBooleanField():

@@ -84,3 +84,49 @@ class TestListTuplesField():
        assert e.value.detail[0] == "Expected a list of tuples of max length 2 " \
            "but got {} instead.".format(t)


class TestStringListPathField():

    FIELD_VALUES = [
        ((".", "..", "/"), [".", "..", "/"]),
        (("/home",), ["/home"]),
        (("///home///",), ["/home"]),
        (("/home/././././",), ["/home"]),
        (("/home", "/home", "/home/"), ["/home"]),
        (["/home/", "/home/", "/opt/", "/opt/", "/var/"], ["/home", "/opt", "/var"])
    ]

    FIELD_VALUES_INVALID_TYPE = [
        1.245,
        {"a": "b"},
        ("/home"),
    ]

    FIELD_VALUES_INVALID_PATH = [
        "",
        "~/",
        "home",
        "/invalid_path",
        "/home/invalid_path",
    ]

    @pytest.mark.parametrize("value_in, value_known", FIELD_VALUES)
    def test_to_internal_value_valid(self, value_in, value_known):
        field = StringListPathField()
        v = field.to_internal_value(value_in)
        assert v == value_known

    @pytest.mark.parametrize("value", FIELD_VALUES_INVALID_TYPE)
    def test_to_internal_value_invalid_type(self, value):
        field = StringListPathField()
        with pytest.raises(ValidationError) as e:
            field.to_internal_value(value)
        assert e.value.detail[0] == "Expected list of strings but got {} instead.".format(type(value))

    @pytest.mark.parametrize("value", FIELD_VALUES_INVALID_PATH)
    def test_to_internal_value_invalid_path(self, value):
        field = StringListPathField()
        with pytest.raises(ValidationError) as e:
            field.to_internal_value([value])
        assert e.value.detail[0] == "{} is not a valid path choice.".format(value)

@@ -1,108 +1,9 @@
#!/usr/bin/env python

# Python
import difflib
import glob
import os
import shutil

# AWX
from awx.conf.registry import settings_registry

__all__ = ['comment_assignments', 'conf_to_dict']


def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix='.old'):
    if isinstance(patterns, str):
        patterns = [patterns]
    diffs = []
    for pattern in patterns:
        for filename in sorted(glob.glob(pattern)):
            filename = os.path.abspath(os.path.normpath(filename))
            if backup_suffix:
                backup_filename = '{}{}'.format(filename, backup_suffix)
            else:
                backup_filename = None
            diff = comment_assignments_in_file(filename, assignment_names, dry_run, backup_filename)
            if diff:
                diffs.append(diff)
    return diffs


def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup_filename=None):
    from redbaron import RedBaron, indent

    if isinstance(assignment_names, str):
        assignment_names = [assignment_names]
    else:
        assignment_names = assignment_names[:]
    current_file_data = open(filename).read()

    for assignment_name in assignment_names[:]:
        if assignment_name in current_file_data:
            continue
        if assignment_name in assignment_names:
            assignment_names.remove(assignment_name)
    if not assignment_names:
        return ''

    replace_lines = {}
    rb = RedBaron(current_file_data)
    for assignment_node in rb.find_all('assignment'):
        for assignment_name in assignment_names:

            # Only target direct assignments to a variable.
            name_node = assignment_node.find('name', value=assignment_name)
            if not name_node:
                continue
            if assignment_node.target.type != 'name':
                continue

            # Build a new node that comments out the existing assignment node.
            indentation = '{}# '.format(assignment_node.indentation or '')
            new_node_content = indent(assignment_node.dumps(), indentation)
            new_node_lines = new_node_content.splitlines()
            # Add a pass statement in case the assignment block is the only
            # child in a parent code block to prevent a syntax error.
            if assignment_node.indentation:
                new_node_lines[0] = new_node_lines[0].replace(indentation, '{}pass  # '.format(assignment_node.indentation or ''), 1)
            new_node_lines[0] = '{0}This setting is now configured via the Tower API.\n{1}'.format(indentation, new_node_lines[0])

            # Store new node lines in dictionary to be replaced in file.
            start_lineno = assignment_node.absolute_bounding_box.top_left.line
            end_lineno = assignment_node.absolute_bounding_box.bottom_right.line
            for n, new_node_line in enumerate(new_node_lines):
                new_lineno = start_lineno + n
                assert new_lineno <= end_lineno
                replace_lines[new_lineno] = new_node_line

    if not replace_lines:
        return ''

    # Iterate through all lines in current file and replace as needed.
    current_file_lines = current_file_data.splitlines()
    new_file_lines = []
    for n, line in enumerate(current_file_lines):
        new_file_lines.append(replace_lines.get(n + 1, line))
    new_file_data = '\n'.join(new_file_lines)
    new_file_lines = new_file_data.splitlines()

    # If changed, syntax check and write the new file; return a diff of changes.
    diff_lines = []
    if new_file_data != current_file_data:
        compile(new_file_data, filename, 'exec')
        if backup_filename:
            from_file = backup_filename
        else:
            from_file = '{}.old'.format(filename)
        to_file = filename
        diff_lines = list(difflib.unified_diff(current_file_lines, new_file_lines, fromfile=from_file, tofile=to_file, lineterm=''))
        if not dry_run:
            if backup_filename:
                shutil.copy2(filename, backup_filename)
            with open(filename, 'w') as fileobj:
                fileobj.write(new_file_data)
    return '\n'.join(diff_lines)
__all__ = ['conf_to_dict']


def conf_to_dict(obj):

@@ -110,10 +11,3 @@
        'category': settings_registry.get_setting_category(obj.key),
        'name': obj.key,
    }


if __name__ == '__main__':
    pattern = os.path.join(os.path.dirname(__file__), '..', 'settings', 'local_*.py')
    diffs = comment_assignments(pattern, ['AUTH_LDAP_ORGANIZATION_MAP'])
    for diff in diffs:
        print(diff)

@@ -17,10 +17,15 @@ from rest_framework import serializers
from rest_framework import status

# Tower
from awx.api.generics import * # noqa
from awx.api.generics import (
    APIView,
    GenericAPIView,
    ListAPIView,
    RetrieveUpdateDestroyAPIView,
)
from awx.api.permissions import IsSuperUser
from awx.api.versioning import reverse, get_request_version
from awx.main.utils import * # noqa
from awx.main.utils import camelcase_to_underscore
from awx.main.utils.handlers import AWXProxyHandler, LoggingConnectivityException
from awx.main.tasks import handle_setting_changes
from awx.conf.license import get_licensed_features

@@ -1,25 +0,0 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)

# AWX Display Callback
from . import cleanup  # noqa (registers control persistent cleanup)
from . import display  # noqa (wraps ansible.display.Display methods)
from .module import AWXDefaultCallbackModule, AWXMinimalCallbackModule

__all__ = ['AWXDefaultCallbackModule', 'AWXMinimalCallbackModule']

@@ -1,85 +0,0 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
# (GPLv3 license header identical to the deleted file above)

from __future__ import (absolute_import, division, print_function)

# Python
import atexit
import glob
import os
import pwd

# PSUtil
try:
    import psutil
except ImportError:
    raise ImportError('psutil is missing; {}bin/pip install psutil'.format(
        os.environ['VIRTUAL_ENV']
    ))

__all__ = []

main_pid = os.getpid()


@atexit.register
def terminate_ssh_control_masters():
    # Only run this cleanup from the main process.
    if os.getpid() != main_pid:
        return
    # Determine if control persist is being used and if any open sockets
    # exist after running the playbook.
    cp_path = os.environ.get('ANSIBLE_SSH_CONTROL_PATH', '')
    if not cp_path:
        return
    cp_dir = os.path.dirname(cp_path)
    if not os.path.exists(cp_dir):
        return
    cp_pattern = os.path.join(cp_dir, 'ansible-ssh-*')
    cp_files = glob.glob(cp_pattern)
    if not cp_files:
        return

    # Attempt to find any running control master processes.
    username = pwd.getpwuid(os.getuid())[0]
    ssh_cm_procs = []
    for proc in psutil.process_iter():
        try:
            pname = proc.name()
            pcmdline = proc.cmdline()
            pusername = proc.username()
        except psutil.NoSuchProcess:
            continue
        if pusername != username:
            continue
        if pname != 'ssh':
            continue
        for cp_file in cp_files:
            if pcmdline and cp_file in pcmdline[0]:
                ssh_cm_procs.append(proc)
                break

    # Terminate then kill control master processes. Workaround older
    # version of psutil that may not have wait_procs implemented.
    for proc in ssh_cm_procs:
        try:
            proc.terminate()
        except psutil.NoSuchProcess:
            continue
    procs_gone, procs_alive = psutil.wait_procs(ssh_cm_procs, timeout=5)
    for proc in procs_alive:
        proc.kill()

@@ -1,98 +0,0 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
# (GPLv3 license header identical to the deleted file above)

from __future__ import (absolute_import, division, print_function)

# Python
import functools
import sys
import uuid

# Ansible
from ansible.utils.display import Display

# Tower Display Callback
from .events import event_context

__all__ = []


def with_context(**context):
    global event_context

    def wrap(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            with event_context.set_local(**context):
                return f(*args, **kwargs)
        return wrapper
    return wrap


for attr in dir(Display):
    if attr.startswith('_') or 'cow' in attr or 'prompt' in attr:
        continue
    if attr in ('display', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv', 'verbose'):
        continue
    if not callable(getattr(Display, attr)):
        continue
    setattr(Display, attr, with_context(**{attr: True})(getattr(Display, attr)))


def with_verbosity(f):
    global event_context

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        host = args[2] if len(args) >= 3 else kwargs.get('host', None)
        caplevel = args[3] if len(args) >= 4 else kwargs.get('caplevel', 2)
        context = dict(verbose=True, verbosity=(caplevel + 1))
        if host is not None:
            context['remote_addr'] = host
        with event_context.set_local(**context):
            return f(*args, **kwargs)
    return wrapper


Display.verbose = with_verbosity(Display.verbose)


def display_with_context(f):

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        log_only = args[5] if len(args) >= 6 else kwargs.get('log_only', False)
        stderr = args[3] if len(args) >= 4 else kwargs.get('stderr', False)
        event_uuid = event_context.get().get('uuid', None)
        with event_context.display_lock:
            # If writing only to a log file or there is already an event UUID
            # set (from a callback module method), skip dumping the event data.
            if log_only or event_uuid:
                return f(*args, **kwargs)
            try:
                fileobj = sys.stderr if stderr else sys.stdout
                event_context.add_local(uuid=str(uuid.uuid4()))
                event_context.dump_begin(fileobj)
                return f(*args, **kwargs)
            finally:
                event_context.dump_end(fileobj)
                event_context.remove_local(uuid=None)

    return wrapper


Display.display = display_with_context(Display.display)

@@ -1,186 +0,0 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
# (GPLv3 license header identical to the deleted file above)

from __future__ import (absolute_import, division, print_function)

# Python
import base64
import contextlib
import datetime
import json
import multiprocessing
import os
import stat
import threading
import uuid

try:
    import memcache
except ImportError:
    raise ImportError('python-memcached is missing; {}bin/pip install python-memcached'.format(
        os.environ['VIRTUAL_ENV']
    ))

__all__ = ['event_context']


class IsolatedFileWrite:
    '''
    Stand-in class that will write partial event data to a file as a
    replacement for memcache when a job is running on an isolated host.
    '''

    def __init__(self):
        self.private_data_dir = os.getenv('AWX_ISOLATED_DATA_DIR')

    def set(self, key, value):
        # Strip off the leading memcache key identifying characters :1:ev-
        event_uuid = key[len(':1:ev-'):]
        # Write data in a staging area and then atomic move to pickup directory
        filename = '{}-partial.json'.format(event_uuid)
        dropoff_location = os.path.join(self.private_data_dir, 'artifacts', 'job_events', filename)
        write_location = '.'.join([dropoff_location, 'tmp'])
        with os.fdopen(os.open(write_location, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR), 'w') as f:
            f.write(value)
        os.rename(write_location, dropoff_location)


class EventContext(object):
    '''
    Store global and local (per thread/process) data associated with callback
    events and other display output methods.
    '''

    def __init__(self):
        self.display_lock = multiprocessing.RLock()
        cache_actual = os.getenv('CACHE', '127.0.0.1:11211')
        if os.getenv('AWX_ISOLATED_DATA_DIR', False):
            self.cache = IsolatedFileWrite()
        else:
            self.cache = memcache.Client([cache_actual], debug=0)

    def add_local(self, **kwargs):
        if not hasattr(self, '_local'):
            self._local = threading.local()
            self._local._ctx = {}
        self._local._ctx.update(kwargs)

    def remove_local(self, **kwargs):
        if hasattr(self, '_local'):
            for key in kwargs.keys():
                self._local._ctx.pop(key, None)

    @contextlib.contextmanager
    def set_local(self, **kwargs):
        try:
            self.add_local(**kwargs)
            yield
        finally:
            self.remove_local(**kwargs)

    def get_local(self):
        return getattr(getattr(self, '_local', None), '_ctx', {})

    def add_global(self, **kwargs):
        if not hasattr(self, '_global_ctx'):
            self._global_ctx = {}
        self._global_ctx.update(kwargs)

    def remove_global(self, **kwargs):
        if hasattr(self, '_global_ctx'):
            for key in kwargs.keys():
                self._global_ctx.pop(key, None)

    @contextlib.contextmanager
    def set_global(self, **kwargs):
        try:
            self.add_global(**kwargs)
            yield
        finally:
            self.remove_global(**kwargs)

    def get_global(self):
        return getattr(self, '_global_ctx', {})

    def get(self):
        ctx = {}
        ctx.update(self.get_global())
        ctx.update(self.get_local())
        return ctx

    def get_begin_dict(self):
        event_data = self.get()
        if os.getenv('JOB_ID', ''):
            event_data['job_id'] = int(os.getenv('JOB_ID', '0'))
        if os.getenv('AD_HOC_COMMAND_ID', ''):
            event_data['ad_hoc_command_id'] = int(os.getenv('AD_HOC_COMMAND_ID', '0'))
        if os.getenv('PROJECT_UPDATE_ID', ''):
            event_data['project_update_id'] = int(os.getenv('PROJECT_UPDATE_ID', '0'))
        event_data.setdefault('pid', os.getpid())
        event_data.setdefault('uuid', str(uuid.uuid4()))
        event_data.setdefault('created', datetime.datetime.utcnow().isoformat())
        if not event_data.get('parent_uuid', None) and event_data.get('job_id', None):
            for key in ('task_uuid', 'play_uuid', 'playbook_uuid'):
                parent_uuid = event_data.get(key, None)
                if parent_uuid and parent_uuid != event_data.get('uuid', None):
                    event_data['parent_uuid'] = parent_uuid
                    break

        event = event_data.pop('event', None)
        if not event:
            event = 'verbose'
            for key in ('debug', 'verbose', 'deprecated', 'warning', 'system_warning', 'error'):
                if event_data.get(key, False):
                    event = key
                    break
        max_res = int(os.getenv("MAX_EVENT_RES", 700000))
        if event not in ('playbook_on_stats',) and "res" in event_data and len(str(event_data['res'])) > max_res:
            event_data['res'] = {}
        event_dict = dict(event=event, event_data=event_data)
        for key in list(event_data.keys()):
            if key in ('job_id', 'ad_hoc_command_id', 'project_update_id', 'uuid', 'parent_uuid', 'created',):
                event_dict[key] = event_data.pop(key)
            elif key in ('verbosity', 'pid'):
                event_dict[key] = event_data[key]
        return event_dict

    def get_end_dict(self):
        return {}

    def dump(self, fileobj, data, max_width=78, flush=False):
        b64data = base64.b64encode(json.dumps(data).encode('utf-8')).decode()
        with self.display_lock:
            # pattern corresponding to OutputEventFilter expectation
            fileobj.write(u'\x1b[K')
            for offset in range(0, len(b64data), max_width):
                chunk = b64data[offset:offset + max_width]
                escaped_chunk = u'{}\x1b[{}D'.format(chunk, len(chunk))
                fileobj.write(escaped_chunk)
            fileobj.write(u'\x1b[K')
            if flush:
                fileobj.flush()

    def dump_begin(self, fileobj):
        begin_dict = self.get_begin_dict()
        self.cache.set(":1:ev-{}".format(begin_dict['uuid']), json.dumps(begin_dict))
        self.dump(fileobj, {'uuid': begin_dict['uuid']})

    def dump_end(self, fileobj):
        self.dump(fileobj, self.get_end_dict(), flush=True)


event_context = EventContext()

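Reading `dump()` above: each event payload is base64-encoded JSON, split into chunks that are wrapped in ANSI control sequences (`\x1b[K` markers, plus a cursor-left `\x1b[<n>D` after each chunk) so a downstream filter can locate and strip them from playbook stdout. A minimal sketch of the inverse operation, for illustration only (my own code, not from the repository):

```python
import base64
import json
import re

def decode_dumped_event(stream: str) -> dict:
    # Take the text between the first pair of \x1b[K markers,
    # drop the \x1b[<n>D cursor movements after each chunk,
    # then decode the remaining base64 back into the event dict.
    body = stream.split('\x1b[K')[1]
    b64 = re.sub(r'\x1b\[\d+D', '', body)
    return json.loads(base64.b64decode(b64))
```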
@@ -1,29 +0,0 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
# (GPLv3 license header identical to the deleted file above)

from __future__ import (absolute_import, division, print_function)

# Python
import os

# Ansible
import ansible

# Because of the way Ansible loads plugins, it's not possible to import
# ansible.plugins.callback.minimal when being loaded as the minimal plugin. Ugh.
with open(os.path.join(os.path.dirname(ansible.__file__), 'plugins', 'callback', 'minimal.py')) as in_file:
    exec(in_file.read())

@@ -1,501 +0,0 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)

# Python
import codecs
import contextlib
import json
import os
import stat
import sys
import uuid
from copy import copy

# Ansible
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.plugins.callback.default import CallbackModule as DefaultCallbackModule

# AWX Display Callback
from .events import event_context
from .minimal import CallbackModule as MinimalCallbackModule

CENSORED = "the output has been hidden due to the fact that 'no_log: true' was specified for this result"  # noqa


class BaseCallbackModule(CallbackBase):
    '''
    Callback module for logging ansible/ansible-playbook events.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'

    # These events should never have an associated play.
    EVENTS_WITHOUT_PLAY = [
        'playbook_on_start',
        'playbook_on_stats',
    ]

    # These events should never have an associated task.
    EVENTS_WITHOUT_TASK = EVENTS_WITHOUT_PLAY + [
        'playbook_on_setup',
        'playbook_on_notify',
        'playbook_on_import_for_host',
        'playbook_on_not_import_for_host',
        'playbook_on_no_hosts_matched',
        'playbook_on_no_hosts_remaining',
    ]

    def __init__(self):
        super(BaseCallbackModule, self).__init__()
        self.task_uuids = set()

    @contextlib.contextmanager
    def capture_event_data(self, event, **event_data):
        event_data.setdefault('uuid', str(uuid.uuid4()))

        if event not in self.EVENTS_WITHOUT_TASK:
            task = event_data.pop('task', None)
        else:
            task = None

        if event_data.get('res'):
            if event_data['res'].get('_ansible_no_log', False):
                event_data['res'] = {'censored': CENSORED}
            if event_data['res'].get('results', []):
                event_data['res']['results'] = copy(event_data['res']['results'])
            for i, item in enumerate(event_data['res'].get('results', [])):
                if isinstance(item, dict) and item.get('_ansible_no_log', False):
                    event_data['res']['results'][i] = {'censored': CENSORED}

        with event_context.display_lock:
            try:
                event_context.add_local(event=event, **event_data)
                if task:
                    self.set_task(task, local=True)
                event_context.dump_begin(sys.stdout)
                yield
            finally:
                event_context.dump_end(sys.stdout)
                if task:
                    self.clear_task(local=True)
                event_context.remove_local(event=None, **event_data)
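capture_event_data follows the standard contextlib pattern: setup before the yield, teardown in a finally block, so the begin/end markers on stdout stay balanced even when the wrapped display call raises. A stripped-down sketch of the same shape (names invented for illustration):

import contextlib

@contextlib.contextmanager
def bracketed(context, event, **data):
    # Setup: record the event before the wrapped display code runs.
    context.add_local(event=event, **data)
    try:
        yield
    finally:
        # Teardown runs on success or exception alike, keeping the
        # dump_begin()/dump_end() pairing intact.
        context.remove_local(event=None, **data)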
    def set_playbook(self, playbook):
        # NOTE: Ansible doesn't generate a UUID for playbook_on_start so do it for them.
        self.playbook_uuid = str(uuid.uuid4())
        file_name = getattr(playbook, '_file_name', '???')
        event_context.add_global(playbook=file_name, playbook_uuid=self.playbook_uuid)
        self.clear_play()

    def set_play(self, play):
        if hasattr(play, 'hosts'):
            if isinstance(play.hosts, list):
                pattern = ','.join(play.hosts)
            else:
                pattern = play.hosts
        else:
            pattern = ''
        name = play.get_name().strip() or pattern
        event_context.add_global(play=name, play_uuid=str(play._uuid), play_pattern=pattern)
        self.clear_task()

    def clear_play(self):
        event_context.remove_global(play=None, play_uuid=None, play_pattern=None)
        self.clear_task()

    def set_task(self, task, local=False):
        # FIXME: Task is "global" unless using free strategy!
        task_ctx = dict(
            task=(task.name or task.action),
            task_uuid=str(task._uuid),
            task_action=task.action,
            task_args='',
        )
        try:
            task_ctx['task_path'] = task.get_path()
        except AttributeError:
            pass

        if C.DISPLAY_ARGS_TO_STDOUT:
            if task.no_log:
                task_ctx['task_args'] = "the output has been hidden due to the fact that 'no_log: true' was specified for this result"
            else:
                task_args = ', '.join(('%s=%s' % a for a in task.args.items()))
                task_ctx['task_args'] = task_args
        if getattr(task, '_role', None):
            task_role = task._role._role_name
        else:
            task_role = getattr(task, 'role_name', '')
        if task_role:
            task_ctx['role'] = task_role
        if local:
            event_context.add_local(**task_ctx)
        else:
            event_context.add_global(**task_ctx)

    def clear_task(self, local=False):
        task_ctx = dict(task=None, task_path=None, task_uuid=None, task_action=None, task_args=None, role=None)
        if local:
            event_context.remove_local(**task_ctx)
        else:
            event_context.remove_global(**task_ctx)

    def v2_playbook_on_start(self, playbook):
        self.set_playbook(playbook)
        event_data = dict(
            uuid=self.playbook_uuid,
        )
        with self.capture_event_data('playbook_on_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_start(playbook)

    def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
                                   encrypt=None, confirm=False, salt_size=None,
                                   salt=None, default=None):
        event_data = dict(
            varname=varname,
            private=private,
            prompt=prompt,
            encrypt=encrypt,
            confirm=confirm,
            salt_size=salt_size,
            salt=salt,
            default=default,
        )
        with self.capture_event_data('playbook_on_vars_prompt', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_vars_prompt(
                varname, private, prompt, encrypt, confirm, salt_size, salt,
                default,
            )

    def v2_playbook_on_include(self, included_file):
        event_data = dict(
            included_file=included_file._filename if included_file is not None else None,
        )
        with self.capture_event_data('playbook_on_include', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_include(included_file)

    def v2_playbook_on_play_start(self, play):
        self.set_play(play)
        if hasattr(play, 'hosts'):
            if isinstance(play.hosts, list):
                pattern = ','.join(play.hosts)
            else:
                pattern = play.hosts
        else:
            pattern = ''
        name = play.get_name().strip() or pattern
        event_data = dict(
            name=name,
            pattern=pattern,
            uuid=str(play._uuid),
        )
        with self.capture_event_data('playbook_on_play_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_play_start(play)

    def v2_playbook_on_import_for_host(self, result, imported_file):
        # NOTE: Not used by Ansible 2.x.
        with self.capture_event_data('playbook_on_import_for_host'):
            super(BaseCallbackModule, self).v2_playbook_on_import_for_host(result, imported_file)

    def v2_playbook_on_not_import_for_host(self, result, missing_file):
        # NOTE: Not used by Ansible 2.x.
        with self.capture_event_data('playbook_on_not_import_for_host'):
            super(BaseCallbackModule, self).v2_playbook_on_not_import_for_host(result, missing_file)

    def v2_playbook_on_setup(self):
        # NOTE: Not used by Ansible 2.x.
        with self.capture_event_data('playbook_on_setup'):
            super(BaseCallbackModule, self).v2_playbook_on_setup()

    def v2_playbook_on_task_start(self, task, is_conditional):
        # FIXME: Flag task path output as vv.
        task_uuid = str(task._uuid)
        if task_uuid in self.task_uuids:
            # FIXME: When this task UUID repeats, it means the play is using the
            # free strategy, so different hosts may be running different tasks
            # within a play.
            return
        self.task_uuids.add(task_uuid)
        self.set_task(task)
        event_data = dict(
            task=task,
            name=task.get_name(),
            is_conditional=is_conditional,
            uuid=task_uuid,
        )
        with self.capture_event_data('playbook_on_task_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_task_start(task, is_conditional)

    def v2_playbook_on_cleanup_task_start(self, task):
        # NOTE: Not used by Ansible 2.x.
        self.set_task(task)
        event_data = dict(
            task=task,
            name=task.get_name(),
            uuid=str(task._uuid),
            is_conditional=True,
        )
        with self.capture_event_data('playbook_on_task_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_cleanup_task_start(task)

    def v2_playbook_on_handler_task_start(self, task):
        # NOTE: Re-using playbook_on_task_start event for this v2-specific
        # event, but setting is_conditional=True, which is how v1 identified a
        # task run as a handler.
        self.set_task(task)
        event_data = dict(
            task=task,
            name=task.get_name(),
            uuid=str(task._uuid),
            is_conditional=True,
        )
        with self.capture_event_data('playbook_on_task_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_handler_task_start(task)

    def v2_playbook_on_no_hosts_matched(self):
        with self.capture_event_data('playbook_on_no_hosts_matched'):
            super(BaseCallbackModule, self).v2_playbook_on_no_hosts_matched()

    def v2_playbook_on_no_hosts_remaining(self):
        with self.capture_event_data('playbook_on_no_hosts_remaining'):
            super(BaseCallbackModule, self).v2_playbook_on_no_hosts_remaining()

    def v2_playbook_on_notify(self, handler, host):
        # NOTE: Not used by Ansible < 2.5.
        event_data = dict(
            host=host.get_name(),
            handler=handler.get_name(),
        )
        with self.capture_event_data('playbook_on_notify', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_notify(handler, host)
    '''
    ansible_stats was, retroactively, added in 2.2
    '''
    def v2_playbook_on_stats(self, stats):
        self.clear_play()
        # FIXME: Add count of plays/tasks.
        event_data = dict(
            changed=stats.changed,
            dark=stats.dark,
            failures=stats.failures,
            ok=stats.ok,
            processed=stats.processed,
            skipped=stats.skipped
        )

        # write custom set_stat artifact data to the local disk so that it can
        # be persisted by awx after the process exits
        custom_artifact_data = stats.custom.get('_run', {}) if hasattr(stats, 'custom') else {}
        if custom_artifact_data:
            # create the directory for custom stats artifacts to live in (if it doesn't exist)
            custom_artifacts_dir = os.path.join(os.getenv('AWX_PRIVATE_DATA_DIR'), 'artifacts')
            if not os.path.isdir(custom_artifacts_dir):
                os.makedirs(custom_artifacts_dir, mode=stat.S_IXUSR + stat.S_IWUSR + stat.S_IRUSR)

            custom_artifacts_path = os.path.join(custom_artifacts_dir, 'custom')
            with codecs.open(custom_artifacts_path, 'w', encoding='utf-8') as f:
                os.chmod(custom_artifacts_path, stat.S_IRUSR | stat.S_IWUSR)
                json.dump(custom_artifact_data, f)

        with self.capture_event_data('playbook_on_stats', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_stats(stats)
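The artifacts/custom file written above is picked up by AWX after the play ends. A sketch of the consuming side, assuming only the AWX_PRIVATE_DATA_DIR layout established by the code above:

import json
import os

def read_custom_stats(private_data_dir):
    # Mirrors the artifacts/custom path used by v2_playbook_on_stats().
    path = os.path.join(private_data_dir, 'artifacts', 'custom')
    if not os.path.exists(path):
        return {}
    with open(path, 'r') as f:
        return json.load(f)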
    @staticmethod
    def _get_event_loop(task):
        if hasattr(task, 'loop_with'):  # Ansible >=2.5
            return task.loop_with
        elif hasattr(task, 'loop'):  # Ansible <2.4
            return task.loop
        return None
    def v2_runner_on_ok(self, result):
        # FIXME: Display detailed results or not based on verbosity.

        # strip environment vars from the job event; it already exists on the
        # job and sensitive values are filtered there
        if result._task.action in ('setup', 'gather_facts'):
            result._result.get('ansible_facts', {}).pop('ansible_env', None)

        event_data = dict(
            host=result._host.get_name(),
            remote_addr=result._host.address,
            task=result._task,
            res=result._result,
            event_loop=self._get_event_loop(result._task),
        )
        with self.capture_event_data('runner_on_ok', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_ok(result)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        # FIXME: Add verbosity for exception/results output.
        event_data = dict(
            host=result._host.get_name(),
            remote_addr=result._host.address,
            res=result._result,
            task=result._task,
            ignore_errors=ignore_errors,
            event_loop=self._get_event_loop(result._task),
        )
        with self.capture_event_data('runner_on_failed', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_failed(result, ignore_errors)

    def v2_runner_on_skipped(self, result):
        event_data = dict(
            host=result._host.get_name(),
            remote_addr=result._host.address,
            task=result._task,
            event_loop=self._get_event_loop(result._task),
        )
        with self.capture_event_data('runner_on_skipped', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_skipped(result)

    def v2_runner_on_unreachable(self, result):
        event_data = dict(
            host=result._host.get_name(),
            remote_addr=result._host.address,
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_on_unreachable', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_unreachable(result)

    def v2_runner_on_no_hosts(self, task):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            task=task,
        )
        with self.capture_event_data('runner_on_no_hosts', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_no_hosts(task)

    def v2_runner_on_async_poll(self, result):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
            jid=result._result.get('ansible_job_id'),
        )
        with self.capture_event_data('runner_on_async_poll', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_async_poll(result)

    def v2_runner_on_async_ok(self, result):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
            jid=result._result.get('ansible_job_id'),
        )
        with self.capture_event_data('runner_on_async_ok', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_async_ok(result)

    def v2_runner_on_async_failed(self, result):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
            jid=result._result.get('ansible_job_id'),
        )
        with self.capture_event_data('runner_on_async_failed', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_async_failed(result)

    def v2_runner_on_file_diff(self, result, diff):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            diff=diff,
        )
        with self.capture_event_data('runner_on_file_diff', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_file_diff(result, diff)

    def v2_on_file_diff(self, result):
        # NOTE: Logged as runner_on_file_diff.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            diff=result._result.get('diff'),
        )
        with self.capture_event_data('runner_on_file_diff', **event_data):
            super(BaseCallbackModule, self).v2_on_file_diff(result)

    def v2_runner_item_on_ok(self, result):
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_item_on_ok', **event_data):
            super(BaseCallbackModule, self).v2_runner_item_on_ok(result)

    def v2_runner_item_on_failed(self, result):
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_item_on_failed', **event_data):
            super(BaseCallbackModule, self).v2_runner_item_on_failed(result)

    def v2_runner_item_on_skipped(self, result):
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_item_on_skipped', **event_data):
            super(BaseCallbackModule, self).v2_runner_item_on_skipped(result)

    def v2_runner_retry(self, result):
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_retry', **event_data):
            super(BaseCallbackModule, self).v2_runner_retry(result)

    def v2_runner_on_start(self, host, task):
        event_data = dict(
            host=host.get_name(),
            task=task
        )
        with self.capture_event_data('runner_on_start', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_start(host, task)


class AWXDefaultCallbackModule(BaseCallbackModule, DefaultCallbackModule):

    CALLBACK_NAME = 'awx_display'


class AWXMinimalCallbackModule(BaseCallbackModule, MinimalCallbackModule):

    CALLBACK_NAME = 'minimal'

    def v2_playbook_on_play_start(self, play):
        pass

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.set_task(task)
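The two concrete classes above lean on Python's method resolution order: BaseCallbackModule is listed first, so its v2_* hooks run and then delegate to the stock display module via super(). A minimal sketch of that cooperative-inheritance shape (invented names, illustration only):

class Recorder(object):
    def on_event(self):
        print('record first')
        super(Recorder, self).on_event()


class Display(object):
    def on_event(self):
        print('then display')


class Combined(Recorder, Display):
    # MRO is Combined -> Recorder -> Display, so recording wraps display.
    pass


Combined().on_event()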
@@ -1,353 +0,0 @@
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved

from __future__ import absolute_import

from collections import OrderedDict
import json
import os
import shutil
import sys
import tempfile
from unittest import mock

import pytest

# ansible uses `ANSIBLE_CALLBACK_PLUGINS` and `ANSIBLE_STDOUT_CALLBACK` to
# discover callback plugins; `ANSIBLE_CALLBACK_PLUGINS` is a list of paths to
# search for a plugin implementation (which should be named `CallbackModule`)
#
# this code modifies the Python path to make our
# `awx.lib.awx_display_callback` callback importable (because `awx.lib`
# itself is not a package)
#
# we use the `awx_display_callback` imports below within this file, but
# Ansible also uses them when it discovers this file in
# `ANSIBLE_CALLBACK_PLUGINS`
CALLBACK = os.path.splitext(os.path.basename(__file__))[0]
PLUGINS = os.path.dirname(__file__)
with mock.patch.dict(os.environ, {'ANSIBLE_STDOUT_CALLBACK': CALLBACK,
                                  'ANSIBLE_CALLBACK_PLUGINS': PLUGINS}):
    from ansible import __version__ as ANSIBLE_VERSION
    from ansible.cli.playbook import PlaybookCLI
    from ansible.executor.playbook_executor import PlaybookExecutor
    from ansible.inventory.manager import InventoryManager
    from ansible.parsing.dataloader import DataLoader
    from ansible.vars.manager import VariableManager

# Add awx/lib to sys.path so we can use the plugin
path = os.path.abspath(os.path.join(PLUGINS, '..', '..', 'lib'))
if path not in sys.path:
    sys.path.insert(0, path)

from awx_display_callback import AWXDefaultCallbackModule as CallbackModule  # noqa
from awx_display_callback.events import event_context  # noqa


@pytest.fixture()
def cache(request):
    class Cache(OrderedDict):
        def set(self, key, value):
            self[key] = value
    local_cache = Cache()
    patch = mock.patch.object(event_context, 'cache', local_cache)
    patch.start()
    request.addfinalizer(patch.stop)
    return local_cache


@pytest.fixture()
def executor(tmpdir_factory, request):
    playbooks = request.node.callspec.params.get('playbook')
    playbook_files = []
    for name, playbook in playbooks.items():
        filename = str(tmpdir_factory.mktemp('data').join(name))
        with open(filename, 'w') as f:
            f.write(playbook)
        playbook_files.append(filename)

    cli = PlaybookCLI(['', 'playbook.yml'])
    cli.parse()
    options = cli.parser.parse_args(['-v'])[0]
    loader = DataLoader()
    variable_manager = VariableManager(loader=loader)
    inventory = InventoryManager(loader=loader, sources='localhost,')
    variable_manager.set_inventory(inventory)

    return PlaybookExecutor(playbooks=playbook_files, inventory=inventory,
                            variable_manager=variable_manager, loader=loader,
                            options=options, passwords={})
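Outside this test harness, the same two environment variables drive callback discovery for any ansible-playbook invocation. A hedged sketch (the plugin directory path is a placeholder):

import os
import subprocess

env = dict(os.environ)
env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
env['ANSIBLE_CALLBACK_PLUGINS'] = '/path/to/awx/plugins/callback'  # placeholder
subprocess.call(['ansible-playbook', '-i', 'localhost,', 'playbook.yml'], env=env)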
@pytest.mark.parametrize('event', {'playbook_on_start',
                                   'playbook_on_play_start',
                                   'playbook_on_task_start', 'runner_on_ok',
                                   'playbook_on_stats'})
@pytest.mark.parametrize('playbook', [
    {'helloworld.yml': '''
- name: Hello World Sample
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - name: Hello Message
      debug:
        msg: "Hello World!"
'''},  # noqa
    {'results_included.yml': '''
- name: Run module which generates results list
  connection: local
  hosts: all
  gather_facts: no
  vars:
    results: ['foo', 'bar']
  tasks:
    - name: Generate results list
      debug:
        var: results
'''},  # noqa
])
def test_callback_plugin_receives_events(executor, cache, event, playbook):
    executor.run()
    assert len(cache)
    assert event in [task['event'] for task in cache.values()]


@pytest.mark.parametrize('playbook', [
    {'no_log_on_ok.yml': '''
- name: args should not be logged when task-level no_log is set
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo "SENSITIVE"
      no_log: true
'''},  # noqa
    {'no_log_on_fail.yml': '''
- name: failed args should not be logged when task-level no_log is set
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo "SENSITIVE"
      no_log: true
      failed_when: true
      ignore_errors: true
'''},  # noqa
    {'no_log_on_skip.yml': '''
- name: skipped task args should be suppressed with no_log
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo "SENSITIVE"
      no_log: true
      when: false
'''},  # noqa
    {'no_log_on_play.yml': '''
- name: args should not be logged when play-level no_log set
  connection: local
  hosts: all
  gather_facts: no
  no_log: true
  tasks:
    - shell: echo "SENSITIVE"
'''},  # noqa
    {'async_no_log.yml': '''
- name: async task args should be suppressed with no_log
  connection: local
  hosts: all
  gather_facts: no
  no_log: true
  tasks:
    - async: 10
      poll: 1
      shell: echo "SENSITIVE"
      no_log: true
'''},  # noqa
    {'with_items.yml': '''
- name: with_items tasks should be suppressed with no_log
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo {{ item }}
      no_log: true
      with_items: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
      when: item != "SENSITIVE-SKIPPED"
      failed_when: item == "SENSITIVE-FAILED"
      ignore_errors: yes
'''},  # noqa, NOTE: with_items will be deprecated in 2.9
    {'loop.yml': '''
- name: loop tasks should be suppressed with no_log
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo {{ item }}
      no_log: true
      loop: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
      when: item != "SENSITIVE-SKIPPED"
      failed_when: item == "SENSITIVE-FAILED"
      ignore_errors: yes
'''},  # noqa
])
def test_callback_plugin_no_log_filters(executor, cache, playbook):
    executor.run()
    assert len(cache)
    assert 'SENSITIVE' not in json.dumps(cache.items())


@pytest.mark.parametrize('playbook', [
    {'no_log_on_ok.yml': '''
- name: args should not be logged when no_log is set at the task or module level
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo "PUBLIC"
    - shell: echo "PRIVATE"
      no_log: true
    - uri: url=https://example.org username="PUBLIC" password="PRIVATE"
    - copy: content="PRIVATE" dest="/tmp/tmp_no_log"
'''},  # noqa
])
def test_callback_plugin_task_args_leak(executor, cache, playbook):
    executor.run()
    events = cache.values()
    assert events[0]['event'] == 'playbook_on_start'
    assert events[1]['event'] == 'playbook_on_play_start'

    # task 1
    assert events[2]['event'] == 'playbook_on_task_start'
    assert events[3]['event'] == 'runner_on_ok'

    # task 2 no_log=True
    assert events[4]['event'] == 'playbook_on_task_start'
    assert events[5]['event'] == 'runner_on_ok'
    assert 'PUBLIC' in json.dumps(cache.items())
    assert 'PRIVATE' not in json.dumps(cache.items())
    # make sure playbook was successful, so all tasks were hit
    assert not events[-1]['event_data']['failures'], 'Unexpected playbook execution failure'


@pytest.mark.parametrize('playbook', [
    {'loop_with_no_log.yml': '''
- name: playbook variable should not be overwritten when using no log
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - command: "{{ item }}"
      register: command_register
      no_log: True
      with_items:
        - "echo helloworld!"
    - debug: msg="{{ command_register.results|map(attribute='stdout')|list }}"
'''},  # noqa
])
def test_callback_plugin_censoring_does_not_overwrite(executor, cache, playbook):
    executor.run()
    events = cache.values()
    assert events[0]['event'] == 'playbook_on_start'
    assert events[1]['event'] == 'playbook_on_play_start'

    # task 1
    assert events[2]['event'] == 'playbook_on_task_start'
    # Ordering of task and item events may differ randomly
    assert set(['runner_on_ok', 'runner_item_on_ok']) == set([data['event'] for data in events[3:5]])

    # task 2 no_log=True
    assert events[5]['event'] == 'playbook_on_task_start'
    assert events[6]['event'] == 'runner_on_ok'
    assert 'helloworld!' in events[6]['event_data']['res']['msg']


@pytest.mark.parametrize('playbook', [
    {'strip_env_vars.yml': '''
- name: sensitive environment variables should be stripped from events
  connection: local
  hosts: all
  tasks:
    - shell: echo "Hello, World!"
'''},  # noqa
])
def test_callback_plugin_strips_task_environ_variables(executor, cache, playbook):
    executor.run()
    assert len(cache)
    for event in cache.values():
        assert os.environ['PATH'] not in json.dumps(event)


@pytest.mark.parametrize('playbook', [
    {'custom_set_stat.yml': '''
- name: custom set_stat calls should persist to the local disk so awx can save them
  connection: local
  hosts: all
  tasks:
    - set_stats:
        data:
          foo: "bar"
'''},  # noqa
])
def test_callback_plugin_saves_custom_stats(executor, cache, playbook):
    try:
        private_data_dir = tempfile.mkdtemp()
        with mock.patch.dict(os.environ, {'AWX_PRIVATE_DATA_DIR': private_data_dir}):
            executor.run()
            artifacts_path = os.path.join(private_data_dir, 'artifacts', 'custom')
            with open(artifacts_path, 'r') as f:
                assert json.load(f) == {'foo': 'bar'}
    finally:
        shutil.rmtree(os.path.join(private_data_dir))


@pytest.mark.parametrize('playbook', [
    {'handle_playbook_on_notify.yml': '''
- name: handle playbook_on_notify events properly
  connection: local
  hosts: all
  handlers:
    - name: my_handler
      debug: msg="My Handler"
  tasks:
    - debug: msg="My Task"
      changed_when: true
      notify:
        - my_handler
'''},  # noqa
])
@pytest.mark.skipif(ANSIBLE_VERSION < '2.5', reason="v2_playbook_on_notify doesn't work before ansible 2.5")
def test_callback_plugin_records_notify_events(executor, cache, playbook):
    executor.run()
    assert len(cache)
    notify_events = [x[1] for x in cache.items() if x[1]['event'] == 'playbook_on_notify']
    assert len(notify_events) == 1
    assert notify_events[0]['event_data']['handler'] == 'my_handler'
    assert notify_events[0]['event_data']['host'] == 'localhost'
    assert notify_events[0]['event_data']['task'] == 'debug'


@pytest.mark.parametrize('playbook', [
    {'no_log_module_with_var.yml': '''
- name: ensure that module-level secrets are redacted
  connection: local
  hosts: all
  vars:
    - pw: SENSITIVE
  tasks:
    - uri:
        url: https://example.org
        user: john-jacob-jingleheimer-schmidt
        password: "{{ pw }}"
'''},  # noqa
])
def test_module_level_no_log(executor, cache, playbook):
    # https://github.com/ansible/tower/issues/1101
    # It's possible for `no_log=True` to be defined at the _module_ level,
    # e.g., for the URI module password parameter
    # This test ensures that we properly redact those
    executor.run()
    assert len(cache)
    assert 'john-jacob-jingleheimer-schmidt' in json.dumps(cache.items())
    assert 'SENSITIVE' not in json.dumps(cache.items())
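A recurring trick in these tests is the executor fixture reading playbook content out of the current test's own parametrization via request.node.callspec. A reduced sketch of that fixture/parametrize handshake (invented test, illustration only):

import pytest

@pytest.fixture()
def greeting(request):
    # Pull the 'word' parameter the running test was parametrized with.
    return request.node.callspec.params.get('word').upper()


@pytest.mark.parametrize('word', ['hello', 'goodbye'])
def test_greeting(greeting, word):
    assert greeting == word.upper()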
@@ -28,7 +28,17 @@ from awx.main.utils import (
    to_python_boolean,
    get_licenser,
)
from awx.main.models import *  # noqa
from awx.main.models import (
    ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialType,
    CustomInventoryScript, Group, Host, Instance, InstanceGroup, Inventory,
    InventorySource, InventoryUpdate, InventoryUpdateEvent, Job, JobEvent,
    JobHostSummary, JobLaunchConfig, JobTemplate, Label, Notification,
    NotificationTemplate, Organization, Project, ProjectUpdate,
    ProjectUpdateEvent, Role, Schedule, SystemJob, SystemJobEvent,
    SystemJobTemplate, Team, UnifiedJob, UnifiedJobTemplate, WorkflowJob,
    WorkflowJobNode, WorkflowJobTemplate, WorkflowJobTemplateNode,
    ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR
)
from awx.main.models.mixins import ResourceMixin

from awx.conf.license import LicenseForbids, feature_enabled
@@ -320,6 +330,36 @@ class BaseAccess(object):
        elif "features" not in validation_info:
            raise LicenseForbids(_("Features not found in active license."))

    def check_org_host_limit(self, data, add_host_name=None):
        validation_info = get_licenser().validate()
        if validation_info.get('license_type', 'UNLICENSED') == 'open':
            return

        inventory = get_object_from_data('inventory', Inventory, data)
        if inventory is None:  # In this case a missing inventory error is launched
            return              # further down the line, so just ignore it.

        org = inventory.organization
        if org is None or org.max_hosts == 0:
            return

        active_count = Host.objects.org_active_count(org.id)
        if active_count > org.max_hosts:
            raise PermissionDenied(
                _("You have already reached the maximum number of %s hosts"
                  " allowed for your organization. Contact your System Administrator"
                  " for assistance." % org.max_hosts)
            )

        if add_host_name:
            host_exists = Host.objects.filter(inventory__organization=org.id, name=add_host_name).exists()
            if not host_exists and active_count == org.max_hosts:
                raise PermissionDenied(
                    _("You have already reached the maximum number of %s hosts"
                      " allowed for your organization. Contact your System Administrator"
                      " for assistance." % org.max_hosts)
                )
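For orientation, a sketch of how a caller hands data to check_org_host_limit (the user and pk values here are hypothetical; `data` mirrors a request payload, which the get_object_from_data helper normalizes into an Inventory object):

access = HostAccess(user)                       # hypothetical user
access.check_org_host_limit({'inventory': 42},  # inventory pk from a POST body
                            add_host_name='db01.example.org')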
    def get_user_capabilities(self, obj, method_list=[], parent_obj=None, capabilities_cache={}):
        if obj is None:
            return {}
@@ -350,7 +390,7 @@ class BaseAccess(object):
                user_capabilities[display_method] = self.user.is_superuser
                continue
            elif display_method == 'copy' and isinstance(obj, Project) and obj.scm_type == '':
                # Connot copy manual project without errors
                # Cannot copy manual project without errors
                user_capabilities[display_method] = False
                continue
            elif display_method in ['start', 'schedule'] and isinstance(obj, Group):  # TODO: remove in 3.3
@@ -434,12 +474,16 @@ class InstanceAccess(BaseAccess):
                   skip_sub_obj_read_check=False):
        if relationship == 'rampart_groups' and isinstance(sub_obj, InstanceGroup):
            return self.user.is_superuser
        return super(InstanceAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
        return super(InstanceAccess, self).can_attach(
            obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check
        )

    def can_unattach(self, obj, sub_obj, relationship, data=None):
        if relationship == 'rampart_groups' and isinstance(sub_obj, InstanceGroup):
            return self.user.is_superuser
        return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs)
        return super(InstanceAccess, self).can_unattach(
            obj, sub_obj, relationship, relationship, data=data
        )

    def can_add(self, data):
        return False
@@ -614,7 +658,7 @@ class OAuth2ApplicationAccess(BaseAccess):
        return self.model.objects.filter(organization__in=org_access_qs)

    def can_change(self, obj, data):
        return self.user.is_superuser or self.check_related('organization', Organization, data, obj=obj,
                                                            role_field='admin_role', mandatory=True)

    def can_delete(self, obj):
@@ -622,7 +666,7 @@ class OAuth2ApplicationAccess(BaseAccess):

    def can_add(self, data):
        if self.user.is_superuser:
            return True
        if not data:
            return Organization.accessible_objects(self.user, 'admin_role').exists()
        return self.check_related('organization', Organization, data, role_field='admin_role', mandatory=True)
@@ -636,29 +680,29 @@ class OAuth2TokenAccess(BaseAccess):
    - I am the user of the token.
    I can create an OAuth2 app token when:
    - I have the read permission of the related application.
    I can read, change or delete a personal token when:
    - I am the user of the token
    - I am the superuser
    I can create an OAuth2 Personal Access Token when:
    - I am a user.  But I can only create a PAT for myself.
    '''

    model = OAuth2AccessToken

    select_related = ('user', 'application')

    def filtered_queryset(self):
        org_access_qs = Organization.objects.filter(
            Q(admin_role__members=self.user) | Q(auditor_role__members=self.user))
        return self.model.objects.filter(application__organization__in=org_access_qs) | self.model.objects.filter(user__id=self.user.pk)

    def can_delete(self, obj):
        if (self.user.is_superuser) | (obj.user == self.user):
            return True
        elif not obj.application:
            return False
        return self.user in obj.application.organization.admin_role

    def can_change(self, obj, data):
        return self.can_delete(obj)
@@ -826,6 +870,10 @@ class HostAccess(BaseAccess):

        # Check to see if we have enough licenses
        self.check_license(add_host_name=data.get('name', None))

        # Check the per-org limit
        self.check_org_host_limit(data, add_host_name=data.get('name', None))

        return True

    def can_change(self, obj, data):
@@ -838,6 +886,10 @@ class HostAccess(BaseAccess):
        if data and 'name' in data:
            self.check_license(add_host_name=data['name'])

            # Check the per-org limit
            self.check_org_host_limit({'inventory': obj.inventory},
                                      add_host_name=data['name'])

        # Checks for admin or change permission on inventory, controls whether
        # the user can edit variable data.
        return obj and self.user in obj.inventory.admin_role
@@ -1332,7 +1384,7 @@ class JobTemplateAccess(BaseAccess):
            return self.user in project.use_role
        else:
            return False

    @check_superuser
    def can_copy_related(self, obj):
        '''
@@ -1341,13 +1393,17 @@ class JobTemplateAccess(BaseAccess):
        '''

        # obj.credentials.all() is accessible ONLY when object is saved (has valid id)
        credential_manager = getattr(obj, 'credentials', None) if getattr(obj, 'id', False) else Credentials.objects.none()
        credential_manager = getattr(obj, 'credentials', None) if getattr(obj, 'id', False) else Credential.objects.none()
        return reduce(lambda prev, cred: prev and self.user in cred.use_role, credential_manager.all(), True)

    def can_start(self, obj, validate_license=True):
        # Check license.
        if validate_license:
            self.check_license()

            # Check the per-org limit
            self.check_org_host_limit({'inventory': obj.inventory})

        if obj.survey_enabled:
            self.check_license(feature='surveys')
        if Instance.objects.active_count() > 1:
@@ -1506,6 +1562,9 @@ class JobAccess(BaseAccess):
        if validate_license:
            self.check_license()

            # Check the per-org limit
            self.check_org_host_limit({'inventory': obj.inventory})

        # A super user can relaunch a job
        if self.user.is_superuser:
            return True
@@ -1850,7 +1909,6 @@ class WorkflowJobTemplateAccess(BaseAccess):
        qs = obj.workflow_job_template_nodes
        qs = qs.prefetch_related('unified_job_template', 'inventory__use_role', 'credentials__use_role')
        for node in qs.all():
            node_errors = {}
            if node.inventory and self.user not in node.inventory.use_role:
                missing_inventories.append(node.inventory.name)
            for cred in node.credentials.all():
@@ -1859,8 +1917,6 @@ class WorkflowJobTemplateAccess(BaseAccess):
            ujt = node.unified_job_template
            if ujt and not self.user.can_access(UnifiedJobTemplate, 'start', ujt, validate_license=False):
                missing_ujt.append(ujt.name)
            if node_errors:
                wfjt_errors[node.id] = node_errors
        if missing_ujt:
            self.messages['templates_unable_to_copy'] = missing_ujt
        if missing_credentials:
@@ -1875,6 +1931,10 @@ class WorkflowJobTemplateAccess(BaseAccess):
        if validate_license:
            # check basic license, node count
            self.check_license()

            # Check the per-org limit
            self.check_org_host_limit({'inventory': obj.inventory})

        # if surveys are added to WFJTs, check license here
        if obj.survey_enabled:
            self.check_license(feature='surveys')
@@ -1946,6 +2006,9 @@ class WorkflowJobAccess(BaseAccess):
        if validate_license:
            self.check_license()

            # Check the per-org limit
            self.check_org_host_limit({'inventory': obj.inventory})

        if self.user.is_superuser:
            return True

@@ -2022,6 +2085,9 @@ class AdHocCommandAccess(BaseAccess):
        if validate_license:
            self.check_license()

            # Check the per-org limit
            self.check_org_host_limit(data)

        # If a credential is provided, the user should have use access to it.
        if not self.check_related('credential', Credential, data, role_field='use_role'):
            return False
@@ -2431,7 +2497,7 @@ class ActivityStreamAccess(BaseAccess):
    model = ActivityStream
    prefetch_related = ('organization', 'user', 'inventory', 'host', 'group',
                        'inventory_update', 'credential', 'credential_type', 'team',
                        'ad_hoc_command', 'o_auth2_application', 'o_auth2_access_token',
                        'notification_template', 'notification', 'label', 'role', 'actor',
                        'schedule', 'custom_inventory_script', 'unified_job_template',
                        'workflow_job_template_node',)
@@ -126,6 +126,17 @@ register(
    category_slug='system',
)

register(
    'CUSTOM_VENV_PATHS',
    field_class=fields.StringListPathField,
    label=_('Custom virtual environment paths'),
    help_text=_('Paths where Tower will look for custom virtual environments '
                '(in addition to /var/lib/awx/venv/). Enter one path per line.'),
    category=_('System'),
    category_slug='system',
    default=[],
)

register(
    'AD_HOC_COMMANDS',
    field_class=fields.StringListField,

@@ -16,7 +16,8 @@ SCHEDULEABLE_PROVIDERS = CLOUD_PROVIDERS + ('custom', 'scm',)
PRIVILEGE_ESCALATION_METHODS = [
    ('sudo', _('Sudo')), ('su', _('Su')), ('pbrun', _('Pbrun')), ('pfexec', _('Pfexec')),
    ('dzdo', _('DZDO')), ('pmrun', _('Pmrun')), ('runas', _('Runas')),
    ('enable', _('Enable')), ('doas', _('Doas')),
    ('enable', _('Enable')), ('doas', _('Doas')), ('ksu', _('Ksu')),
    ('machinectl', _('Machinectl')), ('sesu', _('Sesu')),
]
CHOICES_PRIVILEGE_ESCALATION_METHODS = [('', _('None'))] + PRIVILEGE_ESCALATION_METHODS
ANSI_SGR_PATTERN = re.compile(r'\x1b\[[0-9;]*m')
@@ -24,7 +25,9 @@ STANDARD_INVENTORY_UPDATE_ENV = {
    # Failure to parse inventory should always be fatal
    'ANSIBLE_INVENTORY_UNPARSED_FAILED': 'True',
    # Always use the --export option for ansible-inventory
    'ANSIBLE_INVENTORY_EXPORT': 'True'
    'ANSIBLE_INVENTORY_EXPORT': 'True',
    # Redirecting output to stderr allows JSON parsing to still work with -vvv
    'ANSIBLE_VERBOSE_TO_STDERR': 'True'
}
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL
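Once registered, CUSTOM_VENV_PATHS behaves like any other AWX setting; a hedged sketch of reading it back through Django settings (the fallback path is the default mentioned in the help text above):

from django.conf import settings

def candidate_venv_dirs():
    # Registered default is [], so the built-in venv root is appended.
    return list(settings.CUSTOM_VENV_PATHS) + ['/var/lib/awx/venv/']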
0
awx/main/db/__init__.py
Normal file
0
awx/main/db/profiled_pg/__init__.py
Normal file
155
awx/main/db/profiled_pg/base.py
Normal file
@@ -0,0 +1,155 @@
import os
import pkg_resources
import sqlite3
import sys
import traceback
import uuid

from django.core.cache import cache
from django.core.cache.backends.locmem import LocMemCache
from django.db.backends.postgresql.base import DatabaseWrapper as BaseDatabaseWrapper

from awx.main.utils import memoize

__loc__ = LocMemCache(str(uuid.uuid4()), {})
__all__ = ['DatabaseWrapper']


class RecordedQueryLog(object):

    def __init__(self, log, db, dest='/var/log/tower/profile'):
        self.log = log
        self.db = db
        self.dest = dest
        try:
            self.threshold = cache.get('awx-profile-sql-threshold')
        except Exception:
            # if we can't reach memcached, just assume profiling's off
            self.threshold = None

    def append(self, query):
        ret = self.log.append(query)
        try:
            self.write(query)
        except Exception:
            # not sure what else to do here - we can't really safely
            # *use* our loggers because it'll just generate more DB queries
            # and potentially recurse into this state again
            _, _, tb = sys.exc_info()
            traceback.print_tb(tb)
        return ret
    def write(self, query):
        if self.threshold is None:
            return
        seconds = float(query['time'])

        # if the query is slow enough...
        if seconds >= self.threshold:
            sql = query['sql']
            if sql.startswith('EXPLAIN'):
                return

            # build a printable Python stack
            bt = ' '.join(traceback.format_stack())

            # and re-run the same query w/ EXPLAIN
            explain = ''
            cursor = self.db.cursor()
            cursor.execute('EXPLAIN VERBOSE {}'.format(sql))
            for line in cursor.fetchall():
                explain += line[0] + '\n'

            # write a row of data into a per-PID sqlite database
            if not os.path.isdir(self.dest):
                os.makedirs(self.dest)
            progname = ' '.join(sys.argv)
            for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'runworker'):
                if match in progname:
                    progname = match
                    break
            else:
                progname = os.path.basename(sys.argv[0])
            filepath = os.path.join(
                self.dest,
                '{}.sqlite'.format(progname)
            )
            version = pkg_resources.get_distribution('awx').version
            log = sqlite3.connect(filepath, timeout=3)
            log.execute(
                'CREATE TABLE IF NOT EXISTS queries ('
                '  id INTEGER PRIMARY KEY,'
                '  version TEXT,'
                '  pid INTEGER,'
                '  stamp DATETIME DEFAULT CURRENT_TIMESTAMP,'
                '  argv REAL,'
                '  time REAL,'
                '  sql TEXT,'
                '  explain TEXT,'
                '  bt TEXT'
                ');'
            )
            log.commit()
            log.execute(
                'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) '
                'VALUES (?, ?, ?, ?, ?, ?, ?);',
                (os.getpid(), version, ' '.join(sys.argv), seconds, sql, explain, bt)
            )
            log.commit()

    def __len__(self):
        return len(self.log)

    def __iter__(self):
        return iter(self.log)

    def __getattr__(self, attr):
        return getattr(self.log, attr)


class DatabaseWrapper(BaseDatabaseWrapper):
    """
    This is a special subclass of Django's postgres DB backend which - based on
    the value of a special flag in memcached - captures slow queries and
    writes profile and Python stack metadata to the disk.
    """

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Django's default base wrapper implementation has `queries_log`
        # which is a `collections.deque` that every query is appended to
        #
        # this line wraps the deque with a proxy that can capture each query
        # and - if it's slow enough - record profiling metadata to the file
        # system for debugging purposes
        self.queries_log = RecordedQueryLog(self.queries_log, self)

    @property
    @memoize(ttl=1, cache=__loc__)
    def force_debug_cursor(self):
        # in Django's base DB implementation, `self.force_debug_cursor` is just
        # a simple boolean, and this value is used to signal to Django that it
        # should record queries into `self.queries_log` as they're executed (this
        # is the same mechanism used by libraries like the django-debug-toolbar)
        #
        # in _this_ implementation, we represent it as a property which will
        # check memcache for a special flag to be set (when the flag is set, it
        # means we should start recording queries because somebody called
        # `awx-manage profile_sql`)
        #
        # it's worth noting that this property is wrapped w/ @memoize because
        # Django references this attribute _constantly_ (in particular, once
        # per executed query); doing a memcached.get() _at most_ once per
        # second is a good enough window to detect when profiling is turned
        # on/off by a system administrator
        try:
            threshold = cache.get('awx-profile-sql-threshold')
        except Exception:
            # if we can't reach memcached, just assume profiling's off
            threshold = None
        self.queries_log.threshold = threshold
        return threshold is not None

    @force_debug_cursor.setter
    def force_debug_cursor(self, v):
        return
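A sketch of inspecting the per-process profile databases RecordedQueryLog.write() produces; the path and schema come from the code above, while which .sqlite files exist depends on which services actually ran:

import sqlite3

db = sqlite3.connect('/var/log/tower/profile/uwsgi.sqlite')
rows = db.execute(
    'SELECT stamp, time, sql FROM queries ORDER BY time DESC LIMIT 10;'
)
for stamp, seconds, sql in rows:
    # Slowest queries first; truncate SQL for readability.
    print('{} {:>8.3f}s {}'.format(stamp, seconds, sql[:80]))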
@@ -1,6 +1,7 @@
import logging
import os
import random
import sys
import traceback
from uuid import uuid4

@@ -18,7 +19,10 @@ import psutil
from awx.main.models import UnifiedJob
from awx.main.dispatch import reaper

logger = logging.getLogger('awx.main.dispatch')
if 'run_callback_receiver' in sys.argv:
    logger = logging.getLogger('awx.main.commands.run_callback_receiver')
else:
    logger = logging.getLogger('awx.main.dispatch')


class PoolWorker(object):

@@ -4,6 +4,7 @@
import os
import logging
import signal
import sys
from uuid import UUID
from queue import Empty as QueueEmpty

@@ -13,7 +14,10 @@ from kombu.mixins import ConsumerMixin

from awx.main.dispatch.pool import WorkerPool

logger = logging.getLogger('awx.main.dispatch')
if 'run_callback_receiver' in sys.argv:
    logger = logging.getLogger('awx.main.commands.run_callback_receiver')
else:
    logger = logging.getLogger('awx.main.dispatch')


def signame(sig):
@@ -108,7 +112,7 @@ class AWXConsumer(ConsumerMixin):

    def stop(self, signum, frame):
        self.should_stop = True  # this makes the kombu mixin stop consuming
        logger.debug('received {}, stopping'.format(signame(signum)))
        logger.warn('received {}, stopping'.format(signame(signum)))
        self.worker.on_stop()
        raise SystemExit()
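Both hunks above repeat one small pattern: choose a logger name from how the process was launched, so callback-receiver traffic lands in its own log. A self-contained sketch of the idea:

import logging
import sys

def pick_logger(default='awx.main.dispatch'):
    # Callback receivers are chatty; give them a dedicated logger.
    if 'run_callback_receiver' in sys.argv:
        return logging.getLogger('awx.main.commands.run_callback_receiver')
    return logging.getLogger(default)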
@@ -1,5 +1,3 @@
import base64
import codecs
import json
import os
import shutil
@@ -7,15 +5,13 @@ import stat
import tempfile
import time
import logging
from distutils.version import LooseVersion as Version
from io import StringIO

from django.conf import settings
from django.utils.encoding import smart_bytes, smart_str

import awx
from awx.main.expect import run
from awx.main.utils import OutputEventFilter, get_system_task_capacity
from awx.main.utils import get_system_task_capacity
from awx.main.queue import CallbackQueueDispatcher

logger = logging.getLogger('awx.isolated.manager')
@@ -24,23 +20,11 @@ playbook_logger = logging.getLogger('awx.isolated.manager.playbooks')

class IsolatedManager(object):

    def __init__(self, args, cwd, env, stdout_handle, ssh_key_path,
                 expect_passwords={}, cancelled_callback=None, job_timeout=0,
                 idle_timeout=None, extra_update_fields=None,
                 pexpect_timeout=5, proot_cmd='bwrap'):
    def __init__(self, env, cancelled_callback=None, job_timeout=0,
                 idle_timeout=None):
        """
        :param args:                a list of `subprocess.call`-style arguments
                                    representing a subprocess e.g.,
                                    ['ansible-playbook', '...']
        :param cwd:                 the directory where the subprocess should run,
                                    generally the directory where playbooks exist
        :param env:                 a dict containing environment variables for the
                                    subprocess, ala `os.environ`
        :param stdout_handle:       a file-like object for capturing stdout
        :param ssh_key_path:        a filepath where SSH key data can be read
        :param expect_passwords:    a dict of regular expression password prompts
                                    to input values, i.e., {r'Password:*?$':
                                    'some_password'}
        :param cancelled_callback:  a callable - which returns `True` or `False`
                                    - signifying if the job has been prematurely
                                    cancelled
@@ -49,26 +33,11 @@ class IsolatedManager(object):
        :param idle_timeout         a timeout (in seconds); if new output is not
                                    sent to stdout in this interval, the process
                                    will be terminated
        :param extra_update_fields: a dict used to specify DB fields which should
                                    be updated on the underlying model
                                    object after execution completes
        :param pexpect_timeout      a timeout (in seconds) to wait on
                                    `pexpect.spawn().expect()` calls
        :param proot_cmd            the command used to isolate processes, `bwrap`
        """
        self.args = args
        self.cwd = cwd
        self.isolated_env = self._redact_isolated_env(env.copy())
        self.management_env = self._base_management_env()
        self.stdout_handle = stdout_handle
        self.ssh_key_path = ssh_key_path
        self.expect_passwords = {k.pattern: v for k, v in expect_passwords.items()}
        self.cancelled_callback = cancelled_callback
        self.job_timeout = job_timeout
        self.idle_timeout = idle_timeout
        self.extra_update_fields = extra_update_fields
        self.pexpect_timeout = pexpect_timeout
        self.proot_cmd = proot_cmd
        self.started_at = None
    @staticmethod
@@ -106,18 +75,6 @@ class IsolatedManager(object):
        args.append('-%s' % ('v' * min(5, settings.AWX_ISOLATED_VERBOSITY)))
        return args

    @staticmethod
    def _redact_isolated_env(env):
        '''
        strips some environment variables that aren't applicable to
        job execution within the isolated instance
        '''
        for var in (
                'HOME', 'RABBITMQ_HOST', 'RABBITMQ_PASS', 'RABBITMQ_USER', 'CACHE',
                'DJANGO_PROJECT_DIR', 'DJANGO_SETTINGS_MODULE', 'RABBITMQ_VHOST'):
            env.pop(var, None)
        return env

    @classmethod
    def awx_playbook_path(cls):
        return os.path.abspath(os.path.join(
@@ -128,55 +85,31 @@ class IsolatedManager(object):
    def path_to(self, *args):
        return os.path.join(self.private_data_dir, *args)

    def dispatch(self):
    def dispatch(self, playbook=None, module=None, module_args=None):
        '''
        Compile the playbook, its environment, and metadata into a series
        of files, and ship to a remote host for isolated execution.
        Ship the runner payload to a remote host for isolated execution.
        '''
        self.handled_events = set()
        self.started_at = time.time()
        secrets = {
            'env': self.isolated_env,
            'passwords': self.expect_passwords,
            'ssh_key_data': None,
            'idle_timeout': self.idle_timeout,
            'job_timeout': self.job_timeout,
            'pexpect_timeout': self.pexpect_timeout
        }

        # if an ssh private key fifo exists, read its contents and delete it
        if self.ssh_key_path:
            buff = StringIO()
            with open(self.ssh_key_path, 'r') as fifo:
                for line in fifo:
                    buff.write(line)
            secrets['ssh_key_data'] = buff.getvalue()
            os.remove(self.ssh_key_path)

        # write the entire secret payload to a named pipe
        # the run_isolated.yml playbook will use a lookup to read this data
        # into a variable, and will replicate the data into a named pipe on the
        # isolated instance
        secrets_path = os.path.join(self.private_data_dir, 'env')
        run.open_fifo_write(
            secrets_path,
            smart_str(base64.b64encode(smart_bytes(json.dumps(secrets))))
        )

        self.build_isolated_job_data()

        extra_vars = {
            'src': self.private_data_dir,
            'dest': settings.AWX_PROOT_BASE_PATH,
            'ident': self.ident
        }
        if self.proot_temp_dir:
            extra_vars['proot_temp_dir'] = self.proot_temp_dir
        if playbook:
            extra_vars['playbook'] = playbook
        if module and module_args:
            extra_vars['module'] = module
            extra_vars['module_args'] = module_args

        # Run ansible-playbook to launch a job on the isolated host. This:
        #
        # - sets up a temporary directory for proot/bwrap (if necessary)
        # - copies encrypted job data from the controlling host to the isolated host (with rsync)
        # - writes the encryption secret to a named pipe on the isolated host
        # - launches the isolated playbook runner via `awx-expect start <job-id>`
        # - launches ansible-runner
        args = self._build_args('run_isolated.yml', '%s,' % self.host, extra_vars)
        if self.instance.verbosity:
            args.append('-%s' % ('v' * min(5, self.instance.verbosity)))
@@ -188,10 +121,15 @@ class IsolatedManager(object):
            job_timeout=settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
            pexpect_timeout=5
        )
        output = buff.getvalue().encode('utf-8')
        output = buff.getvalue()
        playbook_logger.info('Isolated job {} dispatch:\n{}'.format(self.instance.id, output))
        if status != 'successful':
            self.stdout_handle.write(output)
            for event_data in [
                {'event': 'verbose', 'stdout': output},
                {'event': 'EOF', 'final_counter': 1},
            ]:
                event_data.setdefault(self.event_data_key, self.instance.id)
                CallbackQueueDispatcher().dispatch(event_data)
        return status, rc
@classmethod
|
||||
@@ -215,11 +153,8 @@ class IsolatedManager(object):

    def build_isolated_job_data(self):
        '''
        Write the playbook and metadata into a collection of files on the local
        file system.

        This function is intended to be used to compile job data so that it
        can be shipped to a remote, isolated host (via ssh).
        Write metadata related to the playbook run into a collection of files
        on the local file system.
        '''

        rsync_exclude = [
@@ -229,42 +164,18 @@ class IsolatedManager(object):
            '- /project/.hg',
            # don't rsync job events that are in the process of being written
            '- /artifacts/job_events/*-partial.json.tmp',
            # rsync can't copy named pipe data - we're replicating this manually ourselves in the playbook
            '- /env'
            # don't rsync the ssh_key FIFO
            '- /env/ssh_key',
        ]

        for filename, data in (
                ['.rsync-filter', '\n'.join(rsync_exclude)],
                ['args', json.dumps(self.args)]
        ):
            path = self.path_to(filename)
            with open(path, 'w') as f:
                f.write(data)
            os.chmod(path, stat.S_IRUSR)

        # symlink the scm checkout (if there is one) so that it's rsync'ed over, too
        if 'AD_HOC_COMMAND_ID' not in self.isolated_env:
            os.symlink(self.cwd, self.path_to('project'))

        # create directories for build artifacts to live in
        os.makedirs(self.path_to('artifacts', 'job_events'), mode=stat.S_IXUSR + stat.S_IWUSR + stat.S_IRUSR)

    def _missing_artifacts(self, path_list, output):
        missing_artifacts = list(filter(lambda path: not os.path.exists(path), path_list))
        for path in missing_artifacts:
            self.stdout_handle.write('ansible did not exit cleanly, missing `{}`.\n'.format(path))
        if missing_artifacts:
            daemon_path = self.path_to('artifacts', 'daemon.log')
            if os.path.exists(daemon_path):
                # If available, show log files from the run.py call
                with codecs.open(daemon_path, 'r', encoding='utf-8') as f:
                    self.stdout_handle.write(f.read())
            else:
                # Provide the management playbook standard out if not available
                self.stdout_handle.write(output)
            return True
        return False

    def check(self, interval=None):
        """
        Repeatedly poll the isolated node to determine if the job has run.
@@ -290,18 +201,11 @@ class IsolatedManager(object):
        rc = None
        buff = StringIO()
        last_check = time.time()
        seek = 0
        job_timeout = remaining = self.job_timeout
        dispatcher = CallbackQueueDispatcher()
        while status == 'failed':
            if job_timeout != 0:
                remaining = max(0, job_timeout - (time.time() - self.started_at))
                if remaining == 0:
                    # if it takes longer than $REMAINING_JOB_TIMEOUT to retrieve
                    # job artifacts from the host, consider the job failed
                    if isinstance(self.extra_update_fields, dict):
                        self.extra_update_fields['job_explanation'] = "Job terminated due to timeout"
                    status = 'failed'
                    break

            canceled = self.cancelled_callback() if self.cancelled_callback else False
            if not canceled and time.time() - last_check < interval:
@@ -309,6 +213,9 @@ class IsolatedManager(object):
                time.sleep(1)
                continue

            if canceled:
                logger.warning('Isolated job {} was manually cancelled.'.format(self.instance.id))

            buff = StringIO()
            logger.debug('Checking on isolated job {} with `check_isolated.yml`.'.format(self.instance.id))
            status, rc = IsolatedManager.run_pexpect(
@@ -317,36 +224,50 @@ class IsolatedManager(object):
                idle_timeout=remaining,
                job_timeout=remaining,
                pexpect_timeout=5,
                proot_cmd=self.proot_cmd
                proot_cmd='bwrap'
            )
            output = buff.getvalue().encode('utf-8')
            playbook_logger.info('Isolated job {} check:\n{}'.format(self.instance.id, output))

            path = self.path_to('artifacts', 'stdout')
            if os.path.exists(path):
                with codecs.open(path, 'r', encoding='utf-8') as f:
                    f.seek(seek)
                    for line in f:
                        self.stdout_handle.write(line)
                        seek += len(line)
            # discover new events and ingest them
            events_path = self.path_to('artifacts', self.ident, 'job_events')

            # it's possible that `events_path` doesn't exist *yet*, because runner
            # hasn't actually written any events yet (if you ran e.g., a sleep 30)
            # only attempt to consume events if any were rsynced back
            if os.path.exists(events_path):
                for event in set(os.listdir(events_path)) - self.handled_events:
                    path = os.path.join(events_path, event)
                    if os.path.exists(path):
                        event_data = json.load(
                            open(os.path.join(events_path, event), 'r')
                        )
                        event_data.setdefault(self.event_data_key, self.instance.id)
                        dispatcher.dispatch(event_data)
                        self.handled_events.add(event)

                        # handle artifacts
                        if event_data.get('event_data', {}).get('artifact_data', {}):
                            self.instance.artifacts = event_data['event_data']['artifact_data']
                            self.instance.save(update_fields=['artifacts'])

            last_check = time.time()

        if status == 'successful':
            status_path = self.path_to('artifacts', 'status')
            rc_path = self.path_to('artifacts', 'rc')
            if self._missing_artifacts([status_path, rc_path], output):
                status = 'failed'
                rc = 1
            else:
                with open(status_path, 'r') as f:
                    status = f.readline()
                with open(rc_path, 'r') as f:
                    rc = int(f.readline())
        elif status == 'failed':
            # if we were unable to retrieve job results from the isolated host,
            # print stdout of the `check_isolated.yml` playbook for clues
            self.stdout_handle.write(smart_str(output))
            status_path = self.path_to('artifacts', self.ident, 'status')
            rc_path = self.path_to('artifacts', self.ident, 'rc')
            with open(status_path, 'r') as f:
                status = f.readline()
            with open(rc_path, 'r') as f:
                rc = int(f.readline())

        # emit an EOF event
        event_data = {
            'event': 'EOF',
            'final_counter': len(self.handled_events)
        }
        event_data.setdefault(self.event_data_key, self.instance.id)
        dispatcher.dispatch(event_data)

        return status, rc
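The once-only ingestion above hinges on the `handled_events` set: each poll only dispatches event files it has not seen before. A minimal standalone sketch of the same pattern (directory layout and dispatch callable are illustrative, not AWX's):

import json
import os

def consume_new_events(events_path, handled, dispatch):
    # only files we have not dispatched before are processed
    for event in set(os.listdir(events_path)) - handled:
        with open(os.path.join(events_path, event), 'r') as f:
            dispatch(json.load(f))
        handled.add(event)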
@@ -356,7 +277,6 @@ class IsolatedManager(object):
            'private_data_dir': self.private_data_dir,
            'cleanup_dirs': [
                self.private_data_dir,
                self.proot_temp_dir,
            ],
        }
        args = self._build_args('clean_isolated.yml', '%s,' % self.host, extra_vars)
@@ -377,23 +297,15 @@ class IsolatedManager(object):

    @classmethod
    def update_capacity(cls, instance, task_result, awx_application_version):
        instance.version = task_result['version']
        instance.version = 'ansible-runner-{}'.format(task_result['version'])

        isolated_version = instance.version.split("-", 1)[0]
        cluster_version = awx_application_version.split("-", 1)[0]

        if Version(cluster_version) > Version(isolated_version):
            err_template = "Isolated instance {} reports version {}, cluster node is at {}, setting capacity to zero."
            logger.error(err_template.format(instance.hostname, instance.version, awx_application_version))
            instance.capacity = 0
        else:
            if instance.capacity == 0 and task_result['capacity_cpu']:
                logger.warning('Isolated instance {} has re-joined.'.format(instance.hostname))
            instance.cpu_capacity = int(task_result['capacity_cpu'])
            instance.mem_capacity = int(task_result['capacity_mem'])
            instance.capacity = get_system_task_capacity(scale=instance.capacity_adjustment,
                                                         cpu_capacity=int(task_result['capacity_cpu']),
                                                         mem_capacity=int(task_result['capacity_mem']))
        if instance.capacity == 0 and task_result['capacity_cpu']:
            logger.warning('Isolated instance {} has re-joined.'.format(instance.hostname))
        instance.cpu_capacity = int(task_result['capacity_cpu'])
        instance.mem_capacity = int(task_result['capacity_mem'])
        instance.capacity = get_system_task_capacity(scale=instance.capacity_adjustment,
                                                     cpu_capacity=int(task_result['capacity_cpu']),
                                                     mem_capacity=int(task_result['capacity_mem']))
        instance.save(update_fields=['cpu_capacity', 'mem_capacity', 'capacity', 'version', 'modified'])

    @classmethod
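The version gate removed above used distutils' LooseVersion; for reference, a quick sketch of how those comparisons behave (values are illustrative):

from distutils.version import LooseVersion as Version

assert Version('3.5.0') > Version('3.4.1')
assert Version('2.10') > Version('2.5')  # compared numerically, not lexicographically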
@@ -460,28 +372,8 @@ class IsolatedManager(object):
        if os.path.exists(facts_path):
            shutil.rmtree(facts_path)

    @staticmethod
    def get_stdout_handle(instance, private_data_dir, event_data_key='job_id'):
        dispatcher = CallbackQueueDispatcher()

        def job_event_callback(event_data):
            event_data.setdefault(event_data_key, instance.id)
            if 'uuid' in event_data:
                filename = '{}-partial.json'.format(event_data['uuid'])
                partial_filename = os.path.join(private_data_dir, 'artifacts', 'job_events', filename)
                try:
                    with codecs.open(partial_filename, 'r', encoding='utf-8') as f:
                        partial_event_data = json.load(f)
                    event_data.update(partial_event_data)
                except IOError:
                    if event_data.get('event', '') != 'verbose':
                        logger.error('Missing callback data for event type `{}`, uuid {}, job {}.\nevent_data: {}'.format(
                            event_data.get('event', ''), event_data['uuid'], instance.id, event_data))
            dispatcher.dispatch(event_data)

        return OutputEventFilter(job_event_callback)

    def run(self, instance, private_data_dir, proot_temp_dir):
    def run(self, instance, private_data_dir, playbook, module, module_args,
            event_data_key, ident=None):
        """
        Run a job on an isolated host.

@@ -489,18 +381,21 @@ class IsolatedManager(object):
        :param private_data_dir: an absolute path on the local file system
                                 where job-specific data should be written
                                 (i.e., `/tmp/ansible_awx_xyz/`)
        :param proot_temp_dir:   a temporary directory which bwrap maps
                                 restricted paths to
        :param playbook:         the playbook to run
        :param module:           the module to run
        :param module_args:      the module args to use
        :param event_data_key:   e.g., job_id, inventory_id, ...

        For a completed job run, this function returns (status, rc),
        representing the status and return code of the isolated
        `ansible-playbook` run.
        """
        self.ident = ident
        self.event_data_key = event_data_key
        self.instance = instance
        self.host = instance.execution_node
        self.private_data_dir = private_data_dir
        self.proot_temp_dir = proot_temp_dir
        status, rc = self.dispatch()
        status, rc = self.dispatch(playbook, module, module_args)
        if status == 'successful':
            status, rc = self.check()
        self.cleanup()
@@ -14,10 +14,7 @@ import signal
import sys
import threading
import time
try:
    from io import StringIO
except ImportError:
    from StringIO import StringIO
from io import StringIO

import pexpect
import psutil
@@ -194,18 +191,7 @@ def run_isolated_job(private_data_dir, secrets, logfile=sys.stdout):
    job_timeout = secrets.get('job_timeout', 10)
    pexpect_timeout = secrets.get('pexpect_timeout', 5)

    # Use local callback directory
    callback_dir = os.getenv('AWX_LIB_DIRECTORY')
    if callback_dir is None:
        raise RuntimeError('Location for callbacks must be specified '
                           'by environment variable AWX_LIB_DIRECTORY.')
    env['ANSIBLE_CALLBACK_PLUGINS'] = os.path.join(callback_dir, 'isolated_callbacks')
    if 'AD_HOC_COMMAND_ID' in env:
        env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
    else:
        env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
    env['AWX_ISOLATED_DATA_DIR'] = private_data_dir
    env['PYTHONPATH'] = env.get('PYTHONPATH', '') + callback_dir + ':'

    venv_path = env.get('VIRTUAL_ENV')
    if venv_path and not os.path.exists(venv_path):
@@ -231,10 +217,7 @@ def handle_termination(pid, args, proot_cmd, is_cancel=True):
    instance's cancel_flag.
    '''
    try:
        if sys.version_info > (3, 0):
            used_proot = proot_cmd.encode('utf-8') in args
        else:
            used_proot = proot_cmd in ' '.join(args)
        used_proot = proot_cmd.encode('utf-8') in args
        if used_proot:
            if not psutil:
                os.kill(pid, signal.SIGKILL)
@@ -671,6 +671,7 @@ class CredentialTypeInputField(JSONSchemaField):
                    'multiline': {'type': 'boolean'},
                    'secret': {'type': 'boolean'},
                    'ask_at_runtime': {'type': 'boolean'},
                    'default': {},
                },
                'additionalProperties': False,
                'required': ['id', 'label'],
@@ -714,6 +715,14 @@ class CredentialTypeInputField(JSONSchemaField):
                # If no type is specified, default to string
                field['type'] = 'string'

            if 'default' in field:
                default = field['default']
                _type = {'string': str, 'boolean': bool}[field['type']]
                if type(default) != _type:
                    raise django_exceptions.ValidationError(
                        _('{} is not a {}').format(default, field['type'])
                    )

            for key in ('choices', 'multiline', 'format', 'secret',):
                if key in field and field['type'] != 'string':
                    raise django_exceptions.ValidationError(
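The new validation means a field's `default` must match its declared `type`; a minimal sketch of the same check outside of Django (the field dicts are illustrative):

def default_matches_type(field):
    # mirrors the validation added above: string fields need str defaults,
    # boolean fields need bool defaults
    _type = {'string': str, 'boolean': bool}[field.get('type', 'string')]
    return type(field['default']) == _type

assert default_matches_type({'type': 'boolean', 'default': True})
assert not default_matches_type({'type': 'boolean', 'default': 'yes'})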
@@ -12,6 +12,7 @@ import sys
import time
import traceback
import shutil
from distutils.version import LooseVersion as Version

# Django
from django.conf import settings
@@ -37,6 +38,7 @@ from awx.main.utils import (
    build_proot_temp_dir,
    get_licenser
)
from awx.main.utils.common import _get_ansible_version
from awx.main.signals import disable_activity_stream
from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV
@@ -124,7 +126,16 @@ class AnsibleInventoryLoader(object):

    def get_base_args(self):
        # get ansible-inventory absolute path for running in bubblewrap/proot, in Popen
        bargs = [self.get_path_to_ansible_inventory(), '-i', self.source]
        ansible_inventory_path = self.get_path_to_ansible_inventory()
        # NOTE: why do we add "python" to the start of these args?
        # the script that runs ansible-inventory specifies a python interpreter
        # that makes no sense in light of the fact that we put all the dependencies
        # inside of /venv/ansible, so we override the specified interpreter
        # https://github.com/ansible/ansible/issues/50714
        bargs = ['python', ansible_inventory_path, '-i', self.source]
        ansible_version = _get_ansible_version(ansible_inventory_path[:-len('-inventory')])
        if ansible_version != 'unknown' and Version(ansible_version) >= Version('2.5'):
            bargs.extend(['--playbook-dir', self.source_dir])
        logger.debug('Using base command: {}'.format(' '.join(bargs)))
        return bargs
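For an Ansible new enough to accept `--playbook-dir`, the resulting base command looks roughly like the following (every path here is illustrative, not a real AWX install path):

# e.g. with ansible-inventory installed in the ansible virtualenv
bargs = ['python', '/venv/ansible/bin/ansible-inventory',
         '-i', '/projects/demo/inventory',
         '--playbook-dir', '/projects/demo']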
@@ -302,11 +313,6 @@ class Command(BaseCommand):
            raise NotImplementedError('Value of enabled {} not understood.'.format(enabled))

    def get_source_absolute_path(self, source):
        # Sanity check: We sanitize these module names for our API but Ansible proper doesn't follow
        # good naming conventions
        source = source.replace('rhv.py', 'ovirt4.py')
        source = source.replace('satellite6.py', 'foreman.py')
        source = source.replace('vmware.py', 'vmware_inventory.py')
        if not os.path.exists(source):
            raise IOError('Source does not exist: %s' % source)
        source = os.path.join(os.getcwd(), os.path.dirname(source),
@@ -413,6 +419,16 @@ class Command(BaseCommand):
                mem_host.instance_id = instance_id
                self.mem_instance_id_map[instance_id] = mem_host.name

    def _existing_host_pks(self):
        '''Returns cached set of existing / previous host primary key values
        this is the starting set, meaning that it is pre-modification
        by deletions and other things done in the course of this import
        '''
        if not hasattr(self, '_cached_host_pk_set'):
            self._cached_host_pk_set = frozenset(
                self.inventory_source.hosts.values_list('pk', flat=True))
        return self._cached_host_pk_set

    def _delete_hosts(self):
        '''
        For each host in the database that is NOT in the local list, delete
@@ -424,7 +440,7 @@ class Command(BaseCommand):
        queries_before = len(connection.queries)
        hosts_qs = self.inventory_source.hosts
        # Build list of all host pks, remove all that should not be deleted.
        del_host_pks = set(hosts_qs.values_list('pk', flat=True))
        del_host_pks = set(self._existing_host_pks())  # makes mutable copy
        if self.instance_id_var:
            all_instance_ids = list(self.mem_instance_id_map.keys())
            instance_ids = []
@@ -504,6 +520,10 @@ class Command(BaseCommand):
        group_group_count = 0
        group_host_count = 0
        db_groups = self.inventory_source.groups
        # Set of all group names managed by this inventory source
        all_source_group_names = frozenset(self.all_group.all_groups.keys())
        # Set of all host pks managed by this inventory source
        all_source_host_pks = self._existing_host_pks()
        for db_group in db_groups.all():
            if self.inventory_source.deprecated_group_id == db_group.id:  # TODO: remove in 3.3
                logger.debug(
@@ -514,9 +534,18 @@ class Command(BaseCommand):
            # Delete child group relationships not present in imported data.
            db_children = db_group.children
            db_children_name_pk_map = dict(db_children.values_list('name', 'pk'))
            # Exclude child groups from removal list if they were returned by
            # the import, because this parent-child relationship has not changed
            mem_children = self.all_group.all_groups[db_group.name].children
            for mem_group in mem_children:
                db_children_name_pk_map.pop(mem_group.name, None)
            # Exclude child groups from removal list if they were not imported
            # by this specific inventory source, because
            # those relationships are outside of the dominion of this inventory source
            other_source_group_names = set(db_children_name_pk_map.keys()) - all_source_group_names
            for group_name in other_source_group_names:
                db_children_name_pk_map.pop(group_name, None)
            # Removal list is complete - now perform the removals
            del_child_group_pks = list(set(db_children_name_pk_map.values()))
            for offset in range(0, len(del_child_group_pks), self._batch_size):
                child_group_pks = del_child_group_pks[offset:(offset + self._batch_size)]
@@ -529,6 +558,12 @@ class Command(BaseCommand):
            # Delete group/host relationships not present in imported data.
            db_hosts = db_group.hosts
            del_host_pks = set(db_hosts.values_list('pk', flat=True))
            # Exclude child hosts from removal list if they were not imported
            # by this specific inventory source, because
            # those relationships are outside of the dominion of this inventory source
            del_host_pks = del_host_pks & all_source_host_pks
            # Exclude child hosts from removal list if they were returned by
            # the import, because this group-host relationship has not changed
            mem_hosts = self.all_group.all_groups[db_group.name].hosts
            all_mem_host_names = [h.name for h in mem_hosts if not h.instance_id]
            for offset in range(0, len(all_mem_host_names), self._batch_size):
@@ -543,6 +578,7 @@ class Command(BaseCommand):
            all_db_host_pks = [v for k, v in self.db_instance_id_map.items() if k in all_mem_instance_ids]
            for db_host_pk in all_db_host_pks:
                del_host_pks.discard(db_host_pk)
            # Removal list is complete - now perform the removals
            del_host_pks = list(del_host_pks)
            for offset in range(0, len(del_host_pks), self._batch_size):
                del_pks = del_host_pks[offset:(offset + self._batch_size)]
@@ -849,12 +885,24 @@ class Command(BaseCommand):
        self._create_update_group_children()
        self._create_update_group_hosts()

    def remote_tower_license_compare(self, local_license_type):
        # this requires https://github.com/ansible/ansible/pull/52747
        source_vars = self.all_group.variables
        remote_license_type = source_vars.get('tower_metadata', {}).get('license_type', None)
        if remote_license_type is None:
            raise CommandError('Unexpected Error: Tower inventory plugin missing needed metadata!')
        if local_license_type != remote_license_type:
            raise CommandError('Tower server licenses must match: source: {} local: {}'.format(
                remote_license_type, local_license_type
            ))

    def check_license(self):
        license_info = get_licenser().validate()
        local_license_type = license_info.get('license_type', 'UNLICENSED')
        if license_info.get('license_key', 'UNLICENSED') == 'UNLICENSED':
            logger.error(LICENSE_NON_EXISTANT_MESSAGE)
            raise CommandError('No license found!')
        elif license_info.get('license_type', 'UNLICENSED') == 'open':
        elif local_license_type == 'open':
            return
        available_instances = license_info.get('available_instances', 0)
        free_instances = license_info.get('free_instances', 0)
@@ -863,6 +911,13 @@ class Command(BaseCommand):
        if time_remaining <= 0 and not license_info.get('demo', False):
            logger.error(LICENSE_EXPIRED_MESSAGE)
            raise CommandError("License has expired!")
        # special check for tower-type inventory sources
        # but only if running the plugin
        TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']
        if self.inventory_source.source == 'tower' and any(f in self.source for f in TOWER_SOURCE_FILES):
            # only if this is the 2nd call to license check, we cannot compare before running plugin
            if hasattr(self, 'all_group'):
                self.remote_tower_license_compare(local_license_type)
        if free_instances < 0:
            d = {
                'new_count': new_count,
@@ -874,10 +929,27 @@ class Command(BaseCommand):
            logger.error(LICENSE_MESSAGE % d)
            raise CommandError('License count exceeded!')

    def check_org_host_limit(self):
        license_info = get_licenser().validate()
        if license_info.get('license_type', 'UNLICENSED') == 'open':
            return

        org = self.inventory.organization
        if org is None or org.max_hosts == 0:
            return

        active_count = Host.objects.org_active_count(org.id)
        if active_count > org.max_hosts:
            raise CommandError('Host limit for organization exceeded!')

    def mark_license_failure(self, save=True):
        self.inventory_update.license_error = True
        self.inventory_update.save(update_fields=['license_error'])

    def mark_org_limits_failure(self, save=True):
        self.inventory_update.org_host_limit_error = True
        self.inventory_update.save(update_fields=['org_host_limit_error'])

    def handle(self, *args, **options):
        self.verbosity = int(options.get('verbosity', 1))
        self.set_logging_level()
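The tower-source comparison above only needs the `tower_metadata` hostvar exposed by the tower inventory plugin; a minimal sketch of the check with an illustrative variables payload:

source_vars = {'tower_metadata': {'version': '3.4.2', 'license_type': 'enterprise'}}

remote = source_vars.get('tower_metadata', {}).get('license_type', None)
local = 'enterprise'
assert remote == local  # otherwise the import raises CommandError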
@@ -931,6 +1003,13 @@ class Command(BaseCommand):
                self.mark_license_failure(save=True)
                raise e

            try:
                # Check the per-org host limits
                self.check_org_host_limit()
            except CommandError as e:
                self.mark_org_limits_failure(save=True)
                raise e

            status, tb, exc = 'error', '', None
            try:
                if settings.SQL_DEBUG:
@@ -1002,9 +1081,17 @@ class Command(BaseCommand):
                    # If the license is not valid, a CommandError will be thrown,
                    # and inventory update will be marked as invalid.
                    # with transaction.atomic() will roll back the changes.
                    license_fail = True
                    self.check_license()

                    # Check the per-org host limits
                    license_fail = False
                    self.check_org_host_limit()
                except CommandError as e:
                    self.mark_license_failure()
                    if license_fail:
                        self.mark_license_failure()
                    else:
                        self.mark_org_limits_failure()
                    raise e

                if settings.SQL_DEBUG:
@@ -1032,9 +1119,8 @@ class Command(BaseCommand):
            else:
                tb = traceback.format_exc()
                exc = e
                transaction.rollback()

            if self.invoked_from_dispatcher is False:
            if not self.invoked_from_dispatcher:
                with ignore_inventory_computed_fields():
                    self.inventory_update = InventoryUpdate.objects.get(pk=self.inventory_update.pk)
                    self.inventory_update.result_traceback = tb
@@ -1043,7 +1129,10 @@ class Command(BaseCommand):
            self.inventory_source.status = status
            self.inventory_source.save(update_fields=['status'])

            if exc and isinstance(exc, CommandError):
                sys.exit(1)
            elif exc:
            if exc:
                logger.error(str(exc))

            if exc:
                if isinstance(exc, CommandError):
                    sys.exit(1)
                raise exc
awx/main/management/commands/profile_sql.py (new file, 21 lines)
@@ -0,0 +1,21 @@
from django.core.management.base import BaseCommand

from awx.main.tasks import profile_sql


class Command(BaseCommand):
    """
    Enable or disable SQL Profiling across all Python processes.
    SQL profile data will be recorded at /var/log/tower/profile
    """

    def add_arguments(self, parser):
        parser.add_argument('--threshold', dest='threshold', type=float, default=2.0,
                            help='The minimum query duration in seconds (default=2). Use 0 to disable.')
        parser.add_argument('--minutes', dest='minutes', type=float, default=5,
                            help='How long to record for in minutes (default=5)')

    def handle(self, **options):
        profile_sql.delay(
            threshold=options['threshold'], minutes=options['minutes']
        )
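A usage sketch: like any Django management command, this can also be invoked programmatically via `call_command` (the threshold/minutes values here are illustrative):

from django.core.management import call_command

# record queries slower than 1.5s for the next 10 minutes
call_command('profile_sql', threshold=1.5, minutes=10)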
@@ -28,7 +28,7 @@ class Command(BaseCommand):
        args = [
            'ansible', 'all', '-i', '{},'.format(hostname), '-u',
            settings.AWX_ISOLATED_USERNAME, '-T5', '-m', 'shell',
            '-a', 'awx-expect -h', '-vvv'
            '-a', 'ansible-runner --version', '-vvv'
        ]
        if all([
            getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True,
@@ -1,39 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved


from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError


class Command(BaseCommand):
    """A command that reports whether a username exists within the
    system or not.
    """
    def handle(self, *args, **options):
        """Print out information about the user to the console."""

        # Sanity check: There should be one and exactly one positional
        # argument.
        if len(args) != 1:
            raise CommandError('This command requires one positional argument '
                               '(a username).')

        # Get the user.
        try:
            username = args[0]
            user = User.objects.get(username=username)

            # Print a cute header.
            header = 'Information for user: %s' % username
            print('%s\n%s' % (header, '=' * len(header)))

            # Print the email and real name of the user.
            print('Email: %s' % user.email)
            if user.first_name or user.last_name:
                print('Name: %s %s' % (user.first_name, user.last_name))
            else:
                print('No name provided.')
        except User.DoesNotExist:
            raise CommandError('User %s does not exist.' % username)
@@ -29,6 +29,34 @@ class HostManager(models.Manager):
        """
        return self.order_by().exclude(inventory_sources__source='tower').values('name').distinct().count()

    def org_active_count(self, org_id):
        """Return count of active, unique hosts used by an organization.
        Construction of query involves:
          - remove any ordering specified in model's Meta
          - Exclude hosts sourced from another Tower
          - Consider only hosts where the canonical inventory is owned by the organization
          - Restrict the query to only return the name column
          - Only consider results that are unique
          - Return the count of this query
        """
        return self.order_by().exclude(
            inventory_sources__source='tower'
        ).filter(inventory__organization=org_id).values('name').distinct().count()

    def active_counts_by_org(self):
        """Return the counts of active, unique hosts for each organization.
        Construction of query involves:
          - remove any ordering specified in model's Meta
          - Exclude hosts sourced from another Tower
          - Consider only hosts where the canonical inventory is owned by each organization
          - Restrict the query to only count distinct names
          - Return the counts
        """
        return self.order_by().exclude(
            inventory_sources__source='tower'
        ).values('inventory__organization').annotate(
            inventory__organization__count=models.Count('name', distinct=True))

    def get_queryset(self):
        """When the parent instance of the host query set has a `kind=smart` and a `host_filter`
        set. Use the `host_filter` to generate the queryset for the hosts.
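A hedged usage sketch for the two new manager methods (the organization id and result values are illustrative):

from awx.main.models import Host

# count of distinct, non-tower-sourced host names in one organization
n = Host.objects.org_active_count(org_id=1)

# per-organization counts, shaped roughly like:
# [{'inventory__organization': 1, 'inventory__organization__count': 42}, ...]
counts = Host.objects.active_counts_by_org()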
@@ -34,7 +34,7 @@ perf_logger = logging.getLogger('awx.analytics.performance')


class TimingMiddleware(threading.local):

    dest = '/var/lib/awx/profile'
    dest = '/var/log/tower/profile'

    def process_request(self, request):
        self.start_time = time.time()
@@ -57,7 +57,7 @@ class TimingMiddleware(threading.local):
    def save_profile_file(self, request):
        if not os.path.isdir(self.dest):
            os.makedirs(self.dest)
        filename = '%.3fs-%s' % (pstats.Stats(self.prof).total_tt, uuid.uuid4())
        filename = '%.3fs-%s.pstats' % (pstats.Stats(self.prof).total_tt, uuid.uuid4())
        filepath = os.path.join(self.dest, filename)
        with open(filepath, 'w') as f:
            f.write('%s %s\n' % (request.method, request.get_full_path()))
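The filename embeds `total_tt`, the profile's total time, from the standard library's pstats module; a minimal self-contained sketch of the same machinery (the profiled expression is a stand-in):

import cProfile
import pstats

pr = cProfile.Profile()
pr.enable()
sum(range(100000))  # stand-in for the request being profiled
pr.disable()

stats = pstats.Stats(pr)
print(stats.total_tt)  # total time, as used in the filename above
stats.sort_stats('cumulative').print_stats(5)  # five slowest call paths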
awx/main/migrations/0059_v350_remove_adhoc_limit.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-13 13:27
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0058_v350_remove_limit_limit'),
    ]

    operations = [
        migrations.AlterField(
            model_name='adhoccommand',
            name='limit',
            field=models.TextField(blank=True, default=''),
        ),
    ]
@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-13 17:45
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0059_v350_remove_adhoc_limit'),
    ]

    operations = [
        migrations.AlterField(
            model_name='schedule',
            name='name',
            field=models.CharField(max_length=512),
        ),
        migrations.AlterUniqueTogether(
            name='schedule',
            unique_together=set([('unified_job_template', 'name')]),
        ),
    ]
@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-19 04:27
from __future__ import unicode_literals

from django.db import migrations, models

from awx.main.models import CredentialType


def migrate_to_static_inputs(apps, schema_editor):
    CredentialType.setup_tower_managed_defaults()


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0060_v350_update_schedule_uniqueness_constraint'),
    ]

    operations = [
        migrations.AddField(
            model_name='credentialtype',
            name='namespace',
            field=models.CharField(default=None, editable=False, max_length=1024, null=True),
        ),
        migrations.RunPython(migrate_to_static_inputs)
    ]
awx/main/migrations/0062_v350_new_playbook_stats.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-14 00:44
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0061_v350_track_native_credentialtype_source'),
    ]

    operations = [
        migrations.AddField(
            model_name='jobhostsummary',
            name='ignored',
            field=models.PositiveIntegerField(default=0, editable=False),
        ),
        migrations.AddField(
            model_name='jobhostsummary',
            name='rescued',
            field=models.PositiveIntegerField(default=0, editable=False),
        ),
    ]
awx/main/migrations/0063_v350_org_host_limits.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-15 20:03
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0062_v350_new_playbook_stats'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventoryupdate',
            name='org_host_limit_error',
            field=models.BooleanField(default=False, editable=False),
        ),
        migrations.AddField(
            model_name='organization',
            name='max_hosts',
            field=models.PositiveIntegerField(blank=True, default=0, help_text='Maximum number of hosts allowed to be managed by this organization.'),
        ),
    ]
@@ -62,155 +62,40 @@ def _disassociate_non_insights_projects(apps, cred):


def migrate_to_v2_credentials(apps, schema_editor):
    CredentialType.setup_tower_managed_defaults()
    deprecated_cred = _generate_deprecated_cred_types()

    # this monkey-patch is necessary to make the implicit role generation save
    # signal use the correct Role model (the version active at this point in
    # migration, not the one at HEAD)
    orig_current_apps = utils.get_current_apps
    try:
        utils.get_current_apps = lambda: apps
        for cred in apps.get_model('main', 'Credential').objects.all():
            job_templates = cred.jobtemplates.all()
            jobs = cred.jobs.all()
            data = {}
            if getattr(cred, 'vault_password', None):
                data['vault_password'] = cred.vault_password
            if _is_insights_scm(apps, cred):
                _disassociate_non_insights_projects(apps, cred)
                credential_type = _get_insights_credential_type()
            else:
                credential_type = _populate_deprecated_cred_types(deprecated_cred, cred.kind) or CredentialType.from_v1_kind(cred.kind, data)

            defined_fields = credential_type.defined_fields
            cred.credential_type = apps.get_model('main', 'CredentialType').objects.get(pk=credential_type.pk)

            for field in defined_fields:
                if getattr(cred, field, None):
                    cred.inputs[field] = getattr(cred, field)
            if cred.vault_password:
                for jt in job_templates:
                    jt.credential = None
                    jt.vault_credential = cred
                    jt.save()
                for job in jobs:
                    job.credential = None
                    job.vault_credential = cred
                    job.save()
            if data.get('is_insights', False):
                cred.kind = 'insights'
            cred.save()

            #
            # If the credential contains a vault password, create a new
            # *additional* credential for the ssh details
            #
            if cred.vault_password:
                # We need to make an ssh credential, too
                ssh_type = CredentialType.from_v1_kind('ssh')
                new_cred = apps.get_model('main', 'Credential').objects.get(pk=cred.pk)
                new_cred.pk = None
                new_cred.vault_password = ''
                new_cred.credential_type = apps.get_model('main', 'CredentialType').objects.get(pk=ssh_type.pk)
                if 'vault_password' in new_cred.inputs:
                    del new_cred.inputs['vault_password']

                # unset these attributes so that new roles are properly created
                # at save time
                new_cred.read_role = None
                new_cred.admin_role = None
                new_cred.use_role = None

                if any([getattr(cred, field) for field in ssh_type.defined_fields]):
                    new_cred.save(force_insert=True)

                    # copy rbac roles
                    for role_type in ('read_role', 'admin_role', 'use_role'):
                        for member in getattr(cred, role_type).members.all():
                            getattr(new_cred, role_type).members.add(member)
                        for role in getattr(cred, role_type).parents.all():
                            getattr(new_cred, role_type).parents.add(role)

                    for jt in job_templates:
                        jt.credential = new_cred
                        jt.save()
                    for job in jobs:
                        job.credential = new_cred
                        job.save()

                    # passwords must be decrypted and re-encrypted, because
                    # their encryption is based on the Credential's primary key
                    # (which has changed)
                    for field in ssh_type.defined_fields:
                        if field in ssh_type.secret_fields:
                            value = decrypt_field(cred, field)
                            if value:
                                setattr(new_cred, field, value)
                                new_cred.inputs[field] = encrypt_field(new_cred, field)
                                setattr(new_cred, field, '')
                        elif getattr(cred, field):
                            new_cred.inputs[field] = getattr(cred, field)
                    new_cred.save()
    finally:
        utils.get_current_apps = orig_current_apps
    # TODO: remove once legacy/EOL'd Towers no longer support this upgrade path
    pass


def migrate_job_credentials(apps, schema_editor):
    # this monkey-patch is necessary to make the implicit role generation save
    # signal use the correct Role model (the version active at this point in
    # migration, not the one at HEAD)
    orig_current_apps = utils.get_current_apps
    try:
        utils.get_current_apps = lambda: apps
        for type_ in ('Job', 'JobTemplate'):
            for obj in apps.get_model('main', type_).objects.all():
                if obj.cloud_credential:
                    obj.extra_credentials.add(obj.cloud_credential)
                if obj.network_credential:
                    obj.extra_credentials.add(obj.network_credential)
                obj.save()
    finally:
        utils.get_current_apps = orig_current_apps
    # TODO: remove once legacy/EOL'd Towers no longer support this upgrade path
    pass


def add_vault_id_field(apps, schema_editor):
    vault_credtype = CredentialType.objects.get(kind='vault')
    vault_credtype.inputs = CredentialType.defaults.get('vault')().inputs
    vault_credtype.save()
    # this is no longer necessary; schemas are defined in code
    pass


def remove_vault_id_field(apps, schema_editor):
    vault_credtype = CredentialType.objects.get(kind='vault')
    idx = 0
    for i, input in enumerate(vault_credtype.inputs['fields']):
        if input['id'] == 'vault_id':
            idx = i
            break
    vault_credtype.inputs['fields'].pop(idx)
    vault_credtype.save()
    # this is no longer necessary; schemas are defined in code
    pass


def create_rhv_tower_credtype(apps, schema_editor):
    CredentialType.setup_tower_managed_defaults()
    # this is no longer necessary; schemas are defined in code
    pass


def add_tower_verify_field(apps, schema_editor):
    tower_credtype = CredentialType.objects.get(
        kind='cloud', name='Ansible Tower', managed_by_tower=True
    )
    tower_credtype.inputs = CredentialType.defaults.get('tower')().inputs
    tower_credtype.save()
    # this is no longer necessary; schemas are defined in code
    pass


def add_azure_cloud_environment_field(apps, schema_editor):
    azure_rm_credtype = CredentialType.objects.get(kind='cloud',
                                                   name='Microsoft Azure Resource Manager')
    azure_rm_credtype.inputs = CredentialType.defaults.get('azure_rm')().inputs
    azure_rm_credtype.save()
    # this is no longer necessary; schemas are defined in code
    pass


def remove_become_methods(apps, schema_editor):
    become_credtype = CredentialType.objects.filter(kind='ssh', managed_by_tower=True).first()
    become_credtype.inputs = CredentialType.defaults.get('ssh')().inputs
    become_credtype.save()
    # this is no longer necessary; schemas are defined in code
    pass
@@ -6,27 +6,60 @@ from django.conf import settings # noqa
from django.db.models.signals import pre_delete # noqa

# AWX
from awx.main.models.base import * # noqa
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.organization import * # noqa
from awx.main.models.credential import * # noqa
from awx.main.models.projects import * # noqa
from awx.main.models.inventory import * # noqa
from awx.main.models.jobs import * # noqa
from awx.main.models.events import * # noqa
from awx.main.models.ad_hoc_commands import * # noqa
from awx.main.models.schedules import * # noqa
from awx.main.models.activity_stream import * # noqa
from awx.main.models.ha import * # noqa
from awx.main.models.rbac import * # noqa
from awx.main.models.mixins import * # noqa
from awx.main.models.notifications import * # noqa
from awx.main.models.fact import * # noqa
from awx.main.models.label import * # noqa
from awx.main.models.workflow import * # noqa
from awx.main.models.channels import * # noqa
from awx.main.models.base import ( # noqa
    BaseModel, PrimordialModel, prevent_search, CLOUD_INVENTORY_SOURCES, VERBOSITY_CHOICES
)
from awx.main.models.unified_jobs import ( # noqa
    UnifiedJob, UnifiedJobTemplate, StdoutMaxBytesExceeded
)
from awx.main.models.organization import ( # noqa
    Organization, Profile, Team, UserSessionMembership
)
from awx.main.models.credential import ( # noqa
    Credential, CredentialType, ManagedCredentialType, V1Credential, build_safe_env
)
from awx.main.models.projects import Project, ProjectUpdate # noqa
from awx.main.models.inventory import ( # noqa
    CustomInventoryScript, Group, Host, Inventory, InventorySource,
    InventoryUpdate, SmartInventoryMembership
)
from awx.main.models.jobs import ( # noqa
    Job, JobHostSummary, JobLaunchConfig, JobTemplate, SystemJob,
    SystemJobTemplate,
)
from awx.main.models.events import ( # noqa
    AdHocCommandEvent, InventoryUpdateEvent, JobEvent, ProjectUpdateEvent,
    SystemJobEvent,
)
from awx.main.models.ad_hoc_commands import AdHocCommand # noqa
from awx.main.models.schedules import Schedule # noqa
from awx.main.models.activity_stream import ActivityStream # noqa
from awx.main.models.ha import ( # noqa
    Instance, InstanceGroup, JobOrigin, TowerScheduleState,
)
from awx.main.models.rbac import ( # noqa
    Role, batch_role_ancestor_rebuilding, get_roles_on_resource,
    role_summary_fields_generator, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
    ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.models.mixins import ( # noqa
    CustomVirtualEnvMixin, ResourceMixin, SurveyJobMixin,
    SurveyJobTemplateMixin, TaskManagerInventoryUpdateMixin,
    TaskManagerJobMixin, TaskManagerProjectUpdateMixin,
    TaskManagerUnifiedJobMixin,
)
from awx.main.models.notifications import Notification, NotificationTemplate # noqa
from awx.main.models.fact import Fact # noqa
from awx.main.models.label import Label # noqa
from awx.main.models.workflow import ( # noqa
    WorkflowJob, WorkflowJobNode, WorkflowJobOptions, WorkflowJobTemplate,
    WorkflowJobTemplateNode,
)
from awx.main.models.channels import ChannelGroup # noqa
from awx.api.versioning import reverse
from awx.main.models.oauth import * # noqa
from awx.main.models.oauth import ( # noqa
    OAuth2AccessToken, OAuth2Application
)
from oauth2_provider.models import Grant, RefreshToken # noqa -- needed django-oauth-toolkit model migrations


@@ -50,7 +83,10 @@ _PythonSerializer.handle_m2m_field = _new_handle_m2m_field

# Add custom methods to User model for permissions checks.
from django.contrib.auth.models import User # noqa
from awx.main.access import * # noqa
from awx.main.access import ( # noqa
    get_user_queryset, check_user_access, check_user_access_with_errors,
    user_accessible_objects
)


User.add_to_class('get_queryset', get_user_queryset)
@@ -14,9 +14,11 @@ from django.core.exceptions import ValidationError

# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.base import (
    prevent_search, AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
)
from awx.main.models.events import AdHocCommandEvent
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.unified_jobs import UnifiedJob
from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate

logger = logging.getLogger('awx.main.models.ad_hoc_commands')
@@ -43,8 +45,7 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
        null=True,
        on_delete=models.SET_NULL,
    )
    limit = models.CharField(
        max_length=1024,
    limit = models.TextField(
        blank=True,
        default='',
    )
(File diff suppressed because it is too large.)
@@ -24,27 +24,32 @@ def gce(cred, env, private_data_dir):
        'type': 'service_account',
        'private_key': cred.get_input('ssh_key_data', default=''),
        'client_email': username,
        'project_id': project
        'project_id': project,
        # need token uri for inventory plugins
        # should this really be hard coded? Good question.
        'token_uri': 'https://accounts.google.com/o/oauth2/token',
    }

    handle, path = tempfile.mkstemp(dir=private_data_dir)
    f = os.fdopen(handle, 'w')
    json.dump(json_cred, f)
    json.dump(json_cred, f, indent=2)
    f.close()
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
    env['GCE_CREDENTIALS_FILE_PATH'] = path
    return path


def azure_rm(cred, env, private_data_dir):
    client = cred.get_input('client', default='')
    tenant = cred.get_input('tenant', default='')

    env['AZURE_SUBSCRIPTION_ID'] = cred.get_input('subscription', default='')

    if len(client) and len(tenant):
        env['AZURE_CLIENT_ID'] = client
        env['AZURE_TENANT'] = tenant
        env['AZURE_SECRET'] = cred.get_input('secret', default='')
        env['AZURE_SUBSCRIPTION_ID'] = cred.get_input('subscription', default='')
    else:
        env['AZURE_SUBSCRIPTION_ID'] = cred.get_input('subscription', default='')
        env['AZURE_AD_USER'] = cred.get_input('username', default='')
        env['AZURE_PASSWORD'] = cred.get_input('password', default='')
@@ -483,7 +483,7 @@ class JobEvent(BasePlaybookEvent):
        job = self.job
        for host in hostnames:
            host_stats = {}
            for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
            for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
                try:
                    host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
                except AttributeError:  # in case event_data[stat] isn't a dict.
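The per-host summary pulls each counter out of the playbook stats event, defaulting to 0 for hosts without an entry; a sketch with an illustrative `event_data` payload:

event_data = {
    'ok': {'web1': 5},
    'rescued': {'web1': 1},
    'ignored': {},  # hosts with no entry default to 0
}

host_stats = {}
for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
    host_stats[stat] = event_data.get(stat, {}).get('web1', 0)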
(File diff suppressed because it is too large.)
@@ -25,9 +25,16 @@ from rest_framework.exceptions import ParseError

# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.base import (
    BaseModel, CreatedModifiedModel,
    prevent_search,
    JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
    VarsDictProperty
)
from awx.main.models.events import JobEvent, SystemJobEvent
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.unified_jobs import (
    UnifiedJobTemplate, UnifiedJob
)
from awx.main.models.notifications import (
    NotificationTemplate,
    JobNotificationMixin,
@@ -805,13 +812,15 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
    def get_notification_friendly_name(self):
        return "Job"

    def _get_inventory_hosts(self, only=['name', 'ansible_facts', 'ansible_facts_modified', 'modified',]):
    def _get_inventory_hosts(
        self,
        only=['name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id']
    ):
        if not self.inventory:
            return []
        return self.inventory.hosts.only(*only)

    def start_job_fact_cache(self, destination, modification_times, timeout=None):
        destination = os.path.join(destination, 'facts')
        os.makedirs(destination, mode=0o700)
        hosts = self._get_inventory_hosts()
        if timeout is None:
@@ -836,7 +845,6 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
            modification_times[filepath] = os.path.getmtime(filepath)

    def finish_job_fact_cache(self, destination, modification_times):
        destination = os.path.join(destination, 'facts')
        for host in self._get_inventory_hosts():
            filepath = os.sep.join(map(str, [destination, host.name]))
            if not os.path.realpath(filepath).startswith(destination):
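The `realpath` check above is a path-containment guard: fact files are keyed by host name, so a hostile name like `../../etc/passwd` must not escape the facts directory. A minimal sketch of the same idea (paths are illustrative):

import os

def is_contained(destination, name):
    # resolve symlinks and '..' before comparing against the base dir
    filepath = os.path.join(destination, name)
    return os.path.realpath(filepath).startswith(destination)

assert is_contained('/srv/facts', 'web1.example.org')
assert not is_contained('/srv/facts', '../../etc/passwd')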
@@ -1113,17 +1121,19 @@ class JobHostSummary(CreatedModifiedModel):
    changed = models.PositiveIntegerField(default=0, editable=False)
    dark = models.PositiveIntegerField(default=0, editable=False)
    failures = models.PositiveIntegerField(default=0, editable=False)
    ignored = models.PositiveIntegerField(default=0, editable=False)
    ok = models.PositiveIntegerField(default=0, editable=False)
    processed = models.PositiveIntegerField(default=0, editable=False)
    rescued = models.PositiveIntegerField(default=0, editable=False)
    skipped = models.PositiveIntegerField(default=0, editable=False)
    failed = models.BooleanField(default=False, editable=False)

    def __str__(self):
        host = getattr_dne(self, 'host')
        hostname = host.name if host else 'N/A'
        return '%s changed=%d dark=%d failures=%d ok=%d processed=%d skipped=%s' % \
            (hostname, self.changed, self.dark, self.failures, self.ok,
             self.processed, self.skipped)
        return '%s changed=%d dark=%d failures=%d ignored=%d ok=%d processed=%d rescued=%d skipped=%s' % \
            (hostname, self.changed, self.dark, self.failures, self.ignored, self.ok,
             self.processed, self.rescued, self.skipped)

    def get_absolute_url(self, request=None):
        return reverse('api:job_host_summary_detail', kwargs={'pk': self.pk}, request=request)
@@ -11,7 +11,7 @@ from django.utils.encoding import smart_str, force_text

# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.base import CommonModelNameNotUnique, CreatedModifiedModel
from awx.main.utils import encrypt_field, decrypt_field, set_environ
from awx.main.notifications.email_backend import CustomEmailBackend
from awx.main.notifications.slack_backend import SlackBackend
@@ -10,12 +10,16 @@ from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.utils.timezone import now as tz_now
from django.utils.translation import ugettext_lazy as _


# AWX
from awx.api.versioning import reverse
from awx.main.fields import AutoOneToOneField, ImplicitRoleField
from awx.main.models.base import * # noqa
from awx.main.models.base import (
    BaseModel, CommonModel, CommonModelNameNotUnique, CreatedModifiedModel,
    NotificationFieldsModel
)
from awx.main.models.rbac import (
    ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
    ROLE_SINGLETON_SYSTEM_AUDITOR,
@@ -39,6 +43,12 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
        'InstanceGroup',
        blank=True,
    )
    max_hosts = models.PositiveIntegerField(
        blank=True,
        default=0,
        help_text=_('Maximum number of hosts allowed to be managed by this organization.'),
    )

    admin_role = ImplicitRoleField(
        parent_role='singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
    )
@@ -18,7 +18,7 @@ from django.utils.timezone import now, make_aware, get_default_timezone

# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.base import PROJECT_UPDATE_JOB_TYPE_CHOICES, PERM_INVENTORY_DEPLOY
from awx.main.models.events import ProjectUpdateEvent
from awx.main.models.notifications import (
    NotificationTemplate,
@@ -28,6 +28,7 @@ from awx.main.models.unified_jobs import (
    UnifiedJob,
    UnifiedJobTemplate,
)
from awx.main.models.jobs import Job
from awx.main.models.mixins import (
    ResourceMixin,
    TaskManagerProjectUpdateMixin,
@@ -16,7 +16,6 @@ from django.utils.translation import ugettext_lazy as _
# AWX
from awx.api.versioning import reverse
from django.contrib.auth.models import User # noqa
from awx.main.models.base import * # noqa

__all__ = [
    'Role',
@@ -18,7 +18,7 @@ from django.utils.translation import ugettext_lazy as _

# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.base import PrimordialModel
from awx.main.models.jobs import LaunchTimeConfig
from awx.main.utils import ignore_inventory_computed_fields
from awx.main.consumers import emit_channel_notification
@@ -61,11 +61,12 @@ class ScheduleManager(ScheduleFilterMethods, models.Manager):
        return ScheduleQuerySet(self.model, using=self._db)


class Schedule(CommonModel, LaunchTimeConfig):
class Schedule(PrimordialModel, LaunchTimeConfig):

    class Meta:
        app_label = 'main'
        ordering = ['-next_run']
        unique_together = ('unified_job_template', 'name')

    objects = ScheduleManager()

@@ -74,6 +75,9 @@ class Schedule(CommonModel, LaunchTimeConfig):
        related_name='schedules',
        on_delete=models.CASCADE,
    )
    name = models.CharField(
        max_length=512,
    )
    enabled = models.BooleanField(
        default=True,
        help_text=_("Enables processing of this schedule.")
@@ -17,10 +17,12 @@ class CustomEmailBackend(EmailBackend):
                       "use_tls": {"label": "Use TLS", "type": "bool"},
                       "use_ssl": {"label": "Use SSL", "type": "bool"},
                       "sender": {"label": "Sender Email", "type": "string"},
                       "recipients": {"label": "Recipient List", "type": "list"}}
                       "recipients": {"label": "Recipient List", "type": "list"},
                       "timeout": {"label": "Timeout", "type": "int", "default": 30}}
    recipient_parameter = "recipients"
    sender_parameter = "sender"

    def format_body(self, body):
        if "body" in body:
            body_actual = body['body']

@@ -186,7 +186,7 @@ class TaskManager():
        workflow_job.status = new_status
        if reason:
            logger.info(reason)
            workflow_job.job_explanation = "No error handling paths found, marking workflow as failed"
            workflow_job.job_explanation = _("No error handling paths found, marking workflow as failed")
            update_fields.append('job_explanation')
        workflow_job.start_args = ''  # blank field to remove encrypted passwords
        workflow_job.save(update_fields=update_fields)

@@ -30,7 +30,14 @@ from crum.signals import current_user_getter


# AWX
from awx.main.models import * # noqa
from awx.main.models import (
    ActivityStream, AdHocCommandEvent, Group, Host, InstanceGroup, Inventory,
    InventorySource, InventoryUpdateEvent, Job, JobEvent, JobHostSummary,
    JobTemplate, OAuth2AccessToken, Organization, Project, ProjectUpdateEvent,
    Role, SystemJob, SystemJobEvent, SystemJobTemplate, UnifiedJob,
    UnifiedJobTemplate, User, UserSessionMembership,
    ROLE_SINGLETON_SYSTEM_ADMINISTRATOR
)
from awx.main.constants import CENSOR_VALUE
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps
from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates

1294  awx/main/tasks.py  (File diff suppressed because it is too large)
@@ -14,6 +14,8 @@ from awx.main.tests.factories import (
    create_workflow_job_template,
)

from django.core.cache import cache


def pytest_addoption(parser):
    parser.addoption(
@@ -130,3 +132,10 @@ def mock_cache():

    return MockCache()


def pytest_runtest_teardown(item, nextitem):
    # clear the Django cache at the end of every test run
    # NOTE: this should not be memcache, see test_cache in test_env.py
    # this is a local test cache, so we want every test to start with an empty cache
    cache.clear()

8  awx/main/tests/data/inventory/plugins/azure_rm/env.json  (Normal file)
@@ -0,0 +1,8 @@
{
    "AZURE_SUBSCRIPTION_ID": "fooo",
    "AZURE_CLIENT_ID": "fooo",
    "AZURE_TENANT": "fooo",
    "AZURE_SECRET": "fooo",
    "AZURE_CLOUD_ENVIRONMENT": "fooo",
    "ANSIBLE_JINJA2_NATIVE": "True"
}
@@ -0,0 +1,35 @@
conditional_groups:
  azure: true
default_host_filters: []
exclude_host_filters:
- resource_group not in ['foo_resources', 'bar_resources']
- location not in ['southcentralus', 'westus']
hostvar_expressions:
  ansible_host: private_ipv4_addresses[0]
  computer_name: name
  private_ip: private_ipv4_addresses[0]
  provisioning_state: provisioning_state | title
  public_ip: public_ipv4_addresses[0]
  tags: tags if tags else None
  type: resource_type
keyed_groups:
- key: location
  prefix: ''
  separator: ''
- key: tags.keys() | list if tags else []
  prefix: ''
  separator: ''
- key: security_group
  prefix: ''
  separator: ''
- key: resource_group
  prefix: ''
  separator: ''
- key: os_disk.operating_system_type
  prefix: ''
  separator: ''
- key: dict(tags.keys() | map("regex_replace", "^(.*)$", "\1_") | list | zip(tags.values() | list)) if tags else []
  prefix: ''
  separator: ''
plugin: azure_rm
use_contrib_script_compatible_sanitization: true
6  awx/main/tests/data/inventory/plugins/ec2/env.json  (Normal file)
@@ -0,0 +1,6 @@
{
    "AWS_ACCESS_KEY_ID": "fooo",
    "AWS_SECRET_ACCESS_KEY": "fooo",
    "AWS_SECURITY_TOKEN": "fooo",
    "ANSIBLE_JINJA2_NATIVE": "True"
}
82  awx/main/tests/data/inventory/plugins/ec2/files/aws_ec2.yml  (Normal file)
@@ -0,0 +1,82 @@
boto_profile: /tmp/my_boto_stuff
compose:
  ansible_host: public_ip_address
  ec2_account_id: owner_id
  ec2_ami_launch_index: ami_launch_index | string
  ec2_architecture: architecture
  ec2_block_devices: dict(block_device_mappings | map(attribute='device_name') | list | zip(block_device_mappings | map(attribute='ebs.volume_id') | list))
  ec2_client_token: client_token
  ec2_dns_name: public_dns_name
  ec2_ebs_optimized: ebs_optimized
  ec2_eventsSet: events | default("")
  ec2_group_name: placement.group_name
  ec2_hypervisor: hypervisor
  ec2_id: instance_id
  ec2_image_id: image_id
  ec2_instance_profile: iam_instance_profile | default("")
  ec2_instance_type: instance_type
  ec2_ip_address: public_ip_address
  ec2_kernel: kernel_id | default("")
  ec2_key_name: key_name
  ec2_launch_time: launch_time | regex_replace(" ", "T") | regex_replace("(\+)(\d\d):(\d)(\d)$", ".\g<2>\g<3>Z")
  ec2_monitored: monitoring.state in ['enabled', 'pending']
  ec2_monitoring_state: monitoring.state
  ec2_persistent: persistent | default(false)
  ec2_placement: placement.availability_zone
  ec2_platform: platform | default("")
  ec2_private_dns_name: private_dns_name
  ec2_private_ip_address: private_ip_address
  ec2_public_dns_name: public_dns_name
  ec2_ramdisk: ramdisk_id | default("")
  ec2_reason: state_transition_reason
  ec2_region: placement.region
  ec2_requester_id: requester_id | default("")
  ec2_root_device_name: root_device_name
  ec2_root_device_type: root_device_type
  ec2_security_group_ids: security_groups | map(attribute='group_id') | list | join(',')
  ec2_security_group_names: security_groups | map(attribute='group_name') | list | join(',')
  ec2_sourceDestCheck: source_dest_check | default(false) | lower | string
  ec2_spot_instance_request_id: spot_instance_request_id | default("")
  ec2_state: state.name
  ec2_state_code: state.code
  ec2_state_reason: state_reason.message if state_reason is defined else ""
  ec2_subnet_id: subnet_id | default("")
  ec2_tag_Name: tags.Name
  ec2_virtualization_type: virtualization_type
  ec2_vpc_id: vpc_id | default("")
filters:
  instance-state-name:
  - running
groups:
  ec2: true
hostnames:
- network-interface.addresses.association.public-ip
- dns-name
- private-dns-name
keyed_groups:
- key: placement.availability_zone
  parent_group: zones
  prefix: ''
  separator: ''
- key: instance_type | regex_replace("[^A-Za-z0-9\_]", "_")
  parent_group: types
  prefix: type
- key: placement.region
  parent_group: regions
  prefix: ''
  separator: ''
- key: dict(tags.keys() | map("regex_replace", "[^A-Za-z0-9\_]", "_") | list | zip(tags.values() | map("regex_replace", "[^A-Za-z0-9\_]", "_") | list))
  parent_group: tags
  prefix: tag
- key: tags.keys() | map("regex_replace", "[^A-Za-z0-9\_]", "_") | list
  parent_group: tags
  prefix: tag
- key: placement.availability_zone
  parent_group: '{{ placement.region }}'
  prefix: ''
  separator: ''
plugin: aws_ec2
regions:
- us-east-2
- ap-south-1
use_contrib_script_compatible_sanitization: true
@@ -0,0 +1,7 @@
{
    "type": "service_account",
    "private_key": "{{private_key}}",
    "client_email": "fooo",
    "project_id": "fooo",
    "token_uri": "https://accounts.google.com/o/oauth2/token"
}
@@ -0,0 +1,46 @@
auth_kind: serviceaccount
compose:
  ansible_ssh_host: networkInterfaces[0].accessConfigs[0].natIP
  gce_description: description if description else None
  gce_id: id
  gce_machine_type: machineType
  gce_metadata: metadata.get("items", []) | items2dict(key_name="key", value_name="value")
  gce_name: name
  gce_network: networkInterfaces[0].network.name
  gce_private_ip: networkInterfaces[0].networkIP
  gce_public_ip: networkInterfaces[0].accessConfigs[0].natIP
  gce_status: status
  gce_subnetwork: networkInterfaces[0].subnetwork.name
  gce_tags: tags.get("items", [])
  gce_zone: zone
hostnames:
- name
- public_ip
- private_ip
keyed_groups:
- key: gce_subnetwork
  prefix: network
- key: gce_private_ip
  prefix: ''
  separator: ''
- key: gce_public_ip
  prefix: ''
  separator: ''
- key: machineType
  prefix: ''
  separator: ''
- key: zone
  prefix: ''
  separator: ''
- key: gce_tags
  prefix: tag
- key: status | lower
  prefix: status
plugin: gcp_compute
projects:
- fooo
service_account_file: {{ file_reference }}
use_contrib_script_compatible_sanitization: true
zones:
- us-east4-a
- us-west1-b
@@ -0,0 +1,14 @@
ansible:
  expand_hostvars: true
  fail_on_errors: true
  use_hostnames: false
clouds:
  devstack:
    auth:
      auth_url: https://foo.invalid
      domain_name: fooo
      password: fooo
      project_name: fooo
      username: fooo
    private: false
    verify: false
@@ -0,0 +1,6 @@
clouds_yaml_path:
- {{ file_reference }}
expand_hostvars: true
fail_on_errors: true
inventory_hostname: uuid
plugin: openstack
@@ -0,0 +1,5 @@
{
    "FOREMAN_SERVER": "https://foo.invalid",
    "FOREMAN_USER": "fooo",
    "FOREMAN_PASSWORD": "fooo"
}
@@ -0,0 +1 @@
plugin: foreman
6  awx/main/tests/data/inventory/plugins/tower/env.json  (Normal file)
@@ -0,0 +1,6 @@
{
    "TOWER_HOST": "https://foo.invalid",
    "TOWER_USERNAME": "fooo",
    "TOWER_PASSWORD": "fooo",
    "TOWER_VERIFY_SSL": "False"
}
@@ -0,0 +1,3 @@
include_metadata: true
inventory_id: 42
plugin: tower
8  awx/main/tests/data/inventory/scripts/azure_rm/env.json  (Normal file)
@@ -0,0 +1,8 @@
{
    "AZURE_SUBSCRIPTION_ID": "fooo",
    "AZURE_CLIENT_ID": "fooo",
    "AZURE_TENANT": "fooo",
    "AZURE_SECRET": "fooo",
    "AZURE_CLOUD_ENVIRONMENT": "fooo",
    "AZURE_INI_PATH": "{{ file_reference }}"
}
@@ -0,0 +1,10 @@
[azure]
include_powerstate = yes
group_by_resource_group = yes
group_by_location = yes
group_by_tag = yes
locations = southcentralus,westus
base_source_var = value_of_var
use_private_ip = True
resource_groups = foo_resources,bar_resources

@@ -0,0 +1,3 @@
{
    "CLOUDFORMS_INI_PATH": "{{ file_reference }}"
}
@@ -0,0 +1,16 @@
[cloudforms]
url = https://foo.invalid
username = fooo
password = fooo
ssl_verify = false
version = 2.4
purge_actions = maybe
clean_group_keys = this_key
nest_tags = yes
suffix = .ppt
prefer_ipv4 = yes

[cache]
max_age = 0
path = {{ cache_dir }}

@@ -0,0 +1 @@
<directory>
6  awx/main/tests/data/inventory/scripts/ec2/env.json  (Normal file)
@@ -0,0 +1,6 @@
{
    "AWS_ACCESS_KEY_ID": "fooo",
    "AWS_SECRET_ACCESS_KEY": "fooo",
    "AWS_SECURITY_TOKEN": "fooo",
    "EC2_INI_PATH": "{{ file_reference }}"
}
32  awx/main/tests/data/inventory/scripts/ec2/files/EC2_INI_PATH  (Normal file)
@@ -0,0 +1,32 @@
[ec2]
base_source_var = value_of_var
boto_profile = /tmp/my_boto_stuff
regions = us-east-2,ap-south-1
regions_exclude = us-gov-west-1,cn-north-1
destination_variable = public_dns_name
vpc_destination_variable = ip_address
route53 = False
all_instances = True
all_rds_instances = False
include_rds_clusters = False
rds = False
nested_groups = True
elasticache = False
stack_filters = False
instance_filters = foobaa
group_by_ami_id = False
group_by_availability_zone = True
group_by_aws_account = False
group_by_instance_id = False
group_by_instance_state = False
group_by_platform = False
group_by_instance_type = True
group_by_key_pair = False
group_by_region = True
group_by_security_group = False
group_by_tag_keys = True
group_by_tag_none = False
group_by_vpc_id = False
cache_path = {{ cache_dir }}
cache_max_age = 300

@@ -0,0 +1 @@
<directory>
7  awx/main/tests/data/inventory/scripts/gce/env.json  (Normal file)
@@ -0,0 +1,7 @@
{
    "GCE_EMAIL": "fooo",
    "GCE_PROJECT": "fooo",
    "GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
    "GCE_ZONE": "us-east4-a,us-west1-b",
    "GCE_INI_PATH": "{{ file_reference }}"
}
@@ -0,0 +1,7 @@
{
    "type": "service_account",
    "private_key": "{{private_key}}",
    "client_email": "fooo",
    "project_id": "fooo",
    "token_uri": "https://accounts.google.com/o/oauth2/token"
}
@@ -0,0 +1,3 @@
[cache]
cache_max_age = 0

3  awx/main/tests/data/inventory/scripts/openstack/env.json  (Normal file)
@@ -0,0 +1,3 @@
{
    "OS_CLIENT_CONFIG_FILE": "{{ file_reference }}"
}
@@ -0,0 +1,16 @@
ansible:
  expand_hostvars: true
  fail_on_errors: true
  use_hostnames: false
cache:
  path: {{ cache_dir }}
clouds:
  devstack:
    auth:
      auth_url: https://foo.invalid
      domain_name: fooo
      password: fooo
      project_name: fooo
      username: fooo
    private: false
    verify: false
@@ -0,0 +1 @@
<directory>
6  awx/main/tests/data/inventory/scripts/rhv/env.json  (Normal file)
@@ -0,0 +1,6 @@
{
    "OVIRT_INI_PATH": "{{ file_reference }}",
    "OVIRT_URL": "https://foo.invalid",
    "OVIRT_USERNAME": "fooo",
    "OVIRT_PASSWORD": "fooo"
}
@@ -0,0 +1,5 @@
[ovirt]
ovirt_url=https://foo.invalid
ovirt_username=fooo
ovirt_password=fooo
ovirt_ca_file=fooo
@@ -0,0 +1,3 @@
{
    "FOREMAN_INI_PATH": "{{ file_reference }}"
}
@@ -0,0 +1,17 @@
[foreman]
base_source_var = value_of_var
ssl_verify = False
url = https://foo.invalid
user = fooo
password = fooo

[ansible]
group_patterns = foo_group_patterns
want_facts = True
want_hostcollections = True
group_prefix = foo_group_prefix

[cache]
path = /tmp
max_age = 0

8  awx/main/tests/data/inventory/scripts/tower/env.json  (Normal file)
@@ -0,0 +1,8 @@
{
    "TOWER_HOST": "https://foo.invalid",
    "TOWER_USERNAME": "fooo",
    "TOWER_PASSWORD": "fooo",
    "TOWER_VERIFY_SSL": "False",
    "TOWER_INVENTORY": "42",
    "TOWER_LICENSE_TYPE": "open"
}
7  awx/main/tests/data/inventory/scripts/vmware/env.json  (Normal file)
@@ -0,0 +1,7 @@
{
    "VMWARE_USER": "fooo",
    "VMWARE_PASSWORD": "fooo",
    "VMWARE_HOST": "https://foo.invalid",
    "VMWARE_VALIDATE_CERTS": "False",
    "VMWARE_INI_PATH": "{{ file_reference }}"
}
@@ -0,0 +1,10 @@
[vmware]
cache_max_age = 0
validate_certs = False
username = fooo
password = fooo
server = https://foo.invalid
base_source_var = value_of_var
host_filters = foobaa
groupby_patterns = fouo

@@ -1353,6 +1353,40 @@ def test_openstack_create_ok(post, organization, admin, version, params):
    assert response.status_code == 201


@pytest.mark.django_db
@pytest.mark.parametrize('verify_ssl, expected', [
    [None, True],
    [True, True],
    [False, False],
])
def test_openstack_verify_ssl(get, post, organization, admin, verify_ssl, expected):
    openstack = CredentialType.defaults['openstack']()
    openstack.save()
    inputs = {
        'username': 'some_user',
        'password': 'some_password',
        'project': 'some_project',
        'host': 'some_host',
    }
    if verify_ssl is not None:
        inputs['verify_ssl'] = verify_ssl
    params = {
        'credential_type': openstack.id,
        'inputs': inputs,
        'name': 'Best credential ever',
        'organization': organization.id
    }
    response = post(
        reverse('api:credential_list', kwargs={'version': 'v2'}),
        params,
        admin
    )
    assert response.status_code == 201

    cred = Credential.objects.get(pk=response.data['id'])
    assert cred.get_input('verify_ssl') == expected


@pytest.mark.django_db
@pytest.mark.parametrize('version, params', [
    ['v1', {}],

@@ -271,6 +271,74 @@ def test_create_with_required_inputs(get, post, admin):
    assert required == ['api_token']


@pytest.mark.django_db
@pytest.mark.parametrize('default, status_code', [
    ['some default string', 201],
    [None, 400],
    [True, 400],
    [False, 400],
])
@pytest.mark.parametrize('secret', [True, False])
def test_create_with_default_string(get, post, admin, default, status_code, secret):
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'MyCloud',
        'inputs': {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': secret,
                'default': default,
            }],
            'required': ['api_token'],
        },
        'injectors': {}
    }, admin)
    assert response.status_code == status_code
    if status_code == 201:
        cred = Credential(
            credential_type=CredentialType.objects.get(pk=response.data['id']),
            name='My Custom Cred'
        )
        assert cred.get_input('api_token') == default
    elif status_code == 400:
        assert "{} is not a string".format(default) in json.dumps(response.data)


@pytest.mark.django_db
@pytest.mark.parametrize('default, status_code', [
    ['some default string', 400],
    [None, 400],
    [True, 201],
    [False, 201],
])
def test_create_with_default_bool(get, post, admin, default, status_code):
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'MyCloud',
        'inputs': {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'boolean',
                'default': default,
            }],
            'required': ['api_token'],
        },
        'injectors': {}
    }, admin)
    assert response.status_code == status_code
    if status_code == 201:
        cred = Credential(
            credential_type=CredentialType.objects.get(pk=response.data['id']),
            name='My Custom Cred'
        )
        assert cred.get_input('api_token') == default
    elif status_code == 400:
        assert "{} is not a boolean".format(default) in json.dumps(response.data)


@pytest.mark.django_db
@pytest.mark.parametrize('inputs', [
    True,

@@ -326,6 +326,24 @@ def test_create_inventory_host(post, inventory, alice, role_field, expected_status_code):
    post(reverse('api:inventory_hosts_list', kwargs={'pk': inventory.id}), data, alice, expect=expected_status_code)


@pytest.mark.parametrize("hosts,expected_status_code", [
    (1, 201),
    (2, 201),
    (3, 201),
])
@pytest.mark.django_db
def test_create_inventory_host_with_limits(post, admin_user, inventory, hosts, expected_status_code):
    # The per-Organization host limits functionality should be a no-op on AWX.
    inventory.organization.max_hosts = 2
    inventory.organization.save()
    for i in range(hosts):
        inventory.hosts.create(name="Existing host %i" % i)

    data = {'name': 'New name', 'description': 'Hello world'}
    post(reverse('api:inventory_hosts_list', kwargs={'pk': inventory.id}),
         data, admin_user, expect=expected_status_code)


@pytest.mark.parametrize("role_field,expected_status_code", [
    (None, 403),
    ('admin_role', 201),
@@ -356,6 +374,18 @@ def test_edit_inventory_host(put, host, alice, role_field, expected_status_code):
    put(reverse('api:host_detail', kwargs={'pk': host.id}), data, alice, expect=expected_status_code)


@pytest.mark.django_db
def test_edit_inventory_host_with_limits(put, host, admin_user):
    # The per-Organization host limits functionality should be a no-op on AWX.
    inventory = host.inventory
    inventory.organization.max_hosts = 1
    inventory.organization.save()
    inventory.hosts.create(name='Alternate host')

    data = {'name': 'New name', 'description': 'Hello world'}
    put(reverse('api:host_detail', kwargs={'pk': host.id}), data, admin_user, expect=200)


@pytest.mark.parametrize("role_field,expected_status_code", [
    (None, 403),
    ('admin_role', 204),

@@ -10,7 +10,7 @@ import pytest
from unittest import mock

# AWX
from awx.main.models import * # noqa
from awx.main.models import ProjectUpdate
from awx.api.versioning import reverse

@@ -199,6 +199,30 @@ def test_update_organization(get, put, organization, alice, bob):
    put(reverse('api:organization_detail', kwargs={'pk': organization.id}), data, user=bob, expect=403)


@pytest.mark.django_db
def test_update_organization_max_hosts(get, put, organization, admin, alice, bob):
    # Admin users can get and update max_hosts
    data = get(reverse('api:organization_detail', kwargs={'pk': organization.id}), user=admin, expect=200).data
    assert organization.max_hosts == 0
    data['max_hosts'] = 3
    put(reverse('api:organization_detail', kwargs={'pk': organization.id}), data, user=admin, expect=200)
    organization.refresh_from_db()
    assert organization.max_hosts == 3

    # Organization admins can get the data and can update other fields, but not max_hosts
    organization.admin_role.members.add(alice)
    data = get(reverse('api:organization_detail', kwargs={'pk': organization.id}), user=alice, expect=200).data
    data['max_hosts'] = 5
    put(reverse('api:organization_detail', kwargs={'pk': organization.id}), data, user=alice, expect=400)
    organization.refresh_from_db()
    assert organization.max_hosts == 3

    # Ordinary users shouldn't be able to update either.
    put(reverse('api:organization_detail', kwargs={'pk': organization.id}), data, user=bob, expect=403)
    organization.refresh_from_db()
    assert organization.max_hosts == 3


@pytest.mark.django_db
@mock.patch('awx.main.access.BaseAccess.check_license', lambda *a, **kw: True)
def test_delete_organization(delete, organization, admin):

Some files were not shown because too many files have changed in this diff.