Compare commits

..

1 Commits

Author SHA1 Message Date
Jessica Steurer
4ba8dd4d98 Revert "Remove external script call to D3.js." 2022-07-18 19:11:15 -03:00
954 changed files with 21663 additions and 39836 deletions

View File

@@ -1,2 +1,3 @@
awx/ui/node_modules
Dockerfile Dockerfile
.git .git

View File

@@ -25,7 +25,7 @@ Instead use the bug or feature request.
<!--- Pick one below and delete the rest: --> <!--- Pick one below and delete the rest: -->
- Breaking Change - Breaking Change
- New or Enhanced Feature - New or Enhanced Feature
- Bug, Docs Fix or other nominal change - Bug or Docs Fix
##### COMPONENT NAME ##### COMPONENT NAME

View File

@@ -20,19 +20,6 @@ body:
- label: I understand that AWX is open source software provided for free and that I might not receive a timely response. - label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
required: true required: true
- type: dropdown
id: feature-type
attributes:
label: Feature type
description: >-
What kind of feature is this?
multiple: false
options:
- "New Feature"
- "Enhancement to Existing Feature"
validations:
required: true
- type: textarea - type: textarea
id: summary id: summary
attributes: attributes:
@@ -53,36 +40,3 @@ body:
- label: CLI - label: CLI
- label: Other - label: Other
- type: textarea
id: steps-to-reproduce
attributes:
label: Steps to reproduce
description: >-
Describe the necessary steps to understand the scenario of the requested enhancement.
Include all the steps that will help the developer and QE team understand what you are requesting.
validations:
required: true
- type: textarea
id: current-results
attributes:
label: Current results
description: What is currently happening on the scenario?
validations:
required: true
- type: textarea
id: suggested-results
attributes:
label: Suggested feature result
description: What is the result this new feature will bring?
validations:
required: true
- type: textarea
id: additional-information
attributes:
label: Additional information
description: Please provide any other information you think is relevant that could help us understand your feature request.
validations:
required: false

View File

@@ -11,7 +11,7 @@ the change does.
<!--- Pick one below and delete the rest: --> <!--- Pick one below and delete the rest: -->
- Breaking Change - Breaking Change
- New or Enhanced Feature - New or Enhanced Feature
- Bug, Docs Fix or other nominal change - Bug or Docs Fix
##### COMPONENT NAME ##### COMPONENT NAME
<!--- Name of the module/plugin/module/task --> <!--- Name of the module/plugin/module/task -->

View File

@@ -53,16 +53,6 @@ https://github.com/ansible/awx/#get-involved \
Thank you once again for this and your interest in AWX! Thank you once again for this and your interest in AWX!
### Red Hat Support Team
- Hi! \
\
It appears that you are using an RPM build for RHEL. Please reach out to the Red Hat support team and submit a ticket. \
\
Here is the link to do so: \
\
https://access.redhat.com/support \
\
Thank you for your submission and for supporting AWX!
## Common ## Common
@@ -103,9 +93,6 @@ The Ansible Community is looking at building an EE that corresponds to all of th
- AWX: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md - AWX: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md
- AWX-Operator: https://github.com/ansible/awx-operator/blob/devel/CONTRIBUTING.md - AWX-Operator: https://github.com/ansible/awx-operator/blob/devel/CONTRIBUTING.md
### Oracle AWX
We'd be happy to help if you can reproduce this with AWX since we do not have Oracle's Linux Automation Manager. If you need help with this specific version of Oracle's Linux Automation Manager you will need to contact Oracle for support.
### AWX Release ### AWX Release
Subject: Announcing AWX Xa.Ya.za and AWX-Operator Xb.Yb.zb Subject: Announcing AWX Xa.Ya.za and AWX-Operator Xb.Yb.zb

View File

@@ -2,7 +2,6 @@
name: CI name: CI
env: env:
BRANCH: ${{ github.base_ref || 'devel' }} BRANCH: ${{ github.base_ref || 'devel' }}
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: on:
pull_request: pull_request:
jobs: jobs:
@@ -28,9 +27,6 @@ jobs:
- name: awx-collection - name: awx-collection
command: /start_tests.sh test_collection_all command: /start_tests.sh test_collection_all
label: Run Collection Tests label: Run Collection Tests
- name: awx-collection-sanity
command: /start_tests.sh test_collection_sanity
label: Run Ansible core Collection Sanity tests
- name: api-schema - name: api-schema
label: Check API Schema label: Check API Schema
command: /start_tests.sh detect-schema-change SCHEMA_DIFF_BASE_BRANCH=${{ github.event.pull_request.base.ref }} command: /start_tests.sh detect-schema-change SCHEMA_DIFF_BASE_BRANCH=${{ github.event.pull_request.base.ref }}
@@ -115,18 +111,9 @@ jobs:
repository: ansible/awx-operator repository: ansible/awx-operator
path: awx-operator path: awx-operator
- name: Get python version from Makefile
working-directory: awx
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v2
with:
python-version: ${{ env.py_version }}
- name: Install playbook dependencies - name: Install playbook dependencies
run: | run: |
python3 -m pip install docker python3 -m pip install docker setuptools_scm
- name: Build AWX image - name: Build AWX image
working-directory: awx working-directory: awx

View File

@@ -1,7 +1,5 @@
--- ---
name: Build/Push Development Images name: Build/Push Development Images
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: on:
push: push:
branches: branches:

View File

@@ -1,8 +1,5 @@
--- ---
name: E2E Tests name: E2E Tests
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: on:
pull_request_target: pull_request_target:
types: [labeled] types: [labeled]

View File

@@ -1,26 +0,0 @@
---
name: Feature branch deletion cleanup
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
delete:
branches:
- feature_**
jobs:
push:
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
steps:
- name: Delete API Schema
env:
AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
AWS_REGION: 'us-east-1'
run: |
ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
ansible localhost -c local -m aws_s3 \
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"

View File

@@ -19,34 +19,3 @@ jobs:
not-before: 2021-12-07T07:00:00Z not-before: 2021-12-07T07:00:00Z
configuration-path: .github/issue_labeler.yml configuration-path: .github/issue_labeler.yml
enable-versioned-regex: 0 enable-versioned-regex: 0
community:
runs-on: ubuntu-latest
name: Label Issue - Community
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v4
- name: Install python requests
run: pip install requests
- name: Check if user is a member of Ansible org
uses: jannekem/run-python-script-action@v1
id: check_user
with:
script: |
import requests
headers = {'Accept': 'application/vnd.github+json', 'Authorization': 'token ${{ secrets.GITHUB_TOKEN }}'}
response = requests.get('${{ fromJson(toJson(github.event.issue.user.url)) }}/orgs?per_page=100', headers=headers)
is_member = False
for org in response.json():
if org['login'] == 'ansible':
is_member = True
if is_member:
print("User is member")
else:
print("User is community")
- name: Add community label if not a member
if: contains(steps.check_user.outputs.stdout, 'community')
uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
with:
add-labels: "community"
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -18,34 +18,3 @@ jobs:
with: with:
repo-token: "${{ secrets.GITHUB_TOKEN }}" repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/pr_labeler.yml configuration-path: .github/pr_labeler.yml
community:
runs-on: ubuntu-latest
name: Label PR - Community
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v4
- name: Install python requests
run: pip install requests
- name: Check if user is a member of Ansible org
uses: jannekem/run-python-script-action@v1
id: check_user
with:
script: |
import requests
headers = {'Accept': 'application/vnd.github+json', 'Authorization': 'token ${{ secrets.GITHUB_TOKEN }}'}
response = requests.get('${{ fromJson(toJson(github.event.pull_request.user.url)) }}/orgs?per_page=100', headers=headers)
is_member = False
for org in response.json():
if org['login'] == 'ansible':
is_member = True
if is_member:
print("User is member")
else:
print("User is community")
- name: Add community label if not a member
if: contains(steps.check_user.outputs.stdout, 'community')
uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
with:
add-labels: "community"
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,37 +0,0 @@
---
name: PR Check
env:
BRANCH: ${{ github.base_ref || 'devel' }}
on:
pull_request:
types: [opened, edited, reopened, synchronize]
jobs:
pr-check:
name: Scan PR description for semantic versioning keywords
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
steps:
- name: Check for each of the lines
env:
PR_BODY: ${{ github.event.pull_request.body }}
run: |
echo $PR_BODY | grep "Bug, Docs Fix or other nominal change" > Z
echo $PR_BODY | grep "New or Enhanced Feature" > Y
echo $PR_BODY | grep "Breaking Change" > X
exit 0
# We exit 0 and set the shell to prevent the returns from the greps from failing this step
# See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
shell: bash {0}
- name: Check for exactly one item
run: |
if [ $(cat X Y Z | wc -l) != 1 ] ; then
echo "The PR body must contain exactly one of [ 'Bug, Docs Fix or other nominal change', 'New or Enhanced Feature', 'Breaking Change' ]"
echo "We counted $(cat X Y Z | wc -l)"
echo "See the default PR body for examples"
exit 255;
else
exit 0;
fi

View File

@@ -1,9 +1,5 @@
--- ---
name: Promote Release name: Promote Release
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: on:
release: release:
types: [published] types: [published]

View File

@@ -1,9 +1,5 @@
--- ---
name: Stage Release name: Stage Release
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: on:
workflow_dispatch: workflow_dispatch:
inputs: inputs:
@@ -69,7 +65,7 @@ jobs:
- name: Install playbook dependencies - name: Install playbook dependencies
run: | run: |
python3 -m pip install docker python3 -m pip install docker setuptools_scm
- name: Build and stage AWX - name: Build and stage AWX
working-directory: awx working-directory: awx

View File

@@ -1,29 +0,0 @@
---
name: Dependency Pr Update
on:
pull_request:
types: [labeled, opened, reopened]
jobs:
pr-check:
name: Update Dependabot Prs
if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui')
runs-on: ubuntu-latest
steps:
- name: Checkout branch
uses: actions/checkout@v3
- name: Update PR Body
env:
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
OWNER: ${{ github.repository_owner }}
REPO: ${{ github.event.repository.name }}
PR: ${{github.event.pull_request.number}}
PR_BODY: ${{github.event.pull_request.body}}
run: |
gh pr checkout ${{ env.PR }}
echo "${{ env.PR_BODY }}" > my_pr_body.txt
echo "" >> my_pr_body.txt
echo "Bug, Docs Fix or other nominal change" >> my_pr_body.txt
gh pr edit ${{env.PR}} --body-file my_pr_body.txt

View File

@@ -1,15 +1,10 @@
--- ---
name: Upload API Schema name: Upload API Schema
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: on:
push: push:
branches: branches:
- devel - devel
- release_** - release_**
- feature_**
jobs: jobs:
push: push:
runs-on: ubuntu-latest runs-on: ubuntu-latest

3
.gitignore vendored
View File

@@ -153,6 +153,9 @@ use_dev_supervisor.txt
/sanity/ /sanity/
/awx_collection_build/ /awx_collection_build/
# Setup for metrics gathering
tools/prometheus/prometheus.yml
.idea/* .idea/*
*.unison.tmp *.unison.tmp
*.# *.#

View File

@@ -8,8 +8,6 @@ ignore: |
awx/ui/test/e2e/tests/smoke-vars.yml awx/ui/test/e2e/tests/smoke-vars.yml
awx/ui/node_modules awx/ui/node_modules
tools/docker-compose/_sources tools/docker-compose/_sources
# django template files
awx/api/templates/instance_install_bundle/**
extends: default extends: default

View File

@@ -19,17 +19,16 @@ Have questions about this document or anything not covered here? Come chat with
- [Purging containers and images](#purging-containers-and-images) - [Purging containers and images](#purging-containers-and-images)
- [Pre commit hooks](#pre-commit-hooks) - [Pre commit hooks](#pre-commit-hooks)
- [What should I work on?](#what-should-i-work-on) - [What should I work on?](#what-should-i-work-on)
- [Translations](#translations)
- [Submitting Pull Requests](#submitting-pull-requests) - [Submitting Pull Requests](#submitting-pull-requests)
- [PR Checks run by Zuul](#pr-checks-run-by-zuul)
- [Reporting Issues](#reporting-issues) - [Reporting Issues](#reporting-issues)
- [Getting Help](#getting-help)
## Things to know prior to submitting code ## Things to know prior to submitting code
- All code submissions are done through pull requests against the `devel` branch. - All code submissions are done through pull requests against the `devel` branch.
- You must use `git commit --signoff` for any commit to be merged, and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md). - You must use `git commit --signoff` for any commit to be merged, and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md).
- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason. - Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
- If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see [git push docs](https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt). - If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt
- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on irc.libera.chat, and talk about what you would like to do or add first. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed. - If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on irc.libera.chat, and talk about what you would like to do or add first. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed.
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com) - We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
@@ -43,7 +42,8 @@ The AWX development environment workflow and toolchain uses Docker and the docke
Prior to starting the development services, you'll need `docker` and `docker-compose`. On Linux, you can generally find these in your distro's packaging, but you may find that Docker themselves maintain a separate repo that tracks more closely to the latest releases. Prior to starting the development services, you'll need `docker` and `docker-compose`. On Linux, you can generally find these in your distro's packaging, but you may find that Docker themselves maintain a separate repo that tracks more closely to the latest releases.
For macOS and Windows, we recommend [Docker for Mac](https://www.docker.com/docker-mac) and [Docker for Windows](https://www.docker.com/docker-windows) respectively. For macOS and Windows, we recommend [Docker for Mac](https://www.docker.com/docker-mac) and [Docker for Windows](https://www.docker.com/docker-windows)
respectively.
For Linux platforms, refer to the following from Docker: For Linux platforms, refer to the following from Docker:
@@ -79,13 +79,17 @@ See the [README.md](./tools/docker-compose/README.md) for docs on how to build t
### Building API Documentation ### Building API Documentation
AWX includes support for building [Swagger/OpenAPI documentation](https://swagger.io). To build the documentation locally, run: AWX includes support for building [Swagger/OpenAPI
documentation](https://swagger.io). To build the documentation locally, run:
```bash ```bash
(container)/awx_devel$ make swagger (container)/awx_devel$ make swagger
``` ```
This will write a file named `swagger.json` that contains the API specification in OpenAPI format. A variety of online tools are available for translating this data into more consumable formats (such as HTML). http://editor.swagger.io is an example of one such service. This will write a file named `swagger.json` that contains the API specification
in OpenAPI format. A variety of online tools are available for translating
this data into more consumable formats (such as HTML). http://editor.swagger.io
is an example of one such service.
### Accessing the AWX web interface ### Accessing the AWX web interface
@@ -111,30 +115,20 @@ While you can use environment variables to skip the pre-commit hooks GitHub will
## What should I work on? ## What should I work on?
We have a ["good first issue" label](https://github.com/ansible/awx/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) we put on some issues that might be a good starting point for new contributors.
Fixing bugs and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start.
For feature work, take a look at the current [Enhancements](https://github.com/ansible/awx/issues?q=is%3Aissue+is%3Aopen+label%3Atype%3Aenhancement). For feature work, take a look at the current [Enhancements](https://github.com/ansible/awx/issues?q=is%3Aissue+is%3Aopen+label%3Atype%3Aenhancement).
If it has someone assigned to it then that person is the person responsible for working the enhancement. If you feel like you could contribute then reach out to that person. If it has someone assigned to it then that person is the person responsible for working the enhancement. If you feel like you could contribute then reach out to that person.
**NOTES** Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start. For extra information on debugging tools, see [Debugging](./docs/debugging/).
> Issue assignment will only be done for maintainers of the project. If you decide to work on an issue, please feel free to add a comment in the issue to let others know that you are working on it; but know that we will accept the first pull request from whomever is able to fix an issue. Once your PR is accepted we can add you as an assignee to an issue upon request.
**NOTE**
> If you work in a part of the codebase that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us in the `#ansible-awx` channel on irc.libera.chat, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project). > If you work in a part of the codebase that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us in the `#ansible-awx` channel on irc.libera.chat, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
**NOTE**
> If you're planning to develop features or fixes for the UI, please review the [UI Developer doc](./awx/ui/README.md). > If you're planning to develop features or fixes for the UI, please review the [UI Developer doc](./awx/ui/README.md).
### Translations
At this time we do not accept PRs for adding additional language translations as we have an automated process for generating our translations. This is because translations require constant care as new strings are added and changed in the code base. Because of this the .po files are overwritten during every translation release cycle. We also can't support a lot of translations on AWX as its an open source project and each language adds time and cost to maintain. If you would like to see AWX translated into a new language please create an issue and ask others you know to upvote the issue. Our translation team will review the needs of the community and see what they can do around supporting additional language.
If you find an issue with an existing translation, please see the [Reporting Issues](#reporting-issues) section to open an issue and our translation team will work with you on a resolution.
## Submitting Pull Requests ## Submitting Pull Requests
Fixes and Features for AWX will go through the Github pull request process. Submit your pull request (PR) against the `devel` branch. Fixes and Features for AWX will go through the Github pull request process. Submit your pull request (PR) against the `devel` branch.
@@ -158,14 +152,28 @@ We like to keep our commit history clean, and will require resubmission of pull
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient. Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient.
When your PR is initially submitted the checks will not be run until a maintainer allows them to be. Once a maintainer has done a quick review of your work the PR will have the linter and unit tests run against them via GitHub Actions, and the status reported in the PR. All submitted PRs will have the linter and unit tests run against them via Zuul, and the status reported in the PR.
## PR Checks run by Zuul
Zuul jobs for awx are defined in the [zuul-jobs](https://github.com/ansible/zuul-jobs) repo.
Zuul runs the following checks that must pass:
1. `tox-awx-api-lint`
2. `tox-awx-ui-lint`
3. `tox-awx-api`
4. `tox-awx-ui`
5. `tox-awx-swagger`
Zuul runs the following checks that are non-voting (can not pass but serve to inform PR reviewers):
1. `tox-awx-detect-schema-change`
This check generates the schema and diffs it against a reference copy of the `devel` version of the schema.
Reviewers should inspect the `job-output.txt.gz` related to the check if there is a failure (grep for `diff -u -b` to find beginning of diff).
If the schema change is expected and makes sense in relation to the changes made by the PR, then you are good to go!
If not, the schema changes should be fixed, but this decision must be enforced by reviewers.
## Reporting Issues ## Reporting Issues
We welcome your feedback, and encourage you to file an issue when you run into a problem. But before opening a new issues, we ask that you please view our [Issues guide](./ISSUES.md). We welcome your feedback, and encourage you to file an issue when you run into a problem. But before opening a new issues, we ask that you please view our [Issues guide](./ISSUES.md).
## Getting Help
If you require additional assistance, please reach out to us at `#ansible-awx` on irc.libera.chat, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
For extra information on debugging tools, see [Debugging](./docs/debugging/).

View File

@@ -3,7 +3,7 @@ recursive-include awx *.po
recursive-include awx *.mo recursive-include awx *.mo
recursive-include awx/static * recursive-include awx/static *
recursive-include awx/templates *.html recursive-include awx/templates *.html
recursive-include awx/api/templates *.md *.html *.yml recursive-include awx/api/templates *.md *.html
recursive-include awx/ui/build *.html recursive-include awx/ui/build *.html
recursive-include awx/ui/build * recursive-include awx/ui/build *
recursive-include awx/playbooks *.yml recursive-include awx/playbooks *.yml
@@ -12,7 +12,7 @@ recursive-include awx/plugins *.ps1
recursive-include requirements *.txt recursive-include requirements *.txt
recursive-include requirements *.yml recursive-include requirements *.yml
recursive-include config * recursive-include config *
recursive-include licenses * recursive-include docs/licenses *
recursive-exclude awx devonly.py* recursive-exclude awx devonly.py*
recursive-exclude awx/api/tests * recursive-exclude awx/api/tests *
recursive-exclude awx/main/tests * recursive-exclude awx/main/tests *

142
Makefile
View File

@@ -6,9 +6,7 @@ CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage MANAGEMENT_COMMAND ?= awx-manage
VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py) VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
COLLECTION_VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
# ansible-test requires semver compatible version, so we allow overrides to hack it
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
# NOTE: This defaults the container image version to the branch that's active # NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH) COMPOSE_TAG ?= $(GIT_BRANCH)
@@ -36,7 +34,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
# These should be upgraded in the AWX and Ansible venv before attempting # These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements # to install the actual requirements
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4 VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==58.2.0 setuptools_scm[toml]==6.4.2 wheel==0.36.2
NAME ?= awx NAME ?= awx
@@ -74,7 +72,7 @@ clean-languages:
rm -f $(I18N_FLAG_FILE) rm -f $(I18N_FLAG_FILE)
find ./awx/locale/ -type f -regex ".*\.mo$" -delete find ./awx/locale/ -type f -regex ".*\.mo$" -delete
## Remove temporary build files, compiled Python files. # Remove temporary build files, compiled Python files.
clean: clean-ui clean-api clean-awxkit clean-dist clean: clean-ui clean-api clean-awxkit clean-dist
rm -rf awx/public rm -rf awx/public
rm -rf awx/lib/site-packages rm -rf awx/lib/site-packages
@@ -87,7 +85,6 @@ clean: clean-ui clean-api clean-awxkit clean-dist
clean-api: clean-api:
rm -rf build $(NAME)-$(VERSION) *.egg-info rm -rf build $(NAME)-$(VERSION) *.egg-info
rm -rf .tox
find . -type f -regex ".*\.py[co]$$" -delete find . -type f -regex ".*\.py[co]$$" -delete
find . -type d -name "__pycache__" -delete find . -type d -name "__pycache__" -delete
rm -f awx/awx_test.sqlite3* rm -f awx/awx_test.sqlite3*
@@ -97,7 +94,7 @@ clean-api:
clean-awxkit: clean-awxkit:
rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/* rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*
## convenience target to assert environment variables are defined # convenience target to assert environment variables are defined
guard-%: guard-%:
@if [ "$${$*}" = "" ]; then \ @if [ "$${$*}" = "" ]; then \
echo "The required environment variable '$*' is not set"; \ echo "The required environment variable '$*' is not set"; \
@@ -120,7 +117,7 @@ virtualenv_awx:
fi; \ fi; \
fi fi
## Install third-party requirements needed for AWX's environment. # Install third-party requirements needed for AWX's environment.
# this does not use system site packages intentionally # this does not use system site packages intentionally
requirements_awx: virtualenv_awx requirements_awx: virtualenv_awx
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \ if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
@@ -139,7 +136,7 @@ requirements_dev: requirements_awx requirements_awx_dev
requirements_test: requirements requirements_test: requirements
## "Install" awx package in development mode. # "Install" awx package in development mode.
develop: develop:
@if [ "$(VIRTUAL_ENV)" ]; then \ @if [ "$(VIRTUAL_ENV)" ]; then \
pip uninstall -y awx; \ pip uninstall -y awx; \
@@ -156,21 +153,21 @@ version_file:
fi; \ fi; \
$(PYTHON) -c "import awx; print(awx.__version__)" > /var/lib/awx/.awx_version; \ $(PYTHON) -c "import awx; print(awx.__version__)" > /var/lib/awx/.awx_version; \
## Refresh development environment after pulling new code. # Refresh development environment after pulling new code.
refresh: clean requirements_dev version_file develop migrate refresh: clean requirements_dev version_file develop migrate
## Create Django superuser. # Create Django superuser.
adduser: adduser:
$(MANAGEMENT_COMMAND) createsuperuser $(MANAGEMENT_COMMAND) createsuperuser
## Create database tables and apply any new migrations. # Create database tables and apply any new migrations.
migrate: migrate:
if [ "$(VENV_BASE)" ]; then \ if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi; \ fi; \
$(MANAGEMENT_COMMAND) migrate --noinput $(MANAGEMENT_COMMAND) migrate --noinput
## Run after making changes to the models to create a new migration. # Run after making changes to the models to create a new migration.
dbchange: dbchange:
$(MANAGEMENT_COMMAND) makemigrations $(MANAGEMENT_COMMAND) makemigrations
@@ -184,7 +181,7 @@ collectstatic:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi; \ fi; \
$(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1 mkdir -p awx/public/static && $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:* DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*
@@ -221,7 +218,7 @@ wsbroadcast:
fi; \ fi; \
$(PYTHON) manage.py run_wsbroadcast $(PYTHON) manage.py run_wsbroadcast
## Run to start the background task dispatcher for development. # Run to start the background task dispatcher for development.
dispatcher: dispatcher:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
@@ -229,7 +226,7 @@ dispatcher:
$(PYTHON) manage.py run_dispatcher $(PYTHON) manage.py run_dispatcher
## Run to start the zeromq callback receiver # Run to start the zeromq callback receiver
receiver: receiver:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
@@ -281,7 +278,7 @@ awx-link:
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
PYTEST_ARGS ?= -n auto PYTEST_ARGS ?= -n auto
## Run all API unit tests. # Run all API unit tests.
test: test:
if [ "$(VENV_BASE)" ]; then \ if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
@@ -302,8 +299,7 @@ test_collection:
if [ "$(VENV_BASE)" ]; then \ if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi && \ fi && \
if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi pip install ansible-core && \
ansible --version
py.test $(COLLECTION_TEST_DIRS) -v py.test $(COLLECTION_TEST_DIRS) -v
# The python path needs to be modified so that the tests can find Ansible within the container # The python path needs to be modified so that the tests can find Ansible within the container
# First we will use anything expility set as PYTHONPATH # First we will use anything expility set as PYTHONPATH
@@ -333,13 +329,8 @@ install_collection: build_collection
rm -rf $(COLLECTION_INSTALL) rm -rf $(COLLECTION_INSTALL)
ansible-galaxy collection install awx_collection_build/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(COLLECTION_VERSION).tar.gz ansible-galaxy collection install awx_collection_build/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(COLLECTION_VERSION).tar.gz
test_collection_sanity: test_collection_sanity: install_collection
rm -rf awx_collection_build/ cd $(COLLECTION_INSTALL) && ansible-test sanity
rm -rf $(COLLECTION_INSTALL)
if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
ansible --version
COLLECTION_VERSION=1.0.0 make install_collection
cd $(COLLECTION_INSTALL) && ansible-test sanity --exclude=plugins/modules/export.py
test_collection_integration: install_collection test_collection_integration: install_collection
cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET) cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
@@ -350,24 +341,23 @@ test_unit:
fi; \ fi; \
py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit
## Run all API unit tests with coverage enabled. # Run all API unit tests with coverage enabled.
test_coverage: test_coverage:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi; \ fi; \
py.test --create-db --cov=awx --cov-report=xml --junitxml=./reports/junit.xml $(TEST_DIRS) py.test --create-db --cov=awx --cov-report=xml --junitxml=./reports/junit.xml $(TEST_DIRS)
## Output test coverage as HTML (into htmlcov directory). # Output test coverage as HTML (into htmlcov directory).
coverage_html: coverage_html:
coverage html coverage html
## Run API unit tests across multiple Python/Django versions with Tox. # Run API unit tests across multiple Python/Django versions with Tox.
test_tox: test_tox:
tox -v tox -v
# Make fake data
DATA_GEN_PRESET = "" DATA_GEN_PRESET = ""
## Make fake data
bulk_data: bulk_data:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
@@ -386,29 +376,28 @@ clean-ui:
rm -rf awx/ui/build rm -rf awx/ui/build
rm -rf awx/ui/src/locales/_build rm -rf awx/ui/src/locales/_build
rm -rf $(UI_BUILD_FLAG_FILE) rm -rf $(UI_BUILD_FLAG_FILE)
# the collectstatic command doesn't like it if this dir doesn't exist.
mkdir -p awx/ui/build/static
awx/ui/node_modules: awx/ui/node_modules:
NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn --force ci NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn ci
$(UI_BUILD_FLAG_FILE): $(UI_BUILD_FLAG_FILE): awx/ui/node_modules
$(MAKE) awx/ui/node_modules
$(PYTHON) tools/scripts/compilemessages.py $(PYTHON) tools/scripts/compilemessages.py
$(NPM_BIN) --prefix awx/ui --loglevel warn run compile-strings $(NPM_BIN) --prefix awx/ui --loglevel warn run compile-strings
$(NPM_BIN) --prefix awx/ui --loglevel warn run build $(NPM_BIN) --prefix awx/ui --loglevel warn run build
mkdir -p awx/public/static/css
mkdir -p awx/public/static/js
mkdir -p awx/public/static/media
cp -r awx/ui/build/static/css/* awx/public/static/css
cp -r awx/ui/build/static/js/* awx/public/static/js
cp -r awx/ui/build/static/media/* awx/public/static/media
touch $@ touch $@
ui-release: $(UI_BUILD_FLAG_FILE) ui-release: $(UI_BUILD_FLAG_FILE)
ui-devel: awx/ui/node_modules ui-devel: awx/ui/node_modules
@$(MAKE) -B $(UI_BUILD_FLAG_FILE) @$(MAKE) -B $(UI_BUILD_FLAG_FILE)
mkdir -p /var/lib/awx/public/static/css
mkdir -p /var/lib/awx/public/static/js
mkdir -p /var/lib/awx/public/static/media
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
ui-devel-instrumented: awx/ui/node_modules ui-devel-instrumented: awx/ui/node_modules
$(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented $(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
@@ -460,18 +449,12 @@ awx/projects:
COMPOSE_UP_OPTS ?= COMPOSE_UP_OPTS ?=
COMPOSE_OPTS ?= COMPOSE_OPTS ?=
CONTROL_PLANE_NODE_COUNT ?= 1 CONTROL_PLANE_NODE_COUNT ?= 1
EXECUTION_NODE_COUNT ?= 0 EXECUTION_NODE_COUNT ?= 2
MINIKUBE_CONTAINER_GROUP ?= false MINIKUBE_CONTAINER_GROUP ?= false
MINIKUBE_SETUP ?= false # if false, run minikube separately
EXTRA_SOURCES_ANSIBLE_OPTS ?=
ifneq ($(ADMIN_PASSWORD),)
EXTRA_SOURCES_ANSIBLE_OPTS := -e admin_password=$(ADMIN_PASSWORD) $(EXTRA_SOURCES_ANSIBLE_OPTS)
endif
docker-compose-sources: .git/hooks/pre-commit docker-compose-sources: .git/hooks/pre-commit
@if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\ @if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\
ansible-playbook -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \ ansible-playbook -i tools/docker-compose/inventory tools/docker-compose-minikube/deploy.yml; \
fi; fi;
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \ ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
@@ -485,8 +468,7 @@ docker-compose-sources: .git/hooks/pre-commit
-e enable_ldap=$(LDAP) \ -e enable_ldap=$(LDAP) \
-e enable_splunk=$(SPLUNK) \ -e enable_splunk=$(SPLUNK) \
-e enable_prometheus=$(PROMETHEUS) \ -e enable_prometheus=$(PROMETHEUS) \
-e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS) -e enable_grafana=$(GRAFANA)
docker-compose: awx/projects docker-compose-sources docker-compose: awx/projects docker-compose-sources
@@ -520,7 +502,7 @@ docker-compose-container-group-clean:
fi fi
rm -rf tools/docker-compose-minikube/_sources/ rm -rf tools/docker-compose-minikube/_sources/
## Base development image build # Base development image build
docker-compose-build: docker-compose-build:
ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE) ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \ DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
@@ -538,7 +520,7 @@ docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker-refresh: docker-clean docker-compose docker-refresh: docker-clean docker-compose
## Docker Development Environment with Elastic Stack Connected # Docker Development Environment with Elastic Stack Connected
docker-compose-elk: awx/projects docker-compose-sources docker-compose-elk: awx/projects docker-compose-sources
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
@@ -575,73 +557,31 @@ Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
-e template_dest=_build_kube_dev \ -e template_dest=_build_kube_dev \
-e receptor_image=$(RECEPTOR_IMAGE) -e receptor_image=$(RECEPTOR_IMAGE)
## Build awx_kube_devel image for development on local Kubernetes environment.
awx-kube-dev-build: Dockerfile.kube-dev awx-kube-dev-build: Dockerfile.kube-dev
DOCKER_BUILDKIT=1 docker build -f Dockerfile.kube-dev \ DOCKER_BUILDKIT=1 docker build -f Dockerfile.kube-dev \
--build-arg BUILDKIT_INLINE_CACHE=1 \ --build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \ --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) . -t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
## Build awx image for deployment on Kubernetes environment.
awx-kube-build: Dockerfile
DOCKER_BUILDKIT=1 docker build -f Dockerfile \
--build-arg VERSION=$(VERSION) \
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
--build-arg HEADLESS=$(HEADLESS) \
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
# Translation TASKS # Translation TASKS
# -------------------------------------- # --------------------------------------
## generate UI .pot file, an empty template of strings yet to be translated # generate UI .pot file, an empty template of strings yet to be translated
pot: $(UI_BUILD_FLAG_FILE) pot: $(UI_BUILD_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean $(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean
## generate UI .po files for each locale (will update translated strings for `en`) # generate UI .po files for each locale (will update translated strings for `en`)
po: $(UI_BUILD_FLAG_FILE) po: $(UI_BUILD_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean $(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
## generate API django .pot .po # generate API django .pot .po
LANG = "en-us"
messages: messages:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi; \ fi; \
$(PYTHON) manage.py makemessages -l en_us --keep-pot $(PYTHON) manage.py makemessages -l $(LANG) --keep-pot
print-%: print-%:
@echo $($*) @echo $($*)
# HELP related targets
# --------------------------------------
HELP_FILTER=.PHONY
## Display help targets
help:
@printf "Available targets:\n"
@make -s help/generate | grep -vE "\w($(HELP_FILTER))"
## Display help for all targets
help/all:
@printf "Available targets:\n"
@make -s help/generate
## Generate help output from MAKEFILE_LIST
help/generate:
@awk '/^[-a-zA-Z_0-9%:\\\.\/]+:/ { \
helpMessage = match(lastLine, /^## (.*)/); \
if (helpMessage) { \
helpCommand = $$1; \
helpMessage = substr(lastLine, RSTART + 3, RLENGTH); \
gsub("\\\\", "", helpCommand); \
gsub(":+$$", "", helpCommand); \
printf " \x1b[32;01m%-35s\x1b[0m %s\n", helpCommand, helpMessage; \
} else { \
helpCommand = $$1; \
gsub("\\\\", "", helpCommand); \
gsub(":+$$", "", helpCommand); \
printf " \x1b[32;01m%-35s\x1b[0m %s\n", helpCommand, "No help available"; \
} \
} \
{ lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
@printf "\n"

View File

@@ -190,7 +190,7 @@ def manage():
sys.stdout.write('%s\n' % __version__) sys.stdout.write('%s\n' % __version__)
# If running as a user without permission to read settings, display an # If running as a user without permission to read settings, display an
# error message. Allow --help to still work. # error message. Allow --help to still work.
elif not os.getenv('SKIP_SECRET_KEY_CHECK', False) and settings.SECRET_KEY == 'permission-denied': elif settings.SECRET_KEY == 'permission-denied':
if len(sys.argv) == 1 or len(sys.argv) >= 2 and sys.argv[1] in ('-h', '--help', 'help'): if len(sys.argv) == 1 or len(sys.argv) >= 2 and sys.argv[1] in ('-h', '--help', 'help'):
execute_from_command_line(sys.argv) execute_from_command_line(sys.argv)
sys.stdout.write('\n') sys.stdout.write('\n')

View File

@@ -157,7 +157,7 @@ class FieldLookupBackend(BaseFilterBackend):
# A list of fields that we know can be filtered on without the possiblity # A list of fields that we know can be filtered on without the possiblity
# of introducing duplicates # of introducing duplicates
NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField) NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField)
def get_fields_from_lookup(self, model, lookup): def get_fields_from_lookup(self, model, lookup):
@@ -232,9 +232,6 @@ class FieldLookupBackend(BaseFilterBackend):
re.compile(value) re.compile(value)
except re.error as e: except re.error as e:
raise ValueError(e.args[0]) raise ValueError(e.args[0])
elif new_lookup.endswith('__iexact'):
if not isinstance(field, (CharField, TextField)):
raise ValueError(f'{field.name} is not a text field and cannot be filtered by case-insensitive search')
elif new_lookup.endswith('__search'): elif new_lookup.endswith('__search'):
related_model = getattr(field, 'related_model', None) related_model = getattr(field, 'related_model', None)
if not related_model: if not related_model:
@@ -261,8 +258,8 @@ class FieldLookupBackend(BaseFilterBackend):
search_filters = {} search_filters = {}
needs_distinct = False needs_distinct = False
# Can only have two values: 'AND', 'OR' # Can only have two values: 'AND', 'OR'
# If 'AND' is used, an item must satisfy all conditions to show up in the results. # If 'AND' is used, an iterm must satisfy all condition to show up in the results.
# If 'OR' is used, an item just needs to satisfy one condition to appear in results. # If 'OR' is used, an item just need to satisfy one condition to appear in results.
search_filter_relation = 'OR' search_filter_relation = 'OR'
for key, values in request.query_params.lists(): for key, values in request.query_params.lists():
if key in self.RESERVED_NAMES: if key in self.RESERVED_NAMES:

View File

@@ -6,6 +6,7 @@ import inspect
import logging import logging
import time import time
import uuid import uuid
import urllib.parse
# Django # Django
from django.conf import settings from django.conf import settings
@@ -13,7 +14,7 @@ from django.contrib.auth import views as auth_views
from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache from django.core.cache import cache
from django.core.exceptions import FieldDoesNotExist from django.core.exceptions import FieldDoesNotExist
from django.db import connection, transaction from django.db import connection
from django.db.models.fields.related import OneToOneRel from django.db.models.fields.related import OneToOneRel
from django.http import QueryDict from django.http import QueryDict
from django.shortcuts import get_object_or_404 from django.shortcuts import get_object_or_404
@@ -29,7 +30,7 @@ from rest_framework.response import Response
from rest_framework import status from rest_framework import status
from rest_framework import views from rest_framework import views
from rest_framework.permissions import AllowAny from rest_framework.permissions import AllowAny
from rest_framework.renderers import StaticHTMLRenderer from rest_framework.renderers import StaticHTMLRenderer, JSONRenderer
from rest_framework.negotiation import DefaultContentNegotiation from rest_framework.negotiation import DefaultContentNegotiation
# AWX # AWX
@@ -40,7 +41,7 @@ from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd,
from awx.main.utils.db import get_all_field_names from awx.main.utils.db import get_all_field_names
from awx.main.utils.licensing import server_product_name from awx.main.utils.licensing import server_product_name
from awx.main.views import ApiErrorView from awx.main.views import ApiErrorView
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
from awx.api.versioning import URLPathVersioning from awx.api.versioning import URLPathVersioning
from awx.api.metadata import SublistAttachDetatchMetadata, Metadata from awx.api.metadata import SublistAttachDetatchMetadata, Metadata
from awx.conf import settings_registry from awx.conf import settings_registry
@@ -62,9 +63,9 @@ __all__ = [
'SubDetailAPIView', 'SubDetailAPIView',
'ResourceAccessList', 'ResourceAccessList',
'ParentMixin', 'ParentMixin',
'DeleteLastUnattachLabelMixin',
'SubListAttachDetachAPIView', 'SubListAttachDetachAPIView',
'CopyAPIView', 'CopyAPIView',
'GenericCancelView',
'BaseUsersList', 'BaseUsersList',
] ]
@@ -90,9 +91,14 @@ class LoggedLoginView(auth_views.LoginView):
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
ret = super(LoggedLoginView, self).post(request, *args, **kwargs) ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
current_user = getattr(request, 'user', None)
if request.user.is_authenticated: if request.user.is_authenticated:
logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None)))) logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None))))
ret.set_cookie('userLoggedIn', 'true') ret.set_cookie('userLoggedIn', 'true')
current_user = UserSerializer(self.request.user)
current_user = smart_str(JSONRenderer().render(current_user.data))
current_user = urllib.parse.quote('%s' % current_user, '')
ret.set_cookie('current_user', current_user, secure=settings.SESSION_COOKIE_SECURE or None)
ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid')) ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))
return ret return ret
@@ -249,7 +255,7 @@ class APIView(views.APIView):
response['X-API-Query-Time'] = '%0.3fs' % sum(q_times) response['X-API-Query-Time'] = '%0.3fs' % sum(q_times)
if getattr(self, 'deprecated', False): if getattr(self, 'deprecated', False):
response['Warning'] = '299 awx "This resource has been deprecated and will be removed in a future release."' response['Warning'] = '299 awx "This resource has been deprecated and will be removed in a future release."' # noqa
return response return response
@@ -769,6 +775,28 @@ class SubListAttachDetachAPIView(SubListCreateAttachDetachAPIView):
return {'id': None} return {'id': None}
class DeleteLastUnattachLabelMixin(object):
"""
Models for which you want the last instance to be deleted from the database
when the last disassociate is called should inherit from this class. Further,
the model should implement is_detached()
"""
def unattach(self, request, *args, **kwargs):
(sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request)
if res:
return res
res = super(DeleteLastUnattachLabelMixin, self).unattach_by_id(request, sub_id)
obj = self.model.objects.get(id=sub_id)
if obj.is_detached():
obj.delete()
return res
class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView): class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView):
pass pass
@@ -986,23 +1014,6 @@ class CopyAPIView(GenericAPIView):
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class GenericCancelView(RetrieveAPIView):
# In subclass set model, serializer_class
obj_permission_type = 'cancel'
@transaction.non_atomic_requests
def dispatch(self, *args, **kwargs):
return super(GenericCancelView, self).dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class BaseUsersList(SubListCreateAttachDetachAPIView): class BaseUsersList(SubListCreateAttachDetachAPIView):
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
ret = super(BaseUsersList, self).post(request, *args, **kwargs) ret = super(BaseUsersList, self).post(request, *args, **kwargs)

View File

@@ -24,6 +24,7 @@ __all__ = [
'InventoryInventorySourcesUpdatePermission', 'InventoryInventorySourcesUpdatePermission',
'UserPermission', 'UserPermission',
'IsSystemAdminOrAuditor', 'IsSystemAdminOrAuditor',
'InstanceGroupTowerPermission',
'WorkflowApprovalPermission', 'WorkflowApprovalPermission',
] ]

View File

@@ -29,7 +29,7 @@ from django.utils.translation import gettext_lazy as _
from django.utils.encoding import force_str from django.utils.encoding import force_str
from django.utils.text import capfirst from django.utils.text import capfirst
from django.utils.timezone import now from django.utils.timezone import now
from django.core.validators import RegexValidator, MaxLengthValidator from django.utils.functional import cached_property
# Django REST Framework # Django REST Framework
from rest_framework.exceptions import ValidationError, PermissionDenied from rest_framework.exceptions import ValidationError, PermissionDenied
@@ -113,7 +113,7 @@ from awx.main.utils import (
) )
from awx.main.utils.filters import SmartFilter from awx.main.utils.filters import SmartFilter
from awx.main.utils.named_url_graph import reset_counters from awx.main.utils.named_url_graph import reset_counters
from awx.main.scheduler.task_manager_models import TaskManagerModels from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
from awx.main.redact import UriCleaner, REPLACE_STR from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.validators import vars_validate_or_raise from awx.main.validators import vars_validate_or_raise
@@ -121,9 +121,6 @@ from awx.main.validators import vars_validate_or_raise
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.api.fields import BooleanNullField, CharNullField, ChoiceNullField, VerbatimField, DeprecatedCredentialField from awx.api.fields import BooleanNullField, CharNullField, ChoiceNullField, VerbatimField, DeprecatedCredentialField
# AWX Utils
from awx.api.validators import HostnameRegexValidator
logger = logging.getLogger('awx.api.serializers') logger = logging.getLogger('awx.api.serializers')
# Fields that should be summarized regardless of object type. # Fields that should be summarized regardless of object type.
@@ -158,7 +155,6 @@ SUMMARIZABLE_FK_FIELDS = {
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'), 'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'), 'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'), 'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
'signature_validation_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'), 'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
'job_template': DEFAULT_SUMMARY_FIELDS, 'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS, 'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
@@ -619,7 +615,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
def validate(self, attrs): def validate(self, attrs):
attrs = super(BaseSerializer, self).validate(attrs) attrs = super(BaseSerializer, self).validate(attrs)
try: try:
# Create/update a model instance and run its full_clean() method to # Create/update a model instance and run it's full_clean() method to
# do any validation implemented on the model class. # do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance) exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model() obj = self.instance or self.Meta.model()
@@ -1475,7 +1471,6 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
'allow_override', 'allow_override',
'custom_virtualenv', 'custom_virtualenv',
'default_environment', 'default_environment',
'signature_validation_credential',
) + ( ) + (
'last_update_failed', 'last_update_failed',
'last_updated', 'last_updated',
@@ -1684,7 +1679,6 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
'total_inventory_sources', 'total_inventory_sources',
'inventory_sources_with_failures', 'inventory_sources_with_failures',
'pending_deletion', 'pending_deletion',
'prevent_instance_group_fallback',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -2221,15 +2215,6 @@ class InventorySourceUpdateSerializer(InventorySourceSerializer):
class Meta: class Meta:
fields = ('can_update',) fields = ('can_update',)
def validate(self, attrs):
project = self.instance.source_project
if project:
failed_reason = project.get_reason_if_failed()
if failed_reason:
raise serializers.ValidationError(failed_reason)
return super(InventorySourceUpdateSerializer, self).validate(attrs)
class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer): class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
@@ -2246,7 +2231,6 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
'source_project_update', 'source_project_update',
'custom_virtualenv', 'custom_virtualenv',
'instance_group', 'instance_group',
'scm_revision',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -2937,12 +2921,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
'ask_verbosity_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
'survey_enabled', 'survey_enabled',
'become_enabled', 'become_enabled',
'diff_mode', 'diff_mode',
@@ -2951,7 +2929,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
'job_slice_count', 'job_slice_count',
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'prevent_instance_group_fallback',
) )
read_only_fields = ('*', 'custom_virtualenv') read_only_fields = ('*', 'custom_virtualenv')
@@ -3206,7 +3183,7 @@ class JobRelaunchSerializer(BaseSerializer):
return attrs return attrs
class JobCreateScheduleSerializer(LabelsListMixin, BaseSerializer): class JobCreateScheduleSerializer(BaseSerializer):
can_schedule = serializers.SerializerMethodField() can_schedule = serializers.SerializerMethodField()
prompts = serializers.SerializerMethodField() prompts = serializers.SerializerMethodField()
@@ -3232,17 +3209,14 @@ class JobCreateScheduleSerializer(LabelsListMixin, BaseSerializer):
try: try:
config = obj.launch_config config = obj.launch_config
ret = config.prompts_dict(display=True) ret = config.prompts_dict(display=True)
for field_name in ('inventory', 'execution_environment'): if 'inventory' in ret:
if field_name in ret: ret['inventory'] = self._summarize('inventory', ret['inventory'])
ret[field_name] = self._summarize(field_name, ret[field_name]) if 'credentials' in ret:
for field_name, singular in (('credentials', 'credential'), ('instance_groups', 'instance_group')): all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
if field_name in ret: ret['credentials'] = all_creds
ret[field_name] = [self._summarize(singular, obj) for obj in ret[field_name]]
if 'labels' in ret:
ret['labels'] = self._summary_field_labels(config)
return ret return ret
except JobLaunchConfig.DoesNotExist: except JobLaunchConfig.DoesNotExist:
return {'all': _('Unknown, job may have been run before launch configurations were saved.')} return {'all': _('Unknown, job may have been ran before launch configurations were saved.')}
class AdHocCommandSerializer(UnifiedJobSerializer): class AdHocCommandSerializer(UnifiedJobSerializer):
@@ -3412,9 +3386,6 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
class Meta: class Meta:
model = WorkflowJobTemplate model = WorkflowJobTemplate
fields = ( fields = (
@@ -3433,11 +3404,6 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'-execution_environment', '-execution_environment',
'ask_labels_on_launch',
'ask_skip_tags_on_launch',
'ask_tags_on_launch',
'skip_tags',
'job_tags',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -3481,7 +3447,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
# process char_prompts, these are not direct fields on the model # process char_prompts, these are not direct fields on the model
mock_obj = self.Meta.model() mock_obj = self.Meta.model()
for field_name in ('scm_branch', 'limit', 'skip_tags', 'job_tags'): for field_name in ('scm_branch', 'limit'):
if field_name in attrs: if field_name in attrs:
setattr(mock_obj, field_name, attrs[field_name]) setattr(mock_obj, field_name, attrs[field_name])
attrs.pop(field_name) attrs.pop(field_name)
@@ -3507,9 +3473,6 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
class Meta: class Meta:
model = WorkflowJob model = WorkflowJob
fields = ( fields = (
@@ -3529,8 +3492,6 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'webhook_guid', 'webhook_guid',
'skip_tags',
'job_tags',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -3647,9 +3608,6 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None) diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES) verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES)
forks = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
job_slice_count = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
timeout = serializers.IntegerField(required=False, allow_null=True, default=None)
exclude_errors = () exclude_errors = ()
class Meta: class Meta:
@@ -3665,21 +3623,13 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
'skip_tags', 'skip_tags',
'diff_mode', 'diff_mode',
'verbosity', 'verbosity',
'execution_environment',
'forks',
'job_slice_count',
'timeout',
) )
def get_related(self, obj): def get_related(self, obj):
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj) res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
if obj.inventory_id: if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id}) res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
if obj.execution_environment_id:
res['execution_environment'] = self.reverse('api:execution_environment_detail', kwargs={'pk': obj.execution_environment_id})
res['labels'] = self.reverse('api:{}_labels_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk}) res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:{}_instance_groups_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
return res return res
def _build_mock_obj(self, attrs): def _build_mock_obj(self, attrs):
@@ -3759,11 +3709,7 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
# Build unsaved version of this config, use it to detect prompts errors # Build unsaved version of this config, use it to detect prompts errors
mock_obj = self._build_mock_obj(attrs) mock_obj = self._build_mock_obj(attrs)
if set(list(ujt.get_ask_mapping().keys()) + ['extra_data']) & set(attrs.keys()): accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
else:
# Only perform validation of prompts if prompts fields are provided
errors = {}
# Remove all unprocessed $encrypted$ strings, indicating default usage # Remove all unprocessed $encrypted$ strings, indicating default usage
if 'extra_data' in attrs and password_dict: if 'extra_data' in attrs and password_dict:
@@ -4135,6 +4081,7 @@ class SystemJobEventSerializer(AdHocCommandEventSerializer):
class JobLaunchSerializer(BaseSerializer): class JobLaunchSerializer(BaseSerializer):
# Representational fields # Representational fields
passwords_needed_to_start = serializers.ReadOnlyField() passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True) can_start_without_user_input = serializers.BooleanField(read_only=True)
@@ -4157,12 +4104,6 @@ class JobLaunchSerializer(BaseSerializer):
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True) skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True) limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True) verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True)
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
forks = serializers.IntegerField(required=False, write_only=True, min_value=0)
job_slice_count = serializers.IntegerField(required=False, write_only=True, min_value=0)
timeout = serializers.IntegerField(required=False, write_only=True)
instance_groups = serializers.PrimaryKeyRelatedField(many=True, queryset=InstanceGroup.objects.all(), required=False, write_only=True)
class Meta: class Meta:
model = JobTemplate model = JobTemplate
@@ -4190,12 +4131,6 @@ class JobLaunchSerializer(BaseSerializer):
'ask_verbosity_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
'survey_enabled', 'survey_enabled',
'variables_needed_to_start', 'variables_needed_to_start',
'credential_needed_to_start', 'credential_needed_to_start',
@@ -4203,12 +4138,6 @@ class JobLaunchSerializer(BaseSerializer):
'job_template_data', 'job_template_data',
'defaults', 'defaults',
'verbosity', 'verbosity',
'execution_environment',
'labels',
'forks',
'job_slice_count',
'timeout',
'instance_groups',
) )
read_only_fields = ( read_only_fields = (
'ask_scm_branch_on_launch', 'ask_scm_branch_on_launch',
@@ -4221,12 +4150,6 @@ class JobLaunchSerializer(BaseSerializer):
'ask_verbosity_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
) )
def get_credential_needed_to_start(self, obj): def get_credential_needed_to_start(self, obj):
@@ -4251,17 +4174,6 @@ class JobLaunchSerializer(BaseSerializer):
if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields: if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.get_input('vault_id', default=None) cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
defaults_dict.setdefault(field_name, []).append(cred_dict) defaults_dict.setdefault(field_name, []).append(cred_dict)
elif field_name == 'execution_environment':
if obj.execution_environment_id:
defaults_dict[field_name] = {'id': obj.execution_environment.id, 'name': obj.execution_environment.name}
else:
defaults_dict[field_name] = {}
elif field_name == 'labels':
for label in obj.labels.all():
label_dict = {'id': label.id, 'name': label.name}
defaults_dict.setdefault(field_name, []).append(label_dict)
elif field_name == 'instance_groups':
defaults_dict[field_name] = []
else: else:
defaults_dict[field_name] = getattr(obj, field_name) defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict return defaults_dict
@@ -4281,10 +4193,8 @@ class JobLaunchSerializer(BaseSerializer):
# Basic validation - cannot run a playbook without a playbook # Basic validation - cannot run a playbook without a playbook
if not template.project: if not template.project:
errors['project'] = _("A project is required to run a job.") errors['project'] = _("A project is required to run a job.")
else: elif template.project.status in ('error', 'failed'):
failure_reason = template.project.get_reason_if_failed() errors['playbook'] = _("Missing a revision to run due to failed project update.")
if failure_reason:
errors['playbook'] = failure_reason
# cannot run a playbook without an inventory # cannot run a playbook without an inventory
if template.inventory and template.inventory.pending_deletion is True: if template.inventory and template.inventory.pending_deletion is True:
@@ -4362,10 +4272,6 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True) scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True)
workflow_job_template_data = serializers.SerializerMethodField() workflow_job_template_data = serializers.SerializerMethodField()
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
class Meta: class Meta:
model = WorkflowJobTemplate model = WorkflowJobTemplate
fields = ( fields = (
@@ -4385,22 +4291,8 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
'workflow_job_template_data', 'workflow_job_template_data',
'survey_enabled', 'survey_enabled',
'ask_variables_on_launch', 'ask_variables_on_launch',
'ask_labels_on_launch',
'labels',
'ask_skip_tags_on_launch',
'ask_tags_on_launch',
'skip_tags',
'job_tags',
)
read_only_fields = (
'ask_inventory_on_launch',
'ask_variables_on_launch',
'ask_skip_tags_on_launch',
'ask_labels_on_launch',
'ask_limit_on_launch',
'ask_scm_branch_on_launch',
'ask_tags_on_launch',
) )
read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
def get_survey_enabled(self, obj): def get_survey_enabled(self, obj):
if obj: if obj:
@@ -4408,15 +4300,10 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
return False return False
def get_defaults(self, obj): def get_defaults(self, obj):
defaults_dict = {} defaults_dict = {}
for field_name in WorkflowJobTemplate.get_ask_mapping().keys(): for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
if field_name == 'inventory': if field_name == 'inventory':
defaults_dict[field_name] = dict(name=getattrd(obj, '%s.name' % field_name, None), id=getattrd(obj, '%s.pk' % field_name, None)) defaults_dict[field_name] = dict(name=getattrd(obj, '%s.name' % field_name, None), id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'labels':
for label in obj.labels.all():
label_dict = {"id": label.id, "name": label.name}
defaults_dict.setdefault(field_name, []).append(label_dict)
else: else:
defaults_dict[field_name] = getattr(obj, field_name) defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict return defaults_dict
@@ -4425,7 +4312,6 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
return dict(name=obj.name, id=obj.id, description=obj.description) return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs): def validate(self, attrs):
template = self.instance template = self.instance
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs) accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
@@ -4443,7 +4329,6 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
WFJT_inventory = template.inventory WFJT_inventory = template.inventory
WFJT_limit = template.limit WFJT_limit = template.limit
WFJT_scm_branch = template.scm_branch WFJT_scm_branch = template.scm_branch
super(WorkflowJobLaunchSerializer, self).validate(attrs) super(WorkflowJobLaunchSerializer, self).validate(attrs)
template.extra_vars = WFJT_extra_vars template.extra_vars = WFJT_extra_vars
template.inventory = WFJT_inventory template.inventory = WFJT_inventory
@@ -4835,8 +4720,6 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
if isinstance(obj.unified_job_template, SystemJobTemplate): if isinstance(obj.unified_job_template, SystemJobTemplate):
summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type
# We are not showing instance groups on summary fields because JTs don't either
if 'inventory' in summary_fields: if 'inventory' in summary_fields:
return summary_fields return summary_fields
@@ -4871,7 +4754,7 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
class InstanceLinkSerializer(BaseSerializer): class InstanceLinkSerializer(BaseSerializer):
class Meta: class Meta:
model = InstanceLink model = InstanceLink
fields = ('source', 'target', 'link_state') fields = ('source', 'target')
source = serializers.SlugRelatedField(slug_field="hostname", read_only=True) source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
target = serializers.SlugRelatedField(slug_field="hostname", read_only=True) target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
@@ -4880,93 +4763,63 @@ class InstanceLinkSerializer(BaseSerializer):
class InstanceNodeSerializer(BaseSerializer): class InstanceNodeSerializer(BaseSerializer):
class Meta: class Meta:
model = Instance model = Instance
fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled') fields = ('id', 'hostname', 'node_type', 'node_state')
node_state = serializers.SerializerMethodField()
def get_node_state(self, obj):
if not obj.enabled:
return "disabled"
return "error" if obj.errors else "healthy"
class InstanceSerializer(BaseSerializer): class InstanceSerializer(BaseSerializer):
show_capabilities = ['edit']
consumed_capacity = serializers.SerializerMethodField() consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField() percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True) jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that ' 'are targeted for this instance'), read_only=True)
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True) jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
health_check_pending = serializers.SerializerMethodField()
class Meta: class Meta:
model = Instance model = Instance
read_only_fields = ('ip_address', 'uuid', 'version') read_only_fields = ('uuid', 'hostname', 'version', 'node_type')
fields = ( fields = (
'id', "id",
'hostname', "type",
'type', "url",
'url', "related",
'related', "uuid",
'summary_fields', "hostname",
'uuid', "created",
'created', "modified",
'modified', "last_seen",
'last_seen', "last_health_check",
'health_check_started', "errors",
'health_check_pending',
'last_health_check',
'errors',
'capacity_adjustment', 'capacity_adjustment',
'version', "version",
'capacity', "capacity",
'consumed_capacity', "consumed_capacity",
'percent_capacity_remaining', "percent_capacity_remaining",
'jobs_running', "jobs_running",
'jobs_total', "jobs_total",
'cpu', "cpu",
'memory', "memory",
'cpu_capacity', "cpu_capacity",
'mem_capacity', "mem_capacity",
'enabled', "enabled",
'managed_by_policy', "managed_by_policy",
'node_type', "node_type",
'node_state',
'ip_address',
'listener_port',
) )
extra_kwargs = {
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
'node_state': {'initial': Instance.States.INSTALLED, 'default': Instance.States.INSTALLED},
'hostname': {
'validators': [
MaxLengthValidator(limit_value=250),
validators.UniqueValidator(queryset=Instance.objects.all()),
RegexValidator(
regex=r'^localhost$|^127(?:\.[0-9]+){0,2}\.[0-9]+$|^(?:0*\:)*?:?0*1$',
flags=re.IGNORECASE,
inverse_match=True,
message="hostname cannot be localhost or 127.0.0.1",
),
HostnameRegexValidator(),
],
},
}
def get_related(self, obj): def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj) res = super(InstanceSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk}) res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk}) res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if settings.IS_K8S and obj.node_type in (Instance.Types.EXECUTION,):
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor: if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
if obj.node_type == 'execution': if obj.node_type != 'hop':
res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk}) res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
return res return res
def get_summary_fields(self, obj):
summary = super().get_summary_fields(obj)
# use this handle to distinguish between a listView and a detailView
if self.is_detail_view:
summary['links'] = InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source').filter(source=obj), many=True).data
return summary
def get_consumed_capacity(self, obj): def get_consumed_capacity(self, obj):
return obj.consumed_capacity return obj.consumed_capacity
@@ -4976,58 +4829,10 @@ class InstanceSerializer(BaseSerializer):
else: else:
return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100)) return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
def get_health_check_pending(self, obj): def validate(self, attrs):
return obj.health_check_pending if self.instance.node_type == 'hop':
raise serializers.ValidationError(_('Hop node instances may not be changed.'))
def validate(self, data): return attrs
if self.instance:
if self.instance.node_type == Instance.Types.HOP:
raise serializers.ValidationError("Hop node instances may not be changed.")
else:
if not settings.IS_K8S:
raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
return data
def validate_node_type(self, value):
if not self.instance:
if value not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError("Can only create execution nodes.")
else:
if self.instance.node_type != value:
raise serializers.ValidationError("Cannot change node type.")
return value
def validate_node_state(self, value):
if self.instance:
if value != self.instance.node_state:
if not settings.IS_K8S:
raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
if value != Instance.States.DEPROVISIONING:
raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
if self.instance.node_type not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError("Can only deprovision execution nodes.")
else:
if value and value != Instance.States.INSTALLED:
raise serializers.ValidationError("Can only create instances in the 'installed' state.")
return value
def validate_hostname(self, value):
"""
- Hostname cannot be "localhost" - but can be something like localhost.domain
- Cannot change the hostname of an-already instantiated & initialized Instance object
"""
if self.instance and self.instance.hostname != value:
raise serializers.ValidationError("Cannot change hostname.")
return value
def validate_listener_port(self, value):
if self.instance and self.instance.listener_port != value:
raise serializers.ValidationError("Cannot change listener port.")
return value
class InstanceHealthCheckSerializer(BaseSerializer): class InstanceHealthCheckSerializer(BaseSerializer):
@@ -5040,10 +4845,12 @@ class InstanceHealthCheckSerializer(BaseSerializer):
class InstanceGroupSerializer(BaseSerializer): class InstanceGroupSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete'] show_capabilities = ['edit', 'delete']
capacity = serializers.SerializerMethodField()
consumed_capacity = serializers.SerializerMethodField() consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField() percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.SerializerMethodField() jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that ' 'are targeted for this instance group'), read_only=True
)
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance group'), read_only=True) jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance group'), read_only=True)
instances = serializers.SerializerMethodField() instances = serializers.SerializerMethodField()
is_container_group = serializers.BooleanField( is_container_group = serializers.BooleanField(
@@ -5069,22 +4876,6 @@ class InstanceGroupSerializer(BaseSerializer):
label=_('Policy Instance Minimum'), label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."), help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."),
) )
max_concurrent_jobs = serializers.IntegerField(
default=0,
min_value=0,
required=False,
initial=0,
label=_('Max Concurrent Jobs'),
help_text=_("Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced."),
)
max_forks = serializers.IntegerField(
default=0,
min_value=0,
required=False,
initial=0,
label=_('Max Forks'),
help_text=_("Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced."),
)
policy_instance_list = serializers.ListField( policy_instance_list = serializers.ListField(
child=serializers.CharField(), child=serializers.CharField(),
required=False, required=False,
@@ -5106,8 +4897,6 @@ class InstanceGroupSerializer(BaseSerializer):
"consumed_capacity", "consumed_capacity",
"percent_capacity_remaining", "percent_capacity_remaining",
"jobs_running", "jobs_running",
"max_concurrent_jobs",
"max_forks",
"jobs_total", "jobs_total",
"instances", "instances",
"is_container_group", "is_container_group",
@@ -5189,39 +4978,28 @@ class InstanceGroupSerializer(BaseSerializer):
# Store capacity values (globally computed) in the context # Store capacity values (globally computed) in the context
if 'task_manager_igs' not in self.context: if 'task_manager_igs' not in self.context:
instance_groups_queryset = None instance_groups_queryset = None
jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
if self.parent: # Is ListView: if self.parent: # Is ListView:
instance_groups_queryset = self.parent.instance instance_groups_queryset = self.parent.instance
tm_models = TaskManagerModels.init_with_consumed_capacity( instances = TaskManagerInstances(jobs_qs)
instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'], instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances, instance_groups_queryset=instance_groups_queryset)
instance_groups_queryset=instance_groups_queryset,
)
self.context['task_manager_igs'] = tm_models.instance_groups self.context['task_manager_igs'] = instance_groups
return self.context['task_manager_igs'] return self.context['task_manager_igs']
def get_consumed_capacity(self, obj): def get_consumed_capacity(self, obj):
ig_mgr = self.get_ig_mgr() ig_mgr = self.get_ig_mgr()
return ig_mgr.get_consumed_capacity(obj.name) return ig_mgr.get_consumed_capacity(obj.name)
def get_capacity(self, obj):
ig_mgr = self.get_ig_mgr()
return ig_mgr.get_capacity(obj.name)
def get_percent_capacity_remaining(self, obj): def get_percent_capacity_remaining(self, obj):
capacity = self.get_capacity(obj) if not obj.capacity:
if not capacity:
return 0.0 return 0.0
consumed_capacity = self.get_consumed_capacity(obj) ig_mgr = self.get_ig_mgr()
return float("{0:.2f}".format(((float(capacity) - float(consumed_capacity)) / (float(capacity))) * 100)) return float("{0:.2f}".format((float(ig_mgr.get_remaining_capacity(obj.name)) / (float(obj.capacity))) * 100))
def get_instances(self, obj): def get_instances(self, obj):
ig_mgr = self.get_ig_mgr() return obj.instances.count()
return len(ig_mgr.get_instances(obj.name))
def get_jobs_running(self, obj):
ig_mgr = self.get_ig_mgr()
return ig_mgr.get_jobs_running(obj.name)
class ActivityStreamSerializer(BaseSerializer): class ActivityStreamSerializer(BaseSerializer):
@@ -5230,7 +5008,8 @@ class ActivityStreamSerializer(BaseSerializer):
object_association = serializers.SerializerMethodField(help_text=_("When present, shows the field name of the role or relationship that changed.")) object_association = serializers.SerializerMethodField(help_text=_("When present, shows the field name of the role or relationship that changed."))
object_type = serializers.SerializerMethodField(help_text=_("When present, shows the model on which the role or relationship was defined.")) object_type = serializers.SerializerMethodField(help_text=_("When present, shows the model on which the role or relationship was defined."))
def _local_summarizable_fk_fields(self, obj): @cached_property
def _local_summarizable_fk_fields(self):
summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS) summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)
# Special requests # Special requests
summary_dict['group'] = summary_dict['group'] + ('inventory_id',) summary_dict['group'] = summary_dict['group'] + ('inventory_id',)
@@ -5250,13 +5029,7 @@ class ActivityStreamSerializer(BaseSerializer):
('workflow_approval', ('id', 'name', 'unified_job_id')), ('workflow_approval', ('id', 'name', 'unified_job_id')),
('instance', ('id', 'hostname')), ('instance', ('id', 'hostname')),
] ]
# Optimization - do not attempt to summarize all fields, pair down to only relations that exist return field_list
if not obj:
return field_list
existing_association_types = [obj.object1, obj.object2]
if 'user' in existing_association_types:
existing_association_types.append('role')
return [entry for entry in field_list if entry[0] in existing_association_types]
class Meta: class Meta:
model = ActivityStream model = ActivityStream
@@ -5340,7 +5113,7 @@ class ActivityStreamSerializer(BaseSerializer):
data = {} data = {}
if obj.actor is not None: if obj.actor is not None:
data['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk}) data['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
for fk, __ in self._local_summarizable_fk_fields(obj): for fk, __ in self._local_summarizable_fk_fields:
if not hasattr(obj, fk): if not hasattr(obj, fk):
continue continue
m2m_list = self._get_related_objects(obj, fk) m2m_list = self._get_related_objects(obj, fk)
@@ -5397,7 +5170,7 @@ class ActivityStreamSerializer(BaseSerializer):
def get_summary_fields(self, obj): def get_summary_fields(self, obj):
summary_fields = OrderedDict() summary_fields = OrderedDict()
for fk, related_fields in self._local_summarizable_fk_fields(obj): for fk, related_fields in self._local_summarizable_fk_fields:
try: try:
if not hasattr(obj, fk): if not hasattr(obj, fk):
continue continue

View File

@@ -1,5 +1,5 @@
Launch a Job Template: Launch a Job Template:
{% ifmeth GET %}
Make a GET request to this resource to determine if the job_template can be Make a GET request to this resource to determine if the job_template can be
launched and whether any passwords are required to launch the job_template. launched and whether any passwords are required to launch the job_template.
The response will include the following fields: The response will include the following fields:
@@ -29,8 +29,8 @@ The response will include the following fields:
* `inventory_needed_to_start`: Flag indicating the presence of an inventory * `inventory_needed_to_start`: Flag indicating the presence of an inventory
associated with the job template. If not then one should be supplied when associated with the job template. If not then one should be supplied when
launching the job (boolean, read-only) launching the job (boolean, read-only)
{% endifmeth %}
{% ifmeth POST %}Make a POST request to this resource to launch the job_template. If any Make a POST request to this resource to launch the job_template. If any
passwords, inventory, or extra variables (extra_vars) are required, they must passwords, inventory, or extra variables (extra_vars) are required, they must
be passed via POST data, with extra_vars given as a YAML or JSON string and be passed via POST data, with extra_vars given as a YAML or JSON string and
escaped parentheses. If the `inventory_needed_to_start` is `True` then the escaped parentheses. If the `inventory_needed_to_start` is `True` then the
@@ -41,4 +41,3 @@ are not provided, a 400 status code will be returned. If the job cannot be
launched, a 405 status code will be returned. If the provided credential or launched, a 405 status code will be returned. If the provided credential or
inventory are not allowed to be used by the user, then a 403 status code will inventory are not allowed to be used by the user, then a 403 status code will
be returned. be returned.
{% endifmeth %}

View File

@@ -1,23 +0,0 @@
receptor_user: awx
receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_work_commands:
ansible-runner:
command: ansible-runner
params: worker
allowruntimeparams: true
verifysignature: true
custom_worksign_public_keyfile: receptor/work-public-key.pem
custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key
custom_ca_certfile: receptor/tls/ca/receptor-ca.crt
receptor_protocol: 'tcp'
receptor_listener: true
receptor_port: {{ instance.listener_port }}
receptor_dependencies:
- python39-pip
{% verbatim %}
podman_user: "{{ receptor_user }}"
podman_group: "{{ receptor_group }}"
{% endverbatim %}

View File

@@ -1,20 +0,0 @@
{% verbatim %}
---
- hosts: all
become: yes
tasks:
- name: Create the receptor user
user:
name: "{{ receptor_user }}"
shell: /bin/bash
- name: Enable Copr repo for Receptor
command: dnf copr enable ansible-awx/receptor -y
- import_role:
name: ansible.receptor.podman
- import_role:
name: ansible.receptor.setup
- name: Install ansible-runner
pip:
name: ansible-runner
executable: pip3.9
{% endverbatim %}

View File

@@ -1,7 +0,0 @@
---
all:
hosts:
remote-execution:
ansible_host: {{ instance.hostname }}
ansible_user: <username> # user provided
ansible_ssh_private_key_file: ~/.ssh/id_rsa

View File

@@ -1,4 +0,0 @@
---
collections:
- name: ansible.receptor
version: 1.1.0

View File

@@ -1,17 +0,0 @@
from django.urls import re_path
from awx.api.views.debug import (
DebugRootView,
TaskManagerDebugView,
DependencyManagerDebugView,
WorkflowManagerDebugView,
)
urls = [
re_path(r'^$', DebugRootView.as_view(), name='debug'),
re_path(r'^task_manager/$', TaskManagerDebugView.as_view(), name='task_manager'),
re_path(r'^dependency_manager/$', DependencyManagerDebugView.as_view(), name='dependency_manager'),
re_path(r'^workflow_manager/$', WorkflowManagerDebugView.as_view(), name='workflow_manager'),
]
__all__ = ['urls']

View File

@@ -3,15 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views import ( from awx.api.views import InstanceList, InstanceDetail, InstanceUnifiedJobsList, InstanceInstanceGroupsList, InstanceHealthCheck
InstanceList,
InstanceDetail,
InstanceUnifiedJobsList,
InstanceInstanceGroupsList,
InstanceHealthCheck,
InstancePeersList,
)
from awx.api.views.instance_install_bundle import InstanceInstallBundle
urls = [ urls = [
@@ -20,8 +12,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceUnifiedJobsList.as_view(), name='instance_unified_jobs_list'), re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceUnifiedJobsList.as_view(), name='instance_unified_jobs_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'), re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'),
re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'), re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'),
re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'),
re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'),
] ]
__all__ = ['urls'] __all__ = ['urls']

View File

@@ -3,28 +3,26 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.inventory import ( from awx.api.views import (
InventoryList, InventoryList,
InventoryDetail, InventoryDetail,
InventoryHostsList,
InventoryGroupsList,
InventoryRootGroupsList,
InventoryVariableData,
InventoryScriptView,
InventoryTreeView,
InventoryInventorySourcesList,
InventoryInventorySourcesUpdate,
InventoryActivityStreamList, InventoryActivityStreamList,
InventoryJobTemplateList, InventoryJobTemplateList,
InventoryAdHocCommandsList,
InventoryAccessList, InventoryAccessList,
InventoryObjectRolesList, InventoryObjectRolesList,
InventoryInstanceGroupsList, InventoryInstanceGroupsList,
InventoryLabelList, InventoryLabelList,
InventoryCopy, InventoryCopy,
) )
from awx.api.views import (
InventoryHostsList,
InventoryGroupsList,
InventoryInventorySourcesList,
InventoryInventorySourcesUpdate,
InventoryAdHocCommandsList,
InventoryRootGroupsList,
InventoryScriptView,
InventoryTreeView,
InventoryVariableData,
)
urls = [ urls = [

View File

@@ -3,9 +3,6 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.inventory import (
InventoryUpdateEventsList,
)
from awx.api.views import ( from awx.api.views import (
InventoryUpdateList, InventoryUpdateList,
InventoryUpdateDetail, InventoryUpdateDetail,
@@ -13,6 +10,7 @@ from awx.api.views import (
InventoryUpdateStdout, InventoryUpdateStdout,
InventoryUpdateNotificationsList, InventoryUpdateNotificationsList,
InventoryUpdateCredentialsList, InventoryUpdateCredentialsList,
InventoryUpdateEventsList,
) )

View File

@@ -3,7 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.labels import LabelList, LabelDetail from awx.api.views import LabelList, LabelDetail
urls = [re_path(r'^$', LabelList.as_view(), name='label_list'), re_path(r'^(?P<pk>[0-9]+)/$', LabelDetail.as_view(), name='label_detail')] urls = [re_path(r'^$', LabelList.as_view(), name='label_list'), re_path(r'^(?P<pk>[0-9]+)/$', LabelDetail.as_view(), name='label_detail')]

View File

@@ -10,7 +10,7 @@ from oauthlib import oauth2
from oauth2_provider import views from oauth2_provider import views
from awx.main.models import RefreshToken from awx.main.models import RefreshToken
from awx.api.views.root import ApiOAuthAuthorizationRootView from awx.api.views import ApiOAuthAuthorizationRootView
class TokenView(views.TokenView): class TokenView(views.TokenView):

View File

@@ -3,7 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.organization import ( from awx.api.views import (
OrganizationList, OrganizationList,
OrganizationDetail, OrganizationDetail,
OrganizationUsersList, OrganizationUsersList,
@@ -14,6 +14,7 @@ from awx.api.views.organization import (
OrganizationJobTemplatesList, OrganizationJobTemplatesList,
OrganizationWorkflowJobTemplatesList, OrganizationWorkflowJobTemplatesList,
OrganizationTeamsList, OrganizationTeamsList,
OrganizationCredentialList,
OrganizationActivityStreamList, OrganizationActivityStreamList,
OrganizationNotificationTemplatesList, OrganizationNotificationTemplatesList,
OrganizationNotificationTemplatesErrorList, OrganizationNotificationTemplatesErrorList,
@@ -24,8 +25,8 @@ from awx.api.views.organization import (
OrganizationGalaxyCredentialsList, OrganizationGalaxyCredentialsList,
OrganizationObjectRolesList, OrganizationObjectRolesList,
OrganizationAccessList, OrganizationAccessList,
OrganizationApplicationList,
) )
from awx.api.views import OrganizationCredentialList, OrganizationApplicationList
urls = [ urls = [

View File

@@ -3,7 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList, ScheduleLabelsList, ScheduleInstanceGroupList from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList
urls = [ urls = [
@@ -11,8 +11,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'), re_path(r'^(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'),
re_path(r'^(?P<pk>[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'), re_path(r'^(?P<pk>[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'),
re_path(r'^(?P<pk>[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'), re_path(r'^(?P<pk>[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', ScheduleLabelsList.as_view(), name='schedule_labels_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', ScheduleInstanceGroupList.as_view(), name='schedule_instance_groups_list'),
] ]
__all__ = ['urls'] __all__ = ['urls']

View File

@@ -2,19 +2,17 @@
# All Rights Reserved. # All Rights Reserved.
from __future__ import absolute_import, unicode_literals from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.urls import include, re_path from django.urls import include, re_path
from awx import MODE
from awx.api.generics import LoggedLoginView, LoggedLogoutView from awx.api.generics import LoggedLoginView, LoggedLogoutView
from awx.api.views.root import ( from awx.api.views import (
ApiRootView, ApiRootView,
ApiV2RootView, ApiV2RootView,
ApiV2PingView, ApiV2PingView,
ApiV2ConfigView, ApiV2ConfigView,
ApiV2SubscriptionView, ApiV2SubscriptionView,
ApiV2AttachView, ApiV2AttachView,
)
from awx.api.views import (
AuthView, AuthView,
UserMeList, UserMeList,
DashboardView, DashboardView,
@@ -30,8 +28,8 @@ from awx.api.views import (
OAuth2TokenList, OAuth2TokenList,
ApplicationOAuth2TokenList, ApplicationOAuth2TokenList,
OAuth2ApplicationDetail, OAuth2ApplicationDetail,
MeshVisualizer,
) )
from awx.api.views.mesh_visualizer import MeshVisualizer
from awx.api.views.metrics import MetricsView from awx.api.views.metrics import MetricsView
@@ -147,12 +145,7 @@ urlpatterns = [
re_path(r'^logout/$', LoggedLogoutView.as_view(next_page='/api/', redirect_field_name='next'), name='logout'), re_path(r'^logout/$', LoggedLogoutView.as_view(next_page='/api/', redirect_field_name='next'), name='logout'),
re_path(r'^o/', include(oauth2_root_urls)), re_path(r'^o/', include(oauth2_root_urls)),
] ]
if MODE == 'development': if settings.SETTINGS_MODULE == 'awx.settings.development':
# Only include these if we are in the development environment
from awx.api.swagger import SwaggerSchemaView from awx.api.swagger import SwaggerSchemaView
urlpatterns += [re_path(r'^swagger/$', SwaggerSchemaView.as_view(), name='swagger_view')] urlpatterns += [re_path(r'^swagger/$', SwaggerSchemaView.as_view(), name='swagger_view')]
from awx.api.urls.debug import urls as debug_urls
urlpatterns += [re_path(r'^debug/', include(debug_urls))]

View File

@@ -1,6 +1,6 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver from awx.api.views import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver
urlpatterns = [ urlpatterns = [

View File

@@ -10,8 +10,6 @@ from awx.api.views import (
WorkflowJobNodeFailureNodesList, WorkflowJobNodeFailureNodesList,
WorkflowJobNodeAlwaysNodesList, WorkflowJobNodeAlwaysNodesList,
WorkflowJobNodeCredentialsList, WorkflowJobNodeCredentialsList,
WorkflowJobNodeLabelsList,
WorkflowJobNodeInstanceGroupsList,
) )
@@ -22,8 +20,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'), re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobNodeLabelsList.as_view(), name='workflow_job_node_labels_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobNodeInstanceGroupsList.as_view(), name='workflow_job_node_instance_groups_list'),
] ]
__all__ = ['urls'] __all__ = ['urls']

View File

@@ -11,8 +11,6 @@ from awx.api.views import (
WorkflowJobTemplateNodeAlwaysNodesList, WorkflowJobTemplateNodeAlwaysNodesList,
WorkflowJobTemplateNodeCredentialsList, WorkflowJobTemplateNodeCredentialsList,
WorkflowJobTemplateNodeCreateApproval, WorkflowJobTemplateNodeCreateApproval,
WorkflowJobTemplateNodeLabelsList,
WorkflowJobTemplateNodeInstanceGroupsList,
) )
@@ -23,8 +21,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'), re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobTemplateNodeLabelsList.as_view(), name='workflow_job_template_node_labels_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobTemplateNodeInstanceGroupsList.as_view(), name='workflow_job_template_node_instance_groups_list'),
re_path(r'^(?P<pk>[0-9]+)/create_approval_template/$', WorkflowJobTemplateNodeCreateApproval.as_view(), name='workflow_job_template_node_create_approval'), re_path(r'^(?P<pk>[0-9]+)/create_approval_template/$', WorkflowJobTemplateNodeCreateApproval.as_view(), name='workflow_job_template_node_create_approval'),
] ]

View File

@@ -1,55 +0,0 @@
import re
from django.core.validators import RegexValidator, validate_ipv46_address
from django.core.exceptions import ValidationError
class HostnameRegexValidator(RegexValidator):
"""
Fully validates a domain name that is compliant with norms in Linux/RHEL
- Cannot start with a hyphen
- Cannot begin with, or end with a "."
- Cannot contain any whitespaces
- Entire hostname is max 255 chars (including dots)
- Each domain/label is between 1 and 63 characters, except top level domain, which must be at least 2 characters
- Supports ipv4, ipv6, simple hostnames and FQDNs
- Follows RFC 9210 (modern RFC 1123, 1178) requirements
Accepts an IP Address or Hostname as the argument
"""
regex = '^[a-z0-9][-a-z0-9]*$|^([a-z0-9][-a-z0-9]{0,62}[.])*[a-z0-9][-a-z0-9]{1,62}$'
flags = re.IGNORECASE
def __call__(self, value):
regex_matches, err = self.__validate(value)
invalid_input = regex_matches if self.inverse_match else not regex_matches
if invalid_input:
if err is None:
err = ValidationError(self.message, code=self.code, params={"value": value})
raise err
def __str__(self):
return f"regex={self.regex}, message={self.message}, code={self.code}, inverse_match={self.inverse_match}, flags={self.flags}"
def __validate(self, value):
if ' ' in value:
return False, ValidationError("whitespaces in hostnames are illegal")
"""
If we have an IP address, try and validate it.
"""
try:
validate_ipv46_address(value)
return True, None
except ValidationError:
pass
"""
By this point in the code, we probably have a simple hostname, FQDN or a strange hostname like "192.localhost.domain.101"
"""
if not self.regex.match(value):
return False, ValidationError(f"illegal characters detected in hostname={value}. Please verify.")
return True, None

View File

@@ -5,7 +5,6 @@
import dateutil import dateutil
import functools import functools
import html import html
import itertools
import logging import logging
import re import re
import requests import requests
@@ -21,10 +20,8 @@ from urllib3.exceptions import ConnectTimeoutError
# Django # Django
from django.conf import settings from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.db.models import Q, Sum, Count from django.db.models import Q, Sum
from django.db import IntegrityError, ProgrammingError, transaction, connection from django.db import IntegrityError, ProgrammingError, transaction, connection
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.db.models.functions import Trunc
from django.shortcuts import get_object_or_404 from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe from django.utils.safestring import mark_safe
from django.utils.timezone import now from django.utils.timezone import now
@@ -49,6 +46,9 @@ from rest_framework import status
from rest_framework_yaml.parsers import YAMLParser from rest_framework_yaml.parsers import YAMLParser
from rest_framework_yaml.renderers import YAMLRenderer from rest_framework_yaml.renderers import YAMLRenderer
# QSStats
import qsstats
# ANSIConv # ANSIConv
import ansiconv import ansiconv
@@ -68,7 +68,7 @@ from awx.api.generics import (
APIView, APIView,
BaseUsersList, BaseUsersList,
CopyAPIView, CopyAPIView,
GenericCancelView, DeleteLastUnattachLabelMixin,
GenericAPIView, GenericAPIView,
ListAPIView, ListAPIView,
ListCreateAPIView, ListCreateAPIView,
@@ -85,7 +85,6 @@ from awx.api.generics import (
SubListCreateAttachDetachAPIView, SubListCreateAttachDetachAPIView,
SubListDestroyAPIView, SubListDestroyAPIView,
) )
from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main import models from awx.main import models
from awx.main.utils import ( from awx.main.utils import (
@@ -94,7 +93,7 @@ from awx.main.utils import (
get_object_or_400, get_object_or_400,
getattrd, getattrd,
get_pk_from_dict, get_pk_from_dict,
ScheduleWorkflowManager, schedule_task_manager,
ignore_inventory_computed_fields, ignore_inventory_computed_fields,
) )
from awx.main.utils.encryption import encrypt_value from awx.main.utils.encryption import encrypt_value
@@ -122,9 +121,59 @@ from awx.api.views.mixin import (
UnifiedJobDeletionMixin, UnifiedJobDeletionMixin,
NoTruncateMixin, NoTruncateMixin,
) )
from awx.api.views.organization import ( # noqa
OrganizationList,
OrganizationDetail,
OrganizationInventoriesList,
OrganizationUsersList,
OrganizationAdminsList,
OrganizationExecutionEnvironmentsList,
OrganizationProjectsList,
OrganizationJobTemplatesList,
OrganizationWorkflowJobTemplatesList,
OrganizationTeamsList,
OrganizationActivityStreamList,
OrganizationNotificationTemplatesList,
OrganizationNotificationTemplatesAnyList,
OrganizationNotificationTemplatesErrorList,
OrganizationNotificationTemplatesStartedList,
OrganizationNotificationTemplatesSuccessList,
OrganizationNotificationTemplatesApprovalList,
OrganizationInstanceGroupsList,
OrganizationGalaxyCredentialsList,
OrganizationAccessList,
OrganizationObjectRolesList,
)
from awx.api.views.inventory import ( # noqa
InventoryList,
InventoryDetail,
InventoryUpdateEventsList,
InventoryList,
InventoryDetail,
InventoryActivityStreamList,
InventoryInstanceGroupsList,
InventoryAccessList,
InventoryObjectRolesList,
InventoryJobTemplateList,
InventoryLabelList,
InventoryCopy,
)
from awx.api.views.mesh_visualizer import MeshVisualizer # noqa
from awx.api.views.root import ( # noqa
ApiRootView,
ApiOAuthAuthorizationRootView,
ApiVersionRootView,
ApiV2RootView,
ApiV2PingView,
ApiV2ConfigView,
ApiV2SubscriptionView,
ApiV2AttachView,
)
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver # noqa
from awx.api.pagination import UnifiedJobEventPagination from awx.api.pagination import UnifiedJobEventPagination
from awx.main.utils import set_environ from awx.main.utils import set_environ
logger = logging.getLogger('awx.api.views') logger = logging.getLogger('awx.api.views')
@@ -282,54 +331,34 @@ class DashboardJobsGraphView(APIView):
success_query = success_query.filter(instance_of=models.ProjectUpdate) success_query = success_query.filter(instance_of=models.ProjectUpdate)
failed_query = failed_query.filter(instance_of=models.ProjectUpdate) failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
end = now() success_qss = qsstats.QuerySetStats(success_query, 'finished')
interval = 'day' failed_qss = qsstats.QuerySetStats(failed_query, 'finished')
start_date = now()
if period == 'month': if period == 'month':
start = end - dateutil.relativedelta.relativedelta(months=1) end_date = start_date - dateutil.relativedelta.relativedelta(months=1)
interval = 'days'
elif period == 'two_weeks': elif period == 'two_weeks':
start = end - dateutil.relativedelta.relativedelta(weeks=2) end_date = start_date - dateutil.relativedelta.relativedelta(weeks=2)
interval = 'days'
elif period == 'week': elif period == 'week':
start = end - dateutil.relativedelta.relativedelta(weeks=1) end_date = start_date - dateutil.relativedelta.relativedelta(weeks=1)
interval = 'days'
elif period == 'day': elif period == 'day':
start = end - dateutil.relativedelta.relativedelta(days=1) end_date = start_date - dateutil.relativedelta.relativedelta(days=1)
interval = 'hour' interval = 'hours'
else: else:
return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST) return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
dashboard_data = {"jobs": {"successful": [], "failed": []}} dashboard_data = {"jobs": {"successful": [], "failed": []}}
for element in success_qss.time_series(end_date, start_date, interval=interval):
succ_list = dashboard_data['jobs']['successful'] dashboard_data['jobs']['successful'].append([time.mktime(element[0].timetuple()), element[1]])
fail_list = dashboard_data['jobs']['failed'] for element in failed_qss.time_series(end_date, start_date, interval=interval):
dashboard_data['jobs']['failed'].append([time.mktime(element[0].timetuple()), element[1]])
qs_s = (
success_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_s = {item['d']: item['agg'] for item in qs_s}
qs_f = (
failed_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_f = {item['d']: item['agg'] for item in qs_f}
start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
for d in itertools.count():
date = start_date + dateutil.relativedelta.relativedelta(days=d)
if date > end:
break
succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
return Response(dashboard_data) return Response(dashboard_data)
class InstanceList(ListCreateAPIView): class InstanceList(ListAPIView):
name = _("Instances") name = _("Instances")
model = models.Instance model = models.Instance
@@ -344,13 +373,6 @@ class InstanceDetail(RetrieveUpdateAPIView):
model = models.Instance model = models.Instance
serializer_class = serializers.InstanceSerializer serializer_class = serializers.InstanceSerializer
def update_raw_data(self, data):
# these fields are only valid on creation of an instance, so they unwanted on detail view
data.pop('listener_port', None)
data.pop('node_type', None)
data.pop('hostname', None)
return super(InstanceDetail, self).update_raw_data(data)
def update(self, request, *args, **kwargs): def update(self, request, *args, **kwargs):
r = super(InstanceDetail, self).update(request, *args, **kwargs) r = super(InstanceDetail, self).update(request, *args, **kwargs)
if status.is_success(r.status_code): if status.is_success(r.status_code):
@@ -375,17 +397,6 @@ class InstanceUnifiedJobsList(SubListAPIView):
return qs return qs
class InstancePeersList(SubListAPIView):
name = _("Instance Peers")
parent_model = models.Instance
model = models.Instance
serializer_class = serializers.InstanceSerializer
parent_access = 'read'
search_fields = {'hostname'}
relationship = 'peers'
class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView): class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):
name = _("Instance's Instance Groups") name = _("Instance's Instance Groups")
@@ -418,8 +429,8 @@ class InstanceHealthCheck(GenericAPIView):
permission_classes = (IsSystemAdminOrAuditor,) permission_classes = (IsSystemAdminOrAuditor,)
def get_queryset(self): def get_queryset(self):
return super().get_queryset().filter(node_type='execution')
# FIXME: For now, we don't have a good way of checking the health of a hop node. # FIXME: For now, we don't have a good way of checking the health of a hop node.
return super().get_queryset().exclude(node_type='hop')
def get(self, request, *args, **kwargs): def get(self, request, *args, **kwargs):
obj = self.get_object() obj = self.get_object()
@@ -428,22 +439,40 @@ class InstanceHealthCheck(GenericAPIView):
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
obj = self.get_object() obj = self.get_object()
if obj.health_check_pending:
return Response({'msg': f"Health check was already in progress for {obj.hostname}."}, status=status.HTTP_200_OK)
# Note: hop nodes are already excluded by the get_queryset method if obj.node_type == 'execution':
obj.health_check_started = now()
obj.save(update_fields=['health_check_started'])
if obj.node_type == models.Instance.Types.EXECUTION:
from awx.main.tasks.system import execution_node_health_check from awx.main.tasks.system import execution_node_health_check
execution_node_health_check.apply_async([obj.hostname]) runner_data = execution_node_health_check(obj.hostname)
obj.refresh_from_db()
data = self.get_serializer(data=request.data).to_representation(obj)
# Add in some extra unsaved fields
for extra_field in ('transmit_timing', 'run_timing'):
if extra_field in runner_data:
data[extra_field] = runner_data[extra_field]
else: else:
return Response( from awx.main.tasks.system import cluster_node_health_check
{"error": f"Cannot run a health check on instances of type {obj.node_type}. Health checks can only be run on execution nodes."},
status=status.HTTP_400_BAD_REQUEST, if settings.CLUSTER_HOST_ID == obj.hostname:
) cluster_node_health_check(obj.hostname)
return Response({'msg': f"Health check is running for {obj.hostname}."}, status=status.HTTP_200_OK) else:
cluster_node_health_check.apply_async([obj.hostname], queue=obj.hostname)
start_time = time.time()
prior_check_time = obj.last_health_check
while time.time() - start_time < 50.0:
obj.refresh_from_db(fields=['last_health_check'])
if obj.last_health_check != prior_check_time:
break
if time.time() - start_time < 1.0:
time.sleep(0.1)
else:
time.sleep(1.0)
else:
obj.mark_offline(errors=_('Health check initiated by user determined this instance to be unresponsive'))
obj.refresh_from_db()
data = self.get_serializer(data=request.data).to_representation(obj)
return Response(data, status=status.HTTP_200_OK)
class InstanceGroupList(ListCreateAPIView): class InstanceGroupList(ListCreateAPIView):
@@ -588,19 +617,6 @@ class ScheduleCredentialsList(LaunchConfigCredentialsBase):
parent_model = models.Schedule parent_model = models.Schedule
class ScheduleLabelsList(LabelSubListCreateAttachDetachView):
parent_model = models.Schedule
class ScheduleInstanceGroupList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.Schedule
relationship = 'instance_groups'
class ScheduleUnifiedJobsList(SubListAPIView): class ScheduleUnifiedJobsList(SubListAPIView):
model = models.UnifiedJob model = models.UnifiedJob
@@ -1004,11 +1020,20 @@ class SystemJobEventsList(SubListAPIView):
return job.get_event_queryset() return job.get_event_queryset()
class ProjectUpdateCancel(GenericCancelView): class ProjectUpdateCancel(RetrieveAPIView):
model = models.ProjectUpdate model = models.ProjectUpdate
obj_permission_type = 'cancel'
serializer_class = serializers.ProjectUpdateCancelSerializer serializer_class = serializers.ProjectUpdateCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class ProjectUpdateNotificationsList(SubListAPIView): class ProjectUpdateNotificationsList(SubListAPIView):
@@ -2247,8 +2272,6 @@ class InventorySourceUpdateView(RetrieveAPIView):
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
obj = self.get_object() obj = self.get_object()
serializer = self.get_serializer(instance=obj, data=request.data)
serializer.is_valid(raise_exception=True)
if obj.can_update: if obj.can_update:
update = obj.update() update = obj.update()
if not update: if not update:
@@ -2283,11 +2306,20 @@ class InventoryUpdateCredentialsList(SubListAPIView):
relationship = 'credentials' relationship = 'credentials'
class InventoryUpdateCancel(GenericCancelView): class InventoryUpdateCancel(RetrieveAPIView):
model = models.InventoryUpdate model = models.InventoryUpdate
obj_permission_type = 'cancel'
serializer_class = serializers.InventoryUpdateCancelSerializer serializer_class = serializers.InventoryUpdateCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class InventoryUpdateNotificationsList(SubListAPIView): class InventoryUpdateNotificationsList(SubListAPIView):
@@ -2349,13 +2381,10 @@ class JobTemplateLaunch(RetrieveAPIView):
for field, ask_field_name in modified_ask_mapping.items(): for field, ask_field_name in modified_ask_mapping.items():
if not getattr(obj, ask_field_name): if not getattr(obj, ask_field_name):
data.pop(field, None) data.pop(field, None)
elif isinstance(getattr(obj.__class__, field).field, ForeignKey): elif field == 'inventory':
data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None) data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
elif isinstance(getattr(obj.__class__, field).field, ManyToManyField): elif field == 'credentials':
if field == 'instance_groups': data[field] = [cred.id for cred in obj.credentials.all()]
data[field] = []
continue
data[field] = [item.id for item in getattr(obj, field).all()]
else: else:
data[field] = getattr(obj, field) data[field] = getattr(obj, field)
return data return data
@@ -2368,8 +2397,9 @@ class JobTemplateLaunch(RetrieveAPIView):
""" """
modern_data = data.copy() modern_data = data.copy()
if 'inventory' not in modern_data and 'inventory_id' in modern_data: id_fd = '{}_id'.format('inventory')
modern_data['inventory'] = modern_data['inventory_id'] if 'inventory' not in modern_data and id_fd in modern_data:
modern_data['inventory'] = modern_data[id_fd]
# credential passwords were historically provided as top-level attributes # credential passwords were historically provided as top-level attributes
if 'credential_passwords' not in modern_data: if 'credential_passwords' not in modern_data:
@@ -2689,9 +2719,28 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created) return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)
class JobTemplateLabelList(LabelSubListCreateAttachDetachView): class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
parent_model = models.JobTemplate parent_model = models.JobTemplate
relationship = 'labels'
def post(self, request, *args, **kwargs):
# If a label already exists in the database, attach it instead of erroring out
# that it already exists
if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
existing = models.Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
if existing.exists():
existing = existing[0]
request.data['id'] = existing.id
del request.data['name']
del request.data['organization']
if models.Label.objects.filter(unifiedjobtemplate_labels=self.kwargs['pk']).count() > 100:
return Response(
dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
)
return super(JobTemplateLabelList, self).post(request, *args, **kwargs)
class JobTemplateCallback(GenericAPIView): class JobTemplateCallback(GenericAPIView):
@@ -2917,22 +2966,6 @@ class WorkflowJobNodeCredentialsList(SubListAPIView):
relationship = 'credentials' relationship = 'credentials'
class WorkflowJobNodeLabelsList(SubListAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
parent_model = models.WorkflowJobNode
relationship = 'labels'
class WorkflowJobNodeInstanceGroupsList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.WorkflowJobNode
relationship = 'instance_groups'
class WorkflowJobTemplateNodeList(ListCreateAPIView): class WorkflowJobTemplateNodeList(ListCreateAPIView):
model = models.WorkflowJobTemplateNode model = models.WorkflowJobTemplateNode
@@ -2951,19 +2984,6 @@ class WorkflowJobTemplateNodeCredentialsList(LaunchConfigCredentialsBase):
parent_model = models.WorkflowJobTemplateNode parent_model = models.WorkflowJobTemplateNode
class WorkflowJobTemplateNodeLabelsList(LabelSubListCreateAttachDetachView):
parent_model = models.WorkflowJobTemplateNode
class WorkflowJobTemplateNodeInstanceGroupsList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.WorkflowJobTemplateNode
relationship = 'instance_groups'
class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView): class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
model = models.WorkflowJobTemplateNode model = models.WorkflowJobTemplateNode
@@ -3062,7 +3082,8 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):
search_fields = ('unified_job_template__name', 'unified_job_template__description') search_fields = ('unified_job_template__name', 'unified_job_template__description')
# #
# Limit the set of WorkflowJobNodes to the related nodes of specified by self.relationship # Limit the set of WorkflowJobeNodes to the related nodes of specified by
#'relationship'
# #
def get_queryset(self): def get_queryset(self):
parent = self.get_parent_object() parent = self.get_parent_object()
@@ -3175,17 +3196,13 @@ class WorkflowJobTemplateLaunch(RetrieveAPIView):
data['extra_vars'] = extra_vars data['extra_vars'] = extra_vars
modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping() modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping()
modified_ask_mapping.pop('extra_vars') modified_ask_mapping.pop('extra_vars')
for field_name, ask_field_name in obj.get_ask_mapping().items():
for field, ask_field_name in modified_ask_mapping.items():
if not getattr(obj, ask_field_name): if not getattr(obj, ask_field_name):
data.pop(field, None) data.pop(field_name, None)
elif isinstance(getattr(obj.__class__, field).field, ForeignKey): elif field_name == 'inventory':
data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None) data[field_name] = getattrd(obj, "%s.%s" % (field_name, 'id'), None)
elif isinstance(getattr(obj.__class__, field).field, ManyToManyField):
data[field] = [item.id for item in getattr(obj, field).all()]
else: else:
data[field] = getattr(obj, field) data[field_name] = getattr(obj, field_name)
return data return data
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
@@ -3364,15 +3381,20 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id') return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id')
class WorkflowJobCancel(GenericCancelView): class WorkflowJobCancel(RetrieveAPIView):
model = models.WorkflowJob model = models.WorkflowJob
obj_permission_type = 'cancel'
serializer_class = serializers.WorkflowJobCancelSerializer serializer_class = serializers.WorkflowJobCancelSerializer
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
r = super().post(request, *args, **kwargs) obj = self.get_object()
ScheduleWorkflowManager().schedule() if obj.can_cancel:
return r obj.cancel()
schedule_task_manager()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class WorkflowJobNotificationsList(SubListAPIView): class WorkflowJobNotificationsList(SubListAPIView):
@@ -3528,11 +3550,20 @@ class JobActivityStreamList(SubListAPIView):
search_fields = ('changes',) search_fields = ('changes',)
class JobCancel(GenericCancelView): class JobCancel(RetrieveAPIView):
model = models.Job model = models.Job
obj_permission_type = 'cancel'
serializer_class = serializers.JobCancelSerializer serializer_class = serializers.JobCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class JobRelaunch(RetrieveAPIView): class JobRelaunch(RetrieveAPIView):
@@ -3658,21 +3689,15 @@ class JobCreateSchedule(RetrieveAPIView):
extra_data=config.extra_data, extra_data=config.extra_data,
survey_passwords=config.survey_passwords, survey_passwords=config.survey_passwords,
inventory=config.inventory, inventory=config.inventory,
execution_environment=config.execution_environment,
char_prompts=config.char_prompts, char_prompts=config.char_prompts,
credentials=set(config.credentials.all()), credentials=set(config.credentials.all()),
labels=set(config.labels.all()),
instance_groups=list(config.instance_groups.all()),
) )
if not request.user.can_access(models.Schedule, 'add', schedule_data): if not request.user.can_access(models.Schedule, 'add', schedule_data):
raise PermissionDenied() raise PermissionDenied()
related_fields = ('credentials', 'labels', 'instance_groups') creds_list = schedule_data.pop('credentials')
related = [schedule_data.pop(relationship) for relationship in related_fields]
schedule = models.Schedule.objects.create(**schedule_data) schedule = models.Schedule.objects.create(**schedule_data)
for relationship, items in zip(related_fields, related): schedule.credentials.add(*creds_list)
for item in items:
getattr(schedule, relationship).add(item)
data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data
data.serializer.instance = None # hack to avoid permissions.py assuming this is Job model data.serializer.instance = None # hack to avoid permissions.py assuming this is Job model
@@ -3814,7 +3839,7 @@ class JobJobEventsList(BaseJobEventsList):
def get_queryset(self): def get_queryset(self):
job = self.get_parent_object() job = self.get_parent_object()
self.check_parent_access(job) self.check_parent_access(job)
return job.get_event_queryset().prefetch_related('job__job_template', 'host').order_by('start_line') return job.get_event_queryset().select_related('host').order_by('start_line')
class JobJobEventsChildrenSummary(APIView): class JobJobEventsChildrenSummary(APIView):
@@ -4003,11 +4028,20 @@ class AdHocCommandDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
serializer_class = serializers.AdHocCommandDetailSerializer serializer_class = serializers.AdHocCommandDetailSerializer
class AdHocCommandCancel(GenericCancelView): class AdHocCommandCancel(RetrieveAPIView):
model = models.AdHocCommand model = models.AdHocCommand
obj_permission_type = 'cancel'
serializer_class = serializers.AdHocCommandCancelSerializer serializer_class = serializers.AdHocCommandCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class AdHocCommandRelaunch(GenericAPIView): class AdHocCommandRelaunch(GenericAPIView):
@@ -4142,11 +4176,20 @@ class SystemJobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
serializer_class = serializers.SystemJobSerializer serializer_class = serializers.SystemJobSerializer
class SystemJobCancel(GenericCancelView): class SystemJobCancel(RetrieveAPIView):
model = models.SystemJob model = models.SystemJob
obj_permission_type = 'cancel'
serializer_class = serializers.SystemJobCancelSerializer serializer_class = serializers.SystemJobCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class SystemJobNotificationsList(SubListAPIView): class SystemJobNotificationsList(SubListAPIView):
@@ -4385,6 +4428,18 @@ class NotificationDetail(RetrieveAPIView):
serializer_class = serializers.NotificationSerializer serializer_class = serializers.NotificationSerializer
class LabelList(ListCreateAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
class LabelDetail(RetrieveUpdateAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
class ActivityStreamList(SimpleListAPIView): class ActivityStreamList(SimpleListAPIView):
model = models.ActivityStream model = models.ActivityStream

View File

@@ -1,68 +0,0 @@
from collections import OrderedDict
from django.conf import settings
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from awx.api.generics import APIView
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
class TaskManagerDebugView(APIView):
    """Debug endpoint that manually triggers one run of the task manager."""

    _ignore_model_permissions = True
    exclude_from_schema = True
    permission_classes = [AllowAny]
    prefix = 'Task'

    def get(self, request):
        # The manager always runs; AWX_DISABLE_TASK_MANAGERS only changes
        # the status message returned to the caller.
        TaskManager().schedule()
        if settings.AWX_DISABLE_TASK_MANAGERS:
            message = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
        else:
            message = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
        return Response(message)
class DependencyManagerDebugView(APIView):
    """Debug endpoint that manually triggers one run of the dependency manager."""

    _ignore_model_permissions = True
    exclude_from_schema = True
    permission_classes = [AllowAny]
    prefix = 'Dependency'

    def get(self, request):
        # The manager always runs; AWX_DISABLE_TASK_MANAGERS only changes
        # the status message returned to the caller.
        DependencyManager().schedule()
        if settings.AWX_DISABLE_TASK_MANAGERS:
            message = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
        else:
            message = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
        return Response(message)
class WorkflowManagerDebugView(APIView):
    """Debug endpoint that manually triggers one run of the workflow manager."""

    _ignore_model_permissions = True
    exclude_from_schema = True
    permission_classes = [AllowAny]
    prefix = 'Workflow'

    def get(self, request):
        # The manager always runs; AWX_DISABLE_TASK_MANAGERS only changes
        # the status message returned to the caller.
        WorkflowManager().schedule()
        if settings.AWX_DISABLE_TASK_MANAGERS:
            message = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
        else:
            message = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
        return Response(message)
class DebugRootView(APIView):
    """Index view listing the available debug endpoints."""

    _ignore_model_permissions = True
    exclude_from_schema = True
    permission_classes = [AllowAny]

    def get(self, request, format=None):
        '''List of available debug urls'''
        # Build the index in one literal; OrderedDict preserves the
        # display order of the endpoints in the rendered response.
        data = OrderedDict(
            [
                ('task_manager', '/api/debug/task_manager/'),
                ('dependency_manager', '/api/debug/dependency_manager/'),
                ('workflow_manager', '/api/debug/workflow_manager/'),
            ]
        )
        return Response(data)

View File

@@ -1,199 +0,0 @@
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
import datetime
import io
import ipaddress
import os
import tarfile
import asn1
from awx.api import serializers
from awx.api.generics import GenericAPIView, Response
from awx.api.permissions import IsSystemAdminOrAuditor
from awx.main import models
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509 import DNSName, IPAddress, ObjectIdentifier, OtherName
from cryptography.x509.oid import NameOID
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from rest_framework import status
# Red Hat has an OID namespace (RHANANA). Receptor has its own designation under that.
# Used below as the OtherName type in the certificate's SubjectAlternativeName.
RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
# generate install bundle for the instance
# install bundle directory structure
# ├── install_receptor.yml (playbook)
# ├── inventory.yml
# ├── group_vars
# │ └── all.yml
# ├── receptor
# │ ├── tls
# │ │ ├── ca
# │ │ │ └── receptor-ca.crt
# │ │ ├── receptor.crt
# │ │ └── receptor.key
# │ └── work-public-key.pem
# └── requirements.yml
class InstanceInstallBundle(GenericAPIView):
    """Serve a downloadable tar.gz install bundle for an execution node.

    The bundle contains the receptor CA certificate, a freshly generated
    TLS key/cert pair for the node, the work-signing public key, and the
    Ansible playbook/inventory/vars files needed to install receptor on
    the remote host (see the directory layout comment above).
    """

    name = _('Install Bundle')
    model = models.Instance
    serializer_class = serializers.InstanceSerializer
    # only system admins/auditors may download node credentials
    permission_classes = (IsSystemAdminOrAuditor,)

    def get(self, request, *args, **kwargs):
        instance_obj = self.get_object()

        # bundles only make sense for execution nodes
        if instance_obj.node_type not in ('execution',):
            return Response(
                data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
                status=status.HTTP_400_BAD_REQUEST,
            )

        # assemble the whole archive in memory, then stream it back
        with io.BytesIO() as f:
            with tarfile.open(fileobj=f, mode='w:gz') as tar:
                # copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file
                tar.add(
                    os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt"
                )

                # copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem
                tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem")

                # generate and write the receptor key to receptor/tls/receptor.key in the tar file
                key, cert = generate_receptor_tls(instance_obj)

                key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
                key_tarinfo.size = len(key)
                tar.addfile(key_tarinfo, io.BytesIO(key))

                cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
                cert_tarinfo.size = len(cert)
                tar.addfile(cert_tarinfo, io.BytesIO(cert))

                # generate and write install_receptor.yml to the tar file
                playbook = generate_playbook().encode('utf-8')
                playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
                playbook_tarinfo.size = len(playbook)
                tar.addfile(playbook_tarinfo, io.BytesIO(playbook))

                # generate and write inventory.yml to the tar file
                inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
                inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
                inventory_yml_tarinfo.size = len(inventory_yml)
                tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))

                # generate and write group_vars/all.yml to the tar file
                group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
                group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
                group_vars_tarinfo.size = len(group_vars)
                tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))

                # generate and write requirements.yml to the tar file
                requirements_yml = generate_requirements_yml().encode('utf-8')
                requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
                requirements_yml_tarinfo.size = len(requirements_yml)
                tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))

            # respond with the tarfile
            f.seek(0)
            response = HttpResponse(f.read(), status=status.HTTP_200_OK)
            response['Content-Disposition'] = f"attachment; filename={instance_obj.hostname}_install_bundle.tar.gz"
            return response
def generate_playbook():
    """Render the install_receptor.yml playbook template as a string."""
    template = "instance_install_bundle/install_receptor.yml"
    return render_to_string(template)
def generate_requirements_yml():
    """Render the requirements.yml collection-requirements template as a string."""
    template = "instance_install_bundle/requirements.yml"
    return render_to_string(template)
def generate_inventory_yml(instance_obj):
    """Render the inventory.yml template for the given instance as a string."""
    context = {'instance': instance_obj}
    return render_to_string("instance_install_bundle/inventory.yml", context=context)
def generate_group_vars_all_yml(instance_obj):
    """Render the group_vars/all.yml template for the given instance as a string."""
    context = {'instance': instance_obj}
    return render_to_string("instance_install_bundle/group_vars/all.yml", context=context)
def generate_receptor_tls(instance_obj):
    """Generate a TLS key/certificate pair for an instance's receptor.

    Creates a new RSA private key, builds a CSR whose SubjectAlternativeName
    carries the hostname as a DNSName, as a receptor OtherName (RECEPTOR_OID),
    and — when the hostname is an IPv4 literal — as an IPAddress, then signs
    it with the receptor CA key/cert read from /etc/receptor/tls/ca/.

    Returns:
        tuple[bytes, bytes]: (PEM-encoded private key, PEM-encoded certificate).
    """
    # generate private key for the receptor
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

    # encode receptor hostname to asn1 (receptor expects a UTF8String OtherName)
    hostname = instance_obj.hostname
    encoder = asn1.Encoder()
    encoder.start()
    encoder.write(hostname.encode(), nr=asn1.Numbers.UTF8String)
    hostname_asn1 = encoder.output()

    san_params = [
        DNSName(hostname),
        OtherName(ObjectIdentifier(RECEPTOR_OID), hostname_asn1),
    ]

    # if the hostname is actually an IPv4 literal, also add an IPAddress SAN;
    # otherwise AddressValueError is expected and the entry is simply skipped
    try:
        san_params.append(IPAddress(ipaddress.IPv4Address(hostname)))
    except ipaddress.AddressValueError:
        pass

    # generate certificate for the receptor
    csr = (
        x509.CertificateSigningRequestBuilder()
        .subject_name(
            x509.Name(
                [
                    x509.NameAttribute(NameOID.COMMON_NAME, hostname),
                ]
            )
        )
        .add_extension(
            x509.SubjectAlternativeName(san_params),
            critical=False,
        )
        .sign(key, hashes.SHA256())
    )

    # sign csr with the receptor ca key from /etc/receptor/tls/ca/receptor-ca.key
    with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:
        ca_key = serialization.load_pem_private_key(
            f.read(),
            password=None,
        )

    with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:
        ca_cert = x509.load_pem_x509_certificate(f.read())

    # issue the certificate: valid from now for 10 years, copying the SAN
    # extension (value and criticality) straight from the CSR
    cert = (
        x509.CertificateBuilder()
        .subject_name(csr.subject)
        .issuer_name(ca_cert.issuer)
        .public_key(csr.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(datetime.datetime.utcnow())
        .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))
        .add_extension(
            csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).value,
            critical=csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).critical,
        )
        .sign(ca_key, hashes.SHA256())
    )

    # serialize both artifacts to PEM bytes for the caller
    key = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )

    cert = cert.public_bytes(
        encoding=serialization.Encoding.PEM,
    )

    return key, cert

View File

@@ -18,6 +18,8 @@ from rest_framework import status
# AWX # AWX
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
from awx.main.models.label import Label
from awx.api.generics import ( from awx.api.generics import (
ListCreateAPIView, ListCreateAPIView,
RetrieveUpdateDestroyAPIView, RetrieveUpdateDestroyAPIView,
@@ -25,8 +27,9 @@ from awx.api.generics import (
SubListAttachDetachAPIView, SubListAttachDetachAPIView,
ResourceAccessList, ResourceAccessList,
CopyAPIView, CopyAPIView,
DeleteLastUnattachLabelMixin,
SubListCreateAttachDetachAPIView,
) )
from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.serializers import ( from awx.api.serializers import (
@@ -36,6 +39,7 @@ from awx.api.serializers import (
InstanceGroupSerializer, InstanceGroupSerializer,
InventoryUpdateEventSerializer, InventoryUpdateEventSerializer,
JobTemplateSerializer, JobTemplateSerializer,
LabelSerializer,
) )
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin from awx.api.views.mixin import RelatedJobsPreventDeleteMixin
@@ -153,9 +157,28 @@ class InventoryJobTemplateList(SubListAPIView):
return qs.filter(inventory=parent) return qs.filter(inventory=parent)
class InventoryLabelList(LabelSubListCreateAttachDetachView): class InventoryLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView, SubListAPIView):
model = Label
serializer_class = LabelSerializer
parent_model = Inventory parent_model = Inventory
relationship = 'labels'
def post(self, request, *args, **kwargs):
# If a label already exists in the database, attach it instead of erroring out
# that it already exists
if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
if existing.exists():
existing = existing[0]
request.data['id'] = existing.id
del request.data['name']
del request.data['organization']
if Label.objects.filter(inventory_labels=self.kwargs['pk']).count() > 100:
return Response(
dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
)
return super(InventoryLabelList, self).post(request, *args, **kwargs)
class InventoryCopy(CopyAPIView): class InventoryCopy(CopyAPIView):

View File

@@ -1,71 +0,0 @@
# AWX
from awx.api.generics import SubListCreateAttachDetachAPIView, RetrieveUpdateAPIView, ListCreateAPIView
from awx.main.models import Label
from awx.api.serializers import LabelSerializer
# Django
from django.utils.translation import gettext_lazy as _
# Django REST Framework
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
class LabelSubListCreateAttachDetachView(SubListCreateAttachDetachAPIView):
    """
    For related labels lists like /api/v2/inventories/N/labels/

    We want the last instance to be deleted from the database
    when the last disassociate happens.

    Subclasses need to define parent_model
    """

    model = Label
    serializer_class = LabelSerializer
    relationship = 'labels'

    def unattach(self, request, *args, **kwargs):
        # Validate first, then detach; if the label is no longer attached
        # to anything afterwards, remove it from the database entirely.
        (sub_id, res) = super().unattach_validate(request)
        if res:
            return res

        res = super().unattach_by_id(request, sub_id)

        obj = self.model.objects.get(id=sub_id)

        if obj.is_detached():
            obj.delete()

        return res

    def post(self, request, *args, **kwargs):
        # If a label already exists in the database, attach it instead of erroring out
        # that it already exists
        if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
            existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
            if existing.exists():
                existing = existing[0]
                request.data['id'] = existing.id
                del request.data['name']
                del request.data['organization']

        # Give a 400 error if we have attached too many labels to this object
        # (label_filter is the reverse-relation name from Label back to the parent model)
        label_filter = self.parent_model._meta.get_field(self.relationship).remote_field.name
        if Label.objects.filter(**{label_filter: self.kwargs['pk']}).count() > 100:
            return Response(dict(msg=_(f'Maximum number of labels for {self.parent_model._meta.verbose_name_raw} reached.')), status=HTTP_400_BAD_REQUEST)

        return super().post(request, *args, **kwargs)
class LabelDetail(RetrieveUpdateAPIView):
    """Retrieve or update a single label."""

    model = Label
    serializer_class = LabelSerializer
class LabelList(ListCreateAPIView):
    """List all labels or create a new one."""

    name = _("Labels")
    model = Label
    serializer_class = LabelSerializer

View File

@@ -16,7 +16,7 @@ from rest_framework import status
from awx.main.constants import ACTIVE_STATES from awx.main.constants import ACTIVE_STATES
from awx.main.utils import get_object_or_400 from awx.main.utils import get_object_or_400
from awx.main.models.ha import Instance, InstanceGroup, schedule_policy_task from awx.main.models.ha import Instance, InstanceGroup
from awx.main.models.organization import Team from awx.main.models.organization import Team
from awx.main.models.projects import Project from awx.main.models.projects import Project
from awx.main.models.inventory import Inventory from awx.main.models.inventory import Inventory
@@ -107,11 +107,6 @@ class InstanceGroupMembershipMixin(object):
if inst_name in ig_obj.policy_instance_list: if inst_name in ig_obj.policy_instance_list:
ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name)) ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
ig_obj.save(update_fields=['policy_instance_list']) ig_obj.save(update_fields=['policy_instance_list'])
# sometimes removing an instance has a non-obvious consequence
# this is almost always true if policy_instance_percentage or _minimum is non-zero
# after removing a single instance, the other memberships need to be re-balanced
schedule_policy_task()
return response return response

View File

@@ -80,7 +80,7 @@ def _ctit_db_wrapper(trans_safe=False):
yield yield
except DBError as exc: except DBError as exc:
if trans_safe: if trans_safe:
level = logger.warning level = logger.exception
if isinstance(exc, ProgrammingError): if isinstance(exc, ProgrammingError):
if 'relation' in str(exc) and 'does not exist' in str(exc): if 'relation' in str(exc) and 'does not exist' in str(exc):
# this generally means we can't fetch Tower configuration # this generally means we can't fetch Tower configuration
@@ -89,7 +89,7 @@ def _ctit_db_wrapper(trans_safe=False):
# has come up *before* the database has finished migrating, and # has come up *before* the database has finished migrating, and
# especially that the conf.settings table doesn't exist yet # especially that the conf.settings table doesn't exist yet
level = logger.debug level = logger.debug
level(f'Database settings are not available, using defaults. error: {str(exc)}') level('Database settings are not available, using defaults.')
else: else:
logger.exception('Error modifying something related to database settings.') logger.exception('Error modifying something related to database settings.')
finally: finally:

View File

@@ -6237,5 +6237,4 @@ msgstr "%s se está actualizando."
#: awx/ui/urls.py:24 #: awx/ui/urls.py:24
msgid "This page will refresh when complete." msgid "This page will refresh when complete."
msgstr "Esta página se actualizará cuando se complete." msgstr "Esta página se actualizará cuando se complete."

View File

@@ -721,7 +721,7 @@ msgstr "DTSTART valide obligatoire dans rrule. La valeur doit commencer par : DT
#: awx/api/serializers.py:4657 #: awx/api/serializers.py:4657
msgid "" msgid ""
"DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ." "DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ."
msgstr "DTSTART ne peut correspondre à une date-heure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ." msgstr "DTSTART ne peut correspondre à une DateHeure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ."
#: awx/api/serializers.py:4659 #: awx/api/serializers.py:4659
msgid "Multiple DTSTART is not supported." msgid "Multiple DTSTART is not supported."
@@ -6239,5 +6239,4 @@ msgstr "%s est en cours de mise à niveau."
#: awx/ui/urls.py:24 #: awx/ui/urls.py:24
msgid "This page will refresh when complete." msgid "This page will refresh when complete."
msgstr "Cette page sera rafraîchie une fois terminée." msgstr "Cette page sera rafraîchie une fois terminée."

View File

@@ -1440,7 +1440,7 @@ msgstr "指定した認証情報は無効 (HTTP 401) です。"
#: awx/api/views/root.py:193 awx/api/views/root.py:234 #: awx/api/views/root.py:193 awx/api/views/root.py:234
msgid "Unable to connect to proxy server." msgid "Unable to connect to proxy server."
msgstr "プロキシサーバーに接続できません。" msgstr "プロキシサーバーに接続できません。"
#: awx/api/views/root.py:195 awx/api/views/root.py:236 #: awx/api/views/root.py:195 awx/api/views/root.py:236
msgid "Could not connect to subscription service." msgid "Could not connect to subscription service."
@@ -1976,7 +1976,7 @@ msgstr "リモートホスト名または IP を判別するために検索す
#: awx/main/conf.py:85 #: awx/main/conf.py:85
msgid "Proxy IP Allowed List" msgid "Proxy IP Allowed List"
msgstr "プロキシ IP 許可リスト" msgstr "プロキシ IP 許可リスト"
#: awx/main/conf.py:87 #: awx/main/conf.py:87
msgid "" msgid ""
@@ -2198,7 +2198,7 @@ msgid ""
"Follow symbolic links when scanning for playbooks. Be aware that setting " "Follow symbolic links when scanning for playbooks. Be aware that setting "
"this to True can lead to infinite recursion if a link points to a parent " "this to True can lead to infinite recursion if a link points to a parent "
"directory of itself." "directory of itself."
msgstr "Playbook スキャン時にシンボリックリンクをたどります。リンクが親ディレクトリーを参照している場合は、この設定を True に定すると無限再帰が発生する可能性があります。" msgstr "Playbook スキャンするときは、シンボリックリンクをたどってください。リンクがそれ自体の親ディレクトリーをしている場合は、こを True に定すると無限再帰が発生する可能性があることに注意してください。"
#: awx/main/conf.py:337 #: awx/main/conf.py:337
msgid "Ignore Ansible Galaxy SSL Certificate Verification" msgid "Ignore Ansible Galaxy SSL Certificate Verification"
@@ -2499,7 +2499,7 @@ msgstr "Insights for Ansible Automation Platform の最終収集日。"
msgid "" msgid ""
"Last gathered entries for expensive collectors for Insights for Ansible " "Last gathered entries for expensive collectors for Insights for Ansible "
"Automation Platform." "Automation Platform."
msgstr "Insights for Ansible Automation Platform でコストがかかっているコレクターに関して最後に収集されたエントリー" msgstr "Insights for Ansible Automation Platform の高価なコレクター最後に収集されたエントリー"
#: awx/main/conf.py:686 #: awx/main/conf.py:686
msgid "Insights for Ansible Automation Platform Gather Interval" msgid "Insights for Ansible Automation Platform Gather Interval"
@@ -3692,7 +3692,7 @@ msgstr "タスクの開始"
#: awx/main/models/events.py:189 #: awx/main/models/events.py:189
msgid "Variables Prompted" msgid "Variables Prompted"
msgstr "提示される変数" msgstr "変数のプロモート"
#: awx/main/models/events.py:190 #: awx/main/models/events.py:190
msgid "Gathering Facts" msgid "Gathering Facts"
@@ -3741,15 +3741,15 @@ msgstr "エラー"
#: awx/main/models/execution_environments.py:17 #: awx/main/models/execution_environments.py:17
msgid "Always pull container before running." msgid "Always pull container before running."
msgstr "実行前に必ずコンテナーをプルする" msgstr "実行前に必ずコンテナーをプルしてください。"
#: awx/main/models/execution_environments.py:18 #: awx/main/models/execution_environments.py:18
msgid "Only pull the image if not present before running." msgid "Only pull the image if not present before running."
msgstr "イメージが存在しない場合のみ実行前にプルする" msgstr "実行する前に、存在しない場合のみイメージをプルしてください。"
#: awx/main/models/execution_environments.py:19 #: awx/main/models/execution_environments.py:19
msgid "Never pull container before running." msgid "Never pull container before running."
msgstr "実行前にコンテナーをプルしない" msgstr "実行前にコンテナーをプルしないでください。"
#: awx/main/models/execution_environments.py:29 #: awx/main/models/execution_environments.py:29
msgid "" msgid ""
@@ -5228,7 +5228,7 @@ msgid ""
"SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be " "SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be "
"specified by separating with spaces or commas. LDAP authentication is " "specified by separating with spaces or commas. LDAP authentication is "
"disabled if this parameter is empty." "disabled if this parameter is empty."
msgstr "\"ldap://ldap.example.com:389\" (非 SSL) または \"ldaps://ldap.example.com:636\" (SSL) などの LDAP サーバーに接続する URI です。複数の LDAP サーバーをスペースまたはンマで区切って指定できます。LDAP 認証は、このパラメーターが空の場合は無効になります。" msgstr "\"ldap://ldap.example.com:389\" (非 SSL) または \"ldaps://ldap.example.com:636\" (SSL) などの LDAP サーバーに接続する URI です。複数の LDAP サーバーをスペースまたはンマで区切って指定できます。LDAP 認証は、このパラメーターが空の場合は無効になります。"
#: awx/sso/conf.py:170 awx/sso/conf.py:187 awx/sso/conf.py:198 #: awx/sso/conf.py:170 awx/sso/conf.py:187 awx/sso/conf.py:198
#: awx/sso/conf.py:209 awx/sso/conf.py:226 awx/sso/conf.py:244 #: awx/sso/conf.py:209 awx/sso/conf.py:226 awx/sso/conf.py:244
@@ -6236,5 +6236,4 @@ msgstr "%s が現在アップグレード中です。"
#: awx/ui/urls.py:24 #: awx/ui/urls.py:24
msgid "This page will refresh when complete." msgid "This page will refresh when complete."
msgstr "このページは完了すると更新されます。" msgstr "このページは完了すると更新されます。"

View File

@@ -956,7 +956,7 @@ msgstr "인스턴스 그룹의 인스턴스"
#: awx/api/views/__init__.py:450 #: awx/api/views/__init__.py:450
msgid "Schedules" msgid "Schedules"
msgstr "스케줄" msgstr "일정"
#: awx/api/views/__init__.py:464 #: awx/api/views/__init__.py:464
msgid "Schedule Recurrence Rule Preview" msgid "Schedule Recurrence Rule Preview"
@@ -3261,7 +3261,7 @@ msgstr "JSON 또는 YAML 구문을 사용하여 인젝터를 입력합니다.
#: awx/main/models/credential/__init__.py:412 #: awx/main/models/credential/__init__.py:412
#, python-format #, python-format
msgid "adding %s credential type" msgid "adding %s credential type"
msgstr "인증 정보 유형 %s 추가 중" msgstr "인증 정보 유형 %s 추가 중"
#: awx/main/models/credential/__init__.py:590 #: awx/main/models/credential/__init__.py:590
#: awx/main/models/credential/__init__.py:672 #: awx/main/models/credential/__init__.py:672
@@ -6236,5 +6236,4 @@ msgstr "%s 현재 업그레이드 중입니다."
#: awx/ui/urls.py:24 #: awx/ui/urls.py:24
msgid "This page will refresh when complete." msgid "This page will refresh when complete."
msgstr "완료되면 이 페이지가 새로 고침됩니다." msgstr "완료되면 이 페이지가 새로 고침됩니다."

View File

@@ -6237,5 +6237,4 @@ msgstr "Er wordt momenteel een upgrade van%s geïnstalleerd."
#: awx/ui/urls.py:24 #: awx/ui/urls.py:24
msgid "This page will refresh when complete." msgid "This page will refresh when complete."
msgstr "Deze pagina wordt vernieuwd als hij klaar is." msgstr "Deze pagina wordt vernieuwd als hij klaar is."

View File

@@ -348,7 +348,7 @@ msgstr "SCM track_submodules 只能用于 git 项目。"
msgid "" msgid ""
"Only Container Registry credentials can be associated with an Execution " "Only Container Registry credentials can be associated with an Execution "
"Environment" "Environment"
msgstr "只有容器注册表凭证可以与执行环境关联" msgstr "只有容器 registry 凭证可以与执行环境关联"
#: awx/api/serializers.py:1440 #: awx/api/serializers.py:1440
msgid "Cannot change the organization of an execution environment" msgid "Cannot change the organization of an execution environment"
@@ -629,7 +629,7 @@ msgstr "不支持在不替换的情况下在启动时删除 {} 凭证。提供
#: awx/api/serializers.py:4338 #: awx/api/serializers.py:4338
msgid "The inventory associated with this Workflow is being deleted." msgid "The inventory associated with this Workflow is being deleted."
msgstr "与此工作流关联的清单将被删除。" msgstr "与此 Workflow 关联的清单将被删除。"
#: awx/api/serializers.py:4405 #: awx/api/serializers.py:4405
msgid "Message type '{}' invalid, must be either 'message' or 'body'" msgid "Message type '{}' invalid, must be either 'message' or 'body'"
@@ -3229,7 +3229,7 @@ msgstr "云"
#: awx/main/models/credential/__init__.py:336 #: awx/main/models/credential/__init__.py:336
#: awx/main/models/credential/__init__.py:1113 #: awx/main/models/credential/__init__.py:1113
msgid "Container Registry" msgid "Container Registry"
msgstr "容器注册表" msgstr "容器 Registry"
#: awx/main/models/credential/__init__.py:337 #: awx/main/models/credential/__init__.py:337
msgid "Personal Access Token" msgid "Personal Access Token"
@@ -3560,7 +3560,7 @@ msgstr "身份验证 URL"
#: awx/main/models/credential/__init__.py:1120 #: awx/main/models/credential/__init__.py:1120
msgid "Authentication endpoint for the container registry." msgid "Authentication endpoint for the container registry."
msgstr "容器注册表的身份验证端点。" msgstr "容器 registry 的身份验证端点。"
#: awx/main/models/credential/__init__.py:1130 #: awx/main/models/credential/__init__.py:1130
msgid "Password or Token" msgid "Password or Token"
@@ -3764,7 +3764,7 @@ msgstr "镜像位置"
msgid "" msgid ""
"The full image location, including the container registry, image name, and " "The full image location, including the container registry, image name, and "
"version tag." "version tag."
msgstr "完整镜像位置,包括容器注册表、镜像名称和版本标签。" msgstr "完整镜像位置,包括容器 registry、镜像名称和版本标签。"
#: awx/main/models/execution_environments.py:51 #: awx/main/models/execution_environments.py:51
msgid "Pull image before running?" msgid "Pull image before running?"
@@ -6238,5 +6238,4 @@ msgstr "%s 当前正在升级。"
#: awx/ui/urls.py:24 #: awx/ui/urls.py:24
msgid "This page will refresh when complete." msgid "This page will refresh when complete."
msgstr "完成后,此页面会刷新。" msgstr "完成后,此页面会刷新。"

View File

@@ -12,7 +12,7 @@ from django.conf import settings
from django.db.models import Q, Prefetch from django.db.models import Q, Prefetch
from django.contrib.auth.models import User from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist from django.core.exceptions import ObjectDoesNotExist
# Django REST Framework # Django REST Framework
from rest_framework.exceptions import ParseError, PermissionDenied from rest_framework.exceptions import ParseError, PermissionDenied
@@ -281,23 +281,13 @@ class BaseAccess(object):
""" """
return True return True
def assure_relationship_exists(self, obj, relationship):
if '.' in relationship:
return # not attempting validation for complex relationships now
try:
obj._meta.get_field(relationship)
except FieldDoesNotExist:
raise NotImplementedError(f'The relationship {relationship} does not exist for model {type(obj)}')
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
self.assure_relationship_exists(obj, relationship)
if skip_sub_obj_read_check: if skip_sub_obj_read_check:
return self.can_change(obj, None) return self.can_change(obj, None)
else: else:
return bool(self.can_change(obj, None) and self.user.can_access(type(sub_obj), 'read', sub_obj)) return bool(self.can_change(obj, None) and self.user.can_access(type(sub_obj), 'read', sub_obj))
def can_unattach(self, obj, sub_obj, relationship, data=None): def can_unattach(self, obj, sub_obj, relationship, data=None):
self.assure_relationship_exists(obj, relationship)
return self.can_change(obj, data) return self.can_change(obj, data)
def check_related(self, field, Model, data, role_field='admin_role', obj=None, mandatory=False): def check_related(self, field, Model, data, role_field='admin_role', obj=None, mandatory=False):
@@ -338,8 +328,6 @@ class BaseAccess(object):
role = getattr(resource, role_field, None) role = getattr(resource, role_field, None)
if role is None: if role is None:
# Handle special case where resource does not have direct roles # Handle special case where resource does not have direct roles
if role_field == 'read_role':
return self.user.can_access(type(resource), 'read', resource)
access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field] access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
return self.user.can_access(type(resource), access_method_type, resource, None) return self.user.can_access(type(resource), access_method_type, resource, None)
return self.user in role return self.user in role
@@ -511,21 +499,6 @@ class BaseAccess(object):
return False return False
class UnifiedCredentialsMixin(BaseAccess):
"""
The credentials many-to-many is a standard relationship for JT, jobs, and others
Permission to attach is always use permission, and permission to unattach is admin to the parent object
"""
@check_superuser
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship == 'credentials':
if not isinstance(sub_obj, Credential):
raise RuntimeError(f'Can only attach credentials to credentials relationship, got {type(sub_obj)}')
return self.can_change(obj, None) and (self.user in sub_obj.use_role)
return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
class NotificationAttachMixin(BaseAccess): class NotificationAttachMixin(BaseAccess):
"""For models that can have notifications attached """For models that can have notifications attached
@@ -579,8 +552,7 @@ class InstanceAccess(BaseAccess):
return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, relationship, data=data) return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, relationship, data=data)
def can_add(self, data): def can_add(self, data):
return False
return self.user.is_superuser
def can_change(self, obj, data): def can_change(self, obj, data):
return False return False
@@ -993,6 +965,9 @@ class HostAccess(BaseAccess):
if data and 'name' in data: if data and 'name' in data:
self.check_license(add_host_name=data['name']) self.check_license(add_host_name=data['name'])
# Check the per-org limit
self.check_org_host_limit({'inventory': obj.inventory}, add_host_name=data['name'])
# Checks for admin or change permission on inventory, controls whether # Checks for admin or change permission on inventory, controls whether
# the user can edit variable data. # the user can edit variable data.
return obj and self.user in obj.inventory.admin_role return obj and self.user in obj.inventory.admin_role
@@ -1056,7 +1031,7 @@ class GroupAccess(BaseAccess):
return bool(obj and self.user in obj.inventory.admin_role) return bool(obj and self.user in obj.inventory.admin_role)
class InventorySourceAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess): class InventorySourceAccess(NotificationAttachMixin, BaseAccess):
""" """
I can see inventory sources whenever I can see their inventory. I can see inventory sources whenever I can see their inventory.
I can change inventory sources whenever I can change their inventory. I can change inventory sources whenever I can change their inventory.
@@ -1100,6 +1075,18 @@ class InventorySourceAccess(NotificationAttachMixin, UnifiedCredentialsMixin, Ba
return self.user in obj.inventory.update_role return self.user in obj.inventory.update_role
return False return False
@check_superuser
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return obj and obj.inventory and self.user in obj.inventory.admin_role and self.user in sub_obj.use_role
return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
@check_superuser
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return obj and obj.inventory and self.user in obj.inventory.admin_role
return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
class InventoryUpdateAccess(BaseAccess): class InventoryUpdateAccess(BaseAccess):
""" """
@@ -1498,7 +1485,7 @@ class ProjectUpdateAccess(BaseAccess):
return obj and self.user in obj.project.admin_role return obj and self.user in obj.project.admin_role
class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess): class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
""" """
I can see job templates when: I can see job templates when:
- I have read role for the job template. - I have read role for the job template.
@@ -1562,7 +1549,8 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
if self.user not in inventory.use_role: if self.user not in inventory.use_role:
return False return False
if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'): ee = get_value(ExecutionEnvironment, 'execution_environment')
if ee and not self.user.can_access(ExecutionEnvironment, 'read', ee):
return False return False
project = get_value(Project, 'project') project = get_value(Project, 'project')
@@ -1612,8 +1600,10 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
if self.changes_are_non_sensitive(obj, data): if self.changes_are_non_sensitive(obj, data):
return True return True
if not self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'): if data.get('execution_environment'):
return False ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
return False
for required_field, cls in (('inventory', Inventory), ('project', Project)): for required_field, cls in (('inventory', Inventory), ('project', Project)):
is_mandatory = True is_mandatory = True
@@ -1677,13 +1667,17 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
if not obj.organization: if not obj.organization:
return False return False
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return self.user in obj.admin_role and self.user in sub_obj.use_role
return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
@check_superuser @check_superuser
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs): def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
if relationship == "instance_groups": if relationship == "instance_groups":
return self.can_attach(obj, sub_obj, relationship, *args, **kwargs) return self.can_attach(obj, sub_obj, relationship, *args, **kwargs)
return super(JobTemplateAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs) if relationship == 'credentials' and isinstance(sub_obj, Credential):
return self.user in obj.admin_role
return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
class JobAccess(BaseAccess): class JobAccess(BaseAccess):
@@ -1830,7 +1824,7 @@ class SystemJobAccess(BaseAccess):
return False # no relaunching of system jobs return False # no relaunching of system jobs
class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess): class JobLaunchConfigAccess(BaseAccess):
""" """
Launch configs must have permissions checked for Launch configs must have permissions checked for
- relaunching - relaunching
@@ -1838,69 +1832,63 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
In order to create a new object with a copy of this launch config, I need: In order to create a new object with a copy of this launch config, I need:
- use access to related inventory (if present) - use access to related inventory (if present)
- read access to Execution Environment (if present), unless the specified ee is already in the template
- use role to many-related credentials (if any present) - use role to many-related credentials (if any present)
- read access to many-related labels (if any present), unless the specified label is already in the template
- read access to many-related instance groups (if any present), unless the specified instance group is already in the template
""" """
model = JobLaunchConfig model = JobLaunchConfig
select_related = 'job' select_related = 'job'
prefetch_related = ('credentials', 'inventory') prefetch_related = ('credentials', 'inventory')
M2M_CHECKS = {'credentials': Credential, 'labels': Label, 'instance_groups': InstanceGroup} def _unusable_creds_exist(self, qs):
return qs.exclude(pk__in=Credential._accessible_pk_qs(Credential, self.user, 'use_role')).exists()
def _related_filtered_queryset(self, cls): def has_credentials_access(self, obj):
if cls is Label: # user has access if no related credentials exist that the user lacks use role for
return LabelAccess(self.user).filtered_queryset() return not self._unusable_creds_exist(obj.credentials)
elif cls is InstanceGroup:
return InstanceGroupAccess(self.user).filtered_queryset()
else:
return cls._accessible_pk_qs(cls, self.user, 'use_role')
def has_obj_m2m_access(self, obj):
for relationship, cls in self.M2M_CHECKS.items():
if getattr(obj, relationship).exclude(pk__in=self._related_filtered_queryset(cls)).exists():
return False
return True
@check_superuser @check_superuser
def can_add(self, data, template=None): def can_add(self, data, template=None):
# This is a special case, we don't check related many-to-many elsewhere # This is a special case, we don't check related many-to-many elsewhere
# launch RBAC checks use this # launch RBAC checks use this
if 'reference_obj' in data: if 'credentials' in data and data['credentials'] or 'reference_obj' in data:
if not self.has_obj_m2m_access(data['reference_obj']): if 'reference_obj' in data:
prompted_cred_qs = data['reference_obj'].credentials.all()
else:
# If given model objects, only use the primary key from them
cred_pks = [cred.pk for cred in data['credentials']]
if template:
for cred in template.credentials.all():
if cred.pk in cred_pks:
cred_pks.remove(cred.pk)
prompted_cred_qs = Credential.objects.filter(pk__in=cred_pks)
if self._unusable_creds_exist(prompted_cred_qs):
return False return False
else: return self.check_related('inventory', Inventory, data, role_field='use_role')
for relationship, cls in self.M2M_CHECKS.items():
if relationship in data and data[relationship]:
# If given model objects, only use the primary key from them
sub_obj_pks = [sub_obj.pk for sub_obj in data[relationship]]
if template:
for sub_obj in getattr(template, relationship).all():
if sub_obj.pk in sub_obj_pks:
sub_obj_pks.remove(sub_obj.pk)
if cls.objects.filter(pk__in=sub_obj_pks).exclude(pk__in=self._related_filtered_queryset(cls)).exists():
return False
return self.check_related('inventory', Inventory, data, role_field='use_role') and self.check_related(
'execution_environment', ExecutionEnvironment, data, role_field='read_role'
)
@check_superuser @check_superuser
def can_use(self, obj): def can_use(self, obj):
return ( return self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True) and self.has_credentials_access(obj)
self.has_obj_m2m_access(obj)
and self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True)
and self.check_related('execution_environment', ExecutionEnvironment, {}, obj=obj, role_field='read_role')
)
def can_change(self, obj, data): def can_change(self, obj, data):
return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role') and self.check_related( return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role')
'execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'
) def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if isinstance(sub_obj, Credential) and relationship == 'credentials':
return self.user in sub_obj.use_role
else:
raise NotImplementedError('Only credentials can be attached to launch configurations.')
def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if isinstance(sub_obj, Credential) and relationship == 'credentials':
if skip_sub_obj_read_check:
return True
else:
return self.user in sub_obj.read_role
else:
raise NotImplementedError('Only credentials can be attached to launch configurations.')
class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess): class WorkflowJobTemplateNodeAccess(BaseAccess):
""" """
I can see/use a WorkflowJobTemplateNode if I have read permission I can see/use a WorkflowJobTemplateNode if I have read permission
to associated Workflow Job Template to associated Workflow Job Template
@@ -1923,7 +1911,7 @@ class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
""" """
model = WorkflowJobTemplateNode model = WorkflowJobTemplateNode
prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'workflow_job_template') prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'credentials', 'workflow_job_template')
def filtered_queryset(self): def filtered_queryset(self):
return self.model.objects.filter(workflow_job_template__in=WorkflowJobTemplate.accessible_objects(self.user, 'read_role')) return self.model.objects.filter(workflow_job_template__in=WorkflowJobTemplate.accessible_objects(self.user, 'read_role'))
@@ -1935,8 +1923,7 @@ class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
return ( return (
self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True) self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True)
and self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role') and self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role')
and self.check_related('inventory', Inventory, data, role_field='use_role') and JobLaunchConfigAccess(self.user).can_add(data)
and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
) )
def wfjt_admin(self, obj): def wfjt_admin(self, obj):
@@ -1945,14 +1932,17 @@ class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
else: else:
return self.user in obj.workflow_job_template.admin_role return self.user in obj.workflow_job_template.admin_role
def ujt_execute(self, obj, data=None): def ujt_execute(self, obj):
if not obj.unified_job_template: if not obj.unified_job_template:
return True return True
return self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, role_field='execute_role', mandatory=True) return self.check_related('unified_job_template', UnifiedJobTemplate, {}, obj=obj, role_field='execute_role', mandatory=True)
def can_change(self, obj, data): def can_change(self, obj, data):
if not data:
return True
# should not be able to edit the prompts if lacking access to UJT or WFJT # should not be able to edit the prompts if lacking access to UJT or WFJT
return self.ujt_execute(obj, data=data) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data) return self.ujt_execute(obj) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data)
def can_delete(self, obj): def can_delete(self, obj):
return self.wfjt_admin(obj) return self.wfjt_admin(obj)
@@ -1965,14 +1955,29 @@ class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
return True return True
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'): if not self.wfjt_admin(obj):
return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj) return False
return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) if relationship == 'credentials':
# Need permission to related template to attach a credential
if not self.ujt_execute(obj):
return False
return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return self.check_same_WFJT(obj, sub_obj)
else:
raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
def can_unattach(self, obj, sub_obj, relationship, data=None): def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'): if not self.wfjt_admin(obj):
return self.wfjt_admin(obj) return False
return super().can_unattach(obj, sub_obj, relationship, data=None) if relationship == 'credentials':
if not self.ujt_execute(obj):
return False
return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return self.check_same_WFJT(obj, sub_obj)
else:
raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
class WorkflowJobNodeAccess(BaseAccess): class WorkflowJobNodeAccess(BaseAccess):
@@ -2047,10 +2052,13 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
if not data: # So the browseable API will work if not data: # So the browseable API will work
return Organization.accessible_objects(self.user, 'workflow_admin_role').exists() return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()
return bool( if data.get('execution_environment'):
self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
and self.check_related('inventory', Inventory, data, role_field='use_role') if not self.user.can_access(ExecutionEnvironment, 'read', ee):
and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role') return False
return self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and self.check_related(
'inventory', Inventory, data, role_field='use_role'
) )
def can_copy(self, obj): def can_copy(self, obj):
@@ -2096,10 +2104,14 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
if self.user.is_superuser: if self.user.is_superuser:
return True return True
if data and data.get('execution_environment'):
ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
return False
return ( return (
self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj) self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj)
and self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj) and self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj)
and self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role')
and self.user in obj.admin_role and self.user in obj.admin_role
) )
@@ -2506,7 +2518,7 @@ class UnifiedJobAccess(BaseAccess):
return super(UnifiedJobAccess, self).get_queryset().filter(workflowapproval__isnull=True) return super(UnifiedJobAccess, self).get_queryset().filter(workflowapproval__isnull=True)
class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess): class ScheduleAccess(BaseAccess):
""" """
I can see a schedule if I can see it's related unified job, I can create them or update them if I have write access I can see a schedule if I can see it's related unified job, I can create them or update them if I have write access
""" """
@@ -2547,6 +2559,12 @@ class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
def can_delete(self, obj): def can_delete(self, obj):
return self.can_change(obj, {}) return self.can_change(obj, {})
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
class NotificationTemplateAccess(BaseAccess): class NotificationTemplateAccess(BaseAccess):
""" """
@@ -2697,66 +2715,46 @@ class ActivityStreamAccess(BaseAccess):
# 'job_template', 'job', 'project', 'project_update', 'workflow_job', # 'job_template', 'job', 'project', 'project_update', 'workflow_job',
# 'inventory_source', 'workflow_job_template' # 'inventory_source', 'workflow_job_template'
q = Q(user=self.user) inventory_set = Inventory.accessible_objects(self.user, 'read_role')
inventory_set = Inventory.accessible_pk_qs(self.user, 'read_role') credential_set = Credential.accessible_objects(self.user, 'read_role')
if inventory_set:
q |= (
Q(ad_hoc_command__inventory__in=inventory_set)
| Q(inventory__in=inventory_set)
| Q(host__inventory__in=inventory_set)
| Q(group__inventory__in=inventory_set)
| Q(inventory_source__inventory__in=inventory_set)
| Q(inventory_update__inventory_source__inventory__in=inventory_set)
)
credential_set = Credential.accessible_pk_qs(self.user, 'read_role')
if credential_set:
q |= Q(credential__in=credential_set)
auditing_orgs = ( auditing_orgs = (
(Organization.accessible_objects(self.user, 'admin_role') | Organization.accessible_objects(self.user, 'auditor_role')) (Organization.accessible_objects(self.user, 'admin_role') | Organization.accessible_objects(self.user, 'auditor_role'))
.distinct() .distinct()
.values_list('id', flat=True) .values_list('id', flat=True)
) )
if auditing_orgs: project_set = Project.accessible_objects(self.user, 'read_role')
q |= ( jt_set = JobTemplate.accessible_objects(self.user, 'read_role')
Q(user__in=auditing_orgs.values('member_role__members')) team_set = Team.accessible_objects(self.user, 'read_role')
| Q(organization__in=auditing_orgs) wfjt_set = WorkflowJobTemplate.accessible_objects(self.user, 'read_role')
| Q(notification_template__organization__in=auditing_orgs)
| Q(notification__notification_template__organization__in=auditing_orgs)
| Q(label__organization__in=auditing_orgs)
| Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
)
project_set = Project.accessible_pk_qs(self.user, 'read_role')
if project_set:
q |= Q(project__in=project_set) | Q(project_update__project__in=project_set)
jt_set = JobTemplate.accessible_pk_qs(self.user, 'read_role')
if jt_set:
q |= Q(job_template__in=jt_set) | Q(job__job_template__in=jt_set)
wfjt_set = WorkflowJobTemplate.accessible_pk_qs(self.user, 'read_role')
if wfjt_set:
q |= (
Q(workflow_job_template__in=wfjt_set)
| Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)
| Q(workflow_job__workflow_job_template__in=wfjt_set)
)
team_set = Team.accessible_pk_qs(self.user, 'read_role')
if team_set:
q |= Q(team__in=team_set)
app_set = OAuth2ApplicationAccess(self.user).filtered_queryset() app_set = OAuth2ApplicationAccess(self.user).filtered_queryset()
if app_set:
q |= Q(o_auth2_application__in=app_set)
token_set = OAuth2TokenAccess(self.user).filtered_queryset() token_set = OAuth2TokenAccess(self.user).filtered_queryset()
if token_set:
q |= Q(o_auth2_access_token__in=token_set)
return qs.filter(q).distinct() return qs.filter(
Q(ad_hoc_command__inventory__in=inventory_set)
| Q(o_auth2_application__in=app_set)
| Q(o_auth2_access_token__in=token_set)
| Q(user__in=auditing_orgs.values('member_role__members'))
| Q(user=self.user)
| Q(organization__in=auditing_orgs)
| Q(inventory__in=inventory_set)
| Q(host__inventory__in=inventory_set)
| Q(group__inventory__in=inventory_set)
| Q(inventory_source__inventory__in=inventory_set)
| Q(inventory_update__inventory_source__inventory__in=inventory_set)
| Q(credential__in=credential_set)
| Q(team__in=team_set)
| Q(project__in=project_set)
| Q(project_update__project__in=project_set)
| Q(job_template__in=jt_set)
| Q(job__job_template__in=jt_set)
| Q(workflow_job_template__in=wfjt_set)
| Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)
| Q(workflow_job__workflow_job_template__in=wfjt_set)
| Q(notification_template__organization__in=auditing_orgs)
| Q(notification__notification_template__organization__in=auditing_orgs)
| Q(label__organization__in=auditing_orgs)
| Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
).distinct()
def can_add(self, data): def can_add(self, data):
return False return False

View File

@@ -1,8 +1,8 @@
import datetime import datetime
import asyncio import asyncio
import logging import logging
import aioredis
import redis import redis
import redis.asyncio
import re import re
from prometheus_client import ( from prometheus_client import (
@@ -82,7 +82,7 @@ class BroadcastWebsocketStatsManager:
async def run_loop(self): async def run_loop(self):
try: try:
redis_conn = await redis.asyncio.Redis.from_url(settings.BROKER_URL) redis_conn = await aioredis.create_redis_pool(settings.BROKER_URL)
while True: while True:
stats_data_str = ''.join(stat.serialize() for stat in self._stats.values()) stats_data_str = ''.join(stat.serialize() for stat in self._stats.values())
await redis_conn.set(self._redis_key, stats_data_str) await redis_conn.set(self._redis_key, stats_data_str)
@@ -122,8 +122,8 @@ class BroadcastWebsocketStats:
'Number of messages received, to be forwarded, by the broadcast websocket system', 'Number of messages received, to be forwarded, by the broadcast websocket system',
registry=self._registry, registry=self._registry,
) )
self._messages_received_current_conn = Gauge( self._messages_received = Gauge(
f'awx_{self.remote_name}_messages_received_currrent_conn', f'awx_{self.remote_name}_messages_received',
'Number forwarded messages received by the broadcast websocket system, for the duration of the current connection', 'Number forwarded messages received by the broadcast websocket system, for the duration of the current connection',
registry=self._registry, registry=self._registry,
) )
@@ -144,13 +144,13 @@ class BroadcastWebsocketStats:
def record_message_received(self): def record_message_received(self):
self._internal_messages_received_per_minute.record() self._internal_messages_received_per_minute.record()
self._messages_received_current_conn.inc() self._messages_received.inc()
self._messages_received_total.inc() self._messages_received_total.inc()
def record_connection_established(self): def record_connection_established(self):
self._connection.state('connected') self._connection.state('connected')
self._connection_start.set_to_current_time() self._connection_start.set_to_current_time()
self._messages_received_current_conn.set(0) self._messages_received.set(0)
def record_connection_lost(self): def record_connection_lost(self):
self._connection.state('disconnected') self._connection.state('disconnected')

View File

@@ -16,7 +16,6 @@ from awx.conf.license import get_license
from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
from awx.main import models from awx.main import models
from awx.main.analytics import register from awx.main.analytics import register
from awx.main.scheduler.task_manager_models import TaskManagerModels
""" """
This module is used to define metrics collected by awx.main.analytics.gather() This module is used to define metrics collected by awx.main.analytics.gather()
@@ -236,25 +235,25 @@ def projects_by_scm_type(since, **kwargs):
@register('instance_info', '1.2', description=_('Cluster topology and capacity')) @register('instance_info', '1.2', description=_('Cluster topology and capacity'))
def instance_info(since, include_hostnames=False, **kwargs): def instance_info(since, include_hostnames=False, **kwargs):
info = {} info = {}
# Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance instances = models.Instance.objects.values_list('hostname').values(
tm_models = TaskManagerModels.init_with_consumed_capacity(instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled']) 'uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'hostname', 'enabled'
for tm_instance in tm_models.instances.instances_by_hostname.values(): )
instance = tm_instance.obj for instance in instances:
consumed_capacity = sum(x.task_impact for x in models.UnifiedJob.objects.filter(execution_node=instance['hostname'], status__in=('running', 'waiting')))
instance_info = { instance_info = {
'uuid': instance.uuid, 'uuid': instance['uuid'],
'version': instance.version, 'version': instance['version'],
'capacity': instance.capacity, 'capacity': instance['capacity'],
'cpu': instance.cpu, 'cpu': instance['cpu'],
'memory': instance.memory, 'memory': instance['memory'],
'managed_by_policy': instance.managed_by_policy, 'managed_by_policy': instance['managed_by_policy'],
'enabled': instance.enabled, 'enabled': instance['enabled'],
'consumed_capacity': tm_instance.consumed_capacity, 'consumed_capacity': consumed_capacity,
'remaining_capacity': instance.capacity - tm_instance.consumed_capacity, 'remaining_capacity': instance['capacity'] - consumed_capacity,
'node_type': instance.node_type,
} }
if include_hostnames is True: if include_hostnames is True:
instance_info['hostname'] = instance.hostname instance_info['hostname'] = instance['hostname']
info[instance.uuid] = instance_info info[instance['uuid']] = instance_info
return info return info

View File

@@ -3,7 +3,6 @@ from prometheus_client import CollectorRegistry, Gauge, Info, generate_latest
from awx.conf.license import get_license from awx.conf.license import get_license
from awx.main.utils import get_awx_version from awx.main.utils import get_awx_version
from awx.main.models import UnifiedJob
from awx.main.analytics.collectors import ( from awx.main.analytics.collectors import (
counts, counts,
instance_info, instance_info,
@@ -57,7 +56,6 @@ def metrics():
[ [
'hostname', 'hostname',
'instance_uuid', 'instance_uuid',
'node_type',
], ],
registry=REGISTRY, registry=REGISTRY,
) )
@@ -85,7 +83,6 @@ def metrics():
[ [
'hostname', 'hostname',
'instance_uuid', 'instance_uuid',
'node_type',
], ],
registry=REGISTRY, registry=REGISTRY,
) )
@@ -113,7 +110,6 @@ def metrics():
[ [
'hostname', 'hostname',
'instance_uuid', 'instance_uuid',
'node_type',
], ],
registry=REGISTRY, registry=REGISTRY,
) )
@@ -123,7 +119,6 @@ def metrics():
[ [
'hostname', 'hostname',
'instance_uuid', 'instance_uuid',
'node_type',
], ],
registry=REGISTRY, registry=REGISTRY,
) )
@@ -174,9 +169,8 @@ def metrics():
all_job_data = job_counts(None) all_job_data = job_counts(None)
statuses = all_job_data.get('status', {}) statuses = all_job_data.get('status', {})
states = set(dict(UnifiedJob.STATUS_CHOICES).keys()) - set(['new']) for status, value in statuses.items():
for state in states: STATUS.labels(status=status).set(value)
STATUS.labels(status=state).set(statuses.get(state, 0))
RUNNING_JOBS.set(current_counts['running_jobs']) RUNNING_JOBS.set(current_counts['running_jobs'])
PENDING_JOBS.set(current_counts['pending_jobs']) PENDING_JOBS.set(current_counts['pending_jobs'])
@@ -184,13 +178,12 @@ def metrics():
instance_data = instance_info(None, include_hostnames=True) instance_data = instance_info(None, include_hostnames=True)
for uuid, info in instance_data.items(): for uuid, info in instance_data.items():
hostname = info['hostname'] hostname = info['hostname']
node_type = info['node_type'] INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['capacity'])
INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['capacity'])
INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['cpu']) INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['cpu'])
INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['memory']) INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['memory'])
INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['consumed_capacity']) INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['consumed_capacity'])
INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['remaining_capacity']) INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['remaining_capacity'])
INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).info( INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid).info(
{ {
'enabled': str(instance_data[uuid]['enabled']), 'enabled': str(instance_data[uuid]['enabled']),
'managed_by_policy': str(instance_data[uuid]['managed_by_policy']), 'managed_by_policy': str(instance_data[uuid]['managed_by_policy']),

View File

@@ -5,9 +5,7 @@ import logging
from django.conf import settings from django.conf import settings
from django.apps import apps from django.apps import apps
from awx.main.consumers import emit_channel_notification from awx.main.consumers import emit_channel_notification
from awx.main.utils import is_testing
root_key = 'awx_metrics' root_key = 'awx_metrics'
logger = logging.getLogger('awx.main.analytics') logger = logging.getLogger('awx.main.analytics')
@@ -165,10 +163,10 @@ class Metrics:
Instance = apps.get_model('main', 'Instance') Instance = apps.get_model('main', 'Instance')
if instance_name: if instance_name:
self.instance_name = instance_name self.instance_name = instance_name
elif is_testing(): elif settings.IS_TESTING():
self.instance_name = "awx_testing" self.instance_name = "awx_testing"
else: else:
self.instance_name = Instance.objects.my_hostname() self.instance_name = Instance.objects.me().hostname
# metric name, help_text # metric name, help_text
METRICSLIST = [ METRICSLIST = [
@@ -186,29 +184,19 @@ class Metrics:
FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'), FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'), IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'), FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'), SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading all tasks from db'),
SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'), SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'), SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'), SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
SetFloatM('task_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
SetFloatM('task_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow jobs'),
SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'), SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'), IntM('task_manager_schedule_calls', 'Number of calls to task manager schedule'),
SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'), SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetIntM('task_manager_tasks_started', 'Number of tasks started'), SetIntM('task_manager_tasks_started', 'Number of tasks started'),
SetIntM('task_manager_running_processed', 'Number of running tasks processed'), SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'), SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'), SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
] ]
# turn metric list into dictionary with the metric name as a key # turn metric list into dictionary with the metric name as a key
self.METRICS = {} self.METRICS = {}
@@ -315,12 +303,7 @@ class Metrics:
self.previous_send_metrics.set(current_time) self.previous_send_metrics.set(current_time)
self.previous_send_metrics.store_value(self.conn) self.previous_send_metrics.store_value(self.conn)
finally: finally:
try: lock.release()
lock.release()
except Exception as exc:
# After system failures, we might throw redis.exceptions.LockNotOwnedError
# this is to avoid print a Traceback, and importantly, avoid raising an exception into parent context
logger.warning(f'Error releasing subsystem metrics redis lock, error: {str(exc)}')
def load_other_metrics(self, request): def load_other_metrics(self, request):
# data received from other nodes are stored in their own keys # data received from other nodes are stored in their own keys

View File

@@ -446,7 +446,7 @@ register(
label=_('Default Job Idle Timeout'), label=_('Default Job Idle Timeout'),
help_text=_( help_text=_(
'If no output is detected from ansible in this number of seconds the execution will be terminated. ' 'If no output is detected from ansible in this number of seconds the execution will be terminated. '
'Use value of 0 to indicate that no idle timeout should be imposed.' 'Use value of 0 to used default idle_timeout is 600s.'
), ),
category=_('Jobs'), category=_('Jobs'),
category_slug='jobs', category_slug='jobs',
@@ -569,7 +569,7 @@ register(
register( register(
'LOG_AGGREGATOR_LOGGERS', 'LOG_AGGREGATOR_LOGGERS',
field_class=fields.StringListField, field_class=fields.StringListField,
default=['awx', 'activity_stream', 'job_events', 'system_tracking', 'broadcast_websocket'], default=['awx', 'activity_stream', 'job_events', 'system_tracking'],
label=_('Loggers Sending Data to Log Aggregator Form'), label=_('Loggers Sending Data to Log Aggregator Form'),
help_text=_( help_text=_(
'List of loggers that will send HTTP logs to the collector, these can ' 'List of loggers that will send HTTP logs to the collector, these can '
@@ -577,8 +577,7 @@ register(
'awx - service logs\n' 'awx - service logs\n'
'activity_stream - activity stream records\n' 'activity_stream - activity stream records\n'
'job_events - callback data from Ansible job events\n' 'job_events - callback data from Ansible job events\n'
'system_tracking - facts gathered from scan jobs\n' 'system_tracking - facts gathered from scan jobs.'
'broadcast_websocket - errors pertaining to websockets broadcast metrics\n'
), ),
category=_('Logging'), category=_('Logging'),
category_slug='logging', category_slug='logging',

View File

@@ -9,16 +9,10 @@ aim_inputs = {
'fields': [ 'fields': [
{ {
'id': 'url', 'id': 'url',
'label': _('CyberArk CCP URL'), 'label': _('CyberArk AIM URL'),
'type': 'string', 'type': 'string',
'format': 'url', 'format': 'url',
}, },
{
'id': 'webservice_id',
'label': _('Web Service ID'),
'type': 'string',
'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
},
{ {
'id': 'app_id', 'id': 'app_id',
'label': _('Application ID'), 'label': _('Application ID'),
@@ -70,13 +64,10 @@ def aim_backend(**kwargs):
client_cert = kwargs.get('client_cert', None) client_cert = kwargs.get('client_cert', None)
client_key = kwargs.get('client_key', None) client_key = kwargs.get('client_key', None)
verify = kwargs['verify'] verify = kwargs['verify']
webservice_id = kwargs['webservice_id']
app_id = kwargs['app_id'] app_id = kwargs['app_id']
object_query = kwargs['object_query'] object_query = kwargs['object_query']
object_query_format = kwargs['object_query_format'] object_query_format = kwargs['object_query_format']
reason = kwargs.get('reason', None) reason = kwargs.get('reason', None)
if webservice_id == '':
webservice_id = 'AIMWebService'
query_params = { query_params = {
'AppId': app_id, 'AppId': app_id,
@@ -87,7 +78,7 @@ def aim_backend(**kwargs):
query_params['reason'] = reason query_params['reason'] = reason
request_qs = '?' + urlencode(query_params, quote_via=quote) request_qs = '?' + urlencode(query_params, quote_via=quote)
request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts'])) request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts']))
with CertFiles(client_cert, client_key) as cert: with CertFiles(client_cert, client_key) as cert:
res = requests.get( res = requests.get(
@@ -101,4 +92,4 @@ def aim_backend(**kwargs):
return res.json()['Content'] return res.json()['Content']
aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend) aim_plugin = CredentialPlugin('CyberArk AIM Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)

View File

@@ -1,5 +1,6 @@
from .plugin import CredentialPlugin, CertFiles, raise_for_status from .plugin import CredentialPlugin, CertFiles, raise_for_status
import base64
from urllib.parse import urljoin, quote from urllib.parse import urljoin, quote
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
@@ -60,7 +61,7 @@ def conjur_backend(**kwargs):
cacert = kwargs.get('cacert', None) cacert = kwargs.get('cacert', None)
auth_kwargs = { auth_kwargs = {
'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'}, 'headers': {'Content-Type': 'text/plain'},
'data': api_key, 'data': api_key,
'allow_redirects': False, 'allow_redirects': False,
} }
@@ -68,9 +69,9 @@ def conjur_backend(**kwargs):
with CertFiles(cacert) as cert: with CertFiles(cacert) as cert:
# https://www.conjur.org/api.html#authentication-authenticate-post # https://www.conjur.org/api.html#authentication-authenticate-post
auth_kwargs['verify'] = cert auth_kwargs['verify'] = cert
resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs) resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
raise_for_status(resp) raise_for_status(resp)
token = resp.content.decode('utf-8') token = base64.b64encode(resp.content).decode('utf-8')
lookup_kwargs = { lookup_kwargs = {
'headers': {'Authorization': 'Token token="{}"'.format(token)}, 'headers': {'Authorization': 'Token token="{}"'.format(token)},
@@ -78,10 +79,9 @@ def conjur_backend(**kwargs):
} }
# https://www.conjur.org/api.html#secrets-retrieve-a-secret-get # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path])) path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
if version: if version:
ver = "version={}".format(version) path = '?'.join([path, version])
path = '?'.join([path, ver])
with CertFiles(cacert) as cert: with CertFiles(cacert) as cert:
lookup_kwargs['verify'] = cert lookup_kwargs['verify'] = cert
@@ -90,4 +90,4 @@ def conjur_backend(**kwargs):
return resp.text return resp.text
conjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend) conjur_plugin = CredentialPlugin('CyberArk Conjur Secret Lookup', inputs=conjur_inputs, backend=conjur_backend)

View File

@@ -4,7 +4,6 @@ import select
from contextlib import contextmanager from contextlib import contextmanager
from django.conf import settings from django.conf import settings
from django.db import connection as pg_connection
NOT_READY = ([], [], []) NOT_READY = ([], [], [])
@@ -16,6 +15,7 @@ def get_local_queuename():
class PubSub(object): class PubSub(object):
def __init__(self, conn): def __init__(self, conn):
assert conn.autocommit, "Connection must be in autocommit mode."
self.conn = conn self.conn = conn
def listen(self, channel): def listen(self, channel):
@@ -31,9 +31,6 @@ class PubSub(object):
cur.execute('SELECT pg_notify(%s, %s);', (channel, payload)) cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))
def events(self, select_timeout=5, yield_timeouts=False): def events(self, select_timeout=5, yield_timeouts=False):
if not self.conn.autocommit:
raise RuntimeError('Listening for events can only be done in autocommit mode')
while True: while True:
if select.select([self.conn], [], [], select_timeout) == NOT_READY: if select.select([self.conn], [], [], select_timeout) == NOT_READY:
if yield_timeouts: if yield_timeouts:
@@ -48,32 +45,11 @@ class PubSub(object):
@contextmanager @contextmanager
def pg_bus_conn(new_connection=False): def pg_bus_conn():
''' conf = settings.DATABASES['default']
Any listeners probably want to establish a new database connection, conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {}))
separate from the Django connection used for queries, because that will prevent # Django connection.cursor().connection doesn't have autocommit=True on
losing connection to the channel whenever a .close() happens. conn.set_session(autocommit=True)
Any publishers probably want to use the existing connection
so that messages follow postgres transaction rules
https://www.postgresql.org/docs/current/sql-notify.html
'''
if new_connection:
conf = settings.DATABASES['default']
conn = psycopg2.connect(
dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {})
)
# Django connection.cursor().connection doesn't have autocommit=True on by default
conn.set_session(autocommit=True)
else:
if pg_connection.connection is None:
pg_connection.connect()
if pg_connection.connection is None:
raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions')
conn = pg_connection.connection
pubsub = PubSub(conn) pubsub = PubSub(conn)
yield pubsub yield pubsub
if new_connection: conn.close()
conn.close()

View File

@@ -3,7 +3,6 @@ import uuid
import json import json
from django.conf import settings from django.conf import settings
from django.db import connection
import redis import redis
from awx.main.dispatch import get_local_queuename from awx.main.dispatch import get_local_queuename
@@ -38,27 +37,18 @@ class Control(object):
def running(self, *args, **kwargs): def running(self, *args, **kwargs):
return self.control_with_reply('running', *args, **kwargs) return self.control_with_reply('running', *args, **kwargs)
def cancel(self, task_ids, *args, **kwargs):
return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
@classmethod @classmethod
def generate_reply_queue_name(cls): def generate_reply_queue_name(cls):
return f"reply_to_{str(uuid.uuid4()).replace('-','_')}" return f"reply_to_{str(uuid.uuid4()).replace('-','_')}"
def control_with_reply(self, command, timeout=5, extra_data=None): def control_with_reply(self, command, timeout=5):
logger.warning('checking {} {} for {}'.format(self.service, command, self.queuename)) logger.warning('checking {} {} for {}'.format(self.service, command, self.queuename))
reply_queue = Control.generate_reply_queue_name() reply_queue = Control.generate_reply_queue_name()
self.result = None self.result = None
if not connection.get_autocommit():
raise RuntimeError('Control-with-reply messages can only be done in autocommit mode')
with pg_bus_conn() as conn: with pg_bus_conn() as conn:
conn.listen(reply_queue) conn.listen(reply_queue)
send_data = {'control': command, 'reply_to': reply_queue} conn.notify(self.queuename, json.dumps({'control': command, 'reply_to': reply_queue}))
if extra_data:
send_data.update(extra_data)
conn.notify(self.queuename, json.dumps(send_data))
for reply in conn.events(select_timeout=timeout, yield_timeouts=True): for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
if reply is None: if reply is None:

View File

@@ -16,14 +16,13 @@ from queue import Full as QueueFull, Empty as QueueEmpty
from django.conf import settings from django.conf import settings
from django.db import connection as django_connection, connections from django.db import connection as django_connection, connections
from django.core.cache import cache as django_cache from django.core.cache import cache as django_cache
from django.utils.timezone import now as tz_now
from django_guid import set_guid from django_guid import set_guid
from jinja2 import Template from jinja2 import Template
import psutil import psutil
from awx.main.models import UnifiedJob from awx.main.models import UnifiedJob
from awx.main.dispatch import reaper from awx.main.dispatch import reaper
from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity, log_excess_runtime from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity
if 'run_callback_receiver' in sys.argv: if 'run_callback_receiver' in sys.argv:
logger = logging.getLogger('awx.main.commands.run_callback_receiver') logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -329,16 +328,12 @@ class AutoscalePool(WorkerPool):
# Get same number as max forks based on memory, this function takes memory as bytes # Get same number as max forks based on memory, this function takes memory as bytes
self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30) self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)
# add magic prime number of extra workers to ensure
# we have a few extra workers to run the heartbeat
self.max_workers += 7
# max workers can't be less than min_workers # max workers can't be less than min_workers
self.max_workers = max(self.min_workers, self.max_workers) self.max_workers = max(self.min_workers, self.max_workers)
# the task manager enforces settings.TASK_MANAGER_TIMEOUT on its own def debug(self, *args, **kwargs):
# but if the task takes longer than the time defined here, we will force it to stop here self.cleanup()
self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT + settings.TASK_MANAGER_TIMEOUT_GRACE_PERIOD return super(AutoscalePool, self).debug(*args, **kwargs)
@property @property
def should_grow(self): def should_grow(self):
@@ -356,7 +351,6 @@ class AutoscalePool(WorkerPool):
def debug_meta(self): def debug_meta(self):
return 'min={} max={}'.format(self.min_workers, self.max_workers) return 'min={} max={}'.format(self.min_workers, self.max_workers)
@log_excess_runtime(logger)
def cleanup(self): def cleanup(self):
""" """
Perform some internal account and cleanup. This is run on Perform some internal account and cleanup. This is run on
@@ -365,6 +359,8 @@ class AutoscalePool(WorkerPool):
1. Discover worker processes that exited, and recover messages they 1. Discover worker processes that exited, and recover messages they
were handling. were handling.
2. Clean up unnecessary, idle workers. 2. Clean up unnecessary, idle workers.
3. Check to see if the database says this node is running any tasks
that aren't actually running. If so, reap them.
IMPORTANT: this function is one of the few places in the dispatcher IMPORTANT: this function is one of the few places in the dispatcher
(aside from setting lookups) where we talk to the database. As such, (aside from setting lookups) where we talk to the database. As such,
@@ -387,8 +383,6 @@ class AutoscalePool(WorkerPool):
reaper.reap_job(j, 'failed') reaper.reap_job(j, 'failed')
except Exception: except Exception:
logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid'])) logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
else:
logger.warning(f'Worker was told to quit but has not, pid={w.pid}')
orphaned.extend(w.orphaned_tasks) orphaned.extend(w.orphaned_tasks)
self.workers.remove(w) self.workers.remove(w)
elif w.idle and len(self.workers) > self.min_workers: elif w.idle and len(self.workers) > self.min_workers:
@@ -407,15 +401,13 @@ class AutoscalePool(WorkerPool):
# the task manager to never do more work # the task manager to never do more work
current_task = w.current_task current_task = w.current_task
if current_task and isinstance(current_task, dict): if current_task and isinstance(current_task, dict):
endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager'] if current_task.get('task', '').endswith('tasks.run_task_manager'):
current_task_name = current_task.get('task', '')
if any(current_task_name.endswith(e) for e in endings):
if 'started' not in current_task: if 'started' not in current_task:
w.managed_tasks[current_task['uuid']]['started'] = time.time() w.managed_tasks[current_task['uuid']]['started'] = time.time()
age = time.time() - current_task['started'] age = time.time() - current_task['started']
w.managed_tasks[current_task['uuid']]['age'] = age w.managed_tasks[current_task['uuid']]['age'] = age
if age > self.task_manager_timeout: if age > (60 * 5):
logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}') logger.error(f'run_task_manager has held the advisory lock for >5m, sending SIGTERM to {w.pid}') # noqa
os.kill(w.pid, signal.SIGTERM) os.kill(w.pid, signal.SIGTERM)
for m in orphaned: for m in orphaned:
@@ -425,17 +417,13 @@ class AutoscalePool(WorkerPool):
idx = random.choice(range(len(self.workers))) idx = random.choice(range(len(self.workers)))
self.write(idx, m) self.write(idx, m)
def add_bind_kwargs(self, body): # if the database says a job is running on this node, but it's *not*,
bind_kwargs = body.pop('bind_kwargs', []) # then reap it
body.setdefault('kwargs', {}) running_uuids = []
if 'dispatch_time' in bind_kwargs: for worker in self.workers:
body['kwargs']['dispatch_time'] = tz_now().isoformat() worker.calculate_managed_tasks()
if 'worker_tasks' in bind_kwargs: running_uuids.extend(list(worker.managed_tasks.keys()))
worker_tasks = {} reaper.reap(excluded_uuids=running_uuids)
for worker in self.workers:
worker.calculate_managed_tasks()
worker_tasks[worker.pid] = list(worker.managed_tasks.keys())
body['kwargs']['worker_tasks'] = worker_tasks
def up(self): def up(self):
if self.full: if self.full:
@@ -450,8 +438,9 @@ class AutoscalePool(WorkerPool):
if 'guid' in body: if 'guid' in body:
set_guid(body['guid']) set_guid(body['guid'])
try: try:
if isinstance(body, dict) and body.get('bind_kwargs'): # when the cluster heartbeat occurs, clean up internally
self.add_bind_kwargs(body) if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
self.cleanup()
if self.should_grow: if self.should_grow:
self.up() self.up()
# we don't care about "preferred queue" round robin distribution, just # we don't care about "preferred queue" round robin distribution, just
@@ -463,10 +452,6 @@ class AutoscalePool(WorkerPool):
w.put(body) w.put(body)
break break
else: else:
task_name = 'unknown'
if isinstance(body, dict):
task_name = body.get('task')
logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
return super(AutoscalePool, self).write(preferred_queue, body) return super(AutoscalePool, self).write(preferred_queue, body)
except Exception: except Exception:
for conn in connections.all(): for conn in connections.all():

View File

@@ -1,13 +1,13 @@
import inspect import inspect
import logging import logging
import sys
import json import json
import time
from uuid import uuid4 from uuid import uuid4
from django.conf import settings
from django_guid import get_guid from django_guid import get_guid
from . import pg_bus_conn from . import pg_bus_conn
from awx.main.utils import is_testing
logger = logging.getLogger('awx.main.dispatch') logger = logging.getLogger('awx.main.dispatch')
@@ -49,21 +49,13 @@ class task:
@task(queue='tower_broadcast') @task(queue='tower_broadcast')
def announce(): def announce():
print("Run this everywhere!") print("Run this everywhere!")
# The special parameter bind_kwargs tells the main dispatcher process to add certain kwargs
@task(bind_kwargs=['dispatch_time'])
def print_time(dispatch_time=None):
print(f"Time I was dispatched: {dispatch_time}")
""" """
def __init__(self, queue=None, bind_kwargs=None): def __init__(self, queue=None):
self.queue = queue self.queue = queue
self.bind_kwargs = bind_kwargs
def __call__(self, fn=None): def __call__(self, fn=None):
queue = self.queue queue = self.queue
bind_kwargs = self.bind_kwargs
class PublisherMixin(object): class PublisherMixin(object):
@@ -83,16 +75,14 @@ class task:
msg = f'{cls.name}: Queue value required and may not be None' msg = f'{cls.name}: Queue value required and may not be None'
logger.error(msg) logger.error(msg)
raise ValueError(msg) raise ValueError(msg)
obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()} obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name}
guid = get_guid() guid = get_guid()
if guid: if guid:
obj['guid'] = guid obj['guid'] = guid
if bind_kwargs:
obj['bind_kwargs'] = bind_kwargs
obj.update(**kw) obj.update(**kw)
if callable(queue): if callable(queue):
queue = queue() queue = queue()
if not is_testing(): if not settings.IS_TESTING(sys.argv):
with pg_bus_conn() as conn: with pg_bus_conn() as conn:
conn.notify(queue, json.dumps(obj)) conn.notify(queue, json.dumps(obj))
return (obj, queue) return (obj, queue)

View File

@@ -2,7 +2,6 @@ from datetime import timedelta
import logging import logging
from django.db.models import Q from django.db.models import Q
from django.conf import settings
from django.utils.timezone import now as tz_now from django.utils.timezone import now as tz_now
from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.models import ContentType
@@ -16,73 +15,58 @@ def startup_reaping():
If this particular instance is starting, then we know that any running jobs are invalid If this particular instance is starting, then we know that any running jobs are invalid
so we will reap those jobs as a special action here so we will reap those jobs as a special action here
""" """
jobs = UnifiedJob.objects.filter(status='running', controller_node=Instance.objects.my_hostname()) me = Instance.objects.me()
jobs = UnifiedJob.objects.filter(status='running', controller_node=me.hostname)
job_ids = [] job_ids = []
for j in jobs: for j in jobs:
job_ids.append(j.id) job_ids.append(j.id)
reap_job( j.status = 'failed'
j, j.start_args = ''
'failed', j.job_explanation += 'Task was marked as running at system start up. The system must have not shut down properly, so it has been marked as failed.'
job_explanation='Task was marked as running at system start up. The system must have not shut down properly, so it has been marked as failed.', j.save(update_fields=['status', 'start_args', 'job_explanation'])
) if hasattr(j, 'send_notification_templates'):
j.send_notification_templates('failed')
j.websocket_emit_status('failed')
if job_ids: if job_ids:
logger.error(f'Unified jobs {job_ids} were reaped on dispatch startup') logger.error(f'Unified jobs {job_ids} were reaped on dispatch startup')
def reap_job(j, status, job_explanation=None): def reap_job(j, status):
j.refresh_from_db(fields=['status', 'job_explanation']) if UnifiedJob.objects.get(id=j.id).status not in ('running', 'waiting'):
status_before = j.status
if status_before not in ('running', 'waiting'):
# just in case, don't reap jobs that aren't running # just in case, don't reap jobs that aren't running
return return
j.status = status j.status = status
j.start_args = '' # blank field to remove encrypted passwords j.start_args = '' # blank field to remove encrypted passwords
if j.job_explanation: j.job_explanation += ' '.join(
j.job_explanation += ' ' # Separate messages for readability (
if job_explanation is None: 'Task was marked as running but was not present in',
j.job_explanation += 'Task was marked as running but was not present in the job queue, so it has been marked as failed.' 'the job queue, so it has been marked as failed.',
else: )
j.job_explanation += job_explanation )
j.save(update_fields=['status', 'start_args', 'job_explanation']) j.save(update_fields=['status', 'start_args', 'job_explanation'])
if hasattr(j, 'send_notification_templates'): if hasattr(j, 'send_notification_templates'):
j.send_notification_templates('failed') j.send_notification_templates('failed')
j.websocket_emit_status(status) j.websocket_emit_status(status)
logger.error(f'{j.log_format} is no longer {status_before}; reaping') logger.error('{} is no longer running; reaping'.format(j.log_format))
def reap_waiting(instance=None, status='failed', job_explanation=None, grace_period=None, excluded_uuids=None, ref_time=None): def reap(instance=None, status='failed', excluded_uuids=[]):
""" """
Reap all jobs in waiting for this instance. Reap all jobs in waiting|running for this instance.
""" """
if grace_period is None: me = instance
grace_period = settings.JOB_WAITING_GRACE_PERIOD + settings.TASK_MANAGER_TIMEOUT if me is None:
try:
if instance is None: me = Instance.objects.me()
hostname = Instance.objects.my_hostname() except RuntimeError as e:
else: logger.warning(f'Local instance is not registered, not running reaper: {e}')
hostname = instance.hostname return
if ref_time is None: now = tz_now()
ref_time = tz_now()
jobs = UnifiedJob.objects.filter(status='waiting', modified__lte=ref_time - timedelta(seconds=grace_period), controller_node=hostname)
if excluded_uuids:
jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
for j in jobs:
reap_job(j, status, job_explanation=job_explanation)
def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None):
"""
Reap all jobs in running for this instance.
"""
if instance is None:
hostname = Instance.objects.my_hostname()
else:
hostname = instance.hostname
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter( jobs = UnifiedJob.objects.filter(
Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id) (Q(status='running') | Q(status='waiting', modified__lte=now - timedelta(seconds=60)))
) & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname))
if excluded_uuids: & ~Q(polymorphic_ctype_id=workflow_ctype_id)
jobs = jobs.exclude(celery_task_id__in=excluded_uuids) ).exclude(celery_task_id__in=excluded_uuids)
for j in jobs: for j in jobs:
reap_job(j, status, job_explanation=job_explanation) reap_job(j, status)

View File

@@ -17,7 +17,6 @@ from django.conf import settings
from awx.main.dispatch.pool import WorkerPool from awx.main.dispatch.pool import WorkerPool
from awx.main.dispatch import pg_bus_conn from awx.main.dispatch import pg_bus_conn
from awx.main.utils.common import log_excess_runtime
if 'run_callback_receiver' in sys.argv: if 'run_callback_receiver' in sys.argv:
logger = logging.getLogger('awx.main.commands.run_callback_receiver') logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -63,7 +62,7 @@ class AWXConsumerBase(object):
def control(self, body): def control(self, body):
logger.warning(f'Received control signal:\n{body}') logger.warning(f'Received control signal:\n{body}')
control = body.get('control') control = body.get('control')
if control in ('status', 'running', 'cancel'): if control in ('status', 'running'):
reply_queue = body['reply_to'] reply_queue = body['reply_to']
if control == 'status': if control == 'status':
msg = '\n'.join([self.listening_on, self.pool.debug()]) msg = '\n'.join([self.listening_on, self.pool.debug()])
@@ -72,17 +71,6 @@ class AWXConsumerBase(object):
for worker in self.pool.workers: for worker in self.pool.workers:
worker.calculate_managed_tasks() worker.calculate_managed_tasks()
msg.extend(worker.managed_tasks.keys()) msg.extend(worker.managed_tasks.keys())
elif control == 'cancel':
msg = []
task_ids = set(body['task_ids'])
for worker in self.pool.workers:
task = worker.current_task
if task and task['uuid'] in task_ids:
logger.warn(f'Sending SIGTERM to task id={task["uuid"]}, task={task.get("task")}, args={task.get("args")}')
os.kill(worker.pid, signal.SIGTERM)
msg.append(task['uuid'])
if task_ids and not msg:
logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')
with pg_bus_conn() as conn: with pg_bus_conn() as conn:
conn.notify(reply_queue, json.dumps(msg)) conn.notify(reply_queue, json.dumps(msg))
@@ -93,9 +81,6 @@ class AWXConsumerBase(object):
logger.error('unrecognized control message: {}'.format(control)) logger.error('unrecognized control message: {}'.format(control))
def process_task(self, body): def process_task(self, body):
if isinstance(body, dict):
body['time_ack'] = time.time()
if 'control' in body: if 'control' in body:
try: try:
return self.control(body) return self.control(body)
@@ -114,8 +99,8 @@ class AWXConsumerBase(object):
queue = 0 queue = 0
self.pool.write(queue, body) self.pool.write(queue, body)
self.total_messages += 1 self.total_messages += 1
self.record_statistics()
@log_excess_runtime(logger)
def record_statistics(self): def record_statistics(self):
if time.time() - self.last_stats > 1: # buffer stat recording to once per second if time.time() - self.last_stats > 1: # buffer stat recording to once per second
try: try:
@@ -155,16 +140,6 @@ class AWXConsumerPG(AWXConsumerBase):
# if no successful loops have ran since startup, then we should fail right away # if no successful loops have ran since startup, then we should fail right away
self.pg_is_down = True # set so that we fail if we get database errors on startup self.pg_is_down = True # set so that we fail if we get database errors on startup
self.pg_down_time = time.time() - self.pg_max_wait # allow no grace period self.pg_down_time = time.time() - self.pg_max_wait # allow no grace period
self.last_cleanup = time.time()
def run_periodic_tasks(self):
self.record_statistics() # maintains time buffer in method
if time.time() - self.last_cleanup > 60: # same as cluster_node_heartbeat
# NOTE: if we run out of database connections, it is important to still run cleanup
# so that we scale down workers and free up connections
self.pool.cleanup()
self.last_cleanup = time.time()
def run(self, *args, **kwargs): def run(self, *args, **kwargs):
super(AWXConsumerPG, self).run(*args, **kwargs) super(AWXConsumerPG, self).run(*args, **kwargs)
@@ -174,16 +149,14 @@ class AWXConsumerPG(AWXConsumerBase):
while True: while True:
try: try:
with pg_bus_conn(new_connection=True) as conn: with pg_bus_conn() as conn:
for queue in self.queues: for queue in self.queues:
conn.listen(queue) conn.listen(queue)
if init is False: if init is False:
self.worker.on_start() self.worker.on_start()
init = True init = True
for e in conn.events(yield_timeouts=True): for e in conn.events():
if e is not None: self.process_task(json.loads(e.payload))
self.process_task(json.loads(e.payload))
self.run_periodic_tasks()
self.pg_is_down = False self.pg_is_down = False
if self.should_stop: if self.should_stop:
return return
@@ -240,8 +213,6 @@ class BaseWorker(object):
# so we can establish a new connection # so we can establish a new connection
conn.close_if_unusable_or_obsolete() conn.close_if_unusable_or_obsolete()
self.perform_work(body, *args) self.perform_work(body, *args)
except Exception:
logger.exception(f'Unhandled exception in perform_work in worker pid={os.getpid()}')
finally: finally:
if 'uuid' in body: if 'uuid' in body:
uuid = body['uuid'] uuid = body['uuid']

View File

@@ -167,27 +167,17 @@ class CallbackBrokerWorker(BaseWorker):
try: try:
cls.objects.bulk_create(events) cls.objects.bulk_create(events)
metrics_bulk_events_saved += len(events) metrics_bulk_events_saved += len(events)
except Exception as exc: except Exception:
logger.warning(f'Error in events bulk_create, will try indiviually up to 5 errors, error {str(exc)}')
# if an exception occurs, we should re-attempt to save the # if an exception occurs, we should re-attempt to save the
# events one-by-one, because something in the list is # events one-by-one, because something in the list is
# broken/stale # broken/stale
consecutive_errors = 0
events_saved = 0
metrics_events_batch_save_errors += 1 metrics_events_batch_save_errors += 1
for e in events: for e in events:
try: try:
e.save() e.save()
events_saved += 1 metrics_singular_events_saved += 1
consecutive_errors = 0 except Exception:
except Exception as exc_indv: logger.exception('Database Error Saving Job Event')
consecutive_errors += 1
logger.info(f'Database Error Saving individual Job Event, error {str(exc_indv)}')
if consecutive_errors >= 5:
raise
metrics_singular_events_saved += events_saved
if events_saved == 0:
raise
metrics_duration_to_save = time.perf_counter() - metrics_duration_to_save metrics_duration_to_save = time.perf_counter() - metrics_duration_to_save
for e in events: for e in events:
if not getattr(e, '_skip_websocket_message', False): if not getattr(e, '_skip_websocket_message', False):
@@ -267,18 +257,17 @@ class CallbackBrokerWorker(BaseWorker):
try: try:
self.flush(force=flush) self.flush(force=flush)
break break
except (OperationalError, InterfaceError, InternalError) as exc: except (OperationalError, InterfaceError, InternalError):
if retries >= self.MAX_RETRIES: if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.') logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
return return
delay = 60 * retries delay = 60 * retries
logger.warning(f'Database Error Flushing Job Events, retry #{retries + 1} in {delay} seconds: {str(exc)}') logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(i=retries + 1, delay=delay))
django_connection.close() django_connection.close()
time.sleep(delay) time.sleep(delay)
retries += 1 retries += 1
except DatabaseError: except DatabaseError:
logger.exception('Database Error Flushing Job Events') logger.exception('Database Error Saving Job Event')
django_connection.close()
break break
except Exception as exc: except Exception as exc:
tb = traceback.format_exc() tb = traceback.format_exc()

View File

@@ -3,7 +3,6 @@ import logging
import importlib import importlib
import sys import sys
import traceback import traceback
import time
from kubernetes.config import kube_config from kubernetes.config import kube_config
@@ -61,19 +60,8 @@ class TaskWorker(BaseWorker):
# the callable is a class, e.g., RunJob; instantiate and # the callable is a class, e.g., RunJob; instantiate and
# return its `run()` method # return its `run()` method
_call = _call().run _call = _call().run
log_extra = ''
logger_method = logger.debug
if ('time_ack' in body) and ('time_pub' in body):
time_publish = body['time_ack'] - body['time_pub']
time_waiting = time.time() - body['time_ack']
if time_waiting > 5.0 or time_publish > 5.0:
# If task too a very long time to process, add this information to the log
log_extra = f' took {time_publish:.4f} to ack, {time_waiting:.4f} in local dispatcher'
logger_method = logger.info
# don't print kwargs, they often contain launch-time secrets # don't print kwargs, they often contain launch-time secrets
logger_method(f'task {uuid} starting {task}(*{args}){log_extra}') logger.debug('task {} starting {}(*{})'.format(uuid, task, args))
return _call(*args, **kwargs) return _call(*args, **kwargs)
def perform_work(self, body): def perform_work(self, body):

View File

@@ -25,7 +25,7 @@ class Command(BaseCommand):
with connection.cursor() as cursor: with connection.cursor() as cursor:
cursor.execute( cursor.execute(
f''' f'''
SELECT SELECT
b.id, b.job_id, b.host_name, b.created - a.created delta, b.id, b.job_id, b.host_name, b.created - a.created delta,
b.task task, b.task task,
b.event_data::json->'task_action' task_action, b.event_data::json->'task_action' task_action,

View File

@@ -862,7 +862,7 @@ class Command(BaseCommand):
overwrite_vars=bool(options.get('overwrite_vars', False)), overwrite_vars=bool(options.get('overwrite_vars', False)),
) )
inventory_update = inventory_source.create_inventory_update( inventory_update = inventory_source.create_inventory_update(
_eager_fields=dict(status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd()) _eager_fields=dict(job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
) )
data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load() data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()

View File

@@ -54,7 +54,7 @@ class Command(BaseCommand):
capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else '' capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
version = f" version={x.version or '?'}" if x.node_type != 'hop' else '' version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else '' heartbeat = f' heartbeat="{x.modified:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m') print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')
print() print()

View File

@@ -38,14 +38,7 @@ class Command(BaseCommand):
(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
RegisterQueue( RegisterQueue(
settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE
100,
0,
[],
is_container_group=True,
pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE,
max_forks=settings.DEFAULT_EXECUTION_QUEUE_MAX_FORKS,
max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
).register() ).register()
else: else:
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)

View File

@@ -32,14 +32,8 @@ class Command(BaseCommand):
def handle(self, **options): def handle(self, **options):
self.old_key = settings.SECRET_KEY self.old_key = settings.SECRET_KEY
custom_key = os.environ.get("TOWER_SECRET_KEY") custom_key = os.environ.get("TOWER_SECRET_KEY")
if options.get("use_custom_key"): if options.get("use_custom_key") and custom_key:
if custom_key: self.new_key = custom_key
self.new_key = custom_key
else:
print("Use custom key was specified but the env var TOWER_SECRET_KEY was not available")
import sys
sys.exit(1)
else: else:
self.new_key = base64.encodebytes(os.urandom(33)).decode().rstrip() self.new_key = base64.encodebytes(os.urandom(33)).decode().rstrip()
self._notification_templates() self._notification_templates()

View File

@@ -27,9 +27,7 @@ class Command(BaseCommand):
) )
def handle(self, **options): def handle(self, **options):
# provides a mapping of hostname to Instance objects
nodes = Instance.objects.in_bulk(field_name='hostname') nodes = Instance.objects.in_bulk(field_name='hostname')
if options['source'] not in nodes: if options['source'] not in nodes:
raise CommandError(f"Host {options['source']} is not a registered instance.") raise CommandError(f"Host {options['source']} is not a registered instance.")
if not (options['peers'] or options['disconnect'] or options['exact'] is not None): if not (options['peers'] or options['disconnect'] or options['exact'] is not None):
@@ -59,9 +57,7 @@ class Command(BaseCommand):
results = 0 results = 0
for target in options['peers']: for target in options['peers']:
_, created = InstanceLink.objects.update_or_create( _, created = InstanceLink.objects.get_or_create(source=nodes[options['source']], target=nodes[target])
source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
)
if created: if created:
results += 1 results += 1
@@ -84,9 +80,7 @@ class Command(BaseCommand):
links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__hostname', flat=True)) links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__hostname', flat=True))
removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__hostname__in=links - peers).delete() removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__hostname__in=links - peers).delete()
for target in peers - links: for target in peers - links:
_, created = InstanceLink.objects.update_or_create( _, created = InstanceLink.objects.get_or_create(source=nodes[options['source']], target=nodes[target])
source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
)
if created: if created:
additions += 1 additions += 1

View File

@@ -17,9 +17,7 @@ class InstanceNotFound(Exception):
class RegisterQueue: class RegisterQueue:
def __init__( def __init__(self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None):
self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None, max_forks=None, max_concurrent_jobs=None
):
self.instance_not_found_err = None self.instance_not_found_err = None
self.queuename = queuename self.queuename = queuename
self.instance_percent = instance_percent self.instance_percent = instance_percent
@@ -27,8 +25,6 @@ class RegisterQueue:
self.hostname_list = hostname_list self.hostname_list = hostname_list
self.is_container_group = is_container_group self.is_container_group = is_container_group
self.pod_spec_override = pod_spec_override self.pod_spec_override = pod_spec_override
self.max_forks = max_forks
self.max_concurrent_jobs = max_concurrent_jobs
def get_create_update_instance_group(self): def get_create_update_instance_group(self):
created = False created = False
@@ -49,14 +45,6 @@ class RegisterQueue:
ig.pod_spec_override = self.pod_spec_override ig.pod_spec_override = self.pod_spec_override
changed = True changed = True
if self.max_forks and (ig.max_forks != self.max_forks):
ig.max_forks = self.max_forks
changed = True
if self.max_concurrent_jobs and (ig.max_concurrent_jobs != self.max_concurrent_jobs):
ig.max_concurrent_jobs = self.max_concurrent_jobs
changed = True
if changed: if changed:
ig.save() ig.save()

View File

@@ -1,14 +1,13 @@
# Copyright (c) 2015 Ansible, Inc. # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved. # All Rights Reserved.
import logging import logging
import yaml
from django.conf import settings from django.conf import settings
from django.core.cache import cache as django_cache from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand from django.core.management.base import BaseCommand
from django.db import connection as django_connection from django.db import connection as django_connection
from awx.main.dispatch import get_local_queuename from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch.control import Control from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
@@ -31,16 +30,7 @@ class Command(BaseCommand):
'--reload', '--reload',
dest='reload', dest='reload',
action='store_true', action='store_true',
help=('cause the dispatcher to recycle all of its worker processes; running jobs will run to completion first'), help=('cause the dispatcher to recycle all of its worker processes;' 'running jobs will run to completion first'),
)
parser.add_argument(
'--cancel',
dest='cancel',
help=(
'Cancel a particular task id. Takes either a single id string, or a JSON list of multiple ids. '
'Can take in output from the --running argument as input to cancel all tasks. '
'Only running tasks can be canceled, queued tasks must be started before they can be canceled.'
),
) )
def handle(self, *arg, **options): def handle(self, *arg, **options):
@@ -52,16 +42,6 @@ class Command(BaseCommand):
return return
if options.get('reload'): if options.get('reload'):
return Control('dispatcher').control({'control': 'reload'}) return Control('dispatcher').control({'control': 'reload'})
if options.get('cancel'):
cancel_str = options.get('cancel')
try:
cancel_data = yaml.safe_load(cancel_str)
except Exception:
cancel_data = [cancel_str]
if not isinstance(cancel_data, list):
cancel_data = [cancel_str]
print(Control('dispatcher').cancel(cancel_data))
return
# It's important to close these because we're _about_ to fork, and we # It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets # don't want the forked processes to inherit the open sockets
@@ -73,6 +53,7 @@ class Command(BaseCommand):
# (like the node heartbeat) # (like the node heartbeat)
periodic.run_continuously() periodic.run_continuously()
reaper.startup_reaping()
consumer = None consumer = None
try: try:

View File

@@ -53,7 +53,7 @@ class Command(BaseCommand):
return lines return lines
@classmethod @classmethod
def get_connection_status(cls, hostnames, data): def get_connection_status(cls, me, hostnames, data):
host_stats = [('hostname', 'state', 'start time', 'duration (sec)')] host_stats = [('hostname', 'state', 'start time', 'duration (sec)')]
for h in hostnames: for h in hostnames:
connection_color = '91' # red connection_color = '91' # red
@@ -78,7 +78,7 @@ class Command(BaseCommand):
return host_stats return host_stats
@classmethod @classmethod
def get_connection_stats(cls, hostnames, data): def get_connection_stats(cls, me, hostnames, data):
host_stats = [('hostname', 'total', 'per minute')] host_stats = [('hostname', 'total', 'per minute')]
for h in hostnames: for h in hostnames:
h_safe = safe_name(h) h_safe = safe_name(h)
@@ -95,13 +95,8 @@ class Command(BaseCommand):
# database migrations are still running # database migrations are still running
from awx.main.models.ha import Instance from awx.main.models.ha import Instance
try: executor = MigrationExecutor(connection)
executor = MigrationExecutor(connection) migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
except Exception as exc:
logger.info(f'Error on startup of run_wsbroadcast (error: {exc}), retry in 10s...')
time.sleep(10)
return
# In containerized deployments, migrations happen in the task container, # In containerized deployments, migrations happen in the task container,
# and the services running there don't start until migrations are # and the services running there don't start until migrations are
@@ -119,8 +114,8 @@ class Command(BaseCommand):
return return
try: try:
my_hostname = Instance.objects.my_hostname() me = Instance.objects.me()
logger.info('Active instance with hostname {} is registered.'.format(my_hostname)) logger.info('Active instance with hostname {} is registered.'.format(me.hostname))
except RuntimeError as e: except RuntimeError as e:
# the CLUSTER_HOST_ID in the task, and web instance must match and # the CLUSTER_HOST_ID in the task, and web instance must match and
# ensure network connectivity between the task and web instance # ensure network connectivity between the task and web instance
@@ -145,19 +140,19 @@ class Command(BaseCommand):
else: else:
data[family.name] = family.samples[0].value data[family.name] = family.samples[0].value
my_hostname = Instance.objects.my_hostname() me = Instance.objects.me()
hostnames = [i.hostname for i in Instance.objects.exclude(hostname=my_hostname)] hostnames = [i.hostname for i in Instance.objects.exclude(hostname=me.hostname)]
host_stats = Command.get_connection_status(hostnames, data) host_stats = Command.get_connection_status(me, hostnames, data)
lines = Command._format_lines(host_stats) lines = Command._format_lines(host_stats)
print(f'Broadcast websocket connection status from "{my_hostname}" to:') print(f'Broadcast websocket connection status from "{me.hostname}" to:')
print('\n'.join(lines)) print('\n'.join(lines))
host_stats = Command.get_connection_stats(hostnames, data) host_stats = Command.get_connection_stats(me, hostnames, data)
lines = Command._format_lines(host_stats) lines = Command._format_lines(host_stats)
print(f'\nBroadcast websocket connection stats from "{my_hostname}" to:') print(f'\nBroadcast websocket connection stats from "{me.hostname}" to:')
print('\n'.join(lines)) print('\n'.join(lines))
return return

View File

@@ -99,12 +99,9 @@ class InstanceManager(models.Manager):
instance or role. instance or role.
""" """
def my_hostname(self):
return settings.CLUSTER_HOST_ID
def me(self): def me(self):
"""Return the currently active instance.""" """Return the currently active instance."""
node = self.filter(hostname=self.my_hostname()) node = self.filter(hostname=settings.CLUSTER_HOST_ID)
if node.exists(): if node.exists():
return node[0] return node[0]
raise RuntimeError("No instance found with the current cluster host id") raise RuntimeError("No instance found with the current cluster host id")
@@ -132,13 +129,10 @@ class InstanceManager(models.Manager):
# if instance was not retrieved by uuid and hostname was, use the hostname # if instance was not retrieved by uuid and hostname was, use the hostname
instance = self.filter(hostname=hostname) instance = self.filter(hostname=hostname)
from awx.main.models import Instance
# Return existing instance # Return existing instance
if instance.exists(): if instance.exists():
instance = instance.first() # in the unusual occasion that there is more than one, only get one instance = instance.first() # in the unusual occasion that there is more than one, only get one
instance.node_state = Instance.States.INSTALLED # Wait for it to show up on the mesh update_fields = []
update_fields = ['node_state']
# if instance was retrieved by uuid and hostname has changed, update hostname # if instance was retrieved by uuid and hostname has changed, update hostname
if instance.hostname != hostname: if instance.hostname != hostname:
logger.warning("passed in hostname {0} is different from the original hostname {1}, updating to {0}".format(hostname, instance.hostname)) logger.warning("passed in hostname {0} is different from the original hostname {1}, updating to {0}".format(hostname, instance.hostname))
@@ -147,7 +141,6 @@ class InstanceManager(models.Manager):
# if any other fields are to be updated # if any other fields are to be updated
if instance.ip_address != ip_address: if instance.ip_address != ip_address:
instance.ip_address = ip_address instance.ip_address = ip_address
update_fields.append('ip_address')
if instance.node_type != node_type: if instance.node_type != node_type:
instance.node_type = node_type instance.node_type = node_type
update_fields.append('node_type') update_fields.append('node_type')
@@ -158,16 +151,12 @@ class InstanceManager(models.Manager):
return (False, instance) return (False, instance)
# Create new instance, and fill in default values # Create new instance, and fill in default values
create_defaults = { create_defaults = dict(capacity=0)
'node_state': Instance.States.INSTALLED,
'capacity': 0,
'listener_port': 27199,
}
if defaults is not None: if defaults is not None:
create_defaults.update(defaults) create_defaults.update(defaults)
uuid_option = {} uuid_option = {}
if uuid is not None: if uuid is not None:
uuid_option = {'uuid': uuid} uuid_option = dict(uuid=uuid)
if node_type == 'execution' and 'version' not in create_defaults: if node_type == 'execution' and 'version' not in create_defaults:
create_defaults['version'] = RECEPTOR_PENDING create_defaults['version'] = RECEPTOR_PENDING
instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option) instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)

View File

@@ -1,14 +1,24 @@
# Generated by Django 3.2.13 on 2022-06-21 21:29 # Generated by Django 3.2.13 on 2022-06-21 21:29
from django.db import migrations from django.db import migrations
import logging
logger = logging.getLogger("awx")
def forwards(apps, schema_editor): def forwards(apps, schema_editor):
InventorySource = apps.get_model('main', 'InventorySource') InventorySource = apps.get_model('main', 'InventorySource')
InventorySource.objects.filter(update_on_project_update=True).update(update_on_launch=True) sources = InventorySource.objects.filter(update_on_project_update=True)
for src in sources:
Project = apps.get_model('main', 'Project') if src.update_on_launch == False:
Project.objects.filter(scm_inventory_sources__update_on_project_update=True).update(scm_update_on_launch=True) src.update_on_launch = True
src.save(update_fields=['update_on_launch'])
logger.info(f"Setting update_on_launch to True for {src}")
proj = src.source_project
if proj and proj.scm_update_on_launch is False:
proj.scm_update_on_launch = True
proj.save(update_fields=['scm_update_on_launch'])
logger.warning(f"Setting scm_update_on_launch to True for {proj}")
class Migration(migrations.Migration): class Migration(migrations.Migration):

View File

@@ -1,35 +0,0 @@
# Generated by Django 3.2.13 on 2022-08-10 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0164_remove_inventorysource_update_on_project_update'),
]
operations = [
migrations.AddField(
model_name='unifiedjob',
name='preferred_instance_groups_cache',
field=models.JSONField(
blank=True, default=None, editable=False, help_text='A cached list with pk values from preferred instance groups.', null=True
),
),
migrations.AddField(
model_name='unifiedjob',
name='task_impact',
field=models.PositiveIntegerField(default=0, editable=False, help_text='Number of forks an instance consumes when running this job.'),
),
migrations.AddField(
model_name='workflowapproval',
name='expires',
field=models.DateTimeField(
default=None,
editable=False,
help_text='The time this approval will expire. This is the created time plus timeout, used for filtering.',
null=True,
),
),
]

View File

@@ -1,40 +0,0 @@
# Generated by Django 3.2.13 on 2022-07-06 13:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0165_task_manager_refactor'),
]
operations = [
migrations.AlterField(
model_name='adhoccommandevent',
name='host',
field=models.ForeignKey(
db_constraint=False,
default=None,
editable=False,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name='ad_hoc_command_events',
to='main.host',
),
),
migrations.AlterField(
model_name='jobevent',
name='host',
field=models.ForeignKey(
db_constraint=False,
default=None,
editable=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name='job_events_as_primary_host',
to='main.host',
),
),
]

View File

@@ -1,57 +0,0 @@
# Generated by Django 3.2.13 on 2022-08-24 14:02
from django.db import migrations, models
import django.db.models.deletion
from awx.main.models import CredentialType
from awx.main.utils.common import set_current_apps
def setup_tower_managed_defaults(apps, schema_editor):
set_current_apps(apps)
CredentialType.setup_tower_managed_defaults(apps)
class Migration(migrations.Migration):
dependencies = [
('main', '0166_alter_jobevent_host'),
]
operations = [
migrations.AddField(
model_name='project',
name='signature_validation_credential',
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name='projects_signature_validation',
to='main.credential',
help_text='An optional credential used for validating files in the project against unexpected changes.',
),
),
migrations.AlterField(
model_name='credentialtype',
name='kind',
field=models.CharField(
choices=[
('ssh', 'Machine'),
('vault', 'Vault'),
('net', 'Network'),
('scm', 'Source Control'),
('cloud', 'Cloud'),
('registry', 'Container Registry'),
('token', 'Personal Access Token'),
('insights', 'Insights'),
('external', 'External'),
('kubernetes', 'Kubernetes'),
('galaxy', 'Galaxy/Automation Hub'),
('cryptography', 'Cryptography'),
],
max_length=32,
),
),
migrations.RunPython(setup_tower_managed_defaults),
]

View File

@@ -1,25 +0,0 @@
# Generated by Django 3.2.13 on 2022-09-08 16:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0167_project_signature_validation_credential'),
]
operations = [
migrations.AddField(
model_name='inventoryupdate',
name='scm_revision',
field=models.CharField(
blank=True,
default='',
editable=False,
help_text='The SCM Revision from the Project used for this inventory update. Only applicable to inventories source from scm',
max_length=1024,
verbose_name='SCM Revision',
),
),
]

View File

@@ -1,225 +0,0 @@
# Generated by Django 3.2.13 on 2022-09-15 14:07
import awx.main.fields
import awx.main.utils.polymorphic
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0168_inventoryupdate_scm_revision'),
]
operations = [
migrations.AddField(
model_name='joblaunchconfig',
name='execution_environment',
field=models.ForeignKey(
blank=True,
default=None,
help_text='The container image to be used for execution.',
null=True,
on_delete=awx.main.utils.polymorphic.SET_NULL,
related_name='joblaunchconfig_as_prompt',
to='main.executionenvironment',
),
),
migrations.AddField(
model_name='joblaunchconfig',
name='labels',
field=models.ManyToManyField(related_name='joblaunchconfig_labels', to='main.Label'),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_execution_environment_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_forks_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_instance_groups_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_job_slice_count_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_labels_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_timeout_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='schedule',
name='execution_environment',
field=models.ForeignKey(
blank=True,
default=None,
help_text='The container image to be used for execution.',
null=True,
on_delete=awx.main.utils.polymorphic.SET_NULL,
related_name='schedule_as_prompt',
to='main.executionenvironment',
),
),
migrations.AddField(
model_name='schedule',
name='labels',
field=models.ManyToManyField(related_name='schedule_labels', to='main.Label'),
),
migrations.AddField(
model_name='workflowjobnode',
name='execution_environment',
field=models.ForeignKey(
blank=True,
default=None,
help_text='The container image to be used for execution.',
null=True,
on_delete=awx.main.utils.polymorphic.SET_NULL,
related_name='workflowjobnode_as_prompt',
to='main.executionenvironment',
),
),
migrations.AddField(
model_name='workflowjobnode',
name='labels',
field=models.ManyToManyField(related_name='workflowjobnode_labels', to='main.Label'),
),
migrations.AddField(
model_name='workflowjobtemplate',
name='ask_labels_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='workflowjobtemplate',
name='ask_skip_tags_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='workflowjobtemplate',
name='ask_tags_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='workflowjobtemplatenode',
name='execution_environment',
field=models.ForeignKey(
blank=True,
default=None,
help_text='The container image to be used for execution.',
null=True,
on_delete=awx.main.utils.polymorphic.SET_NULL,
related_name='workflowjobtemplatenode_as_prompt',
to='main.executionenvironment',
),
),
migrations.AddField(
model_name='workflowjobtemplatenode',
name='labels',
field=models.ManyToManyField(related_name='workflowjobtemplatenode_labels', to='main.Label'),
),
migrations.CreateModel(
name='WorkflowJobTemplateNodeBaseInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('workflowjobtemplatenode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobtemplatenode')),
],
),
migrations.CreateModel(
name='WorkflowJobNodeBaseInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobnode')),
],
),
migrations.CreateModel(
name='WorkflowJobInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjob')),
],
),
migrations.CreateModel(
name='ScheduleInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.schedule')),
],
),
migrations.CreateModel(
name='JobLaunchConfigInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('joblaunchconfig', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.joblaunchconfig')),
],
),
migrations.AddField(
model_name='joblaunchconfig',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True, editable=False, related_name='joblaunchconfigs', through='main.JobLaunchConfigInstanceGroupMembership', to='main.InstanceGroup'
),
),
migrations.AddField(
model_name='schedule',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True, editable=False, related_name='schedule_instance_groups', through='main.ScheduleInstanceGroupMembership', to='main.InstanceGroup'
),
),
migrations.AddField(
model_name='workflowjob',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True,
editable=False,
related_name='workflow_job_instance_groups',
through='main.WorkflowJobInstanceGroupMembership',
to='main.InstanceGroup',
),
),
migrations.AddField(
model_name='workflowjobnode',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True,
editable=False,
related_name='workflow_job_node_instance_groups',
through='main.WorkflowJobNodeBaseInstanceGroupMembership',
to='main.InstanceGroup',
),
),
migrations.AddField(
model_name='workflowjobtemplatenode',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True,
editable=False,
related_name='workflow_job_template_node_instance_groups',
through='main.WorkflowJobTemplateNodeBaseInstanceGroupMembership',
to='main.InstanceGroup',
),
),
]

View File

@@ -1,79 +0,0 @@
# Generated by Django 3.2.13 on 2022-08-02 17:53
import django.core.validators
from django.db import migrations, models
def forwards(apps, schema_editor):
# All existing InstanceLink objects need to be in the state
# 'Established', which is the default, so nothing needs to be done
# for that.
Instance = apps.get_model('main', 'Instance')
for instance in Instance.objects.all():
instance.node_state = 'ready' if not instance.errors else 'unavailable'
instance.save(update_fields=['node_state'])
class Migration(migrations.Migration):
dependencies = [
('main', '0169_jt_prompt_everything_on_launch'),
]
operations = [
migrations.AddField(
model_name='instance',
name='listener_port',
field=models.PositiveIntegerField(
blank=True,
default=27199,
help_text='Port that Receptor will listen for incoming connections on.',
validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(65535)],
),
),
migrations.AddField(
model_name='instance',
name='node_state',
field=models.CharField(
choices=[
('provisioning', 'Provisioning'),
('provision-fail', 'Provisioning Failure'),
('installed', 'Installed'),
('ready', 'Ready'),
('unavailable', 'Unavailable'),
('deprovisioning', 'De-provisioning'),
('deprovision-fail', 'De-provisioning Failure'),
],
default='ready',
help_text='Indicates the current life cycle stage of this instance.',
max_length=16,
),
),
migrations.AddField(
model_name='instancelink',
name='link_state',
field=models.CharField(
choices=[('adding', 'Adding'), ('established', 'Established'), ('removing', 'Removing')],
default='established',
help_text='Indicates the current life cycle stage of this peer link.',
max_length=16,
),
),
migrations.AlterField(
model_name='instance',
name='node_type',
field=models.CharField(
choices=[
('control', 'Control plane node'),
('execution', 'Execution plane node'),
('hybrid', 'Controller and execution'),
('hop', 'Message-passing node, no execution capability'),
],
default='hybrid',
help_text='Role that this node plays in the mesh.',
max_length=16,
),
),
migrations.RunPython(forwards, reverse_code=migrations.RunPython.noop),
]

View File

@@ -1,18 +0,0 @@
# Generated by Django 3.2.13 on 2022-09-26 20:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0170_node_and_link_state'),
]
operations = [
migrations.AddField(
model_name='instance',
name='health_check_started',
field=models.DateTimeField(editable=False, help_text='The last time a health check was initiated on this instance.', null=True),
),
]

View File

@@ -1,29 +0,0 @@
# Generated by Django 3.2.13 on 2022-09-29 18:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0171_add_health_check_started'),
]
operations = [
migrations.AddField(
model_name='inventory',
name='prevent_instance_group_fallback',
field=models.BooleanField(
default=False,
help_text='If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instances groups to run associated job templates on.If this setting is enabled and you provided an empty list, the global instance groups will be applied.',
),
),
migrations.AddField(
model_name='jobtemplate',
name='prevent_instance_group_fallback',
field=models.BooleanField(
default=False,
help_text='If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied.',
),
),
]

View File

@@ -1,23 +0,0 @@
# Generated by Django 3.2.13 on 2022-10-24 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0172_prevent_instance_fallback'),
]
operations = [
migrations.AddField(
model_name='instancegroup',
name='max_concurrent_jobs',
field=models.IntegerField(default=0, help_text='Maximum number of concurrent jobs to run on this group. Zero means no limit.'),
),
migrations.AddField(
model_name='instancegroup',
name='max_forks',
field=models.IntegerField(default=0, help_text='Max forks to execute on this group. Zero means no limit.'),
),
]

View File

@@ -1,18 +0,0 @@
# Generated by Django 3.2.16 on 2022-12-07 21:11
from django.db import migrations
from awx.main.migrations import _rbac as rbac
from awx.main.migrations import _migration_utils as migration_utils
class Migration(migrations.Migration):
dependencies = [
('main', '0173_instancegroup_max_limits'),
]
operations = [
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
migrations.RunPython(rbac.create_roles),
]

View File

@@ -4,7 +4,7 @@ from django.utils.timezone import now
logger = logging.getLogger('awx.main.migrations') logger = logging.getLogger('awx.main.migrations')
__all__ = ['create_clearsessions_jt', 'create_cleartokens_jt'] __all__ = ['create_collection_jt', 'create_clearsessions_jt', 'create_cleartokens_jt']
''' '''
These methods are called by migrations to create various system job templates These methods are called by migrations to create various system job templates
@@ -36,7 +36,7 @@ def create_clearsessions_jt(apps, schema_editor):
if created: if created:
sched = Schedule( sched = Schedule(
name='Cleanup Expired Sessions', name='Cleanup Expired Sessions',
rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1' % schedule_time, rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1' % schedule_time,
description='Cleans out expired browser sessions', description='Cleans out expired browser sessions',
enabled=True, enabled=True,
created=now_dt, created=now_dt,
@@ -69,7 +69,7 @@ def create_cleartokens_jt(apps, schema_editor):
if created: if created:
sched = Schedule( sched = Schedule(
name='Cleanup Expired OAuth 2 Tokens', name='Cleanup Expired OAuth 2 Tokens',
rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1' % schedule_time, rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1' % schedule_time,
description='Removes expired OAuth 2 access and refresh tokens', description='Removes expired OAuth 2 access and refresh tokens',
enabled=True, enabled=True,
created=now_dt, created=now_dt,

View File

@@ -44,7 +44,7 @@ def migrate_galaxy_settings(apps, schema_editor):
credential_type=galaxy_type, credential_type=galaxy_type,
inputs={'url': 'https://galaxy.ansible.com/'}, inputs={'url': 'https://galaxy.ansible.com/'},
) )
except Exception: except:
# Needed for new migrations, tests # Needed for new migrations, tests
public_galaxy_credential = Credential( public_galaxy_credential = Credential(
created=now(), modified=now(), name='Ansible Galaxy', managed=True, credential_type=galaxy_type, inputs={'url': 'https://galaxy.ansible.com/'} created=now(), modified=now(), name='Ansible Galaxy', managed=True, credential_type=galaxy_type, inputs={'url': 'https://galaxy.ansible.com/'}

View File

@@ -90,9 +90,6 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
extra_vars_dict = VarsDictProperty('extra_vars', True) extra_vars_dict = VarsDictProperty('extra_vars', True)
def _set_default_dependencies_processed(self):
self.dependencies_processed = True
def clean_inventory(self): def clean_inventory(self):
inv = self.inventory inv = self.inventory
if not inv: if not inv:
@@ -181,12 +178,12 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
def get_passwords_needed_to_start(self): def get_passwords_needed_to_start(self):
return self.passwords_needed_to_start return self.passwords_needed_to_start
def _get_task_impact(self): @property
def task_impact(self):
# NOTE: We sorta have to assume the host count matches and that forks default to 5 # NOTE: We sorta have to assume the host count matches and that forks default to 5
if self.inventory: from awx.main.models.inventory import Host
count_hosts = self.inventory.total_hosts
else: count_hosts = Host.objects.filter(enabled=True, inventory__ad_hoc_commands__pk=self.pk).count()
count_hosts = 5
return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1 return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1
def copy(self): def copy(self):
@@ -210,32 +207,23 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
def save(self, *args, **kwargs): def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields', []) update_fields = kwargs.get('update_fields', [])
def add_to_update_fields(name):
if name not in update_fields:
update_fields.append(name)
if not self.preferred_instance_groups_cache:
self.preferred_instance_groups_cache = self._get_preferred_instance_group_cache()
add_to_update_fields("preferred_instance_groups_cache")
if not self.name: if not self.name:
self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512) self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)
add_to_update_fields("name") if 'name' not in update_fields:
if self.task_impact == 0: update_fields.append('name')
self.task_impact = self._get_task_impact()
add_to_update_fields("task_impact")
super(AdHocCommand, self).save(*args, **kwargs) super(AdHocCommand, self).save(*args, **kwargs)
@property @property
def preferred_instance_groups(self): def preferred_instance_groups(self):
selected_groups = [] if self.inventory is not None and self.inventory.organization is not None:
organization_groups = [x for x in self.inventory.organization.instance_groups.all()]
else:
organization_groups = []
if self.inventory is not None: if self.inventory is not None:
for instance_group in self.inventory.instance_groups.all(): inventory_groups = [x for x in self.inventory.instance_groups.all()]
selected_groups.append(instance_group) else:
if not self.inventory.prevent_instance_group_fallback and self.inventory.organization is not None: inventory_groups = []
for instance_group in self.inventory.organization.instance_groups.all(): selected_groups = inventory_groups + organization_groups
selected_groups.append(instance_group)
if not selected_groups: if not selected_groups:
return self.global_instance_groups return self.global_instance_groups
return selected_groups return selected_groups

View File

@@ -316,17 +316,16 @@ class PrimordialModel(HasEditsMixin, CreatedModifiedModel):
user = get_current_user() user = get_current_user()
if user and not user.id: if user and not user.id:
user = None user = None
if (not self.pk) and (user is not None) and (not self.created_by): if not self.pk and not self.created_by:
self.created_by = user self.created_by = user
if 'created_by' not in update_fields: if 'created_by' not in update_fields:
update_fields.append('created_by') update_fields.append('created_by')
# Update modified_by if any editable fields have changed # Update modified_by if any editable fields have changed
new_values = self._get_fields_snapshot() new_values = self._get_fields_snapshot()
if (not self.pk and not self.modified_by) or self._values_have_edits(new_values): if (not self.pk and not self.modified_by) or self._values_have_edits(new_values):
if self.modified_by != user: self.modified_by = user
self.modified_by = user if 'modified_by' not in update_fields:
if 'modified_by' not in update_fields: update_fields.append('modified_by')
update_fields.append('modified_by')
super(PrimordialModel, self).save(*args, **kwargs) super(PrimordialModel, self).save(*args, **kwargs)
self._prior_values_store = new_values self._prior_values_store = new_values

View File

@@ -282,7 +282,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
return field['default'] return field['default']
if 'default' in kwargs: if 'default' in kwargs:
return kwargs['default'] return kwargs['default']
raise AttributeError(field_name) raise AttributeError
if field_name in self.inputs: if field_name in self.inputs:
return self.inputs[field_name] return self.inputs[field_name]
if 'default' in kwargs: if 'default' in kwargs:
@@ -336,7 +336,6 @@ class CredentialType(CommonModelNameNotUnique):
('external', _('External')), ('external', _('External')),
('kubernetes', _('Kubernetes')), ('kubernetes', _('Kubernetes')),
('galaxy', _('Galaxy/Automation Hub')), ('galaxy', _('Galaxy/Automation Hub')),
('cryptography', _('Cryptography')),
) )
kind = models.CharField(max_length=32, choices=KIND_CHOICES) kind = models.CharField(max_length=32, choices=KIND_CHOICES)
@@ -1172,25 +1171,6 @@ ManagedCredentialType(
}, },
) )
ManagedCredentialType(
namespace='gpg_public_key',
kind='cryptography',
name=gettext_noop('GPG Public Key'),
inputs={
'fields': [
{
'id': 'gpg_public_key',
'label': gettext_noop('GPG Public Key'),
'type': 'string',
'secret': True,
'multiline': True,
'help_text': gettext_noop('GPG Public Key used to validate content signatures.'),
},
],
'required': ['gpg_public_key'],
},
)
class CredentialInputSource(PrimordialModel): class CredentialInputSource(PrimordialModel):
class Meta: class Meta:

View File

@@ -15,7 +15,6 @@ def aws(cred, env, private_data_dir):
if cred.has_input('security_token'): if cred.has_input('security_token'):
env['AWS_SECURITY_TOKEN'] = cred.get_input('security_token', default='') env['AWS_SECURITY_TOKEN'] = cred.get_input('security_token', default='')
env['AWS_SESSION_TOKEN'] = env['AWS_SECURITY_TOKEN']
def gce(cred, env, private_data_dir): def gce(cred, env, private_data_dir):

Some files were not shown because too many files have changed in this diff Show More