Compare commits

..

1 Commits

Author SHA1 Message Date
Jeff Bradberry
6d0a3149f1 Create and register page types for the new RBAC endpoints 2024-06-14 14:48:05 -04:00
136 changed files with 1449 additions and 2680 deletions

View File

@@ -24,7 +24,7 @@ runs:
- name: Pre-pull latest devel image to warm cache
shell: bash
run: docker pull -q ghcr.io/${OWNER_LC}/awx_devel:${{ github.base_ref }}
run: docker pull ghcr.io/${OWNER_LC}/awx_devel:${{ github.base_ref }}
- name: Build image for current source checkout
shell: bash

View File

@@ -57,6 +57,16 @@ runs:
awx-manage update_password --username=admin --password=password
EOSH
- name: Build UI
# This must be a string comparison in composite actions:
# https://github.com/actions/runner/issues/2238
if: ${{ inputs.build-ui == 'true' }}
shell: bash
run: |
docker exec -i tools_awx_1 sh <<-EOSH
make ui-devel
EOSH
- name: Get instance data
id: data
shell: bash

View File

@@ -1,7 +1,7 @@
## General
- For the roundup of all the different mailing lists available from AWX, Ansible, and beyond visit: https://docs.ansible.com/ansible/latest/community/communication.html
- Hello, we think your question is answered in our FAQ. Does this: https://www.ansible.com/products/awx-project/faq cover your question?
- You can find the latest documentation here: https://ansible.readthedocs.io/projects/awx/en/latest/userguide/index.html
- You can find the latest documentation here: https://docs.ansible.com/automation-controller/latest/html/userguide/index.html

View File

@@ -38,9 +38,7 @@ jobs:
- name: ui-test-general
command: make ui-test-general
steps:
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
- name: Build awx_devel image for running checks
uses: ./.github/actions/awx_devel_image
@@ -54,9 +52,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
- uses: ./.github/actions/run_awx_devel
id: awx
@@ -74,15 +70,13 @@ jobs:
DEBUG_OUTPUT_DIR: /tmp/awx_operator_molecule_test
steps:
- name: Checkout awx
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
show-progress: false
path: awx
- name: Checkout awx-operator
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
show-progress: false\
repository: ansible/awx-operator
path: awx-operator
@@ -136,9 +130,7 @@ jobs:
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
# The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
- name: Upgrade ansible-core
@@ -162,9 +154,7 @@ jobs:
- name: r-z0-9
regex: ^[r-z0-9]
steps:
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
- uses: ./.github/actions/run_awx_devel
id: awx
@@ -210,9 +200,7 @@ jobs:
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
- name: Upgrade ansible-core
run: python3 -m pip install --upgrade ansible-core

View File

@@ -1,57 +0,0 @@
---
name: django-ansible-base requirements update
on:
workflow_dispatch:
schedule:
- cron: '0 6 * * *' # once an day @ 6 AM
permissions:
pull-requests: write
contents: write
jobs:
dab-pin-newest:
if: (github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')) || github.event_name != 'schedule'
runs-on: ubuntu-latest
steps:
- id: dab-release
name: Get current django-ansible-base release version
uses: pozetroninc/github-action-get-latest-release@2a61c339ea7ef0a336d1daa35ef0cb1418e7676c # v0.8.0
with:
owner: ansible
repo: django-ansible-base
excludes: prerelease, draft
- name: Check out respository code
uses: actions/checkout@v4
- id: dab-pinned
name: Get current django-ansible-base pinned version
run:
echo "version=$(requirements/django-ansible-base-pinned-version.sh)" >> "$GITHUB_OUTPUT"
- name: Update django-ansible-base pinned version to upstream release
run:
requirements/django-ansible-base-pinned-version.sh -s ${{ steps.dab-release.outputs.release }}
- name: Create Pull Request
uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6
with:
base: devel
branch: bump-django-ansible-base
title: Bump django-ansible-base to ${{ steps.dab-release.outputs.release }}
body: |
##### SUMMARY
Automated .github/workflows/dab-release.yml
django-ansible-base upstream released version == ${{ steps.dab-release.outputs.release }}
requirements_git.txt django-ansible-base pinned version == ${{ steps.dab-pinned.outputs.version }}
##### ISSUE TYPE
- Bug, Docs Fix or other nominal change
##### COMPONENT NAME
- API
commit-message: |
Update django-ansible-base version to ${{ steps.dab-pinned.outputs.version }}
add-paths:
requirements/requirements_git.txt

View File

@@ -2,7 +2,6 @@
name: Build/Push Development Images
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
DOCKER_CACHE: "--no-cache" # using the cache will not rebuild git requirements and other things
on:
workflow_dispatch:
push:
@@ -35,9 +34,7 @@ jobs:
exit 0
if: matrix.build-targets.image-name == 'awx' && !endsWith(github.repository, '/awx')
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -62,14 +59,16 @@ jobs:
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Setup node and npm for the new UI build
- name: Setup node and npm
uses: actions/setup-node@v2
with:
node-version: '18'
node-version: '16.13.1'
if: matrix.build-targets.image-name == 'awx'
- name: Prebuild new UI for awx image (to speed up build process)
- name: Prebuild UI for awx image (to speed up build process)
run: |
sudo apt-get install gettext
make ui-release
make ui-next
if: matrix.build-targets.image-name == 'awx'

View File

@@ -8,9 +8,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
- name: install tox
run: pip install tox

View File

@@ -30,10 +30,7 @@ jobs:
timeout-minutes: 20
name: Label Issue - Community
steps:
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- name: Install python requests
run: pip install requests

View File

@@ -29,10 +29,7 @@ jobs:
timeout-minutes: 20
name: Label PR - Community
steps:
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- name: Install python requests
run: pip install requests

View File

@@ -32,9 +32,7 @@ jobs:
echo "TAG_NAME=${{ github.event.release.tag_name }}" >> $GITHUB_ENV
- name: Checkout awx
uses: actions/checkout@v4
with:
show-progress: false
uses: actions/checkout@v3
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

View File

@@ -45,22 +45,19 @@ jobs:
exit 0
- name: Checkout awx
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
show-progress: false
path: awx
- name: Checkout awx-operator
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
show-progress: false
repository: ${{ github.repository_owner }}/awx-operator
path: awx-operator
- name: Checkout awx-logos
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
show-progress: false
repository: ansible/awx-logos
path: awx-logos
@@ -89,14 +86,17 @@ jobs:
run: |
cp ../awx-logos/awx/ui/client/assets/* awx/ui/public/static/media/
- name: Setup node and npm for new UI build
- name: Setup node and npm
uses: actions/setup-node@v2
with:
node-version: '18'
node-version: '16.13.1'
- name: Prebuild new UI for awx image (to speed up build process)
- name: Prebuild UI for awx image (to speed up build process)
working-directory: awx
run: make ui-next
run: |
sudo apt-get install gettext
make ui-release
make ui-next
- name: Set build env variables
run: |
@@ -136,9 +136,9 @@ jobs:
- name: Pulling images for test deployment with awx-operator
# awx operator molecue test expect to kind load image and buildx exports image to registry and not local
run: |
docker pull -q ${AWX_OPERATOR_TEST_IMAGE}
docker pull -q ${AWX_EE_TEST_IMAGE}
docker pull -q ${AWX_TEST_IMAGE}:${AWX_TEST_VERSION}
docker pull ${AWX_OPERATOR_TEST_IMAGE}
docker pull ${AWX_EE_TEST_IMAGE}
docker pull ${AWX_TEST_IMAGE}:${AWX_TEST_VERSION}
- name: Run test deployment with awx-operator
working-directory: awx-operator

View File

@@ -13,9 +13,7 @@ jobs:
steps:
- name: Checkout branch
uses: actions/checkout@v4
with:
show-progress: false
uses: actions/checkout@v3
- name: Update PR Body
env:

View File

@@ -18,9 +18,7 @@ jobs:
packages: write
contents: read
steps:
- uses: actions/checkout@v4
with:
show-progress: false
- uses: actions/checkout@v3
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
@@ -36,7 +34,7 @@ jobs:
- name: Pre-pull image to warm build cache
run: |
docker pull -q ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
- name: Build image
run: |

View File

@@ -67,7 +67,7 @@ If you're not using Docker for Mac, or Docker for Windows, you may need, or choo
#### Frontend Development
See [the ansible-ui development documentation](https://github.com/ansible/ansible-ui/blob/main/CONTRIBUTING.md).
See [the ui development documentation](awx/ui/CONTRIBUTING.md).
#### Fork and clone the AWX repo
@@ -121,7 +121,7 @@ If it has someone assigned to it then that person is the person responsible for
**NOTES**
> Issue assignment will only be done for maintainers of the project. If you decide to work on an issue, please feel free to add a comment in the issue to let others know that you are working on it; but know that we will accept the first pull request from whomever is able to fix an issue. Once your PR is accepted we can add you as an assignee to an issue upon request.
> Issue assignment will only be done for maintainers of the project. If you decide to work on an issue, please feel free to add a comment in the issue to let others know that you are working on it; but know that we will accept the first pull request from whomever is able to fix an issue. Once your PR is accepted we can add you as an assignee to an issue upon request.
> If you work in a part of the codebase that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us in the `#ansible-awx` channel on irc.libera.chat, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
@@ -132,7 +132,7 @@ If it has someone assigned to it then that person is the person responsible for
At this time we do not accept PRs for adding additional language translations as we have an automated process for generating our translations. This is because translations require constant care as new strings are added and changed in the code base. Because of this the .po files are overwritten during every translation release cycle. We also can't support a lot of translations on AWX as its an open source project and each language adds time and cost to maintain. If you would like to see AWX translated into a new language please create an issue and ask others you know to upvote the issue. Our translation team will review the needs of the community and see what they can do around supporting additional language.
If you find an issue with an existing translation, please see the [Reporting Issues](#reporting-issues) section to open an issue and our translation team will work with you on a resolution.
If you find an issue with an existing translation, please see the [Reporting Issues](#reporting-issues) section to open an issue and our translation team will work with you on a resolution.
## Submitting Pull Requests
@@ -161,7 +161,7 @@ Sometimes it might take us a while to fully review your PR. We try to keep the `
When your PR is initially submitted the checks will not be run until a maintainer allows them to be. Once a maintainer has done a quick review of your work the PR will have the linter and unit tests run against them via GitHub Actions, and the status reported in the PR.
## Reporting Issues
We welcome your feedback, and encourage you to file an issue when you run into a problem. But before opening a new issues, we ask that you please view our [Issues guide](./ISSUES.md).
## Getting Help

View File

@@ -63,11 +63,6 @@ DEV_DOCKER_OWNER ?= ansible
DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
IMAGE_KUBE_DEV=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
IMAGE_KUBE=$(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG)
# Common command to use for running ansible-playbook
ANSIBLE_PLAYBOOK ?= ansible-playbook -e ansible_python_interpreter=$(PYTHON)
RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
@@ -91,18 +86,6 @@ I18N_FLAG_FILE = .i18n_built
## PLATFORMS defines the target platforms for the manager image be build to provide support to multiple
PLATFORMS ?= linux/amd64,linux/arm64 # linux/ppc64le,linux/s390x
# Set up cache variables for image builds, allowing to control whether cache is used or not, ex:
# DOCKER_CACHE=--no-cache make docker-compose-build
ifeq ($(DOCKER_CACHE),)
DOCKER_DEVEL_CACHE_FLAG=--cache-from=$(DEVEL_IMAGE_NAME)
DOCKER_KUBE_DEV_CACHE_FLAG=--cache-from=$(IMAGE_KUBE_DEV)
DOCKER_KUBE_CACHE_FLAG=--cache-from=$(IMAGE_KUBE)
else
DOCKER_DEVEL_CACHE_FLAG=$(DOCKER_CACHE)
DOCKER_KUBE_DEV_CACHE_FLAG=$(DOCKER_CACHE)
DOCKER_KUBE_CACHE_FLAG=$(DOCKER_CACHE)
endif
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
develop refresh adduser migrate dbchange \
receiver test test_unit test_coverage coverage_html \
@@ -385,7 +368,7 @@ symlink_collection:
ln -s $(shell pwd)/awx_collection $(COLLECTION_INSTALL)
awx_collection_build: $(shell find awx_collection -type f)
$(ANSIBLE_PLAYBOOK) -i localhost, awx_collection/tools/template_galaxy.yml \
ansible-playbook -i localhost, awx_collection/tools/template_galaxy.yml \
-e collection_package=$(COLLECTION_PACKAGE) \
-e collection_namespace=$(COLLECTION_NAMESPACE) \
-e collection_version=$(COLLECTION_VERSION) \
@@ -502,7 +485,13 @@ ui-test-general:
$(NPM_BIN) run --prefix awx/ui pretest
$(NPM_BIN) run --prefix awx/ui/ test-general --runInBand
# NOTE: The make target ui-next is imported from awx/ui_next/Makefile
HEADLESS ?= no
ifeq ($(HEADLESS), yes)
dist/$(SDIST_TAR_FILE):
else
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE) ui-next
endif
$(PYTHON) -m build -s
ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz
@@ -533,10 +522,10 @@ endif
docker-compose-sources: .git/hooks/pre-commit
@if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\
$(ANSIBLE_PLAYBOOK) -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \
ansible-playbook -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \
fi;
$(ANSIBLE_PLAYBOOK) -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
-e awx_image=$(DEV_DOCKER_TAG_BASE)/awx_devel \
-e awx_image_tag=$(COMPOSE_TAG) \
-e receptor_image=$(RECEPTOR_IMAGE) \
@@ -560,7 +549,7 @@ docker-compose-sources: .git/hooks/pre-commit
docker-compose: awx/projects docker-compose-sources
ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
$(ANSIBLE_PLAYBOOK) -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
-e enable_vault=$(VAULT) \
-e vault_tls=$(VAULT_TLS) \
-e enable_ldap=$(LDAP); \
@@ -603,7 +592,7 @@ docker-compose-container-group-clean:
.PHONY: Dockerfile.dev
## Generate Dockerfile.dev for awx_devel image
Dockerfile.dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
$(ANSIBLE_PLAYBOOK) tools/ansible/dockerfile.yml \
ansible-playbook tools/ansible/dockerfile.yml \
-e dockerfile_name=Dockerfile.dev \
-e build_dev=True \
-e receptor_image=$(RECEPTOR_IMAGE)
@@ -614,7 +603,8 @@ docker-compose-build: Dockerfile.dev
-f Dockerfile.dev \
-t $(DEVEL_IMAGE_NAME) \
--build-arg BUILDKIT_INLINE_CACHE=1 \
$(DOCKER_DEVEL_CACHE_FLAG) .
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
.PHONY: docker-compose-buildx
## Build awx_devel image for docker compose development environment for multiple architectures
@@ -624,7 +614,7 @@ docker-compose-buildx: Dockerfile.dev
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
$(DOCKER_DEVEL_CACHE_FLAG) \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(DEVEL_IMAGE_NAME) \
-f Dockerfile.dev .
@@ -677,7 +667,7 @@ version-for-buildyml:
.PHONY: Dockerfile
## Generate Dockerfile for awx image
Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
$(ANSIBLE_PLAYBOOK) tools/ansible/dockerfile.yml \
ansible-playbook tools/ansible/dockerfile.yml \
-e receptor_image=$(RECEPTOR_IMAGE) \
-e headless=$(HEADLESS)
@@ -687,8 +677,7 @@ awx-kube-build: Dockerfile
--build-arg VERSION=$(VERSION) \
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
--build-arg HEADLESS=$(HEADLESS) \
$(DOCKER_KUBE_CACHE_FLAG) \
-t $(IMAGE_KUBE) .
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
## Build multi-arch awx image for deployment on Kubernetes environment.
awx-kube-buildx: Dockerfile
@@ -700,8 +689,7 @@ awx-kube-buildx: Dockerfile
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
--build-arg HEADLESS=$(HEADLESS) \
--platform=$(PLATFORMS) \
$(DOCKER_KUBE_CACHE_FLAG) \
--tag $(IMAGE_KUBE) \
--tag $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) \
-f Dockerfile .
- docker buildx rm awx-kube-buildx
@@ -709,7 +697,7 @@ awx-kube-buildx: Dockerfile
.PHONY: Dockerfile.kube-dev
## Generate Docker.kube-dev for awx_kube_devel image
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
$(ANSIBLE_PLAYBOOK) tools/ansible/dockerfile.yml \
ansible-playbook tools/ansible/dockerfile.yml \
-e dockerfile_name=Dockerfile.kube-dev \
-e kube_dev=True \
-e template_dest=_build_kube_dev \
@@ -719,8 +707,8 @@ Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
awx-kube-dev-build: Dockerfile.kube-dev
DOCKER_BUILDKIT=1 docker build -f Dockerfile.kube-dev \
--build-arg BUILDKIT_INLINE_CACHE=1 \
$(DOCKER_KUBE_DEV_CACHE_FLAG) \
-t $(IMAGE_KUBE_DEV) .
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
## Build and push multi-arch awx_kube_devel image for development on local Kubernetes environment.
awx-kube-dev-buildx: Dockerfile.kube-dev
@@ -729,14 +717,14 @@ awx-kube-dev-buildx: Dockerfile.kube-dev
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
$(DOCKER_KUBE_DEV_CACHE_FLAG) \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(IMAGE_KUBE_DEV) \
--tag $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-f Dockerfile.kube-dev .
- docker buildx rm awx-kube-dev-buildx
kind-dev-load: awx-kube-dev-build
$(KIND_BIN) load docker-image $(IMAGE_KUBE_DEV)
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
# Translation TASKS
# --------------------------------------

View File

@@ -36,7 +36,6 @@ from ansible_base.lib.utils.models import get_all_field_names
from ansible_base.lib.utils.requests import get_remote_host
from ansible_base.rbac.models import RoleEvaluation, RoleDefinition
from ansible_base.rbac.permission_registry import permission_registry
from ansible_base.jwt_consumer.common.util import validate_x_trusted_proxy_header
# AWX
from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
@@ -44,7 +43,6 @@ from awx.main.models.rbac import give_creator_permissions
from awx.main.access import optimize_queryset
from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
from awx.main.utils.licensing import server_product_name
from awx.main.utils.proxy import is_proxy_in_headers, delete_headers_starting_with_http
from awx.main.views import ApiErrorView
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer
from awx.api.versioning import URLPathVersioning
@@ -155,23 +153,22 @@ class APIView(views.APIView):
Store the Django REST Framework Request object as an attribute on the
normal Django request, store time the request started.
"""
remote_headers = ['REMOTE_ADDR', 'REMOTE_HOST']
self.time_started = time.time()
if getattr(settings, 'SQL_DEBUG', False):
self.queries_before = len(connection.queries)
if 'HTTP_X_TRUSTED_PROXY' in request.environ:
if validate_x_trusted_proxy_header(request.environ['HTTP_X_TRUSTED_PROXY']):
remote_headers = settings.REMOTE_HOST_HEADERS
else:
logger.warning("Request appeared to be a trusted upstream proxy but failed to provide a matching shared secret.")
# If there are any custom headers in REMOTE_HOST_HEADERS, make sure
# they respect the allowed proxy list
if settings.PROXY_IP_ALLOWED_LIST:
if not is_proxy_in_headers(self.request, settings.PROXY_IP_ALLOWED_LIST, remote_headers):
delete_headers_starting_with_http(request, settings.REMOTE_HOST_HEADERS)
if all(
[
settings.PROXY_IP_ALLOWED_LIST,
request.environ.get('REMOTE_ADDR') not in settings.PROXY_IP_ALLOWED_LIST,
request.environ.get('REMOTE_HOST') not in settings.PROXY_IP_ALLOWED_LIST,
]
):
for custom_header in settings.REMOTE_HOST_HEADERS:
if custom_header.startswith('HTTP_'):
request.environ.pop(custom_header, None)
drf_request = super(APIView, self).initialize_request(request, *args, **kwargs)
request.drf_request = drf_request
@@ -227,10 +224,7 @@ class APIView(views.APIView):
if type(response.data) is dict:
msg_data['error'] = response.data.get('error', response.status_text)
elif type(response.data) is list:
if len(response.data) > 0 and isinstance(response.data[0], str):
msg_data['error'] = str(response.data[0])
else:
msg_data['error'] = ", ".join(list(map(lambda x: x.get('error', response.status_text), response.data)))
msg_data['error'] = ", ".join(list(map(lambda x: x.get('error', response.status_text), response.data)))
else:
msg_data['error'] = response.status_text

View File

@@ -103,7 +103,7 @@ class Metadata(metadata.SimpleMetadata):
default = field.get_default()
if type(default) is UUID:
default = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
if field.field_name == 'TOWER_URL_BASE' and default == 'https://platformhost':
if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost':
default = '{}://{}'.format(self.request.scheme, self.request.get_host())
field_info['default'] = default
except serializers.SkipField:

View File

@@ -2,12 +2,6 @@
- hosts: all
become: yes
tasks:
- name: Create the receptor group
group:
{% verbatim %}
name: "{{ receptor_group }}"
{% endverbatim %}
state: present
- name: Create the receptor user
user:
{% verbatim %}

View File

@@ -61,7 +61,6 @@ import pytz
from wsgiref.util import FileWrapper
# django-ansible-base
from ansible_base.lib.utils.requests import get_remote_hosts
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType
@@ -2392,14 +2391,6 @@ class JobTemplateList(ListCreateAPIView):
serializer_class = serializers.JobTemplateSerializer
always_allow_superuser = False
def check_permissions(self, request):
if request.method == 'POST':
can_access, messages = request.user.can_access_with_errors(self.model, 'add', request.data)
if not can_access:
self.permission_denied(request, message=messages)
super(JobTemplateList, self).check_permissions(request)
class JobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = models.JobTemplate
@@ -2779,7 +2770,12 @@ class JobTemplateCallback(GenericAPIView):
host for the current request.
"""
# Find the list of remote host names/IPs to check.
remote_hosts = set(get_remote_hosts(self.request))
remote_hosts = set()
for header in settings.REMOTE_HOST_HEADERS:
for value in self.request.META.get(header, '').split(','):
value = value.strip()
if value:
remote_hosts.add(value)
# Add the reverse lookup of IP addresses.
for rh in list(remote_hosts):
try:
@@ -3119,14 +3115,6 @@ class WorkflowJobTemplateList(ListCreateAPIView):
serializer_class = serializers.WorkflowJobTemplateSerializer
always_allow_superuser = False
def check_permissions(self, request):
if request.method == 'POST':
can_access, messages = request.user.can_access_with_errors(self.model, 'add', request.data)
if not can_access:
self.permission_denied(request, message=messages)
super(WorkflowJobTemplateList, self).check_permissions(request)
class WorkflowJobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = models.WorkflowJobTemplate

View File

@@ -598,7 +598,7 @@ class InstanceGroupAccess(BaseAccess):
- a superuser
- admin role on the Instance group
I can add/delete Instance Groups:
- a superuser(system administrator), because these are not org-scoped
- a superuser(system administrator)
I can use Instance Groups when I have:
- use_role on the instance group
"""
@@ -627,7 +627,7 @@ class InstanceGroupAccess(BaseAccess):
def can_delete(self, obj):
if obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
return False
return self.user.has_obj_perm(obj, 'delete')
return self.user.is_superuser
class UserAccess(BaseAccess):
@@ -1387,11 +1387,12 @@ class TeamAccess(BaseAccess):
class ExecutionEnvironmentAccess(BaseAccess):
"""
I can see an execution environment when:
- I can see its organization
- It is a global ExecutionEnvironment
- I'm a superuser
- I'm a member of the same organization
- it is a global ExecutionEnvironment
I can create/change an execution environment when:
- I'm a superuser
- I have an organization or object role that gives access
- I'm an admin for the organization(s)
"""
model = ExecutionEnvironment
@@ -1400,9 +1401,7 @@ class ExecutionEnvironmentAccess(BaseAccess):
def filtered_queryset(self):
return ExecutionEnvironment.objects.filter(
Q(organization__in=Organization.accessible_pk_qs(self.user, 'read_role'))
| Q(organization__isnull=True)
| Q(id__in=ExecutionEnvironment.access_ids_qs(self.user, 'change'))
Q(organization__in=Organization.accessible_pk_qs(self.user, 'read_role')) | Q(organization__isnull=True)
).distinct()
@check_superuser
@@ -1417,17 +1416,15 @@ class ExecutionEnvironmentAccess(BaseAccess):
raise PermissionDenied
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
if not self.user.has_obj_perm(obj, 'change'):
return False
raise PermissionDenied
else:
if self.user not in obj.organization.execution_environment_admin_role:
raise PermissionDenied
if not self.check_related('organization', Organization, data, obj=obj, role_field='execution_environment_admin_role'):
return False
# Special case that check_related does not catch, org users can not remove the organization from the EE
if data and ('organization' in data or 'organization_id' in data):
if (not data.get('organization')) and (not data.get('organization_id')):
if data and 'organization' in data:
new_org = get_object_from_data('organization', Organization, data, obj=obj)
if not new_org or self.user not in new_org.execution_environment_admin_role:
return False
return True
return self.check_related('organization', Organization, data, obj=obj, mandatory=True, role_field='execution_environment_admin_role')
def can_delete(self, obj):
if obj.managed:
@@ -1599,8 +1596,6 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
inventory = get_value(Inventory, 'inventory')
if inventory:
if self.user not in inventory.use_role:
if self.save_messages:
self.messages['inventory'] = [_('You do not have use permission on Inventory')]
return False
if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'):
@@ -1609,16 +1604,11 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
project = get_value(Project, 'project')
# If the user has admin access to the project (as an org admin), should
# be able to proceed without additional checks.
if not project:
if project:
return self.user in project.use_role
else:
return False
if self.user not in project.use_role:
if self.save_messages:
self.messages['project'] = [_('You do not have use permission on Project')]
return False
return True
@check_superuser
def can_copy_related(self, obj):
"""
@@ -2102,23 +2092,11 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
if not data: # So the browseable API will work
return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()
if not self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True):
if data.get('organization', None) is None:
if self.save_messages:
self.messages['organization'] = [_('An organization is required to create a workflow job template for normal user')]
return False
if not self.check_related('inventory', Inventory, data, role_field='use_role'):
if self.save_messages:
self.messages['inventory'] = [_('You do not have use_role to the inventory')]
return False
if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'):
if self.save_messages:
self.messages['execution_environment'] = [_('You do not have read_role to the execution environment')]
return False
return True
return bool(
self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True)
and self.check_related('inventory', Inventory, data, role_field='use_role')
and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
)
def can_copy(self, obj):
if self.save_messages:
@@ -2650,7 +2628,7 @@ class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
class NotificationTemplateAccess(BaseAccess):
"""
Run standard logic from DAB RBAC
I can see/use a notification_template if I have permission to
"""
model = NotificationTemplate
@@ -2671,7 +2649,10 @@ class NotificationTemplateAccess(BaseAccess):
@check_superuser
def can_change(self, obj, data):
return self.user.has_obj_perm(obj, 'change') and self.check_related('organization', Organization, data, obj=obj, role_field='notification_admin_role')
if obj.organization is None:
# only superusers are allowed to edit orphan notification templates
return False
return self.check_related('organization', Organization, data, obj=obj, role_field='notification_admin_role', mandatory=True)
def can_admin(self, obj, data):
return self.can_change(obj, data)
@@ -2681,7 +2662,9 @@ class NotificationTemplateAccess(BaseAccess):
@check_superuser
def can_start(self, obj, validate_license=True):
return self.can_change(obj, None)
if obj.organization is None:
return False
return self.user in obj.organization.notification_admin_role
class NotificationAccess(BaseAccess):

View File

@@ -66,8 +66,10 @@ class FixedSlidingWindow:
class RelayWebsocketStatsManager:
def __init__(self, local_hostname):
def __init__(self, event_loop, local_hostname):
self._local_hostname = local_hostname
self._event_loop = event_loop
self._stats = dict()
self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME
@@ -92,10 +94,7 @@ class RelayWebsocketStatsManager:
self.start()
def start(self):
self.async_task = asyncio.get_running_loop().create_task(
self.run_loop(),
name='RelayWebsocketStatsManager.run_loop',
)
self.async_task = self._event_loop.create_task(self.run_loop())
return self.async_task
@classmethod

View File

@@ -929,16 +929,6 @@ register(
category_slug='debug',
)
register(
'RECEPTOR_KEEP_WORK_ON_ERROR',
field_class=fields.BooleanField,
label=_('Keep receptor work on error'),
default=False,
help_text=_('Prevent receptor work from being released on when error is detected'),
category=('Debug'),
category_slug='debug',
)
def logging_validate(serializer, attrs):
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):

View File

@@ -43,7 +43,6 @@ STANDARD_INVENTORY_UPDATE_ENV = {
}
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL
ERROR_STATES = ('error',)
MINIMAL_EVENTS = set(['playbook_on_play_start', 'playbook_on_task_start', 'playbook_on_stats', 'EOF'])
CENSOR_VALUE = '************'
ENV_BLOCKLIST = frozenset(

View File

@@ -102,8 +102,7 @@ def create_listener_connection():
# Apply overrides specifically for the listener connection
for k, v in settings.LISTENER_DATABASES.get('default', {}).items():
if k != 'OPTIONS':
conf[k] = v
conf[k] = v
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
conf['OPTIONS'][k] = v

View File

@@ -2,7 +2,6 @@
# All Rights Reserved
from django.core.management.base import BaseCommand
from django.db import transaction
from crum import impersonate
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
from awx.main.signals import disable_computed_fields
@@ -14,12 +13,6 @@ class Command(BaseCommand):
help = 'Creates a preload tower data if there is none.'
def handle(self, *args, **kwargs):
# Wrap the operation in an atomic block, so we do not on accident
# create the organization but not create the project, etc.
with transaction.atomic():
self._handle()
def _handle(self):
changed = False
# Create a default organization as the first superuser found.
@@ -50,11 +43,10 @@ class Command(BaseCommand):
ssh_type = CredentialType.objects.filter(namespace='ssh').first()
c, _ = Credential.objects.get_or_create(
credential_type=ssh_type, name='Demo Credential', inputs={'username': getattr(superuser, 'username', 'null')}, created_by=superuser
credential_type=ssh_type, name='Demo Credential', inputs={'username': superuser.username}, created_by=superuser
)
if superuser:
c.admin_role.members.add(superuser)
c.admin_role.members.add(superuser)
public_galaxy_credential, _ = Credential.objects.get_or_create(
name='Ansible Galaxy',

View File

@@ -1,26 +0,0 @@
# Generated by Django 4.2.6 on 2024-06-20 15:55
from django.db import migrations
def delete_execution_environment_read_role(apps, schema_editor):
permission_classes = [apps.get_model('auth', 'Permission'), apps.get_model('dab_rbac', 'DABPermission')]
for permission_cls in permission_classes:
ee_read_perm = permission_cls.objects.filter(codename='view_executionenvironment').first()
if ee_read_perm:
ee_read_perm.delete()
class Migration(migrations.Migration):
dependencies = [
('main', '0194_alter_inventorysource_source_and_more'),
]
operations = [
migrations.AlterModelOptions(
name='executionenvironment',
options={'default_permissions': ('add', 'change', 'delete'), 'ordering': ('-created',)},
),
migrations.RunPython(delete_execution_environment_read_role, migrations.RunPython.noop),
]

View File

@@ -134,7 +134,8 @@ def get_permissions_for_role(role_field, children_map, apps):
# more special cases for those same above special org-level roles
if role_field.name == 'auditor_role':
perm_list.append(Permission.objects.get(codename='view_notificationtemplate'))
for codename in ('view_notificationtemplate', 'view_executionenvironment'):
perm_list.append(Permission.objects.get(codename=codename))
return perm_list
@@ -289,15 +290,14 @@ def setup_managed_role_definitions(apps, schema_editor):
managed_role_definitions = []
org_perms = set()
for cls in permission_registry.all_registered_models:
for cls in permission_registry._registry:
ct = ContentType.objects.get_for_model(cls)
cls_name = cls._meta.model_name
object_perms = set(Permission.objects.filter(content_type=ct))
# Special case for InstanceGroup which has an organiation field, but is not an organization child object
if cls_name != 'instancegroup':
if cls._meta.model_name != 'instancegroup':
org_perms.update(object_perms)
if 'object_admin' in to_create and cls_name != 'organization':
if 'object_admin' in to_create and cls != Organization:
indiv_perms = object_perms.copy()
add_perms = [perm for perm in indiv_perms if perm.codename.startswith('add_')]
if add_perms:
@@ -310,7 +310,7 @@ def setup_managed_role_definitions(apps, schema_editor):
)
)
if 'org_children' in to_create and (cls_name not in ('organization', 'instancegroup', 'team')):
if 'org_children' in to_create and cls != Organization:
org_child_perms = object_perms.copy()
org_child_perms.add(Permission.objects.get(codename='view_organization'))
@@ -327,25 +327,17 @@ def setup_managed_role_definitions(apps, schema_editor):
if 'special' in to_create:
special_perms = []
for perm in object_perms:
# Organization auditor is handled separately
if perm.codename.split('_')[0] not in ('add', 'change', 'delete', 'view', 'audit'):
if perm.codename.split('_')[0] not in ('add', 'change', 'update', 'delete', 'view'):
special_perms.append(perm)
for perm in special_perms:
action = perm.codename.split('_')[0]
view_perm = Permission.objects.get(content_type=ct, codename__startswith='view_')
perm_list = [perm, view_perm]
# Handle special-case where adhoc role also listed use permission
if action == 'adhoc':
for other_perm in object_perms:
if other_perm.codename == 'use_inventory':
perm_list.append(other_perm)
break
managed_role_definitions.append(
get_or_create_managed(
to_create['special'].format(cls=cls, action=action.title()),
f'Has {action} permissions to a single {cls._meta.verbose_name}',
ct,
perm_list,
[perm, view_perm],
RoleDefinition,
)
)
@@ -361,41 +353,6 @@ def setup_managed_role_definitions(apps, schema_editor):
)
)
# Special "organization action" roles
audit_permissions = [perm for perm in org_perms if perm.codename.startswith('view_')]
audit_permissions.append(Permission.objects.get(codename='audit_organization'))
managed_role_definitions.append(
get_or_create_managed(
'Organization Audit',
'Has permission to view all objects inside of a single organization',
org_ct,
audit_permissions,
RoleDefinition,
)
)
org_execute_permissions = {'view_jobtemplate', 'execute_jobtemplate', 'view_workflowjobtemplate', 'execute_workflowjobtemplate', 'view_organization'}
managed_role_definitions.append(
get_or_create_managed(
'Organization Execute',
'Has permission to execute all runnable objects in the organization',
org_ct,
[perm for perm in org_perms if perm.codename in org_execute_permissions],
RoleDefinition,
)
)
org_approval_permissions = {'view_organization', 'view_workflowjobtemplate', 'approve_workflowjobtemplate'}
managed_role_definitions.append(
get_or_create_managed(
'Organization Approval',
'Has permission to approve any workflow steps within a single organization',
org_ct,
[perm for perm in org_perms if perm.codename in org_approval_permissions],
RoleDefinition,
)
)
unexpected_role_definitions = RoleDefinition.objects.filter(managed=True).exclude(pk__in=[rd.pk for rd in managed_role_definitions])
for role_definition in unexpected_role_definitions:
logger.info(f'Deleting old managed role definition {role_definition.name}, pk={role_definition.pk}')

View File

@@ -176,17 +176,17 @@ pre_delete.connect(cleanup_created_modified_by, sender=User)
@property
def user_get_organizations(user):
return Organization.access_qs(user, 'member')
return Organization.objects.filter(member_role__members=user)
@property
def user_get_admin_of_organizations(user):
return Organization.access_qs(user, 'change')
return Organization.objects.filter(admin_role__members=user)
@property
def user_get_auditor_of_organizations(user):
return Organization.access_qs(user, 'audit')
return Organization.objects.filter(auditor_role__members=user)
@property

View File

@@ -15,16 +15,12 @@ from jinja2 import sandbox
# Django
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils.translation import gettext_lazy as _, gettext_noop
from django.core.exceptions import ValidationError
from django.conf import settings
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.contrib.auth.models import User
# DRF
from rest_framework.serializers import ValidationError as DRFValidationError
# AWX
from awx.api.versioning import reverse
@@ -45,9 +41,8 @@ from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.models import Team, Organization
from awx.main.utils import encrypt_field
from awx_plugins.credentials import injectors as builtin_injectors
from . import injectors as builtin_injectors
__all__ = ['Credential', 'CredentialType', 'CredentialInputSource', 'build_safe_env']
@@ -320,16 +315,6 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
else:
raise ValueError('{} is not a dynamic input field'.format(field_name))
def validate_role_assignment(self, actor, role_definition):
if self.organization:
if isinstance(actor, User):
if actor.is_superuser or Organization.access_qs(actor, 'member').filter(id=self.organization.id).exists():
return
if isinstance(actor, Team):
if actor.organization == self.organization:
return
raise DRFValidationError({'detail': _(f"You cannot grant credential access to a {actor._meta.object_name} not in the credentials' organization")})
class CredentialType(CommonModelNameNotUnique):
"""
@@ -601,6 +586,666 @@ class ManagedCredentialType(SimpleNamespace):
return CredentialType(**self.get_creation_params())
ManagedCredentialType(
namespace='ssh',
kind='ssh',
name=gettext_noop('Machine'),
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{'id': 'password', 'label': gettext_noop('Password'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
{'id': 'ssh_key_data', 'label': gettext_noop('SSH Private Key'), 'type': 'string', 'format': 'ssh_private_key', 'secret': True, 'multiline': True},
{
'id': 'ssh_public_key_data',
'label': gettext_noop('Signed SSH Certificate'),
'type': 'string',
'multiline': True,
'secret': True,
},
{'id': 'ssh_key_unlock', 'label': gettext_noop('Private Key Passphrase'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
{
'id': 'become_method',
'label': gettext_noop('Privilege Escalation Method'),
'type': 'string',
'help_text': gettext_noop('Specify a method for "become" operations. This is equivalent to specifying the --become-method Ansible parameter.'),
},
{
'id': 'become_username',
'label': gettext_noop('Privilege Escalation Username'),
'type': 'string',
},
{'id': 'become_password', 'label': gettext_noop('Privilege Escalation Password'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
],
},
)
ManagedCredentialType(
namespace='scm',
kind='scm',
name=gettext_noop('Source Control'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{'id': 'password', 'label': gettext_noop('Password'), 'type': 'string', 'secret': True},
{'id': 'ssh_key_data', 'label': gettext_noop('SCM Private Key'), 'type': 'string', 'format': 'ssh_private_key', 'secret': True, 'multiline': True},
{'id': 'ssh_key_unlock', 'label': gettext_noop('Private Key Passphrase'), 'type': 'string', 'secret': True},
],
},
)
ManagedCredentialType(
namespace='vault',
kind='vault',
name=gettext_noop('Vault'),
managed=True,
inputs={
'fields': [
{'id': 'vault_password', 'label': gettext_noop('Vault Password'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
{
'id': 'vault_id',
'label': gettext_noop('Vault Identifier'),
'type': 'string',
'format': 'vault_id',
'help_text': gettext_noop(
'Specify an (optional) Vault ID. This is '
'equivalent to specifying the --vault-id '
'Ansible parameter for providing multiple Vault '
'passwords. Note: this feature only works in '
'Ansible 2.4+.'
),
},
],
'required': ['vault_password'],
},
)
ManagedCredentialType(
namespace='net',
kind='net',
name=gettext_noop('Network'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
{'id': 'ssh_key_data', 'label': gettext_noop('SSH Private Key'), 'type': 'string', 'format': 'ssh_private_key', 'secret': True, 'multiline': True},
{
'id': 'ssh_key_unlock',
'label': gettext_noop('Private Key Passphrase'),
'type': 'string',
'secret': True,
},
{
'id': 'authorize',
'label': gettext_noop('Authorize'),
'type': 'boolean',
},
{
'id': 'authorize_password',
'label': gettext_noop('Authorize Password'),
'type': 'string',
'secret': True,
},
],
'dependencies': {
'authorize_password': ['authorize'],
},
'required': ['username'],
},
)
ManagedCredentialType(
namespace='aws',
kind='cloud',
name=gettext_noop('Amazon Web Services'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Access Key'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Secret Key'),
'type': 'string',
'secret': True,
},
{
'id': 'security_token',
'label': gettext_noop('STS Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop(
'Security Token Service (STS) is a web service '
'that enables you to request temporary, '
'limited-privilege credentials for AWS Identity '
'and Access Management (IAM) users.'
),
},
],
'required': ['username', 'password'],
},
)
ManagedCredentialType(
namespace='openstack',
kind='cloud',
name=gettext_noop('OpenStack'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password (API Key)'),
'type': 'string',
'secret': True,
},
{
'id': 'host',
'label': gettext_noop('Host (Authentication URL)'),
'type': 'string',
'help_text': gettext_noop('The host to authenticate with. For example, https://openstack.business.com/v2.0/'),
},
{
'id': 'project',
'label': gettext_noop('Project (Tenant Name)'),
'type': 'string',
},
{
'id': 'project_domain_name',
'label': gettext_noop('Project (Domain Name)'),
'type': 'string',
},
{
'id': 'domain',
'label': gettext_noop('Domain Name'),
'type': 'string',
'help_text': gettext_noop(
'OpenStack domains define administrative boundaries. '
'It is only needed for Keystone v3 authentication '
'URLs. Refer to the documentation for '
'common scenarios.'
),
},
{
'id': 'region',
'label': gettext_noop('Region Name'),
'type': 'string',
'help_text': gettext_noop('For some cloud providers, like OVH, region must be specified'),
},
{
'id': 'verify_ssl',
'label': gettext_noop('Verify SSL'),
'type': 'boolean',
'default': True,
},
],
'required': ['username', 'password', 'host', 'project'],
},
)
ManagedCredentialType(
namespace='vmware',
kind='cloud',
name=gettext_noop('VMware vCenter'),
managed=True,
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('VCenter Host'),
'type': 'string',
'help_text': gettext_noop('Enter the hostname or IP address that corresponds to your VMware vCenter.'),
},
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
],
'required': ['host', 'username', 'password'],
},
)
ManagedCredentialType(
namespace='satellite6',
kind='cloud',
name=gettext_noop('Red Hat Satellite 6'),
managed=True,
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('Satellite 6 URL'),
'type': 'string',
'help_text': gettext_noop('Enter the URL that corresponds to your Red Hat Satellite 6 server. For example, https://satellite.example.org'),
},
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
],
'required': ['host', 'username', 'password'],
},
)
ManagedCredentialType(
namespace='gce',
kind='cloud',
name=gettext_noop('Google Compute Engine'),
managed=True,
inputs={
'fields': [
{
'id': 'username',
'label': gettext_noop('Service Account Email Address'),
'type': 'string',
'help_text': gettext_noop('The email address assigned to the Google Compute Engine service account.'),
},
{
'id': 'project',
'label': 'Project',
'type': 'string',
'help_text': gettext_noop(
'The Project ID is the GCE assigned identification. '
'It is often constructed as three words or two words '
'followed by a three-digit number. Examples: project-id-000 '
'and another-project-id'
),
},
{
'id': 'ssh_key_data',
'label': gettext_noop('RSA Private Key'),
'type': 'string',
'format': 'ssh_private_key',
'secret': True,
'multiline': True,
'help_text': gettext_noop('Paste the contents of the PEM file associated with the service account email.'),
},
],
'required': ['username', 'ssh_key_data'],
},
)
ManagedCredentialType(
namespace='azure_rm',
kind='cloud',
name=gettext_noop('Microsoft Azure Resource Manager'),
managed=True,
inputs={
'fields': [
{
'id': 'subscription',
'label': gettext_noop('Subscription ID'),
'type': 'string',
'help_text': gettext_noop('Subscription ID is an Azure construct, which is mapped to a username.'),
},
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
{'id': 'client', 'label': gettext_noop('Client ID'), 'type': 'string'},
{
'id': 'secret',
'label': gettext_noop('Client Secret'),
'type': 'string',
'secret': True,
},
{'id': 'tenant', 'label': gettext_noop('Tenant ID'), 'type': 'string'},
{
'id': 'cloud_environment',
'label': gettext_noop('Azure Cloud Environment'),
'type': 'string',
'help_text': gettext_noop('Environment variable AZURE_CLOUD_ENVIRONMENT when using Azure GovCloud or Azure stack.'),
},
],
'required': ['subscription'],
},
)
ManagedCredentialType(
namespace='github_token',
kind='token',
name=gettext_noop('GitHub Personal Access Token'),
managed=True,
inputs={
'fields': [
{
'id': 'token',
'label': gettext_noop('Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('This token needs to come from your profile settings in GitHub'),
}
],
'required': ['token'],
},
)
ManagedCredentialType(
namespace='gitlab_token',
kind='token',
name=gettext_noop('GitLab Personal Access Token'),
managed=True,
inputs={
'fields': [
{
'id': 'token',
'label': gettext_noop('Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('This token needs to come from your profile settings in GitLab'),
}
],
'required': ['token'],
},
)
ManagedCredentialType(
namespace='bitbucket_dc_token',
kind='token',
name=gettext_noop('Bitbucket Data Center HTTP Access Token'),
managed=True,
inputs={
'fields': [
{
'id': 'token',
'label': gettext_noop('Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('This token needs to come from your user settings in Bitbucket'),
}
],
'required': ['token'],
},
)
ManagedCredentialType(
namespace='insights',
kind='insights',
name=gettext_noop('Insights'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{'id': 'password', 'label': gettext_noop('Password'), 'type': 'string', 'secret': True},
],
'required': ['username', 'password'],
},
injectors={
'extra_vars': {
"scm_username": "{{username}}",
"scm_password": "{{password}}",
},
'env': {
'INSIGHTS_USER': '{{username}}',
'INSIGHTS_PASSWORD': '{{password}}',
},
},
)
ManagedCredentialType(
namespace='rhv',
kind='cloud',
name=gettext_noop('Red Hat Virtualization'),
managed=True,
inputs={
'fields': [
{'id': 'host', 'label': gettext_noop('Host (Authentication URL)'), 'type': 'string', 'help_text': gettext_noop('The host to authenticate with.')},
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
{
'id': 'ca_file',
'label': gettext_noop('CA File'),
'type': 'string',
'help_text': gettext_noop('Absolute file path to the CA file to use (optional)'),
},
],
'required': ['host', 'username', 'password'],
},
injectors={
# The duplication here is intentional; the ovirt4 inventory plugin
# writes a .ini file for authentication, while the ansible modules for
# ovirt4 use a separate authentication process that support
# environment variables; by injecting both, we support both
'file': {
'template': '\n'.join(
[
'[ovirt]',
'ovirt_url={{host}}',
'ovirt_username={{username}}',
'ovirt_password={{password}}',
'{% if ca_file %}ovirt_ca_file={{ca_file}}{% endif %}',
]
)
},
'env': {'OVIRT_INI_PATH': '{{tower.filename}}', 'OVIRT_URL': '{{host}}', 'OVIRT_USERNAME': '{{username}}', 'OVIRT_PASSWORD': '{{password}}'},
},
)
ManagedCredentialType(
namespace='controller',
kind='cloud',
name=gettext_noop('Red Hat Ansible Automation Platform'),
managed=True,
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('Red Hat Ansible Automation Platform'),
'type': 'string',
'help_text': gettext_noop('Red Hat Ansible Automation Platform base URL to authenticate with.'),
},
{
'id': 'username',
'label': gettext_noop('Username'),
'type': 'string',
'help_text': gettext_noop(
'Red Hat Ansible Automation Platform username id to authenticate as.This should not be set if an OAuth token is being used.'
),
},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
{
'id': 'oauth_token',
'label': gettext_noop('OAuth Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('An OAuth token to use to authenticate with.This should not be set if username/password are being used.'),
},
{'id': 'verify_ssl', 'label': gettext_noop('Verify SSL'), 'type': 'boolean', 'secret': False},
],
'required': ['host'],
},
injectors={
'env': {
'TOWER_HOST': '{{host}}',
'TOWER_USERNAME': '{{username}}',
'TOWER_PASSWORD': '{{password}}',
'TOWER_VERIFY_SSL': '{{verify_ssl}}',
'TOWER_OAUTH_TOKEN': '{{oauth_token}}',
'CONTROLLER_HOST': '{{host}}',
'CONTROLLER_USERNAME': '{{username}}',
'CONTROLLER_PASSWORD': '{{password}}',
'CONTROLLER_VERIFY_SSL': '{{verify_ssl}}',
'CONTROLLER_OAUTH_TOKEN': '{{oauth_token}}',
}
},
)
ManagedCredentialType(
namespace='kubernetes_bearer_token',
kind='kubernetes',
name=gettext_noop('OpenShift or Kubernetes API Bearer Token'),
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('OpenShift or Kubernetes API Endpoint'),
'type': 'string',
'help_text': gettext_noop('The OpenShift or Kubernetes API Endpoint to authenticate with.'),
},
{
'id': 'bearer_token',
'label': gettext_noop('API authentication bearer token'),
'type': 'string',
'secret': True,
},
{
'id': 'verify_ssl',
'label': gettext_noop('Verify SSL'),
'type': 'boolean',
'default': True,
},
{
'id': 'ssl_ca_cert',
'label': gettext_noop('Certificate Authority data'),
'type': 'string',
'secret': True,
'multiline': True,
},
],
'required': ['host', 'bearer_token'],
},
)
ManagedCredentialType(
namespace='registry',
kind='registry',
name=gettext_noop('Container Registry'),
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('Authentication URL'),
'type': 'string',
'help_text': gettext_noop('Authentication endpoint for the container registry.'),
'default': 'quay.io',
},
{
'id': 'username',
'label': gettext_noop('Username'),
'type': 'string',
},
{
'id': 'password',
'label': gettext_noop('Password or Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('A password or token used to authenticate with'),
},
{
'id': 'verify_ssl',
'label': gettext_noop('Verify SSL'),
'type': 'boolean',
'default': True,
},
],
'required': ['host'],
},
)
ManagedCredentialType(
namespace='galaxy_api_token',
kind='galaxy',
name=gettext_noop('Ansible Galaxy/Automation Hub API Token'),
inputs={
'fields': [
{
'id': 'url',
'label': gettext_noop('Galaxy Server URL'),
'type': 'string',
'help_text': gettext_noop('The URL of the Galaxy instance to connect to.'),
},
{
'id': 'auth_url',
'label': gettext_noop('Auth Server URL'),
'type': 'string',
'help_text': gettext_noop('The URL of a Keycloak server token_endpoint, if using SSO auth.'),
},
{
'id': 'token',
'label': gettext_noop('API Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('A token to use for authentication against the Galaxy instance.'),
},
],
'required': ['url'],
},
)
ManagedCredentialType(
namespace='gpg_public_key',
kind='cryptography',
name=gettext_noop('GPG Public Key'),
inputs={
'fields': [
{
'id': 'gpg_public_key',
'label': gettext_noop('GPG Public Key'),
'type': 'string',
'secret': True,
'multiline': True,
'help_text': gettext_noop('GPG Public Key used to validate content signatures.'),
},
],
'required': ['gpg_public_key'],
},
)
ManagedCredentialType(
namespace='terraform',
kind='cloud',
name=gettext_noop('Terraform backend configuration'),
managed=True,
inputs={
'fields': [
{
'id': 'configuration',
'label': gettext_noop('Backend configuration'),
'type': 'string',
'secret': True,
'multiline': True,
'help_text': gettext_noop('Terraform backend config as Hashicorp configuration language.'),
},
{
'id': 'gce_credentials',
'label': gettext_noop('Google Cloud Platform account credentials'),
'type': 'string',
'secret': True,
'multiline': True,
'help_text': gettext_noop('Google Cloud Platform account credentials in JSON format.'),
},
],
'required': ['configuration'],
},
)
class CredentialInputSource(PrimordialModel):
class Meta:
app_label = 'main'
@@ -664,7 +1309,6 @@ class CredentialInputSource(PrimordialModel):
view_name = 'api:credential_input_source_detail'
return reverse(view_name, kwargs={'pk': self.pk}, request=request)
from awx_plugins.credentials.plugins import *
for ns, plugin in credential_plugins.items():
CredentialType.load_plugin(ns, plugin)

View File

@@ -1,8 +1,6 @@
from django.db import models
from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import ValidationError
from awx.api.versioning import reverse
from awx.main.models.base import CommonModel
from awx.main.validators import validate_container_image_name
@@ -14,8 +12,6 @@ __all__ = ['ExecutionEnvironment']
class ExecutionEnvironment(CommonModel):
class Meta:
ordering = ('-created',)
# Remove view permission, as a temporary solution, defer to organization read permission
default_permissions = ('add', 'change', 'delete')
PULL_CHOICES = [
('always', _("Always pull container before running.")),
@@ -57,12 +53,3 @@ class ExecutionEnvironment(CommonModel):
def get_absolute_url(self, request=None):
return reverse('api:execution_environment_detail', kwargs={'pk': self.pk}, request=request)
def validate_role_assignment(self, actor, role_definition):
if self.managed:
raise ValidationError({'object_id': _('Can not assign object roles to managed Execution Environments')})
if self.organization_id is None:
raise ValidationError({'object_id': _('Can not assign object roles to global Execution Environments')})
if actor._meta.model_name == 'user' and (not actor.has_obj_perm(self.organization, 'view')):
raise ValidationError({'user': _('User must have view permission to Execution Environment organization')})

View File

@@ -10,6 +10,10 @@ import copy
import os.path
from urllib.parse import urljoin
import yaml
import tempfile
import stat
# Django
from django.conf import settings
from django.db import models, connection
@@ -24,7 +28,6 @@ from django.db.models import Q
from rest_framework.exceptions import ParseError
from ansible_base.lib.utils.models import prevent_search
from awx_plugins.inventory.plugins import PluginFileInjector
# AWX
from awx.api.versioning import reverse
@@ -49,9 +52,11 @@ from awx.main.models.notifications import (
NotificationTemplate,
JobNotificationMixin,
)
from awx.main.models.credential.injectors import _openstack_data
from awx.main.utils import _inventory_updates
from awx.main.utils.safe_yaml import sanitize_jinja
from awx.main.utils.execution_environments import get_control_plane_execution_environment
from awx.main.utils.execution_environments import to_container_path, get_control_plane_execution_environment
from awx.main.utils.licensing import server_product_name
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership', 'HostMetric', 'HostMetricSummaryMonthly']
@@ -1422,5 +1427,297 @@ class CustomInventoryScript(CommonModelNameNotUnique):
return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)
class PluginFileInjector(object):
plugin_name = None # Ansible core name used to reference plugin
# base injector should be one of None, "managed", or "template"
# this dictates which logic to borrow from playbook injectors
base_injector = None
# every source should have collection, these are for the collection name
namespace = None
collection = None
collection_migration = '2.9' # Starting with this version, we use collections
use_fqcn = False # plugin: name versus plugin: namespace.collection.name
# TODO: delete this method and update unit tests
@classmethod
def get_proper_name(cls):
if cls.plugin_name is None:
return None
return f'{cls.namespace}.{cls.collection}.{cls.plugin_name}'
@property
def filename(self):
"""Inventory filename for using the inventory plugin
This is created dynamically, but the auto plugin requires this exact naming
"""
return '{0}.yml'.format(self.plugin_name)
def inventory_contents(self, inventory_update, private_data_dir):
"""Returns a string that is the content for the inventory file for the inventory plugin"""
return yaml.safe_dump(self.inventory_as_dict(inventory_update, private_data_dir), default_flow_style=False, width=1000)
def inventory_as_dict(self, inventory_update, private_data_dir):
source_vars = dict(inventory_update.source_vars_dict) # make a copy
'''
None conveys that we should use the user-provided plugin.
Note that a plugin value of '' should still be overridden.
'''
if self.plugin_name is not None:
if hasattr(self, 'downstream_namespace') and server_product_name() != 'AWX':
source_vars['plugin'] = f'{self.downstream_namespace}.{self.downstream_collection}.{self.plugin_name}'
elif self.use_fqcn:
source_vars['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
else:
source_vars['plugin'] = self.plugin_name
return source_vars
def build_env(self, inventory_update, env, private_data_dir, private_data_files):
injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
env.update(injector_env)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
return env
def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
"""By default, we will apply the standard managed injectors"""
injected_env = {}
credential = inventory_update.get_cloud_credential()
# some sources may have no credential, specifically ec2
if credential is None:
return injected_env
if self.base_injector in ('managed', 'template'):
injected_env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk) # so injector knows this is inventory
if self.base_injector == 'managed':
from awx.main.models.credential import injectors as builtin_injectors
cred_kind = inventory_update.source.replace('ec2', 'aws')
if cred_kind in dir(builtin_injectors):
getattr(builtin_injectors, cred_kind)(credential, injected_env, private_data_dir)
elif self.base_injector == 'template':
safe_env = injected_env.copy()
args = []
credential.credential_type.inject_credential(credential, injected_env, safe_env, args, private_data_dir)
# NOTE: safe_env is handled externally to injector class by build_safe_env static method
# that means that managed injectors must only inject detectable env keys
# enforcement of this is accomplished by tests
return injected_env
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
return env
def build_private_data(self, inventory_update, private_data_dir):
    """Delegate to the plugin-specific private-data builder."""
    return self.build_plugin_private_data(inventory_update, private_data_dir)
def build_plugin_private_data(self, inventory_update, private_data_dir):
    """Hook for subclasses; the base implementation supplies no private data."""
    return None
class azure_rm(PluginFileInjector):
    """Injector for the Azure Resource Manager inventory plugin."""

    plugin_name = 'azure_rm'
    base_injector = 'managed'
    namespace = 'azure'
    collection = 'azcollection'

    def get_plugin_env(self, *args, **kwargs):
        env = super().get_plugin_env(*args, **kwargs)
        # We need native jinja2 types so that tags can give JSON null value
        env['ANSIBLE_JINJA2_NATIVE'] = str(True)
        return env
class ec2(PluginFileInjector):
    """Injector for the AWS EC2 inventory plugin."""

    plugin_name = 'aws_ec2'
    base_injector = 'managed'
    namespace = 'amazon'
    collection = 'aws'

    def get_plugin_env(self, *args, **kwargs):
        env = super().get_plugin_env(*args, **kwargs)
        # We need native jinja2 types so that ec2_state_code will give integer
        env['ANSIBLE_JINJA2_NATIVE'] = str(True)
        return env
class gce(PluginFileInjector):
    """Injector for the Google Compute Engine inventory plugin."""

    plugin_name = 'gcp_compute'
    base_injector = 'managed'
    namespace = 'google'
    collection = 'cloud'

    def get_plugin_env(self, *args, **kwargs):
        env = super().get_plugin_env(*args, **kwargs)
        # We need native jinja2 types so that ip addresses can give JSON null value
        env['ANSIBLE_JINJA2_NATIVE'] = str(True)
        return env

    def inventory_as_dict(self, inventory_update, private_data_dir):
        """Build the plugin config, defaulting ``projects`` from the credential."""
        config = super().inventory_as_dict(inventory_update, private_data_dir)
        credential = inventory_update.get_cloud_credential()
        # InventorySource.source_vars take precedence over ENV vars
        if 'projects' not in config:
            config['projects'] = [credential.get_input('project', default='')]
        return config
class vmware(PluginFileInjector):
    # Injector for the VMware VM inventory plugin from community.vmware;
    # relies entirely on the managed base injector for credentials.
    plugin_name = 'vmware_vm_inventory'
    base_injector = 'managed'
    namespace = 'community'
    collection = 'vmware'
class openstack(PluginFileInjector):
    """Injector for the OpenStack inventory plugin; writes a clouds-config file."""

    plugin_name = 'openstack'
    namespace = 'openstack'
    collection = 'cloud'

    def _get_clouds_dict(self, inventory_update, cred, private_data_dir):
        """Assemble the clouds.yaml-style dict consumed by the plugin."""
        openstack_data = _openstack_data(cred)
        openstack_data['clouds']['devstack']['private'] = inventory_update.source_vars_dict.get('private', True)
        ansible_variables = {
            'use_hostnames': True,
            'expand_hostvars': False,
            'fail_on_errors': True,
        }
        provided = [name for name in ansible_variables if name in inventory_update.source_vars_dict]
        for name in provided:
            ansible_variables[name] = inventory_update.source_vars_dict[name]
        if provided:
            # Must we provide all 3 because the user provides any 1 of these??
            # this probably results in some incorrect mangling of the defaults
            openstack_data['ansible'] = ansible_variables
        return openstack_data

    def build_plugin_private_data(self, inventory_update, private_data_dir):
        """Serialize the clouds dict to YAML, keyed by the cloud credential."""
        credential = inventory_update.get_cloud_credential()
        clouds = self._get_clouds_dict(inventory_update, credential, private_data_dir)
        serialized = yaml.safe_dump(clouds, default_flow_style=False, allow_unicode=True)
        return {'credentials': {credential: serialized}}

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        """Point the plugin at the written clouds file via OS_CLIENT_CONFIG_FILE."""
        env = super().get_plugin_env(inventory_update, private_data_dir, private_data_files)
        credential = inventory_update.get_cloud_credential()
        cred_data = private_data_files['credentials']
        env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_data[credential], private_data_dir)
        return env
class rhv(PluginFileInjector):
    """ovirt uses the custom credential templating, and that is all"""

    plugin_name = 'ovirt'
    base_injector = 'template'
    initial_version = '2.9'
    namespace = 'ovirt'
    collection = 'ovirt'
    # Downstream (supported) builds resolve the plugin from redhat.rhv instead.
    downstream_namespace = 'redhat'
    downstream_collection = 'rhv'
    use_fqcn = True
class satellite6(PluginFileInjector):
    """Injector for the Foreman/Satellite inventory plugin."""

    plugin_name = 'foreman'
    namespace = 'theforeman'
    collection = 'foreman'
    # Downstream (supported) builds resolve the plugin from redhat.satellite instead.
    downstream_namespace = 'redhat'
    downstream_collection = 'satellite'
    use_fqcn = True

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        """Expose the Foreman connection details through FOREMAN_* env vars.

        This assumes that https://github.com/ansible/ansible/pull/52693 is merged.
        """
        credential = inventory_update.get_cloud_credential()
        env = super().get_plugin_env(inventory_update, private_data_dir, private_data_files)
        if credential:
            for env_name, input_name in (
                ('FOREMAN_SERVER', 'host'),
                ('FOREMAN_USER', 'username'),
                ('FOREMAN_PASSWORD', 'password'),
            ):
                env[env_name] = credential.get_input(input_name, default='')
        return env
class terraform(PluginFileInjector):
    """Injector for the Terraform state inventory plugin."""

    plugin_name = 'terraform_state'
    namespace = 'cloud'
    collection = 'terraform'
    use_fqcn = True

    def inventory_as_dict(self, inventory_update, private_data_dir):
        """Build plugin config; writes the backend config from the credential to disk."""
        config = super().inventory_as_dict(inventory_update, private_data_dir)
        credential = inventory_update.get_cloud_credential()
        backend_config = credential.get_input('configuration')
        if backend_config:
            # Write the backend configuration into the private env dir with
            # owner-only permissions, then reference it by its in-container path.
            handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
            with os.fdopen(handle, 'w') as config_file:
                os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
                config_file.write(backend_config)
            config['backend_config_files'] = to_container_path(path, private_data_dir)
        return config

    def build_plugin_private_data(self, inventory_update, private_data_dir):
        """Stash GCE credentials (if provided) for the backend to pick up."""
        credential = inventory_update.get_cloud_credential()
        gce_cred = credential.get_input('gce_credentials', default=None)
        if gce_cred:
            return {'credentials': {credential: gce_cred}}
        return {'credentials': {}}

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        """Expose the written GCE credentials file via GOOGLE_BACKEND_CREDENTIALS."""
        env = super().get_plugin_env(inventory_update, private_data_dir, private_data_files)
        credential = inventory_update.get_cloud_credential()
        cred_data = private_data_files['credentials']
        if credential in cred_data:
            env['GOOGLE_BACKEND_CREDENTIALS'] = to_container_path(cred_data[credential], private_data_dir)
        return env
class controller(PluginFileInjector):
    # Injector for the AWX/Controller inventory plugin.
    plugin_name = 'tower'  # TODO: relying on routing for now, update after EEs pick up revised collection
    base_injector = 'template'
    namespace = 'awx'
    collection = 'awx'
    # Downstream (supported) builds resolve the plugin from ansible.controller instead.
    downstream_namespace = 'ansible'
    downstream_collection = 'controller'
class insights(PluginFileInjector):
    # Injector for the Red Hat Insights inventory plugin; credentials are
    # supplied by the custom credential templating ('template' base injector).
    plugin_name = 'insights'
    base_injector = 'template'
    namespace = 'redhatinsights'
    collection = 'insights'
    # Downstream (supported) builds resolve the plugin from redhat.insights instead.
    downstream_namespace = 'redhat'
    downstream_collection = 'insights'
    use_fqcn = True
class openshift_virtualization(PluginFileInjector):
    # Injector for the KubeVirt inventory plugin; credentials are supplied by
    # the custom credential templating ('template' base injector).
    plugin_name = 'kubevirt'
    base_injector = 'template'
    namespace = 'kubevirt'
    collection = 'core'
    # Downstream (supported) builds resolve the plugin from
    # redhat.openshift_virtualization instead.
    downstream_namespace = 'redhat'
    downstream_collection = 'openshift_virtualization'
    use_fqcn = True
class constructed(PluginFileInjector):
    """Injector for the built-in constructed inventory plugin."""

    plugin_name = 'constructed'
    namespace = 'ansible'
    collection = 'builtin'

    def build_env(self, *args, **kwargs):
        env = super().build_env(*args, **kwargs)
        # Enable script inventory plugin so we pick up the script files from source inventories
        env['ANSIBLE_INVENTORY_ENABLED'] += ',script'
        env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'
        return env
# Register every PluginFileInjector subclass on InventorySourceOptions,
# keyed by class name.
for cls in PluginFileInjector.__subclasses__():
    InventorySourceOptions.injectors[cls.__name__] = cls

View File

@@ -396,11 +396,11 @@ class JobNotificationMixin(object):
'verbosity': 0,
},
'job_friendly_name': 'Job',
'url': 'https://platformhost/#/jobs/playbook/1010',
'url': 'https://towerhost/#/jobs/playbook/1010',
'approval_status': 'approved',
'approval_node_name': 'Approve Me',
'workflow_url': 'https://platformhost/#/jobs/workflow/1010',
'job_metadata': """{'url': 'https://platformhost/$/jobs/playbook/13',
'workflow_url': 'https://towerhost/#/jobs/workflow/1010',
'job_metadata': """{'url': 'https://towerhost/$/jobs/playbook/13',
'traceback': '',
'status': 'running',
'started': '2019-08-07T21:46:38.362630+00:00',

View File

@@ -591,20 +591,14 @@ def get_role_from_object_role(object_role):
role_name = role_name.lower()
model_cls = apps.get_model('main', target_model_name)
target_model_name = get_type_for_model(model_cls)
# exception cases completely specific to one model naming convention
if target_model_name == 'notification_template':
target_model_name = 'notification'
elif target_model_name == 'workflow_job_template':
target_model_name = 'workflow'
target_model_name = 'notification' # total exception
role_name = f'{target_model_name}_admin_role'
elif rd.name.endswith(' Admin'):
# cases like "project-admin"
role_name = 'admin_role'
elif rd.name == 'Organization Audit':
role_name = 'auditor_role'
else:
print(rd.name)
model_name, role_name = rd.name.split()
role_name = role_name.lower()
role_name += '_role'
@@ -689,15 +683,9 @@ def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs)
for role_id in pk_set:
if reverse:
try:
child_role = Role.objects.get(id=role_id)
except Role.DoesNotExist:
continue
child_role = Role.objects.get(id=role_id)
else:
try:
parent_role = Role.objects.get(id=role_id)
except Role.DoesNotExist:
continue
parent_role = Role.objects.get(id=role_id)
# To a fault, we want to avoid running this if triggered from implicit_parents management
# we only want to do anything if we know for sure this is a non-implicit team role

View File

@@ -31,7 +31,6 @@ from rest_framework.exceptions import ParseError
from polymorphic.models import PolymorphicModel
from ansible_base.lib.utils.models import prevent_search, get_type_for_model
from ansible_base.rbac import permission_registry
# AWX
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
@@ -198,7 +197,9 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
@classmethod
def _submodels_with_roles(cls):
return [c for c in cls.__subclasses__() if permission_registry.is_registered(c)]
ujt_classes = [c for c in cls.__subclasses__() if c._meta.model_name not in ['inventorysource', 'systemjobtemplate']]
ct_dict = ContentType.objects.get_for_models(*ujt_classes)
return [ct.id for ct in ct_dict.values()]
@classmethod
def accessible_pk_qs(cls, accessor, role_field):
@@ -214,16 +215,8 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
action = to_permissions[role_field]
# Special condition for super auditor
role_subclasses = cls._submodels_with_roles()
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
all_codenames = {f'{action}_{cls._meta.model_name}' for cls in role_subclasses}
if not (all_codenames - accessor.singleton_permissions()):
qs = cls.objects.filter(polymorphic_ctype__in=role_cts)
return qs.values_list('id', flat=True)
return (
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in role_cts])
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__startswith=action, content_type_id__in=cls._submodels_with_roles())
.values_list('object_id')
.distinct()
)

View File

@@ -138,8 +138,7 @@ class TaskBase:
# Lock
with task_manager_bulk_reschedule():
lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000 # convert to milliseconds
with advisory_lock(f"{self.prefix}_lock", lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired:
with advisory_lock(f"{self.prefix}_lock", wait=False) as acquired:
with transaction.atomic():
if acquired is False:
logger.debug(f"Not running {self.prefix} scheduler, another task holds lock")

View File

@@ -405,11 +405,10 @@ class AWXReceptorJob:
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
if settings.RECPETOR_KEEP_WORK_ON_ERROR and getattr(res, 'status', 'error') == 'error':
try:
receptor_ctl.simple_command(f"work release {self.unit_id}")
except Exception:
logger.exception(f"Error releasing work unit {self.unit_id}.")
try:
receptor_ctl.simple_command(f"work release {self.unit_id}")
except Exception:
logger.exception(f"Error releasing work unit {self.unit_id}.")
def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload

View File

@@ -54,7 +54,7 @@ from awx.main.models import (
Job,
convert_jsonfields,
)
from awx.main.constants import ACTIVE_STATES, ERROR_STATES
from awx.main.constants import ACTIVE_STATES
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename, reaper
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
@@ -685,8 +685,6 @@ def awx_receptor_workunit_reaper():
unit_ids = [id for id in receptor_work_list]
jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
if settings.RECEPTOR_KEEP_WORK_ON_ERROR:
jobs_with_unreleased_receptor_units = jobs_with_unreleased_receptor_units.exclude(status__in=ERROR_STATES)
for job in jobs_with_unreleased_receptor_units:
logger.debug(f"{job.log_format} is not active, reaping receptor work unit {job.work_unit_id}")
receptor_ctl.simple_command(f"work cancel {job.work_unit_id}")
@@ -706,10 +704,7 @@ def awx_k8s_reaper():
logger.debug("Checking for orphaned k8s pods for {}.".format(group))
pods = PodManager.list_active_jobs(group)
time_cutoff = now() - timedelta(seconds=settings.K8S_POD_REAPER_GRACE_PERIOD)
reap_job_candidates = UnifiedJob.objects.filter(pk__in=pods.keys(), finished__lte=time_cutoff).exclude(status__in=ACTIVE_STATES)
if settings.RECEPTOR_KEEP_WORK_ON_ERROR:
reap_job_candidates = reap_job_candidates.exclude(status__in=ERROR_STATES)
for job in reap_job_candidates:
for job in UnifiedJob.objects.filter(pk__in=pods.keys(), finished__lte=time_cutoff).exclude(status__in=ACTIVE_STATES):
logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
try:
pm = PodManager(job)
@@ -720,8 +715,7 @@ def awx_k8s_reaper():
@task(queue=get_task_queuename)
def awx_periodic_scheduler():
lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000
with advisory_lock('awx_periodic_scheduler_lock', lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired:
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
@@ -985,15 +979,5 @@ def periodic_resource_sync():
if acquired is False:
logger.debug("Not running periodic_resource_sync, another task holds lock")
return
logger.debug("Running periodic resource sync")
executor = SyncExecutor()
executor.run()
for key, item_list in executor.results.items():
if not item_list or key == 'noop':
continue
# Log creations and conflicts
if len(item_list) > 10 and settings.LOG_AGGREGATOR_LEVEL != 'DEBUG':
logger.info(f'Periodic resource sync {key}, first 10 items:\n{item_list[:10]}')
else:
logger.info(f'Periodic resource sync {key}:\n{item_list}')
SyncExecutor().run()

View File

@@ -1,30 +1,22 @@
import pytest
from unittest import mock
from awx.api.versioning import reverse
from django.test.utils import override_settings
from ansible_base.jwt_consumer.common.util import generate_x_trusted_proxy_header
from ansible_base.lib.testing.fixtures import rsa_keypair_factory, rsa_keypair # noqa: F401; pylint: disable=unused-import
class HeaderTrackingMiddleware(object):
def __init__(self):
self.environ = {}
def process_request(self, request):
pass
def process_response(self, request, response):
self.environ = request.environ
@pytest.mark.django_db
def test_proxy_ip_allowed(get, patch, admin):
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'system'})
patch(url, user=admin, data={'REMOTE_HOST_HEADERS': ['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST']})
class HeaderTrackingMiddleware(object):
environ = {}
def process_request(self, request):
pass
def process_response(self, request, response):
self.environ = request.environ
# By default, `PROXY_IP_ALLOWED_LIST` is disabled, so custom `REMOTE_HOST_HEADERS`
# should just pass through
middleware = HeaderTrackingMiddleware()
@@ -53,51 +45,6 @@ def test_proxy_ip_allowed(get, patch, admin):
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
@pytest.mark.django_db
class TestTrustedProxyAllowListIntegration:
@pytest.fixture
def url(self, patch, admin):
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'system'})
patch(url, user=admin, data={'REMOTE_HOST_HEADERS': ['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST']})
patch(url, user=admin, data={'PROXY_IP_ALLOWED_LIST': ['my.proxy.example.org']})
return url
@pytest.fixture
def middleware(self):
return HeaderTrackingMiddleware()
def test_x_trusted_proxy_valid_signature(self, get, admin, rsa_keypair, url, middleware): # noqa: F811
# Headers should NOT get deleted
headers = {
'HTTP_X_TRUSTED_PROXY': generate_x_trusted_proxy_header(rsa_keypair.private),
'HTTP_X_FROM_THE_LOAD_BALANCER': 'some-actual-ip',
}
with mock.patch('ansible_base.jwt_consumer.common.cache.JWTCache.get_key_from_cache', lambda self: None):
with override_settings(ANSIBLE_BASE_JWT_KEY=rsa_keypair.public, PROXY_IP_ALLOWED_LIST=[]):
get(url, user=admin, middleware=middleware, **headers)
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
def test_x_trusted_proxy_invalid_signature(self, get, admin, url, patch, middleware):
# Headers should NOT get deleted
headers = {
'HTTP_X_TRUSTED_PROXY': 'DEAD-BEEF',
'HTTP_X_FROM_THE_LOAD_BALANCER': 'some-actual-ip',
}
with override_settings(PROXY_IP_ALLOWED_LIST=[]):
get(url, user=admin, middleware=middleware, **headers)
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
def test_x_trusted_proxy_invalid_signature_valid_proxy(self, get, admin, url, middleware):
# A valid explicit proxy SHOULD result in sensitive headers NOT being deleted, regardless of the trusted proxy signature results
headers = {
'HTTP_X_TRUSTED_PROXY': 'DEAD-BEEF',
'REMOTE_ADDR': 'my.proxy.example.org',
'HTTP_X_FROM_THE_LOAD_BALANCER': 'some-actual-ip',
}
get(url, user=admin, middleware=middleware, **headers)
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
@pytest.mark.django_db
class TestDeleteViews:
def test_sublist_delete_permission_check(self, inventory_source, host, rando, delete):

View File

@@ -32,6 +32,13 @@ def node_type_instance():
return fn
@pytest.fixture
def instance_group(job_factory):
ig = InstanceGroup(name="east")
ig.save()
return ig
@pytest.fixture
def containerized_instance_group(instance_group, kube_credential):
ig = InstanceGroup(name="container")

View File

@@ -1,5 +1,4 @@
import pytest
from unittest import mock
# AWX
from awx.api.serializers import JobTemplateSerializer
@@ -9,15 +8,10 @@ from awx.main.migrations import _save_password_keys as save_password_keys
# Django
from django.apps import apps
from django.test.utils import override_settings
# DRF
from rest_framework.exceptions import ValidationError
# DAB
from ansible_base.jwt_consumer.common.util import generate_x_trusted_proxy_header
from ansible_base.lib.testing.fixtures import rsa_keypair_factory, rsa_keypair # noqa: F401; pylint: disable=unused-import
@pytest.mark.django_db
@pytest.mark.parametrize(
@@ -375,113 +369,3 @@ def test_job_template_missing_inventory(project, inventory, admin_user, post):
)
assert r.status_code == 400
assert "Cannot start automatically, an inventory is required." in str(r.data)
@pytest.mark.django_db
class TestJobTemplateCallbackProxyIntegration:
"""
Test the interaction of provision job template callback feature and:
settings.PROXY_IP_ALLOWED_LIST
x-trusted-proxy http header
"""
@pytest.fixture
def job_template(self, inventory, project):
jt = JobTemplate.objects.create(name='test-jt', inventory=inventory, project=project, playbook='helloworld.yml', host_config_key='abcd')
return jt
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=['my.proxy.example.org'])
def test_host_not_found(self, job_template, admin_user, post, rsa_keypair): # noqa: F811
job_template.inventory.hosts.create(name='foobar')
headers = {
'HTTP_X_FROM_THE_LOAD_BALANCER': 'baz',
'REMOTE_HOST': 'baz',
'REMOTE_ADDR': 'baz',
}
r = post(
url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}), data={'host_config_key': 'abcd'}, user=admin_user, expect=400, **headers
)
assert r.data['msg'] == 'No matching host could be found!'
@pytest.mark.parametrize(
'headers, expected',
(
pytest.param(
{
'HTTP_X_FROM_THE_LOAD_BALANCER': 'foobar',
'REMOTE_HOST': 'my.proxy.example.org',
},
201,
),
pytest.param(
{
'HTTP_X_FROM_THE_LOAD_BALANCER': 'foobar',
'REMOTE_HOST': 'not-my-proxy.org',
},
400,
),
),
)
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=['my.proxy.example.org'])
def test_proxy_ip_allowed_list(self, job_template, admin_user, post, headers, expected): # noqa: F811
job_template.inventory.hosts.create(name='my.proxy.example.org')
post(
url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
data={'host_config_key': 'abcd'},
user=admin_user,
expect=expected,
**headers
)
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=[])
def test_no_proxy_trust_all_headers(self, job_template, admin_user, post):
job_template.inventory.hosts.create(name='foobar')
headers = {
'HTTP_X_FROM_THE_LOAD_BALANCER': 'foobar',
'REMOTE_ADDR': 'bar',
'REMOTE_HOST': 'baz',
}
post(url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}), data={'host_config_key': 'abcd'}, user=admin_user, expect=201, **headers)
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=['my.proxy.example.org'])
def test_trusted_proxy(self, job_template, admin_user, post, rsa_keypair): # noqa: F811
job_template.inventory.hosts.create(name='foobar')
headers = {
'HTTP_X_TRUSTED_PROXY': generate_x_trusted_proxy_header(rsa_keypair.private),
'HTTP_X_FROM_THE_LOAD_BALANCER': 'foobar, my.proxy.example.org',
}
with mock.patch('ansible_base.jwt_consumer.common.cache.JWTCache.get_key_from_cache', lambda self: None):
with override_settings(ANSIBLE_BASE_JWT_KEY=rsa_keypair.public):
post(
url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
data={'host_config_key': 'abcd'},
user=admin_user,
expect=201,
**headers
)
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=['my.proxy.example.org'])
def test_trusted_proxy_host_not_found(self, job_template, admin_user, post, rsa_keypair): # noqa: F811
job_template.inventory.hosts.create(name='foobar')
headers = {
'HTTP_X_TRUSTED_PROXY': generate_x_trusted_proxy_header(rsa_keypair.private),
'HTTP_X_FROM_THE_LOAD_BALANCER': 'baz, my.proxy.example.org',
'REMOTE_ADDR': 'bar',
'REMOTE_HOST': 'baz',
}
with mock.patch('ansible_base.jwt_consumer.common.cache.JWTCache.get_key_from_cache', lambda self: None):
with override_settings(ANSIBLE_BASE_JWT_KEY=rsa_keypair.public):
post(
url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
data={'host_config_key': 'abcd'},
user=admin_user,
expect=400,
**headers
)

File diff suppressed because one or more lines are too long

View File

@@ -20,7 +20,7 @@ from awx.main.migrations._dab_rbac import setup_managed_role_definitions
# AWX
from awx.main.models.projects import Project
from awx.main.models.ha import Instance, InstanceGroup
from awx.main.models.ha import Instance
from rest_framework.test import (
APIRequestFactory,
@@ -92,11 +92,6 @@ def deploy_jobtemplate(project, inventory, credential):
return jt
@pytest.fixture()
def execution_environment():
return ExecutionEnvironment.objects.create(name="test-ee", description="test-ee", managed=True)
@pytest.fixture
def setup_managed_roles():
"Run the migration script to pre-create managed role definitions"
@@ -735,11 +730,6 @@ def jt_linked(organization, project, inventory, machine_credential, credential,
return jt
@pytest.fixture
def instance_group():
return InstanceGroup.objects.create(name="east")
@pytest.fixture
def workflow_job_template(organization):
wjt = WorkflowJobTemplate.objects.create(name='test-workflow_job_template', organization=organization)

View File

@@ -109,17 +109,3 @@ def test_team_indirect_access(get, team, admin_user, inventory):
assert len(by_username['u1']['summary_fields']['indirect_access']) == 0
access_entry = by_username['u1']['summary_fields']['direct_access'][0]
assert sorted(access_entry['descendant_roles']) == sorted(['adhoc_role', 'use_role', 'update_role', 'read_role', 'admin_role'])
@pytest.mark.django_db
def test_workflow_access_list(workflow_job_template, alice, bob, setup_managed_roles, get, admin_user):
"""Basic verification that WFJT access_list is functional"""
workflow_job_template.admin_role.members.add(alice)
workflow_job_template.organization.workflow_admin_role.members.add(bob)
url = reverse('api:workflow_job_template_access_list', kwargs={'pk': workflow_job_template.pk})
for u in (alice, bob, admin_user):
response = get(url, user=u, expect=200)
user_ids = [item['id'] for item in response.data['results']]
assert alice.pk in user_ids
assert bob.pk in user_ids

View File

@@ -1,41 +0,0 @@
import pytest
from awx.main.access import InstanceGroupAccess, NotificationTemplateAccess
from ansible_base.rbac.models import RoleDefinition
@pytest.mark.django_db
def test_instance_group_object_role_delete(rando, instance_group, setup_managed_roles):
"""Basic functionality of IG object-level admin role function AAP-25506"""
rd = RoleDefinition.objects.get(name='InstanceGroup Admin')
rd.give_permission(rando, instance_group)
access = InstanceGroupAccess(rando)
assert access.can_delete(instance_group)
@pytest.mark.django_db
def test_notification_template_object_role_change(rando, notification_template, setup_managed_roles):
"""Basic functionality of NT object-level admin role function AAP-25493"""
rd = RoleDefinition.objects.get(name='NotificationTemplate Admin')
rd.give_permission(rando, notification_template)
access = NotificationTemplateAccess(rando)
assert access.can_change(notification_template, {'name': 'new name'})
@pytest.mark.django_db
def test_organization_auditor_role(rando, setup_managed_roles, organization, inventory, project, jt_linked):
obj_list = (inventory, project, jt_linked)
for obj in obj_list:
assert obj.organization == organization, obj # sanity
assert [rando.has_obj_perm(obj, 'view') for obj in obj_list] == [False for i in range(3)], obj_list
rd = RoleDefinition.objects.get(name='Organization Audit')
rd.give_permission(rando, organization)
codename_set = set(rd.permissions.values_list('codename', flat=True))
assert not ({'view_inventory', 'view_jobtemplate', 'audit_organization'} - codename_set) # sanity
assert [obj in type(obj).access_qs(rando) for obj in obj_list] == [True for i in range(3)], obj_list
assert [rando.has_obj_perm(obj, 'view') for obj in obj_list] == [True for i in range(3)], obj_list

View File

@@ -2,11 +2,9 @@ import pytest
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse as django_reverse
from django.test.utils import override_settings
from awx.api.versioning import reverse
from awx.main.models import JobTemplate, Inventory, Organization
from awx.main.access import JobTemplateAccess, WorkflowJobTemplateAccess
from ansible_base.rbac.models import RoleDefinition
@@ -68,17 +66,13 @@ def test_assign_managed_role(admin_user, alice, rando, inventory, post, setup_ma
@pytest.mark.django_db
def test_assign_custom_delete_role(admin_user, rando, inventory, delete, patch):
# TODO: just a delete_inventory, without change_inventory
rd, _ = RoleDefinition.objects.get_or_create(
name='inventory-delete',
permissions=['delete_inventory', 'view_inventory', 'change_inventory'],
content_type=ContentType.objects.get_for_model(Inventory),
name='inventory-delete', permissions=['delete_inventory', 'view_inventory'], content_type=ContentType.objects.get_for_model(Inventory)
)
rd.give_permission(rando, inventory)
inv_id = inventory.pk
inv_url = reverse('api:inventory_detail', kwargs={'pk': inv_id})
# TODO: eventually this will be valid test, for now ignore
# patch(url=inv_url, data={"description": "new"}, user=rando, expect=403)
patch(url=inv_url, data={"description": "new"}, user=rando, expect=403)
delete(url=inv_url, user=rando, expect=202)
assert Inventory.objects.get(id=inv_id).pending_deletion
@@ -94,63 +88,3 @@ def test_assign_custom_add_role(admin_user, rando, organization, post, setup_man
inv_id = r.data['id']
inventory = Inventory.objects.get(id=inv_id)
assert rando.has_obj_perm(inventory, 'change')
@pytest.mark.django_db
def test_jt_creation_permissions(setup_managed_roles, inventory, project, rando):
"""This tests that if you assign someone required permissions in the new API
using the managed roles, then that works to give permissions to create a job template"""
inv_rd = RoleDefinition.objects.get(name='Inventory Admin')
proj_rd = RoleDefinition.objects.get(name='Project Admin')
# establish prior state
access = JobTemplateAccess(rando)
assert not access.can_add({'inventory': inventory.pk, 'project': project.pk, 'name': 'foo-jt'})
inv_rd.give_permission(rando, inventory)
proj_rd.give_permission(rando, project)
assert access.can_add({'inventory': inventory.pk, 'project': project.pk, 'name': 'foo-jt'})
@pytest.mark.django_db
def test_workflow_creation_permissions(setup_managed_roles, organization, workflow_job_template, rando):
"""Similar to JT, assigning new roles gives creator permissions"""
org_wf_rd = RoleDefinition.objects.get(name='Organization WorkflowJobTemplate Admin')
assert workflow_job_template.organization == organization # sanity
# establish prior state
access = WorkflowJobTemplateAccess(rando)
assert not access.can_add({'name': 'foo-flow', 'organization': organization.pk})
org_wf_rd.give_permission(rando, organization)
assert access.can_add({'name': 'foo-flow', 'organization': organization.pk})
@pytest.mark.django_db
def test_assign_credential_to_user_of_another_org(setup_managed_roles, credential, admin_user, rando, org_admin, organization, post):
'''Test that a credential can only be assigned to a user in the same organization'''
# cannot assign credential to rando, as rando is not in the same org as the credential
rd = RoleDefinition.objects.get(name="Credential Admin")
credential.organization = organization
credential.save(update_fields=['organization'])
assert credential.organization not in Organization.access_qs(rando, 'member')
url = django_reverse('roleuserassignment-list')
resp = post(url=url, data={"user": rando.id, "role_definition": rd.id, "object_id": credential.id}, user=admin_user, expect=400)
assert "You cannot grant credential access to a User not in the credentials' organization" in str(resp.data)
# can assign credential to superuser
rando.is_superuser = True
rando.save()
post(url=url, data={"user": rando.id, "role_definition": rd.id, "object_id": credential.id}, user=admin_user, expect=201)
# can assign credential to org_admin
assert credential.organization in Organization.access_qs(org_admin, 'member')
post(url=url, data={"user": org_admin.id, "role_definition": rd.id, "object_id": credential.id}, user=admin_user, expect=201)
@pytest.mark.django_db
@override_settings(ALLOW_LOCAL_RESOURCE_MANAGEMENT=False)
def test_team_member_role_not_assignable(team, rando, post, admin_user, setup_managed_roles):
member_rd = RoleDefinition.objects.get(name='Organization Member')
url = django_reverse('roleuserassignment-list')
r = post(url, data={'object_id': team.id, 'role_definition': member_rd.id, 'user': rando.id}, user=admin_user, expect=400)
assert 'Not managed locally' in str(r.data)

View File

@@ -1,120 +0,0 @@
import pytest
from django.apps import apps
from ansible_base.rbac.managed import SystemAuditor
from ansible_base.rbac import permission_registry
from awx.main.access import check_user_access, get_user_queryset
from awx.main.models import User, AdHocCommandEvent
from awx.api.versioning import reverse
@pytest.fixture
def ext_auditor_rd():
info = SystemAuditor(overrides={'name': 'Alien Auditor', 'shortname': 'ext_auditor'})
rd, _ = info.get_or_create(apps)
return rd
@pytest.fixture
def ext_auditor(ext_auditor_rd):
u = User.objects.create(username='external-auditor-user')
ext_auditor_rd.give_global_permission(u)
return u
@pytest.fixture
def obj_factory(request):
def _rf(fixture_name):
obj = request.getfixturevalue(fixture_name)
# special case to make obj organization-scoped
if obj._meta.model_name == 'executionenvironment':
obj.organization = request.getfixturevalue('organization')
obj.save(update_fields=['organization'])
return obj
return _rf
@pytest.mark.django_db
def test_access_qs_external_auditor(ext_auditor_rd, rando, job_template):
ext_auditor_rd.give_global_permission(rando)
jt_cls = apps.get_model('main', 'JobTemplate')
ujt_cls = apps.get_model('main', 'UnifiedJobTemplate')
assert job_template in jt_cls.access_qs(rando)
assert job_template.id in jt_cls.access_ids_qs(rando)
assert job_template.id in ujt_cls.accessible_pk_qs(rando, 'read_role')
@pytest.mark.django_db
@pytest.mark.parametrize('model', sorted(permission_registry.all_registered_models, key=lambda cls: cls._meta.model_name))
class TestExternalAuditorRoleAllModels:
def test_access_can_read_method(self, obj_factory, model, ext_auditor, rando):
fixture_name = model._meta.verbose_name.replace(' ', '_')
obj = obj_factory(fixture_name)
assert check_user_access(rando, model, 'read', obj) is False
assert check_user_access(ext_auditor, model, 'read', obj) is True
def test_access_get_queryset(self, obj_factory, model, ext_auditor, rando):
fixture_name = model._meta.verbose_name.replace(' ', '_')
obj = obj_factory(fixture_name)
assert obj not in get_user_queryset(rando, model)
assert obj in get_user_queryset(ext_auditor, model)
def test_global_list(self, obj_factory, model, ext_auditor, rando, get):
fixture_name = model._meta.verbose_name.replace(' ', '_')
obj_factory(fixture_name)
url = reverse(f'api:{fixture_name}_list')
r = get(url, user=rando, expect=200)
initial_ct = r.data['count']
r = get(url, user=ext_auditor, expect=200)
assert r.data['count'] == initial_ct + 1
if fixture_name in ('job_template', 'workflow_job_template'):
url = reverse('api:unified_job_template_list')
r = get(url, user=rando, expect=200)
initial_ct = r.data['count']
r = get(url, user=ext_auditor, expect=200)
assert r.data['count'] == initial_ct + 1
def test_detail_view(self, obj_factory, model, ext_auditor, rando, get):
fixture_name = model._meta.verbose_name.replace(' ', '_')
obj = obj_factory(fixture_name)
url = reverse(f'api:{fixture_name}_detail', kwargs={'pk': obj.pk})
get(url, user=rando, expect=403) # NOTE: should be 401
get(url, user=ext_auditor, expect=200)
@pytest.mark.django_db
class TestExternalAuditorNonRoleModels:
def test_ad_hoc_command_view(self, ad_hoc_command_factory, rando, ext_auditor, get):
"""The AdHocCommandAccess class references is_system_auditor
this is to prove it works with other system-level view roles"""
ad_hoc_command = ad_hoc_command_factory()
url = reverse('api:ad_hoc_command_list')
r = get(url, user=rando, expect=200)
assert r.data['count'] == 0
r = get(url, user=ext_auditor, expect=200)
assert r.data['count'] == 1
assert r.data['results'][0]['id'] == ad_hoc_command.id
event = AdHocCommandEvent.objects.create(ad_hoc_command=ad_hoc_command)
url = reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': ad_hoc_command.id})
r = get(url, user=rando, expect=403)
r = get(url, user=ext_auditor, expect=200)
assert r.data['count'] == 1
url = reverse('api:ad_hoc_command_event_detail', kwargs={'pk': event.id})
r = get(url, user=rando, expect=403)
r = get(url, user=ext_auditor, expect=200)
assert r.data['id'] == event.id

View File

@@ -1,31 +0,0 @@
import pytest
from ansible_base.rbac.models import RoleDefinition, DABPermission
@pytest.mark.django_db
def test_roles_to_not_create(setup_managed_roles):
assert RoleDefinition.objects.filter(name='Organization Admin').count() == 1
SHOULD_NOT_EXIST = ('Organization Organization Admin', 'Organization Team Admin', 'Organization InstanceGroup Admin')
bad_rds = RoleDefinition.objects.filter(name__in=SHOULD_NOT_EXIST)
if bad_rds.exists():
bad_names = list(bad_rds.values_list('name', flat=True))
raise Exception(f'Found RoleDefinitions that should not exist: {bad_names}')
@pytest.mark.django_db
def test_project_update_role(setup_managed_roles):
"""Role to allow updating a project on the object-level should exist"""
assert RoleDefinition.objects.filter(name='Project Update').count() == 1
@pytest.mark.django_db
def test_org_child_add_permission(setup_managed_roles):
for model_name in ('Project', 'NotificationTemplate', 'WorkflowJobTemplate', 'Inventory'):
rd = RoleDefinition.objects.get(name=f'Organization {model_name} Admin')
assert 'add_' in str(rd.permissions.values_list('codename', flat=True)), f'The {rd.name} role definition expected to contain add_ permissions'
# special case for JobTemplate, anyone can create one with use permission to project/inventory
assert not DABPermission.objects.filter(codename='add_jobtemplate').exists()

View File

@@ -1,5 +1,4 @@
from unittest import mock
import json
import pytest
@@ -7,29 +6,17 @@ from django.contrib.contenttypes.models import ContentType
from crum import impersonate
from awx.main.fields import ImplicitRoleField
from awx.main.models.rbac import get_role_from_object_role, give_creator_permissions, get_role_codenames, get_role_definition
from awx.main.models.rbac import get_role_from_object_role, give_creator_permissions
from awx.main.models import User, Organization, WorkflowJobTemplate, WorkflowJobTemplateNode, Team
from awx.api.versioning import reverse
from ansible_base.rbac.models import RoleUserAssignment, RoleDefinition
from ansible_base.rbac import permission_registry
@pytest.mark.django_db
@pytest.mark.parametrize(
'role_name',
[
'execution_environment_admin_role',
'workflow_admin_role',
'project_admin_role',
'admin_role',
'auditor_role',
'read_role',
'execute_role',
'approval_role',
'notification_admin_role',
],
['execution_environment_admin_role', 'project_admin_role', 'admin_role', 'auditor_role', 'read_role', 'execute_role', 'notification_admin_role'],
)
def test_round_trip_roles(organization, rando, role_name, setup_managed_roles):
"""
@@ -39,41 +26,11 @@ def test_round_trip_roles(organization, rando, role_name, setup_managed_roles):
"""
getattr(organization, role_name).members.add(rando)
assignment = RoleUserAssignment.objects.get(user=rando)
print(assignment.role_definition.name)
old_role = get_role_from_object_role(assignment.object_role)
assert old_role.id == getattr(organization, role_name).id
@pytest.mark.django_db
@pytest.mark.parametrize('model', sorted(permission_registry.all_registered_models, key=lambda cls: cls._meta.model_name))
def test_role_migration_matches(request, model, setup_managed_roles):
fixture_name = model._meta.verbose_name.replace(' ', '_')
obj = request.getfixturevalue(fixture_name)
role_ct = 0
for field in obj._meta.get_fields():
if isinstance(field, ImplicitRoleField):
if field.name == 'read_role':
continue # intentionally left as "Compat" roles
role_ct += 1
old_role = getattr(obj, field.name)
old_codenames = set(get_role_codenames(old_role))
rd = get_role_definition(old_role)
new_codenames = set(rd.permissions.values_list('codename', flat=True))
# all the old roles should map to a non-Compat role definition
if 'Compat' not in rd.name:
model_rds = RoleDefinition.objects.filter(content_type=ContentType.objects.get_for_model(obj))
rd_data = {}
for rd in model_rds:
rd_data[rd.name] = list(rd.permissions.values_list('codename', flat=True))
assert (
'Compat' not in rd.name
), f'Permissions for old vs new roles did not match.\nold {field.name}: {old_codenames}\nnew:\n{json.dumps(rd_data, indent=2)}'
assert new_codenames == set(old_codenames)
# In the old system these models did not have object-level roles, all others expect some model roles
if model._meta.model_name not in ('notificationtemplate', 'executionenvironment'):
assert role_ct > 0
@pytest.mark.django_db
def test_role_naming(setup_managed_roles):
qs = RoleDefinition.objects.filter(content_type=ContentType.objects.get(model='jobtemplate'), name__endswith='dmin')
@@ -184,11 +141,3 @@ def test_implicit_parents_no_assignments(organization):
with mock.patch('awx.main.models.rbac.give_or_remove_permission') as mck:
Team.objects.create(name='random team', organization=organization)
mck.assert_not_called()
@pytest.mark.django_db
def test_user_auditor_rel(organization, rando, setup_managed_roles):
assert rando not in organization.auditor_role
audit_rd = RoleDefinition.objects.get(name='Organization Audit')
audit_rd.give_permission(rando, organization)
assert list(rando.auditor_of_organizations) == [organization]

View File

@@ -4,19 +4,25 @@ import pytest
# CRUM
from crum import impersonate
# Django
from django.contrib.contenttypes.models import ContentType
# AWX
from awx.main.models import UnifiedJobTemplate, Job, JobTemplate, WorkflowJobTemplate, Project, WorkflowJob, Schedule, Credential
from awx.main.models import UnifiedJobTemplate, Job, JobTemplate, WorkflowJobTemplate, WorkflowApprovalTemplate, Project, WorkflowJob, Schedule, Credential
from awx.api.versioning import reverse
from awx.main.constants import JOB_VARIABLE_PREFIXES
@pytest.mark.django_db
def test_subclass_types():
assert set(UnifiedJobTemplate._submodels_with_roles()) == {
JobTemplate,
Project,
WorkflowJobTemplate,
}
assert set(UnifiedJobTemplate._submodels_with_roles()) == set(
[
ContentType.objects.get_for_model(JobTemplate).id,
ContentType.objects.get_for_model(Project).id,
ContentType.objects.get_for_model(WorkflowJobTemplate).id,
ContentType.objects.get_for_model(WorkflowApprovalTemplate).id,
]
)
@pytest.mark.django_db

View File

@@ -85,17 +85,3 @@ class TestMigrationSmoke:
RoleUserAssignment = new_state.apps.get_model('dab_rbac', 'RoleUserAssignment')
assert RoleUserAssignment.objects.filter(user=user.id, object_id=org.id).exists()
# Regression testing for bug that comes from current vs past models mismatch
RoleDefinition = new_state.apps.get_model('dab_rbac', 'RoleDefinition')
assert not RoleDefinition.objects.filter(name='Organization Organization Admin').exists()
# Test special cases in managed role creation
assert not RoleDefinition.objects.filter(name='Organization Team Admin').exists()
assert not RoleDefinition.objects.filter(name='Organization InstanceGroup Admin').exists()
# Test that a removed EE model permission has been deleted
new_state = migrator.apply_tested_migration(
('main', '0195_EE_permissions'),
)
DABPermission = new_state.apps.get_model('dab_rbac', 'DABPermission')
assert not DABPermission.objects.filter(codename='view_executionenvironment').exists()

View File

@@ -1,148 +0,0 @@
import pytest
from django.contrib.contenttypes.models import ContentType
from awx.main.access import ExecutionEnvironmentAccess
from awx.main.models import ExecutionEnvironment, Organization, Team
from awx.main.models.rbac import get_role_codenames
from awx.api.versioning import reverse
from django.urls import reverse as django_reverse
from ansible_base.rbac.models import RoleDefinition
@pytest.fixture
def ee_rd():
return RoleDefinition.objects.create_from_permissions(
name='EE object admin',
permissions=['change_executionenvironment', 'delete_executionenvironment'],
content_type=ContentType.objects.get_for_model(ExecutionEnvironment),
)
@pytest.fixture
def org_ee_rd():
return RoleDefinition.objects.create_from_permissions(
name='EE org admin',
permissions=['add_executionenvironment', 'change_executionenvironment', 'delete_executionenvironment', 'view_organization'],
content_type=ContentType.objects.get_for_model(Organization),
)
@pytest.mark.django_db
def test_old_ee_role_maps_to_correct_permissions(organization):
assert set(get_role_codenames(organization.execution_environment_admin_role)) == {
'view_organization',
'add_executionenvironment',
'change_executionenvironment',
'delete_executionenvironment',
}
@pytest.fixture
def org_ee(organization):
return ExecutionEnvironment.objects.create(name='some user ee', organization=organization)
@pytest.fixture
def check_user_capabilities(get, setup_managed_roles):
def _rf(user, obj, expected):
url = reverse('api:execution_environment_list')
r = get(url, user=user, expect=200)
for item in r.data['results']:
if item['id'] == obj.pk:
assert expected == item['summary_fields']['user_capabilities']
break
else:
raise RuntimeError(f'Could not find expected object ({obj}) in EE list result: {r.data}')
return _rf
# ___ begin tests ___
@pytest.mark.django_db
def test_any_user_can_view_global_ee(control_plane_execution_environment, rando):
assert ExecutionEnvironmentAccess(rando).can_read(control_plane_execution_environment)
@pytest.mark.django_db
def test_managed_ee_not_assignable(control_plane_execution_environment, ee_rd, rando, admin_user, post):
url = django_reverse('roleuserassignment-list')
r = post(url, {'role_definition': ee_rd.pk, 'user': rando.id, 'object_id': control_plane_execution_environment.pk}, user=admin_user, expect=400)
assert 'Can not assign object roles to managed Execution Environment' in str(r.data)
@pytest.mark.django_db
def test_org_member_required_for_assignment(org_ee, ee_rd, rando, admin_user, post):
url = django_reverse('roleuserassignment-list')
r = post(url, {'role_definition': ee_rd.pk, 'user': rando.id, 'object_id': org_ee.pk}, user=admin_user, expect=400)
assert 'User must have view permission to Execution Environment organization' in str(r.data)
@pytest.mark.django_db
def test_team_can_have_permission(org_ee, ee_rd, rando, admin_user, post):
org2 = Organization.objects.create(name='a different team')
team = Team.objects.create(name='a team', organization=org2)
team.member_role.members.add(rando)
assert org_ee not in ExecutionEnvironmentAccess(rando).get_queryset() # user can not view the EE
url = django_reverse('roleteamassignment-list')
# can give object roles to the team now
post(url, {'role_definition': ee_rd.pk, 'team': team.id, 'object_id': org_ee.pk}, user=admin_user, expect=201)
assert rando.has_obj_perm(org_ee, 'change')
assert org_ee in ExecutionEnvironmentAccess(rando).get_queryset() # user can view the EE now
@pytest.mark.django_db
def test_give_object_permission_to_ee(org_ee, ee_rd, org_member, check_user_capabilities):
access = ExecutionEnvironmentAccess(org_member)
assert access.can_read(org_ee) # by virtue of being an org member
assert not access.can_change(org_ee, {'name': 'new'})
check_user_capabilities(org_member, org_ee, {'edit': False, 'delete': False, 'copy': False})
ee_rd.give_permission(org_member, org_ee)
assert access.can_change(org_ee, {'name': 'new', 'organization': org_ee.organization.id})
check_user_capabilities(org_member, org_ee, {'edit': True, 'delete': True, 'copy': False})
@pytest.mark.django_db
def test_need_related_organization_access(org_ee, ee_rd, org_member):
org2 = Organization.objects.create(name='another organization')
ee_rd.give_permission(org_member, org_ee)
org2.member_role.members.add(org_member)
access = ExecutionEnvironmentAccess(org_member)
assert access.can_change(org_ee, {'name': 'new', 'organization': org_ee.organization})
assert access.can_change(org_ee, {'name': 'new', 'organization': org_ee.organization.id})
assert not access.can_change(org_ee, {'name': 'new', 'organization': org2.id})
assert not access.can_change(org_ee, {'name': 'new', 'organization': org2})
# User can make the change if they have relevant permission to the new organization
org_ee.organization.execution_environment_admin_role.members.add(org_member)
org2.execution_environment_admin_role.members.add(org_member)
assert access.can_change(org_ee, {'name': 'new', 'organization': org2.id})
assert access.can_change(org_ee, {'name': 'new', 'organization': org2})
@pytest.mark.django_db
@pytest.mark.parametrize('style', ['new', 'old'])
def test_give_org_permission_to_ee(org_ee, organization, org_member, check_user_capabilities, style, org_ee_rd):
access = ExecutionEnvironmentAccess(org_member)
assert not access.can_change(org_ee, {'name': 'new'})
check_user_capabilities(org_member, org_ee, {'edit': False, 'delete': False, 'copy': False})
if style == 'new':
org_ee_rd.give_permission(org_member, organization)
assert org_member.has_obj_perm(org_ee.organization, 'add_executionenvironment') # sanity
else:
organization.execution_environment_admin_role.members.add(org_member)
assert access.can_change(org_ee, {'name': 'new', 'organization': organization.id})
check_user_capabilities(org_member, org_ee, {'edit': True, 'delete': True, 'copy': True})
# Extra check, user can not remove the EE from the organization
assert not access.can_change(org_ee, {'name': 'new', 'organization': None})

View File

@@ -182,14 +182,8 @@ def test_job_template_creator_access(project, organization, rando, post, setup_m
@pytest.mark.django_db
@pytest.mark.job_permissions
@pytest.mark.parametrize(
'lacking,reason',
[
('project', 'You do not have use permission on Project'),
('inventory', 'You do not have use permission on Inventory'),
],
)
def test_job_template_insufficient_creator_permissions(lacking, reason, project, inventory, organization, rando, post):
@pytest.mark.parametrize('lacking', ['project', 'inventory'])
def test_job_template_insufficient_creator_permissions(lacking, project, inventory, organization, rando, post):
if lacking != 'project':
project.use_role.members.add(rando)
else:
@@ -198,13 +192,12 @@ def test_job_template_insufficient_creator_permissions(lacking, reason, project,
inventory.use_role.members.add(rando)
else:
inventory.read_role.members.add(rando)
response = post(
post(
url=reverse('api:job_template_list'),
data=dict(name='newly-created-jt', inventory=inventory.id, project=project.pk, playbook='helloworld.yml'),
user=rando,
expect=403,
)
assert reason in response.data[lacking]
@pytest.mark.django_db

View File

@@ -99,9 +99,7 @@ def test_notification_template_access_org_user(notification_template, user):
@pytest.mark.django_db
def test_notificaiton_template_orphan_access_org_admin(notification_template, organization, org_admin):
notification_template.organization = None
notification_template.save(update_fields=['organization'])
access = NotificationTemplateAccess(org_admin)
assert not org_admin.has_obj_perm(notification_template, 'change')
assert not access.can_change(notification_template, {'organization': organization.id})

View File

@@ -48,17 +48,3 @@ def test_org_resource_role(ext_auth, organization, rando, org_admin):
assert access.can_attach(organization, rando, 'member_role.members') == ext_auth
organization.member_role.members.add(rando)
assert access.can_unattach(organization, rando, 'member_role.members') == ext_auth
@pytest.mark.django_db
def test_delete_org_while_workflow_active(workflow_job_template):
'''
Delete org while workflow job is active (i.e. changing status)
'''
assert workflow_job_template.organization # sanity check
wj = workflow_job_template.create_unified_job() # status should be new
workflow_job_template.organization.delete()
wj.refresh_from_db()
assert wj.status != 'pending' # sanity check
wj.status = 'pending' # status needs to change in order to trigger workflow_job_template.save()
wj.save(update_fields=['status'])

View File

@@ -35,13 +35,6 @@ class TestWorkflowJobTemplateAccess:
assert org_member in wfjt.execute_role
assert org_member in wfjt.read_role
def test_non_super_admin_no_add_without_org(self, wfjt, organization, rando):
organization.member_role.members.add(rando)
wfjt.admin_role.members.add(rando)
access = WorkflowJobTemplateAccess(rando, save_messages=True)
assert not access.can_add({'name': 'without org'})
assert 'An organization is required to create a workflow job template for normal user' in access.messages['organization']
@pytest.mark.django_db
class TestWorkflowJobTemplateNodeAccess:

View File

@@ -8,22 +8,9 @@ from django.db import connection
@contextmanager
def advisory_lock(*args, lock_session_timeout_milliseconds=0, **kwargs):
def advisory_lock(*args, **kwargs):
if connection.vendor == 'postgresql':
cur = None
idle_in_transaction_session_timeout = None
idle_session_timeout = None
if lock_session_timeout_milliseconds > 0:
with connection.cursor() as cur:
idle_in_transaction_session_timeout = cur.execute('SHOW idle_in_transaction_session_timeout').fetchone()[0]
idle_session_timeout = cur.execute('SHOW idle_session_timeout').fetchone()[0]
cur.execute(f"SET idle_in_transaction_session_timeout = '{lock_session_timeout_milliseconds}'")
cur.execute(f"SET idle_session_timeout = '{lock_session_timeout_milliseconds}'")
with django_pglocks_advisory_lock(*args, **kwargs) as internal_lock:
yield internal_lock
if lock_session_timeout_milliseconds > 0:
with connection.cursor() as cur:
cur.execute(f"SET idle_in_transaction_session_timeout = '{idle_in_transaction_session_timeout}'")
cur.execute(f"SET idle_session_timeout = '{idle_session_timeout}'")
else:
yield True

View File

@@ -1,48 +0,0 @@
# Copyright (c) 2024 Ansible, Inc.
# All Rights Reserved.
# DRF
from rest_framework.request import Request
"""
Note that these methods operate on request.environ. This data is from uwsgi.
It is the source data from which request.headers (read-only) is constructed.
"""
def is_proxy_in_headers(request: Request, proxy_list: list[str], headers: list[str]) -> bool:
"""
Determine if the request went through at least one proxy in the list.
Example:
request.environ = {
"HTTP_X_FOO": "8.8.8.8, 192.168.2.1",
"REMOTE_ADDR": "192.168.2.1",
"REMOTE_HOST": "foobar"
}
proxy_list = ["192.168.2.1"]
headers = ["HTTP_X_FOO", "REMOTE_ADDR", "REMOTE_HOST"]
The above would return True since 192.168.2.1 is a value for the header HTTP_X_FOO
request: The DRF/Django request. request.environ dict will be used for searching for proxies
proxy_list: A list of known and trusted proxies may be ip or hostnames
headers: A list of keys for which to consider values that may contain a proxy
"""
remote_hosts = set()
for header in headers:
for value in request.environ.get(header, '').split(','):
value = value.strip()
if value:
remote_hosts.add(value)
return bool(remote_hosts.intersection(set(proxy_list)))
def delete_headers_starting_with_http(request: Request, headers: list[str]):
for header in headers:
if header.startswith('HTTP_'):
request.environ.pop(header, None)

View File

@@ -47,6 +47,7 @@ class WebsocketRelayConnection:
verify_ssl: bool = settings.BROADCAST_WEBSOCKET_VERIFY_CERT,
):
self.name = name
self.event_loop = asyncio.get_event_loop()
self.stats = stats
self.remote_host = remote_host
self.remote_port = remote_port
@@ -109,10 +110,7 @@ class WebsocketRelayConnection:
self.stats.record_connection_lost()
def start(self):
self.async_task = asyncio.get_running_loop().create_task(
self.connect(),
name=f"WebsocketRelayConnection.connect.{self.name}",
)
self.async_task = self.event_loop.create_task(self.connect())
return self.async_task
def cancel(self):
@@ -123,10 +121,7 @@ class WebsocketRelayConnection:
# metrics messages
# the "metrics" group is not subscribed to in the typical fashion, so we
# just explicitly create it
producer = asyncio.get_running_loop().create_task(
self.run_producer("metrics", websocket, "metrics"),
name="WebsocketRelayConnection.run_producer.metrics",
)
producer = self.event_loop.create_task(self.run_producer("metrics", websocket, "metrics"))
self.producers["metrics"] = {"task": producer, "subscriptions": {"metrics"}}
async for msg in websocket:
self.stats.record_message_received()
@@ -148,10 +143,7 @@ class WebsocketRelayConnection:
name = f"{self.remote_host}-{group}"
origin_channel = payload['origin_channel']
if not self.producers.get(name):
producer = asyncio.get_running_loop().create_task(
self.run_producer(name, websocket, group),
name=f"WebsocketRelayConnection.run_producer.{name}",
)
producer = self.event_loop.create_task(self.run_producer(name, websocket, group))
self.producers[name] = {"task": producer, "subscriptions": {origin_channel}}
logger.debug(f"Producer {name} started.")
else:
@@ -305,15 +297,16 @@ class WebSocketRelayManager(object):
pass
async def run(self):
self.stats_mgr = RelayWebsocketStatsManager(self.local_hostname)
event_loop = asyncio.get_running_loop()
self.stats_mgr = RelayWebsocketStatsManager(event_loop, self.local_hostname)
self.stats_mgr.start()
database_conf = deepcopy(settings.DATABASES['default'])
database_conf['OPTIONS'] = deepcopy(database_conf.get('OPTIONS', {}))
for k, v in settings.LISTENER_DATABASES.get('default', {}).items():
if k != 'OPTIONS':
database_conf[k] = v
database_conf[k] = v
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
database_conf['OPTIONS'][k] = v
@@ -329,10 +322,7 @@ class WebSocketRelayManager(object):
)
await async_conn.set_autocommit(True)
on_ws_heartbeat_task = asyncio.get_running_loop().create_task(
self.on_ws_heartbeat(async_conn),
name="WebSocketRelayManager.on_ws_heartbeat",
)
on_ws_heartbeat_task = event_loop.create_task(self.on_ws_heartbeat(async_conn))
# Establishes a websocket connection to /websocket/relay on all API servers
while True:

View File

@@ -262,7 +262,6 @@ START_TASK_LIMIT = 100
# We have the grace period so the task manager can bail out before the timeout.
TASK_MANAGER_TIMEOUT = 300
TASK_MANAGER_TIMEOUT_GRACE_PERIOD = 60
TASK_MANAGER_LOCK_TIMEOUT = TASK_MANAGER_TIMEOUT + TASK_MANAGER_TIMEOUT_GRACE_PERIOD
# Number of seconds _in addition to_ the task manager timeout a job can stay
# in waiting without being reaped
@@ -828,7 +827,7 @@ MANAGE_ORGANIZATION_AUTH = True
DISABLE_LOCAL_AUTH = False
# Note: This setting may be overridden by database settings.
TOWER_URL_BASE = "https://platformhost"
TOWER_URL_BASE = "https://towerhost"
INSIGHTS_URL_BASE = "https://example.org"
INSIGHTS_AGENT_MIME = 'application/example'
@@ -1009,7 +1008,6 @@ AWX_RUNNER_KEEPALIVE_SECONDS = 0
# Delete completed work units in receptor
RECEPTOR_RELEASE_WORK = True
RECPETOR_KEEP_WORK_ON_ERROR = False
# K8S only. Use receptor_log_level on AWX spec to set this properly
RECEPTOR_LOG_LEVEL = 'info'

View File

@@ -64,7 +64,7 @@
<div class="col-sm-6">
</div>
<div class="col-sm-6 footer-copyright">
Copyright &copy; 2024 <a href="http://www.redhat.com" target="_blank">Red Hat</a>, Inc. All Rights Reserved.
Copyright &copy; 2021 <a href="http://www.redhat.com" target="_blank">Red Hat</a>, Inc. All Rights Reserved.
</div>
</div>
</div>

View File

@@ -59,7 +59,7 @@ function ActivityStream() {
{
page: 1,
page_size: 20,
order_by: '-id',
order_by: '-timestamp',
},
['id', 'page', 'page_size']
);

View File

@@ -89,7 +89,7 @@
"LC_ALL": "en_US.UTF-8",
"MFLAGS": "-w",
"OLDPWD": "/awx_devel",
"AWX_HOST": "https://platformhost",
"AWX_HOST": "https://towerhost",
"HOSTNAME": "awx",
"LANGUAGE": "en_US:en",
"SDB_HOST": "0.0.0.0",

View File

@@ -89,7 +89,7 @@
"LC_ALL": "en_US.UTF-8",
"MFLAGS": "-w",
"OLDPWD": "/awx_devel",
"AWX_HOST": "https://platformhost",
"AWX_HOST": "https://towerhost",
"HOSTNAME": "awx",
"LANGUAGE": "en_US:en",
"SDB_HOST": "0.0.0.0",

View File

@@ -164,7 +164,7 @@
"ANSIBLE_RETRY_FILES_ENABLED": "False",
"MAX_EVENT_RES": "700000",
"ANSIBLE_CALLBACK_PLUGINS": "/awx_devel/awx/plugins/callback",
"AWX_HOST": "https://platformhost",
"AWX_HOST": "https://towerhost",
"ANSIBLE_SSH_CONTROL_PATH_DIR": "/tmp/awx_2_a4b1afiw/cp",
"ANSIBLE_STDOUT_CALLBACK": "awx_display"
},

View File

@@ -16,7 +16,7 @@ describe('<AzureAD />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_AZUREAD_OAUTH2_CALLBACK_URL:
'https://platformhost/sso/complete/azuread-oauth2/',
'https://towerhost/sso/complete/azuread-oauth2/',
SOCIAL_AUTH_AZUREAD_OAUTH2_KEY: 'mock key',
SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET: '$encrypted$',
SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP: {},

View File

@@ -22,7 +22,7 @@ describe('<AzureADDetail />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_AZUREAD_OAUTH2_CALLBACK_URL:
'https://platformhost/sso/complete/azuread-oauth2/',
'https://towerhost/sso/complete/azuread-oauth2/',
SOCIAL_AUTH_AZUREAD_OAUTH2_KEY: 'mock key',
SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET: '$encrypted$',
SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP: {},
@@ -62,7 +62,7 @@ describe('<AzureADDetail />', () => {
assertDetail(
wrapper,
'Azure AD OAuth2 Callback URL',
'https://platformhost/sso/complete/azuread-oauth2/'
'https://towerhost/sso/complete/azuread-oauth2/'
);
assertDetail(wrapper, 'Azure AD OAuth2 Key', 'mock key');
assertDetail(wrapper, 'Azure AD OAuth2 Secret', 'Encrypted');

View File

@@ -22,7 +22,7 @@ describe('<AzureADEdit />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_AZUREAD_OAUTH2_CALLBACK_URL:
'https://platformhost/sso/complete/azuread-oauth2/',
'https://towerhost/sso/complete/azuread-oauth2/',
SOCIAL_AUTH_AZUREAD_OAUTH2_KEY: 'mock key',
SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET: '$encrypted$',
SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP: {},

View File

@@ -19,7 +19,7 @@ describe('<GitHub />', () => {
SettingsAPI.readCategory.mockResolvedValueOnce({
data: {
SOCIAL_AUTH_GITHUB_CALLBACK_URL:
'https://platformhost/sso/complete/github/',
'https://towerhost/sso/complete/github/',
SOCIAL_AUTH_GITHUB_KEY: 'mock github key',
SOCIAL_AUTH_GITHUB_SECRET: '$encrypted$',
SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP: null,
@@ -29,7 +29,7 @@ describe('<GitHub />', () => {
SettingsAPI.readCategory.mockResolvedValueOnce({
data: {
SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL:
'https://platformhost/sso/complete/github-org/',
'https://towerhost/sso/complete/github-org/',
SOCIAL_AUTH_GITHUB_ORG_KEY: '',
SOCIAL_AUTH_GITHUB_ORG_SECRET: '$encrypted$',
SOCIAL_AUTH_GITHUB_ORG_NAME: '',
@@ -40,7 +40,7 @@ describe('<GitHub />', () => {
SettingsAPI.readCategory.mockResolvedValueOnce({
data: {
SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL:
'https://platformhost/sso/complete/github-team/',
'https://towerhost/sso/complete/github-team/',
SOCIAL_AUTH_GITHUB_TEAM_KEY: 'OAuth2 key (Client ID)',
SOCIAL_AUTH_GITHUB_TEAM_SECRET: '$encrypted$',
SOCIAL_AUTH_GITHUB_TEAM_ID: 'team_id',
@@ -51,7 +51,7 @@ describe('<GitHub />', () => {
SettingsAPI.readCategory.mockResolvedValueOnce({
data: {
SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL:
'https://platformhost/sso/complete/github-enterprise/',
'https://towerhost/sso/complete/github-enterprise/',
SOCIAL_AUTH_GITHUB_ENTERPRISE_URL: 'https://localhost/url',
SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL: 'https://localhost/apiurl',
SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY: 'ent_key',
@@ -63,7 +63,7 @@ describe('<GitHub />', () => {
SettingsAPI.readCategory.mockResolvedValueOnce({
data: {
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_CALLBACK_URL:
'https://platformhost/sso/complete/github-enterprise-org/',
'https://towerhost/sso/complete/github-enterprise-org/',
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL: 'https://localhost/url',
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL: 'https://localhost/apiurl',
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY: 'ent_org_key',
@@ -76,7 +76,7 @@ describe('<GitHub />', () => {
SettingsAPI.readCategory.mockResolvedValueOnce({
data: {
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_CALLBACK_URL:
'https://platformhost/sso/complete/github-enterprise-team/',
'https://towerhost/sso/complete/github-enterprise-team/',
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL: 'https://localhost/url',
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL: 'https://localhost/apiurl',
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY: 'ent_team_key',

View File

@@ -22,8 +22,7 @@ jest.mock('../../../../api');
const mockDefault = {
data: {
SOCIAL_AUTH_GITHUB_CALLBACK_URL:
'https://platformhost/sso/complete/github/',
SOCIAL_AUTH_GITHUB_CALLBACK_URL: 'https://towerhost/sso/complete/github/',
SOCIAL_AUTH_GITHUB_KEY: 'mock github key',
SOCIAL_AUTH_GITHUB_SECRET: '$encrypted$',
SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP: null,
@@ -33,7 +32,7 @@ const mockDefault = {
const mockOrg = {
data: {
SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL:
'https://platformhost/sso/complete/github-org/',
'https://towerhost/sso/complete/github-org/',
SOCIAL_AUTH_GITHUB_ORG_KEY: '',
SOCIAL_AUTH_GITHUB_ORG_SECRET: '$encrypted$',
SOCIAL_AUTH_GITHUB_ORG_NAME: '',
@@ -44,7 +43,7 @@ const mockOrg = {
const mockTeam = {
data: {
SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL:
'https://platformhost/sso/complete/github-team/',
'https://towerhost/sso/complete/github-team/',
SOCIAL_AUTH_GITHUB_TEAM_KEY: 'OAuth2 key (Client ID)',
SOCIAL_AUTH_GITHUB_TEAM_SECRET: '$encrypted$',
SOCIAL_AUTH_GITHUB_TEAM_ID: 'team_id',
@@ -55,7 +54,7 @@ const mockTeam = {
const mockEnterprise = {
data: {
SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL:
'https://platformhost/sso/complete/github-enterprise/',
'https://towerhost/sso/complete/github-enterprise/',
SOCIAL_AUTH_GITHUB_ENTERPRISE_URL: 'https://localhost/enterpriseurl',
SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL: 'https://localhost/enterpriseapi',
SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY: 'foobar',
@@ -67,7 +66,7 @@ const mockEnterprise = {
const mockEnterpriseOrg = {
data: {
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_CALLBACK_URL:
'https://platformhost/sso/complete/github-enterprise-org/',
'https://towerhost/sso/complete/github-enterprise-org/',
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL: 'https://localhost/orgurl',
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL: 'https://localhost/orgapi',
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY: 'foobar',
@@ -80,7 +79,7 @@ const mockEnterpriseOrg = {
const mockEnterpriseTeam = {
data: {
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_CALLBACK_URL:
'https://platformhost/sso/complete/github-enterprise-team/',
'https://towerhost/sso/complete/github-enterprise-team/',
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL: 'https://localhost/teamurl',
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL: 'https://localhost/teamapi',
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY: 'foobar',
@@ -144,7 +143,7 @@ describe('<GitHubDetail />', () => {
assertDetail(
wrapper,
'GitHub OAuth2 Callback URL',
'https://platformhost/sso/complete/github/'
'https://towerhost/sso/complete/github/'
);
assertDetail(wrapper, 'GitHub OAuth2 Key', 'mock github key');
assertDetail(wrapper, 'GitHub OAuth2 Secret', 'Encrypted');
@@ -219,7 +218,7 @@ describe('<GitHubDetail />', () => {
assertDetail(
wrapper,
'GitHub Organization OAuth2 Callback URL',
'https://platformhost/sso/complete/github-org/'
'https://towerhost/sso/complete/github-org/'
);
assertDetail(wrapper, 'GitHub Organization OAuth2 Key', 'Not configured');
assertDetail(wrapper, 'GitHub Organization OAuth2 Secret', 'Encrypted');
@@ -270,7 +269,7 @@ describe('<GitHubDetail />', () => {
assertDetail(
wrapper,
'GitHub Team OAuth2 Callback URL',
'https://platformhost/sso/complete/github-team/'
'https://towerhost/sso/complete/github-team/'
);
assertDetail(wrapper, 'GitHub Team OAuth2 Key', 'OAuth2 key (Client ID)');
assertDetail(wrapper, 'GitHub Team OAuth2 Secret', 'Encrypted');
@@ -317,7 +316,7 @@ describe('<GitHubDetail />', () => {
assertDetail(
wrapper,
'GitHub Enterprise OAuth2 Callback URL',
'https://platformhost/sso/complete/github-enterprise/'
'https://towerhost/sso/complete/github-enterprise/'
);
assertDetail(
wrapper,
@@ -344,7 +343,7 @@ describe('<GitHubDetail />', () => {
});
});
describe('Enterprise Organization', () => {
describe('Enterprise Org', () => {
let wrapper;
beforeAll(async () => {
@@ -377,7 +376,7 @@ describe('<GitHubDetail />', () => {
assertDetail(
wrapper,
'GitHub Enterprise Organization OAuth2 Callback URL',
'https://platformhost/sso/complete/github-enterprise-org/'
'https://towerhost/sso/complete/github-enterprise-org/'
);
assertDetail(
wrapper,
@@ -446,7 +445,7 @@ describe('<GitHubDetail />', () => {
assertDetail(
wrapper,
'GitHub Enterprise Team OAuth2 Callback URL',
'https://platformhost/sso/complete/github-enterprise-team/'
'https://towerhost/sso/complete/github-enterprise-team/'
);
assertDetail(
wrapper,
@@ -477,4 +476,23 @@ describe('<GitHubDetail />', () => {
);
});
});
describe('Redirect', () => {
test('should render redirect when user navigates to erroneous category', async () => {
let wrapper;
useRouteMatch.mockImplementation(() => ({
url: '/settings/github/foo/details',
path: '/settings/github/:category/details',
params: { category: 'foo' },
}));
await act(async () => {
wrapper = mountWithContexts(
<SettingsProvider value={mockAllOptions.actions}>
<GitHubDetail />
</SettingsProvider>
);
});
await waitForElement(wrapper, 'Redirect');
});
});
});

View File

@@ -22,7 +22,7 @@ describe('<GitHubEnterpriseEdit />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL:
'https://platformhost/sso/complete/github-enterprise/',
'https://towerhost/sso/complete/github-enterprise/',
SOCIAL_AUTH_GITHUB_ENTERPRISE_URL: '',
SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL: '',
SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY: '',

View File

@@ -22,7 +22,7 @@ describe('<GitHubEnterpriseOrgEdit />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_CALLBACK_URL:
'https://platformhost/sso/complete/github-enterprise-org/',
'https://towerhost/sso/complete/github-enterprise-org/',
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL: '',
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL: '',
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY: '',

View File

@@ -22,7 +22,7 @@ describe('<GitHubEnterpriseTeamEdit />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_CALLBACK_URL:
'https://platformhost/sso/complete/github-enterprise-team/',
'https://towerhost/sso/complete/github-enterprise-team/',
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL: '',
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL: '',
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY: '',

View File

@@ -22,7 +22,7 @@ describe('<GitHubOrgEdit />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL:
'https://platformhost/sso/complete/github-org/',
'https://towerhost/sso/complete/github-org/',
SOCIAL_AUTH_GITHUB_ORG_KEY: '',
SOCIAL_AUTH_GITHUB_ORG_SECRET: '$encrypted$',
SOCIAL_AUTH_GITHUB_ORG_NAME: '',

View File

@@ -22,7 +22,7 @@ describe('<GitHubTeamEdit />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL:
'https://platformhost/sso/complete/github-team/',
'https://towerhost/sso/complete/github-team/',
SOCIAL_AUTH_GITHUB_TEAM_KEY: 'OAuth2 key (Client ID)',
SOCIAL_AUTH_GITHUB_TEAM_SECRET: '$encrypted$',
SOCIAL_AUTH_GITHUB_TEAM_ID: 'team_id',

View File

@@ -16,7 +16,7 @@ describe('<GoogleOAuth2 />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL:
'https://platformhost/sso/complete/google-oauth2/',
'https://towerhost/sso/complete/google-oauth2/',
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY: 'mock key',
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET: '$encrypted$',
SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS: [

View File

@@ -22,7 +22,7 @@ describe('<GoogleOAuth2Detail />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL:
'https://platformhost/sso/complete/google-oauth2/',
'https://towerhost/sso/complete/google-oauth2/',
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY: 'mock key',
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET: '$encrypted$',
SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS: [
@@ -68,7 +68,7 @@ describe('<GoogleOAuth2Detail />', () => {
assertDetail(
wrapper,
'Google OAuth2 Callback URL',
'https://platformhost/sso/complete/google-oauth2/'
'https://towerhost/sso/complete/google-oauth2/'
);
assertDetail(wrapper, 'Google OAuth2 Key', 'mock key');
assertDetail(wrapper, 'Google OAuth2 Secret', 'Encrypted');

View File

@@ -22,7 +22,7 @@ describe('<GoogleOAuth2Edit />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL:
'https://platformhost/sso/complete/google-oauth2/',
'https://towerhost/sso/complete/google-oauth2/',
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY: 'mock key',
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET: '$encrypted$',
SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS: [

View File

@@ -26,7 +26,7 @@ describe('<MiscSystemDetail />', () => {
ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC: false,
ORG_ADMINS_CAN_SEE_ALL_USERS: true,
MANAGE_ORGANIZATION_AUTH: true,
TOWER_URL_BASE: 'https://platformhost',
TOWER_URL_BASE: 'https://towerhost',
REMOTE_HOST_HEADERS: [],
PROXY_IP_ALLOWED_LIST: [],
CSRF_TRUSTED_ORIGINS: [],
@@ -94,7 +94,7 @@ describe('<MiscSystemDetail />', () => {
'Automation Analytics upload URL',
'https://example.com'
);
assertDetail(wrapper, 'Base URL of the service', 'https://platformhost');
assertDetail(wrapper, 'Base URL of the service', 'https://towerhost');
assertDetail(wrapper, 'Gather data for Automation Analytics', 'Off');
assertDetail(
wrapper,

View File

@@ -15,10 +15,8 @@ describe('<SAML />', () => {
beforeEach(() => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_SAML_CALLBACK_URL:
'https://platformhost/sso/complete/saml/',
SOCIAL_AUTH_SAML_METADATA_URL:
'https://platformhost/sso/metadata/saml/',
SOCIAL_AUTH_SAML_CALLBACK_URL: 'https://towerhost/sso/complete/saml/',
SOCIAL_AUTH_SAML_METADATA_URL: 'https://towerhost/sso/metadata/saml/',
SOCIAL_AUTH_SAML_SP_ENTITY_ID: '',
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT: '',
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY: '',

View File

@@ -21,10 +21,8 @@ describe('<SAMLDetail />', () => {
beforeEach(() => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SOCIAL_AUTH_SAML_CALLBACK_URL:
'https://platformhost/sso/complete/saml/',
SOCIAL_AUTH_SAML_METADATA_URL:
'https://platformhost/sso/metadata/saml/',
SOCIAL_AUTH_SAML_CALLBACK_URL: 'https://towerhost/sso/complete/saml/',
SOCIAL_AUTH_SAML_METADATA_URL: 'https://towerhost/sso/metadata/saml/',
SOCIAL_AUTH_SAML_SP_ENTITY_ID: 'mock_id',
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT: 'mock_cert',
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY: '',
@@ -73,12 +71,12 @@ describe('<SAMLDetail />', () => {
assertDetail(
wrapper,
'SAML Assertion Consumer Service (ACS) URL',
'https://platformhost/sso/complete/saml/'
'https://towerhost/sso/complete/saml/'
);
assertDetail(
wrapper,
'SAML Service Provider Metadata URL',
'https://platformhost/sso/metadata/saml/'
'https://towerhost/sso/metadata/saml/'
);
assertDetail(wrapper, 'SAML Service Provider Entity ID', 'mock_id');
assertVariableDetail(

View File

@@ -22,10 +22,8 @@ describe('<SAMLEdit />', () => {
SettingsAPI.readCategory.mockResolvedValue({
data: {
SAML_AUTO_CREATE_OBJECTS: true,
SOCIAL_AUTH_SAML_CALLBACK_URL:
'https://platformhost/sso/complete/saml/',
SOCIAL_AUTH_SAML_METADATA_URL:
'https://platformhost/sso/metadata/saml/',
SOCIAL_AUTH_SAML_CALLBACK_URL: 'https://towerhost/sso/complete/saml/',
SOCIAL_AUTH_SAML_METADATA_URL: 'https://towerhost/sso/metadata/saml/',
SOCIAL_AUTH_SAML_SP_ENTITY_ID: 'mock_id',
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT: 'mock_cert',
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY: '$encrypted$',

View File

@@ -117,10 +117,6 @@ function TroubleshootingEdit() {
name="RECEPTOR_RELEASE_WORK"
config={debug.RECEPTOR_RELEASE_WORK}
/>
<BooleanField
name="RECEPTOR_KEEP_WORK_ON_ERROR"
config={debug.RECEPTOR_KEEP_WORK_ON_ERROR}
/>
{submitError && <FormSubmitError error={submitError} />}
{revertError && <FormSubmitError error={revertError} />}
</FormColumnLayout>

View File

@@ -1,6 +1,5 @@
{
"AWX_CLEANUP_PATHS": false,
"AWX_REQUEST_PROFILE": false,
"RECEPTOR_RELEASE_WORK": false,
"RECEPTOR_KEEP_WORK_ON_ERROR": false
}
"RECEPTOR_RELEASE_WORK": false
}

View File

@@ -830,15 +830,6 @@
"category_slug": "debug",
"default": true
},
"RECEPTOR_KEEP_WORK_ON_ERROR": {
"type": "boolean",
"required": false,
"label": "Keep receptor work on error",
"help_text": "Prevent receptor work from being released on when error is detected",
"category": "Debug",
"category_slug": "debug",
"default": false
},
"SESSION_COOKIE_AGE": {
"type": "integer",
"required": true,
@@ -5182,14 +5173,6 @@
"category_slug": "debug",
"defined_in_file": false
},
"RECEPTOR_KEEP_WORK_ON_ERROR": {
"type": "boolean",
"label": "Keep receptor work on error",
"help_text": "Prevent receptor work from being released on when error is detected",
"category": "Debug",
"category_slug": "debug",
"defined_in_file": false
},
"SESSION_COOKIE_AGE": {
"type": "integer",
"label": "Idle Time Force Log Out",

View File

@@ -91,7 +91,6 @@
"slirp4netns:enable_ipv6=true"
],
"RECEPTOR_RELEASE_WORK": true,
"RECEPTOR_KEEP_WORK_ON_ERROR": false,
"SESSION_COOKIE_AGE": 1800,
"SESSIONS_PER_USER": -1,
"DISABLE_LOCAL_AUTH": false,

View File

@@ -35,7 +35,7 @@ ui-next/src/build: $(UI_NEXT_DIR)/src/build/awx
## True target for ui-next/src/build. Build ui_next from source.
$(UI_NEXT_DIR)/src/build/awx: $(UI_NEXT_DIR)/src $(UI_NEXT_DIR)/src/node_modules/webpack
@echo "=== Building ui_next ==="
@cd $(UI_NEXT_DIR)/src && PRODUCT="$(PRODUCT)" PUBLIC_PATH=/static/awx/ ROUTE_PREFIX=/ npm run build:awx
@cd $(UI_NEXT_DIR)/src && PRODUCT="$(PRODUCT)" PUBLIC_PATH=/static/awx/ ROUTE_PREFIX=/ui_next npm run build:awx
@mv $(UI_NEXT_DIR)/src/build/awx/index.html $(UI_NEXT_DIR)/src/build/awx/index_awx.html
.PHONY: ui-next/src

View File

@@ -1,3 +1,5 @@
from django.conf import settings
from django.http import Http404
from django.urls import re_path
from django.views.generic.base import TemplateView
@@ -5,6 +7,12 @@ from django.views.generic.base import TemplateView
class IndexView(TemplateView):
template_name = 'index_awx.html'
def get_context_data(self, **kwargs):
if settings.UI_NEXT is False:
raise Http404()
return super().get_context_data(**kwargs)
app_name = 'ui_next'

View File

@@ -18,6 +18,8 @@ def get_urlpatterns(prefix=None):
prefix = f'/{prefix}/'
urlpatterns = [
re_path(r'', include('awx.ui.urls', namespace='ui')),
re_path(r'^ui_next/.*', include('awx.ui_next.urls', namespace='ui_next')),
path(f'api{prefix}', include('awx.api.urls', namespace='api')),
]
@@ -34,9 +36,6 @@ def get_urlpatterns(prefix=None):
re_path(r'^(?:api/)?500.html$', handle_500),
re_path(r'^csp-violation/', handle_csp_violation),
re_path(r'^login/', handle_login_redirect),
# want api/v2/doesnotexist to return a 404, not match the ui_next urls,
# so use a negative lookahead assertion here
re_path(r'^(?!api/|sso/).*', include('awx.ui_next.urls', namespace='ui_next')),
]
if settings.SETTINGS_MODULE == 'awx.settings.development':

View File

@@ -32,7 +32,7 @@ Installing the `tar.gz` involves no special instructions.
## Running
Non-deprecated modules in this collection have no Python requirements, but
may require the official [AWX CLI](https://pypi.org/project/awxkit/)
may require the official [AWX CLI](https://docs.ansible.com/ansible-tower/latest/html/towercli/index.html)
in the future. The `DOCUMENTATION` for each module will report this.
You can specify authentication by a combination of either:
@@ -41,7 +41,8 @@ You can specify authentication by a combination of either:
- host, OAuth2 token
The OAuth2 token is the preferred method. You can obtain a token via the
``login`` command with the AWX CLI.
AWX CLI [login](https://docs.ansible.com/ansible-tower/latest/html/towercli/reference.html#awx-login)
command.
These can be specified via (from highest to lowest precedence):

View File

@@ -12,7 +12,7 @@ requirements:
- None
description:
- Returns GET requests from the Automation Platform Controller API. See
U(https://docs.ansible.com/automation-controller/latest/html/towerapi/) for API usage.
U(https://docs.ansible.com/ansible-tower/latest/html/towerapi/index.html) for API usage.
- For use that is cross-compatible between the awx.awx and ansible.controller collection
see the controller_meta module
options:

View File

@@ -16,9 +16,9 @@ DOCUMENTATION = '''
---
module: job_template
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy job templates.
short_description: create, update, or destroy Automation Platform Controller job templates.
description:
- Create, update, or destroy job templates. See
- Create, update, or destroy Automation Platform Controller job templates. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
@@ -320,8 +320,8 @@ extends_documentation_fragment: awx.awx.auth
notes:
- JSON for survey_spec can be found in the API Documentation. See
U(https://docs.ansible.com/automation-controller/latest/html/towerapi)
for job template survey creation and POST operation payload example.
U(https://docs.ansible.com/ansible-tower/latest/html/towerapi/api_ref.html#/Job_Templates/Job_Templates_job_templates_survey_spec_create)
for POST operation payload example.
'''

View File

@@ -37,7 +37,7 @@ This collection should be installed from [Content Hub](https://cloud.redhat.com/
## Running
Non-deprecated modules in this collection have no Python requirements, but
may require the AWX CLI
may require the official [AWX CLI](https://docs.ansible.com/ansible-tower/latest/html/towercli/index.html)
in the future. The `DOCUMENTATION` for each module will report this.
You can specify authentication by a combination of either:
@@ -46,7 +46,8 @@ You can specify authentication by a combination of either:
- host, OAuth2 token
The OAuth2 token is the preferred method. You can obtain a token via the
``login`` command with the AWX CLI.
AWX CLI [login](https://docs.ansible.com/ansible-tower/latest/html/towercli/reference.html#awx-login)
command.
These can be specified via (from highest to lowest precedence):

View File

@@ -1,666 +0,0 @@
# Django
from django.utils.translation import gettext_noop
# AWX
from awx.main.models.credential import ManagedCredentialType
ManagedCredentialType(
namespace='ssh',
kind='ssh',
name=gettext_noop('Machine'),
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{'id': 'password', 'label': gettext_noop('Password'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
{'id': 'ssh_key_data', 'label': gettext_noop('SSH Private Key'), 'type': 'string', 'format': 'ssh_private_key', 'secret': True, 'multiline': True},
{
'id': 'ssh_public_key_data',
'label': gettext_noop('Signed SSH Certificate'),
'type': 'string',
'multiline': True,
'secret': True,
},
{'id': 'ssh_key_unlock', 'label': gettext_noop('Private Key Passphrase'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
{
'id': 'become_method',
'label': gettext_noop('Privilege Escalation Method'),
'type': 'string',
'help_text': gettext_noop('Specify a method for "become" operations. This is equivalent to specifying the --become-method Ansible parameter.'),
},
{
'id': 'become_username',
'label': gettext_noop('Privilege Escalation Username'),
'type': 'string',
},
{'id': 'become_password', 'label': gettext_noop('Privilege Escalation Password'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
],
},
)
ManagedCredentialType(
namespace='scm',
kind='scm',
name=gettext_noop('Source Control'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{'id': 'password', 'label': gettext_noop('Password'), 'type': 'string', 'secret': True},
{'id': 'ssh_key_data', 'label': gettext_noop('SCM Private Key'), 'type': 'string', 'format': 'ssh_private_key', 'secret': True, 'multiline': True},
{'id': 'ssh_key_unlock', 'label': gettext_noop('Private Key Passphrase'), 'type': 'string', 'secret': True},
],
},
)
ManagedCredentialType(
namespace='vault',
kind='vault',
name=gettext_noop('Vault'),
managed=True,
inputs={
'fields': [
{'id': 'vault_password', 'label': gettext_noop('Vault Password'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
{
'id': 'vault_id',
'label': gettext_noop('Vault Identifier'),
'type': 'string',
'format': 'vault_id',
'help_text': gettext_noop(
'Specify an (optional) Vault ID. This is '
'equivalent to specifying the --vault-id '
'Ansible parameter for providing multiple Vault '
'passwords. Note: this feature only works in '
'Ansible 2.4+.'
),
},
],
'required': ['vault_password'],
},
)
ManagedCredentialType(
namespace='net',
kind='net',
name=gettext_noop('Network'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
{'id': 'ssh_key_data', 'label': gettext_noop('SSH Private Key'), 'type': 'string', 'format': 'ssh_private_key', 'secret': True, 'multiline': True},
{
'id': 'ssh_key_unlock',
'label': gettext_noop('Private Key Passphrase'),
'type': 'string',
'secret': True,
},
{
'id': 'authorize',
'label': gettext_noop('Authorize'),
'type': 'boolean',
},
{
'id': 'authorize_password',
'label': gettext_noop('Authorize Password'),
'type': 'string',
'secret': True,
},
],
'dependencies': {
'authorize_password': ['authorize'],
},
'required': ['username'],
},
)
ManagedCredentialType(
namespace='aws',
kind='cloud',
name=gettext_noop('Amazon Web Services'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Access Key'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Secret Key'),
'type': 'string',
'secret': True,
},
{
'id': 'security_token',
'label': gettext_noop('STS Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop(
'Security Token Service (STS) is a web service '
'that enables you to request temporary, '
'limited-privilege credentials for AWS Identity '
'and Access Management (IAM) users.'
),
},
],
'required': ['username', 'password'],
},
)
ManagedCredentialType(
namespace='openstack',
kind='cloud',
name=gettext_noop('OpenStack'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password (API Key)'),
'type': 'string',
'secret': True,
},
{
'id': 'host',
'label': gettext_noop('Host (Authentication URL)'),
'type': 'string',
'help_text': gettext_noop('The host to authenticate with. For example, https://openstack.business.com/v2.0/'),
},
{
'id': 'project',
'label': gettext_noop('Project (Tenant Name)'),
'type': 'string',
},
{
'id': 'project_domain_name',
'label': gettext_noop('Project (Domain Name)'),
'type': 'string',
},
{
'id': 'domain',
'label': gettext_noop('Domain Name'),
'type': 'string',
'help_text': gettext_noop(
'OpenStack domains define administrative boundaries. '
'It is only needed for Keystone v3 authentication '
'URLs. Refer to the documentation for '
'common scenarios.'
),
},
{
'id': 'region',
'label': gettext_noop('Region Name'),
'type': 'string',
'help_text': gettext_noop('For some cloud providers, like OVH, region must be specified'),
},
{
'id': 'verify_ssl',
'label': gettext_noop('Verify SSL'),
'type': 'boolean',
'default': True,
},
],
'required': ['username', 'password', 'host', 'project'],
},
)
ManagedCredentialType(
namespace='vmware',
kind='cloud',
name=gettext_noop('VMware vCenter'),
managed=True,
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('VCenter Host'),
'type': 'string',
'help_text': gettext_noop('Enter the hostname or IP address that corresponds to your VMware vCenter.'),
},
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
],
'required': ['host', 'username', 'password'],
},
)
ManagedCredentialType(
namespace='satellite6',
kind='cloud',
name=gettext_noop('Red Hat Satellite 6'),
managed=True,
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('Satellite 6 URL'),
'type': 'string',
'help_text': gettext_noop('Enter the URL that corresponds to your Red Hat Satellite 6 server. For example, https://satellite.example.org'),
},
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
],
'required': ['host', 'username', 'password'],
},
)
ManagedCredentialType(
namespace='gce',
kind='cloud',
name=gettext_noop('Google Compute Engine'),
managed=True,
inputs={
'fields': [
{
'id': 'username',
'label': gettext_noop('Service Account Email Address'),
'type': 'string',
'help_text': gettext_noop('The email address assigned to the Google Compute Engine service account.'),
},
{
'id': 'project',
'label': 'Project',
'type': 'string',
'help_text': gettext_noop(
'The Project ID is the GCE assigned identification. '
'It is often constructed as three words or two words '
'followed by a three-digit number. Examples: project-id-000 '
'and another-project-id'
),
},
{
'id': 'ssh_key_data',
'label': gettext_noop('RSA Private Key'),
'type': 'string',
'format': 'ssh_private_key',
'secret': True,
'multiline': True,
'help_text': gettext_noop('Paste the contents of the PEM file associated with the service account email.'),
},
],
'required': ['username', 'ssh_key_data'],
},
)
ManagedCredentialType(
namespace='azure_rm',
kind='cloud',
name=gettext_noop('Microsoft Azure Resource Manager'),
managed=True,
inputs={
'fields': [
{
'id': 'subscription',
'label': gettext_noop('Subscription ID'),
'type': 'string',
'help_text': gettext_noop('Subscription ID is an Azure construct, which is mapped to a username.'),
},
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
{'id': 'client', 'label': gettext_noop('Client ID'), 'type': 'string'},
{
'id': 'secret',
'label': gettext_noop('Client Secret'),
'type': 'string',
'secret': True,
},
{'id': 'tenant', 'label': gettext_noop('Tenant ID'), 'type': 'string'},
{
'id': 'cloud_environment',
'label': gettext_noop('Azure Cloud Environment'),
'type': 'string',
'help_text': gettext_noop('Environment variable AZURE_CLOUD_ENVIRONMENT when using Azure GovCloud or Azure stack.'),
},
],
'required': ['subscription'],
},
)
ManagedCredentialType(
namespace='github_token',
kind='token',
name=gettext_noop('GitHub Personal Access Token'),
managed=True,
inputs={
'fields': [
{
'id': 'token',
'label': gettext_noop('Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('This token needs to come from your profile settings in GitHub'),
}
],
'required': ['token'],
},
)
ManagedCredentialType(
namespace='gitlab_token',
kind='token',
name=gettext_noop('GitLab Personal Access Token'),
managed=True,
inputs={
'fields': [
{
'id': 'token',
'label': gettext_noop('Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('This token needs to come from your profile settings in GitLab'),
}
],
'required': ['token'],
},
)
ManagedCredentialType(
namespace='bitbucket_dc_token',
kind='token',
name=gettext_noop('Bitbucket Data Center HTTP Access Token'),
managed=True,
inputs={
'fields': [
{
'id': 'token',
'label': gettext_noop('Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('This token needs to come from your user settings in Bitbucket'),
}
],
'required': ['token'],
},
)
ManagedCredentialType(
namespace='insights',
kind='insights',
name=gettext_noop('Insights'),
managed=True,
inputs={
'fields': [
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{'id': 'password', 'label': gettext_noop('Password'), 'type': 'string', 'secret': True},
],
'required': ['username', 'password'],
},
injectors={
'extra_vars': {
"scm_username": "{{username}}",
"scm_password": "{{password}}",
},
'env': {
'INSIGHTS_USER': '{{username}}',
'INSIGHTS_PASSWORD': '{{password}}',
},
},
)
ManagedCredentialType(
namespace='rhv',
kind='cloud',
name=gettext_noop('Red Hat Virtualization'),
managed=True,
inputs={
'fields': [
{'id': 'host', 'label': gettext_noop('Host (Authentication URL)'), 'type': 'string', 'help_text': gettext_noop('The host to authenticate with.')},
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
{
'id': 'ca_file',
'label': gettext_noop('CA File'),
'type': 'string',
'help_text': gettext_noop('Absolute file path to the CA file to use (optional)'),
},
],
'required': ['host', 'username', 'password'],
},
injectors={
# The duplication here is intentional; the ovirt4 inventory plugin
# writes a .ini file for authentication, while the ansible modules for
# ovirt4 use a separate authentication process that supports
# environment variables; by injecting both, we support both
'file': {
'template': '\n'.join(
[
'[ovirt]',
'ovirt_url={{host}}',
'ovirt_username={{username}}',
'ovirt_password={{password}}',
'{% if ca_file %}ovirt_ca_file={{ca_file}}{% endif %}',
]
)
},
'env': {'OVIRT_INI_PATH': '{{tower.filename}}', 'OVIRT_URL': '{{host}}', 'OVIRT_USERNAME': '{{username}}', 'OVIRT_PASSWORD': '{{password}}'},
},
)
ManagedCredentialType(
namespace='controller',
kind='cloud',
name=gettext_noop('Red Hat Ansible Automation Platform'),
managed=True,
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('Red Hat Ansible Automation Platform'),
'type': 'string',
'help_text': gettext_noop('Red Hat Ansible Automation Platform base URL to authenticate with.'),
},
{
'id': 'username',
'label': gettext_noop('Username'),
'type': 'string',
'help_text': gettext_noop(
'Red Hat Ansible Automation Platform username id to authenticate as. This should not be set if an OAuth token is being used.'
),
},
{
'id': 'password',
'label': gettext_noop('Password'),
'type': 'string',
'secret': True,
},
{
'id': 'oauth_token',
'label': gettext_noop('OAuth Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('An OAuth token to use to authenticate with. This should not be set if username/password are being used.'),
},
{'id': 'verify_ssl', 'label': gettext_noop('Verify SSL'), 'type': 'boolean', 'secret': False},
],
'required': ['host'],
},
injectors={
'env': {
'TOWER_HOST': '{{host}}',
'TOWER_USERNAME': '{{username}}',
'TOWER_PASSWORD': '{{password}}',
'TOWER_VERIFY_SSL': '{{verify_ssl}}',
'TOWER_OAUTH_TOKEN': '{{oauth_token}}',
'CONTROLLER_HOST': '{{host}}',
'CONTROLLER_USERNAME': '{{username}}',
'CONTROLLER_PASSWORD': '{{password}}',
'CONTROLLER_VERIFY_SSL': '{{verify_ssl}}',
'CONTROLLER_OAUTH_TOKEN': '{{oauth_token}}',
}
},
)
ManagedCredentialType(
namespace='kubernetes_bearer_token',
kind='kubernetes',
name=gettext_noop('OpenShift or Kubernetes API Bearer Token'),
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('OpenShift or Kubernetes API Endpoint'),
'type': 'string',
'help_text': gettext_noop('The OpenShift or Kubernetes API Endpoint to authenticate with.'),
},
{
'id': 'bearer_token',
'label': gettext_noop('API authentication bearer token'),
'type': 'string',
'secret': True,
},
{
'id': 'verify_ssl',
'label': gettext_noop('Verify SSL'),
'type': 'boolean',
'default': True,
},
{
'id': 'ssl_ca_cert',
'label': gettext_noop('Certificate Authority data'),
'type': 'string',
'secret': True,
'multiline': True,
},
],
'required': ['host', 'bearer_token'],
},
)
ManagedCredentialType(
namespace='registry',
kind='registry',
name=gettext_noop('Container Registry'),
inputs={
'fields': [
{
'id': 'host',
'label': gettext_noop('Authentication URL'),
'type': 'string',
'help_text': gettext_noop('Authentication endpoint for the container registry.'),
'default': 'quay.io',
},
{
'id': 'username',
'label': gettext_noop('Username'),
'type': 'string',
},
{
'id': 'password',
'label': gettext_noop('Password or Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('A password or token used to authenticate with'),
},
{
'id': 'verify_ssl',
'label': gettext_noop('Verify SSL'),
'type': 'boolean',
'default': True,
},
],
'required': ['host'],
},
)
ManagedCredentialType(
namespace='galaxy_api_token',
kind='galaxy',
name=gettext_noop('Ansible Galaxy/Automation Hub API Token'),
inputs={
'fields': [
{
'id': 'url',
'label': gettext_noop('Galaxy Server URL'),
'type': 'string',
'help_text': gettext_noop('The URL of the Galaxy instance to connect to.'),
},
{
'id': 'auth_url',
'label': gettext_noop('Auth Server URL'),
'type': 'string',
'help_text': gettext_noop('The URL of a Keycloak server token_endpoint, if using SSO auth.'),
},
{
'id': 'token',
'label': gettext_noop('API Token'),
'type': 'string',
'secret': True,
'help_text': gettext_noop('A token to use for authentication against the Galaxy instance.'),
},
],
'required': ['url'],
},
)
ManagedCredentialType(
namespace='gpg_public_key',
kind='cryptography',
name=gettext_noop('GPG Public Key'),
inputs={
'fields': [
{
'id': 'gpg_public_key',
'label': gettext_noop('GPG Public Key'),
'type': 'string',
'secret': True,
'multiline': True,
'help_text': gettext_noop('GPG Public Key used to validate content signatures.'),
},
],
'required': ['gpg_public_key'],
},
)
ManagedCredentialType(
namespace='terraform',
kind='cloud',
name=gettext_noop('Terraform backend configuration'),
managed=True,
inputs={
'fields': [
{
'id': 'configuration',
'label': gettext_noop('Backend configuration'),
'type': 'string',
'secret': True,
'multiline': True,
'help_text': gettext_noop('Terraform backend config as HashiCorp configuration language.'),
},
{
'id': 'gce_credentials',
'label': gettext_noop('Google Cloud Platform account credentials'),
'type': 'string',
'secret': True,
'multiline': True,
'help_text': gettext_noop('Google Cloud Platform account credentials in JSON format.'),
},
],
'required': ['configuration'],
},
)

View File

@@ -1,303 +0,0 @@
import yaml
import stat
import tempfile
import os.path
from awx_plugins.credentials.injectors import _openstack_data
from awx.main.utils.execution_environments import to_container_path
from awx.main.utils.licensing import server_product_name
class PluginFileInjector(object):
    """Base class describing how an inventory source maps to an Ansible inventory plugin.

    Subclasses declare which plugin to use (``plugin_name``/``namespace``/
    ``collection``), what environment variables to inject, and what private
    data files to write for an inventory update.
    """

    plugin_name = None  # Ansible core name used to reference plugin
    # base injector should be one of None, "managed", or "template"
    # this dictates which logic to borrow from playbook injectors
    base_injector = None
    # every source should have collection, these are for the collection name
    namespace = None
    collection = None
    collection_migration = '2.9'  # Starting with this version, we use collections
    use_fqcn = False  # plugin: name versus plugin: namespace.collection.name

    # TODO: delete this method and update unit tests
    @classmethod
    def get_proper_name(cls):
        # Fully-qualified collection name of the plugin, or None when the
        # subclass does not set plugin_name.
        if cls.plugin_name is None:
            return None
        return f'{cls.namespace}.{cls.collection}.{cls.plugin_name}'

    @property
    def filename(self):
        """Inventory filename for using the inventory plugin

        This is created dynamically, but the auto plugin requires this exact naming
        """
        return '{0}.yml'.format(self.plugin_name)

    def inventory_contents(self, inventory_update, private_data_dir):
        """Returns a string that is the content for the inventory file for the inventory plugin"""
        return yaml.safe_dump(self.inventory_as_dict(inventory_update, private_data_dir), default_flow_style=False, width=1000)

    def inventory_as_dict(self, inventory_update, private_data_dir):
        # Start from a copy of the user's source_vars and force the 'plugin'
        # key to the correct (possibly downstream-rebranded) plugin reference.
        source_vars = dict(inventory_update.source_vars_dict)  # make a copy
        '''
        None conveys that we should use the user-provided plugin.
        Note that a plugin value of '' should still be overridden.
        '''
        if self.plugin_name is not None:
            if hasattr(self, 'downstream_namespace') and server_product_name() != 'AWX':
                # Downstream (non-AWX) products ship rebranded collections.
                source_vars['plugin'] = f'{self.downstream_namespace}.{self.downstream_collection}.{self.plugin_name}'
            elif self.use_fqcn:
                source_vars['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
            else:
                source_vars['plugin'] = self.plugin_name
        return source_vars

    def build_env(self, inventory_update, env, private_data_dir, private_data_files):
        # Merge this injector's env on top of the provided env, in place.
        injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
        env.update(injector_env)
        # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
        env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
        return env

    def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
        """By default, we will apply the standard managed injectors"""
        injected_env = {}
        credential = inventory_update.get_cloud_credential()
        # some sources may have no credential, specifically ec2
        if credential is None:
            return injected_env
        if self.base_injector in ('managed', 'template'):
            injected_env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)  # so injector knows this is inventory
            if self.base_injector == 'managed':
                # Deferred import — presumably avoids an import cycle; confirm.
                from awx_plugins.credentials import injectors as builtin_injectors

                cred_kind = inventory_update.source.replace('ec2', 'aws')
                if cred_kind in dir(builtin_injectors):
                    getattr(builtin_injectors, cred_kind)(credential, injected_env, private_data_dir)
            elif self.base_injector == 'template':
                safe_env = injected_env.copy()
                args = []
                credential.credential_type.inject_credential(credential, injected_env, safe_env, args, private_data_dir)
                # NOTE: safe_env is handled externally to injector class by build_safe_env static method
                # that means that managed injectors must only inject detectable env keys
                # enforcement of this is accomplished by tests
        return injected_env

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        # Hook point for subclasses; base behavior is the shared env only.
        env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
        return env

    def build_private_data(self, inventory_update, private_data_dir):
        # Delegates to the plugin-specific hook below.
        return self.build_plugin_private_data(inventory_update, private_data_dir)

    def build_plugin_private_data(self, inventory_update, private_data_dir):
        # Subclasses return a dict like {'credentials': {credential: text}};
        # base class writes no private data.
        return None
class azure_rm(PluginFileInjector):
    """Injector for the Azure Resource Manager inventory plugin."""

    plugin_name = 'azure_rm'
    base_injector = 'managed'
    namespace = 'azure'
    collection = 'azcollection'

    def get_plugin_env(self, *args, **kwargs):
        env = super().get_plugin_env(*args, **kwargs)
        # We need native jinja2 types so that tags can give JSON null value
        env['ANSIBLE_JINJA2_NATIVE'] = str(True)
        return env
class ec2(PluginFileInjector):
    """Injector for the amazon.aws.aws_ec2 inventory plugin."""

    plugin_name = 'aws_ec2'
    base_injector = 'managed'
    namespace = 'amazon'
    collection = 'aws'

    def get_plugin_env(self, *args, **kwargs):
        env = super().get_plugin_env(*args, **kwargs)
        # We need native jinja2 types so that ec2_state_code will give integer
        env['ANSIBLE_JINJA2_NATIVE'] = str(True)
        return env
class gce(PluginFileInjector):
    """Injector for the google.cloud.gcp_compute inventory plugin."""

    plugin_name = 'gcp_compute'
    base_injector = 'managed'
    namespace = 'google'
    collection = 'cloud'

    def get_plugin_env(self, *args, **kwargs):
        ret = super(gce, self).get_plugin_env(*args, **kwargs)
        # We need native jinja2 types so that ip addresses can give JSON null value
        ret['ANSIBLE_JINJA2_NATIVE'] = str(True)
        return ret

    def inventory_as_dict(self, inventory_update, private_data_dir):
        ret = super().inventory_as_dict(inventory_update, private_data_dir)
        # Assumes a cloud credential is always attached for gce sources —
        # credential would be None otherwise and .get_input would raise.
        credential = inventory_update.get_cloud_credential()
        # InventorySource.source_vars take precedence over ENV vars
        if 'projects' not in ret:
            ret['projects'] = [credential.get_input('project', default='')]
        return ret
class vmware(PluginFileInjector):
    """Injector for the community.vmware.vmware_vm_inventory plugin.

    No custom env or private data; relies entirely on the managed base injector.
    """

    plugin_name = 'vmware_vm_inventory'
    base_injector = 'managed'
    namespace = 'community'
    collection = 'vmware'
class openstack(PluginFileInjector):
    """Injector for the openstack.cloud.openstack inventory plugin.

    Writes an os-client-config clouds file as private data and points the
    plugin at it via OS_CLIENT_CONFIG_FILE.
    """

    plugin_name = 'openstack'
    namespace = 'openstack'
    collection = 'cloud'

    def _get_clouds_dict(self, inventory_update, cred, private_data_dir):
        # Base clouds.yaml structure derived from the credential.
        openstack_data = _openstack_data(cred)
        openstack_data['clouds']['devstack']['private'] = inventory_update.source_vars_dict.get('private', True)
        # Defaults for the plugin's top-level 'ansible' options; any value the
        # user supplied in source_vars overrides the default below.
        ansible_variables = {
            'use_hostnames': True,
            'expand_hostvars': False,
            'fail_on_errors': True,
        }
        provided_count = 0
        for var_name in ansible_variables:
            if var_name in inventory_update.source_vars_dict:
                ansible_variables[var_name] = inventory_update.source_vars_dict[var_name]
                provided_count += 1
        if provided_count:
            # Must we provide all 3 because the user provides any 1 of these??
            # this probably results in some incorrect mangling of the defaults
            openstack_data['ansible'] = ansible_variables
        return openstack_data

    def build_plugin_private_data(self, inventory_update, private_data_dir):
        # Serialize the clouds dict to YAML, keyed by the credential object so
        # get_plugin_env can find the written file path later.
        credential = inventory_update.get_cloud_credential()
        private_data = {'credentials': {}}
        openstack_data = self._get_clouds_dict(inventory_update, credential, private_data_dir)
        private_data['credentials'][credential] = yaml.safe_dump(openstack_data, default_flow_style=False, allow_unicode=True)
        return private_data

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        env = super(openstack, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
        credential = inventory_update.get_cloud_credential()
        # private_data_files maps credential -> host path of the written file;
        # translate to the path visible inside the execution container.
        cred_data = private_data_files['credentials']
        env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_data[credential], private_data_dir)
        return env
class rhv(PluginFileInjector):
    """ovirt uses the custom credential templating, and that is all"""

    plugin_name = 'ovirt'
    base_injector = 'template'
    initial_version = '2.9'  # NOTE(review): not read by the visible base class; confirm it is still used
    namespace = 'ovirt'
    collection = 'ovirt'
    # Rebranded collection used when running on a downstream (non-AWX) product.
    downstream_namespace = 'redhat'
    downstream_collection = 'rhv'
    use_fqcn = True
class satellite6(PluginFileInjector):
    """Injector for the theforeman.foreman inventory plugin (Satellite 6)."""

    plugin_name = 'foreman'
    namespace = 'theforeman'
    collection = 'foreman'
    downstream_namespace = 'redhat'
    downstream_collection = 'satellite'
    use_fqcn = True

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        # this assumes that this is merged
        # https://github.com/ansible/ansible/pull/52693
        credential = inventory_update.get_cloud_credential()
        env = super().get_plugin_env(inventory_update, private_data_dir, private_data_files)
        if credential:
            # Map credential inputs onto the env vars the foreman plugin reads.
            for env_key, input_id in (
                ('FOREMAN_SERVER', 'host'),
                ('FOREMAN_USER', 'username'),
                ('FOREMAN_PASSWORD', 'password'),
            ):
                env[env_key] = credential.get_input(input_id, default='')
        return env
class terraform(PluginFileInjector):
    """Injector for the cloud.terraform.terraform_state inventory plugin.

    Writes the backend HCL configuration to a temp file in the private data
    dir, and optionally exposes GCP credentials for a GCS backend.
    """

    plugin_name = 'terraform_state'
    namespace = 'cloud'
    collection = 'terraform'
    use_fqcn = True

    def inventory_as_dict(self, inventory_update, private_data_dir):
        ret = super().inventory_as_dict(inventory_update, private_data_dir)
        # Assumes a cloud credential is attached; .get_input would fail on None.
        credential = inventory_update.get_cloud_credential()
        config_cred = credential.get_input('configuration')
        if config_cred:
            # mkstemp creates the file owner-readable; chmod tightens to 0600
            # before the secret backend config is written.
            handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
            with os.fdopen(handle, 'w') as f:
                os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
                f.write(config_cred)
            # Path must be the one visible inside the execution container.
            ret['backend_config_files'] = to_container_path(path, private_data_dir)
        return ret

    def build_plugin_private_data(self, inventory_update, private_data_dir):
        # Only writes private data when the optional GCE credentials input is set.
        credential = inventory_update.get_cloud_credential()
        private_data = {'credentials': {}}
        gce_cred = credential.get_input('gce_credentials', default=None)
        if gce_cred:
            private_data['credentials'][credential] = gce_cred
        return private_data

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
        credential = inventory_update.get_cloud_credential()
        cred_data = private_data_files['credentials']
        if credential in cred_data:
            # Terraform's GCS backend reads this var for service-account auth.
            env['GOOGLE_BACKEND_CREDENTIALS'] = to_container_path(cred_data[credential], private_data_dir)
        return env
class controller(PluginFileInjector):
    """Injector for sourcing inventory from another AWX/Controller instance."""

    plugin_name = 'tower'  # TODO: relying on routing for now, update after EEs pick up revised collection
    base_injector = 'template'
    namespace = 'awx'
    collection = 'awx'
    # Rebranded collection used when running on a downstream (non-AWX) product.
    downstream_namespace = 'ansible'
    downstream_collection = 'controller'
class insights(PluginFileInjector):
    """Injector for the Red Hat Insights inventory plugin."""

    plugin_name = 'insights'
    base_injector = 'template'
    namespace = 'redhatinsights'
    collection = 'insights'
    # Rebranded collection used when running on a downstream (non-AWX) product.
    downstream_namespace = 'redhat'
    downstream_collection = 'insights'
    use_fqcn = True
class openshift_virtualization(PluginFileInjector):
    """Injector for the kubevirt.core.kubevirt inventory plugin."""

    plugin_name = 'kubevirt'
    base_injector = 'template'
    namespace = 'kubevirt'
    collection = 'core'
    # Rebranded collection used when running on a downstream (non-AWX) product.
    downstream_namespace = 'redhat'
    downstream_collection = 'openshift_virtualization'
    use_fqcn = True
class constructed(PluginFileInjector):
    """Injector for the ansible.builtin.constructed inventory plugin."""

    plugin_name = 'constructed'
    namespace = 'ansible'
    collection = 'builtin'

    def build_env(self, *args, **kwargs):
        env = super().build_env(*args, **kwargs)
        # Enable script inventory plugin so we pick up the script files from source inventories
        enabled_plugins = env['ANSIBLE_INVENTORY_ENABLED']
        env['ANSIBLE_INVENTORY_ENABLED'] = enabled_plugins + ',script'
        env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'
        return env

View File

@@ -0,0 +1,48 @@
import logging
# from awxkit.api.mixins import DSAdapter, HasCreate, HasCopy
# from awxkit.api.pages import (
# Credential,
# Organization,
# )
from awxkit.api.resources import resources
# from awxkit.utils import random_title, PseudoNamespace, filter_by_class
from . import base
from . import page
log = logging.getLogger(__name__)
class RoleTeamAssignment(base.Base):
    # Fields that uniquely identify a team role assignment for lookup purposes.
    NATURAL_KEY = ('team', 'content_object', 'role_definition')


# Detail page, plus 'post' on both list endpoints that create assignments.
page.register_page(
    [resources.role_team_assignment, (resources.role_definition_team_assignments, 'post'), (resources.role_team_assignments, 'post')], RoleTeamAssignment
)


class RoleUserAssignment(base.Base):
    # Fields that uniquely identify a user role assignment for lookup purposes.
    NATURAL_KEY = ('user', 'content_object', 'role_definition')


# Detail page, plus 'post' on both list endpoints that create assignments.
page.register_page(
    [resources.role_user_assignment, (resources.role_definition_user_assignments, 'post'), (resources.role_user_assignments, 'post')], RoleUserAssignment
)


class RoleTeamAssignments(page.PageList, RoleTeamAssignment):
    # List page; behavior comes entirely from PageList + the item class.
    pass


page.register_page([resources.role_definition_team_assignments, resources.role_team_assignments], RoleTeamAssignments)


class RoleUserAssignments(page.PageList, RoleUserAssignment):
    # List page; behavior comes entirely from PageList + the item class.
    pass


page.register_page([resources.role_definition_user_assignments, resources.role_user_assignments], RoleUserAssignments)

View File

@@ -0,0 +1,30 @@
import logging
# from awxkit.api.mixins import DSAdapter, HasCreate, HasCopy
# from awxkit.api.pages import (
# Credential,
# Organization,
# )
from awxkit.api.resources import resources
# from awxkit.utils import random_title, PseudoNamespace, filter_by_class
from . import base
from . import page
log = logging.getLogger(__name__)
class RoleDefinition(base.Base):
    # Role definitions are uniquely identified by name.
    NATURAL_KEY = ('name',)


# Detail page, plus 'post' on the list endpoint that creates definitions.
page.register_page([resources.role_definition, (resources.role_definitions, 'post')], RoleDefinition)


class RoleDefinitions(page.PageList, RoleDefinition):
    # List page; behavior comes entirely from PageList + the item class.
    pass


page.register_page([resources.role_definitions], RoleDefinitions)

Some files were not shown because too many files have changed in this diff Show More