diff --git a/.github/actions/awx_devel_image/action.yml b/.github/actions/awx_devel_image/action.yml index 8b7081292b..0d7ea9ac37 100644 --- a/.github/actions/awx_devel_image/action.yml +++ b/.github/actions/awx_devel_image/action.yml @@ -11,6 +11,12 @@ runs: shell: bash run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV + - name: Set lower case owner name + shell: bash + run: echo "OWNER_LC=${OWNER,,}" >> $GITHUB_ENV + env: + OWNER: '${{ github.repository_owner }}' + - name: Log in to registry shell: bash run: | @@ -18,11 +24,11 @@ runs: - name: Pre-pull latest devel image to warm cache shell: bash - run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }} + run: docker pull ghcr.io/${OWNER_LC}/awx_devel:${{ github.base_ref }} - name: Build image for current source checkout shell: bash run: | - DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \ + DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \ COMPOSE_TAG=${{ github.base_ref }} \ make docker-compose-build diff --git a/.github/actions/run_awx_devel/action.yml b/.github/actions/run_awx_devel/action.yml index 79c795c85d..9ce4a0fbfe 100644 --- a/.github/actions/run_awx_devel/action.yml +++ b/.github/actions/run_awx_devel/action.yml @@ -35,7 +35,7 @@ runs: - name: Start AWX shell: bash run: | - DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \ + DEV_DOCKER_OWNER=${{ github.repository_owner }} \ COMPOSE_TAG=${{ github.base_ref }} \ COMPOSE_UP_OPTS="-d" \ make docker-compose @@ -71,7 +71,7 @@ runs: id: data shell: bash run: | - AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1) + AWX_IP=$(docker inspect -f '{{.NetworkSettings.Networks.awx.IPAddress}}' tools_awx_1) ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin) echo "ip=$AWX_IP" >> $GITHUB_OUTPUT echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT diff --git a/.github/pr_labeler.yml b/.github/pr_labeler.yml index 652cfaec75..c886330875 100644 --- a/.github/pr_labeler.yml +++ b/.github/pr_labeler.yml @@ -15,5 +15,4 @@ "dependencies": - any: ["awx/ui/package.json"] - - any: ["requirements/*.txt"] - - any: ["requirements/requirements.in"] + - any: ["requirements/*"] diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index afdbd69eb6..3b6258672f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -66,6 +66,8 @@ jobs: awx-operator: runs-on: ubuntu-latest timeout-minutes: 60 + env: + DEBUG_OUTPUT_DIR: /tmp/awx_operator_molecule_test steps: - name: Checkout awx uses: actions/checkout@v3 @@ -94,11 +96,11 @@ jobs: - name: Build AWX image working-directory: awx run: | - ansible-playbook -v tools/ansible/build.yml \ - -e headless=yes \ - -e awx_image=awx \ - -e awx_image_tag=ci \ - -e ansible_python_interpreter=$(which python3) + VERSION=`make version-for-buildyml` make awx-kube-build + env: + COMPOSE_TAG: ci + DEV_DOCKER_TAG_BASE: local + HEADLESS: yes - name: Run test deployment with awx-operator working-directory: awx-operator @@ -107,10 +109,19 @@ jobs: ansible-galaxy collection install -r molecule/requirements.yml sudo rm -f $(which kustomize) make kustomize - KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind + KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind -- --skip-tags=replicas env: - AWX_TEST_IMAGE: awx + AWX_TEST_IMAGE: local/awx AWX_TEST_VERSION: ci + AWX_EE_TEST_IMAGE: quay.io/ansible/awx-ee:latest + STORE_DEBUG_OUTPUT: true + + - name: Upload debug output + if: failure() + uses: 
actions/upload-artifact@v3 + with: + name: awx-operator-debug-output + path: ${{ env.DEBUG_OUTPUT_DIR }} collection-sanity: name: awx_collection sanity @@ -127,10 +138,6 @@ jobs: - name: Run sanity tests run: make test_collection_sanity - env: - # needed due to cgroupsv2. This is fixed, but a stable release - # with the fix has not been made yet. - ANSIBLE_TEST_PREFER_PODMAN: 1 collection-integration: name: awx_collection integration diff --git a/.github/workflows/devel_images.yml b/.github/workflows/devel_images.yml index 7f8c791bc4..2d10f01c05 100644 --- a/.github/workflows/devel_images.yml +++ b/.github/workflows/devel_images.yml @@ -3,28 +3,50 @@ name: Build/Push Development Images env: LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting on: + workflow_dispatch: push: branches: - devel - release_* - feature_* jobs: - push: - if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_') + push-development-images: runs-on: ubuntu-latest - timeout-minutes: 60 + timeout-minutes: 120 permissions: packages: write contents: read + strategy: + fail-fast: false + matrix: + build-targets: + - image-name: awx_devel + make-target: docker-compose-buildx + - image-name: awx_kube_devel + make-target: awx-kube-dev-buildx + - image-name: awx + make-target: awx-kube-buildx steps: + + - name: Skipping build of awx image for non-awx repository + run: | + echo "Skipping build of awx image for non-awx repository" + exit 0 + if: matrix.build-targets.image-name == 'awx' && !endsWith(github.repository, '/awx') + - uses: actions/checkout@v3 - - name: Get python version from Makefile - run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 - - name: Set lower case owner name + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Set GITHUB_ENV variables run: | - echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV} + echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV + echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV + echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV env: OWNER: '${{ github.repository_owner }}' @@ -37,23 +59,19 @@ jobs: run: | echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin - - name: Pre-pull image to warm build cache - run: | - docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || : - docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || : - docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || : + - name: Setup node and npm + uses: actions/setup-node@v2 + with: + node-version: '16.13.1' + if: matrix.build-targets.image-name == 'awx' - - name: Build images + - name: Prebuild UI for awx image (to speed up build process) run: | - DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build - DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build - DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build + sudo apt-get install gettext + make ui-release + make ui-next + if: matrix.build-targets.image-name == 'awx' - - name: Push development images + - name: Build and push AWX devel images run: | - docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} - docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} - - - name: Push AWX k8s image, only for upstream and feature branches - run: docker push 
ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
-        if: endsWith(github.repository, '/awx')
+          make ${{ matrix.build-targets.make-target }}
diff --git a/.github/workflows/feature_branch_deletion.yml b/.github/workflows/feature_branch_deletion.yml
index 3fbd287cb1..4893f8267d 100644
--- a/.github/workflows/feature_branch_deletion.yml
+++ b/.github/workflows/feature_branch_deletion.yml
@@ -2,12 +2,10 @@ name: Feature branch deletion cleanup
 env:
   LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
-on:
-  delete:
-    branches:
-      - feature_**
+on: delete
 
 jobs:
-  push:
+  branch_delete:
+    if: ${{ github.event.ref_type == 'branch' && startsWith(github.event.ref, 'feature_') }}
     runs-on: ubuntu-latest
     timeout-minutes: 20
     permissions:
@@ -22,6 +20,4 @@ jobs:
       run: |
         ansible localhost -c local -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
         ansible localhost -c local -m aws_s3 \
-          -a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"
-
-
+          -a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delobj permission=public-read"
diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml
index 81a7878d6e..cbbd7d0270 100644
--- a/.github/workflows/promote.yml
+++ b/.github/workflows/promote.yml
@@ -7,7 +7,11 @@ env:
 on:
   release:
     types: [published]
-
+  workflow_dispatch:
+    inputs:
+      tag_name:
+        description: 'Name for the tag of the release.'
+        required: true
 permissions:
   contents: read # to fetch code (actions/checkout)
 
@@ -17,6 +21,16 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 90
     steps:
+      - name: Set GitHub Env vars for workflow_dispatch event
+        if: ${{ github.event_name == 'workflow_dispatch' }}
+        run: |
+          echo "TAG_NAME=${{ github.event.inputs.tag_name }}" >> $GITHUB_ENV
+
+      - name: Set GitHub Env vars if release event
+        if: ${{ github.event_name == 'release' }}
+        run: |
+          echo "TAG_NAME=${{ github.event.release.tag_name }}" >> $GITHUB_ENV
+
       - name: Checkout awx
         uses: actions/checkout@v3
 
@@ -43,16 +57,18 @@
       - name: Build collection and publish to galaxy
         env:
           COLLECTION_NAMESPACE: ${{ env.collection_namespace }}
-          COLLECTION_VERSION: ${{ github.event.release.tag_name }}
+          COLLECTION_VERSION: ${{ env.TAG_NAME }}
           COLLECTION_TEMPLATE_VERSION: true
         run: |
           make build_collection
-          if [ "$(curl -L --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
-            echo "Galaxy release already done"; \
-          else \
+          curl_with_redirects=$(curl --head -sLw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ env.TAG_NAME }}.tar.gz | tail -1)
+          curl_without_redirects=$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ env.TAG_NAME }}.tar.gz | tail -1)
+          if [[ "$curl_with_redirects" == "302" ]] || [[ "$curl_without_redirects" == "302" ]]; then
+            echo "Galaxy release already done";
+          else
             ansible-galaxy collection publish \
               --token=${{ secrets.GALAXY_TOKEN }} \
-              awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz; \
+              awx_collection_build/${{ env.collection_namespace }}-awx-${{ env.TAG_NAME }}.tar.gz;
           fi
 
       - name: Set official pypi info
@@ -64,9 +80,11 @@
         if: ${{ github.repository_owner != 'ansible' }}
 
       - name: Build awxkit and upload to pypi
+        env:
+          SETUPTOOLS_SCM_PRETEND_VERSION: ${{ env.TAG_NAME }}
         run: |
          git reset 
--hard - cd awxkit && python3 setup.py bdist_wheel + cd awxkit && python3 setup.py sdist bdist_wheel twine upload \ -r ${{ env.pypi_repo }} \ -u ${{ secrets.PYPI_USERNAME }} \ @@ -83,11 +101,15 @@ jobs: - name: Re-tag and promote awx image run: | - docker pull ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} - docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:${{ github.event.release.tag_name }} - docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest - docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }} - docker push quay.io/${{ github.repository }}:latest - docker pull ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} - docker tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} - docker push quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} + docker buildx imagetools create \ + ghcr.io/${{ github.repository }}:${{ env.TAG_NAME }} \ + --tag quay.io/${{ github.repository }}:${{ env.TAG_NAME }} + docker buildx imagetools create \ + ghcr.io/${{ github.repository }}:${{ env.TAG_NAME }} \ + --tag quay.io/${{ github.repository }}:latest + + - name: Re-tag and promote awx-ee image + run: | + docker buildx imagetools create \ + ghcr.io/${{ github.repository_owner }}/awx-ee:${{ env.TAG_NAME }} \ + --tag quay.io/${{ github.repository_owner }}/awx-ee:${{ env.TAG_NAME }} diff --git a/.github/workflows/stage.yml b/.github/workflows/stage.yml index f38cda55ec..ddf4d9c6ee 100644 --- a/.github/workflows/stage.yml +++ b/.github/workflows/stage.yml @@ -49,13 +49,11 @@ jobs: with: path: awx - - name: Get python version from Makefile - run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV - - - name: Install python ${{ env.py_version }} - uses: actions/setup-python@v4 + - name: Checkout awx-operator + uses: actions/checkout@v3 with: - python-version: ${{ env.py_version }} + repository: ${{ github.repository_owner }}/awx-operator + path: awx-operator - name: Checkout awx-logos uses: actions/checkout@v3 @@ -63,50 +61,84 @@ jobs: repository: ansible/awx-logos path: awx-logos - - name: Checkout awx-operator - uses: actions/checkout@v3 + - name: Get python version from Makefile + working-directory: awx + run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV + + - name: Install python ${{ env.py_version }} + uses: actions/setup-python@v4 with: - repository: ${{ github.repository_owner }}/awx-operator - path: awx-operator + python-version: ${{ env.py_version }} - name: Install playbook dependencies run: | python3 -m pip install docker - - name: Build and stage AWX + - name: Log into registry ghcr.io + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Copy logos for inclusion in sdist for official build working-directory: awx run: | - ansible-playbook -v tools/ansible/build.yml \ - -e registry=ghcr.io \ - -e registry_username=${{ github.actor }} \ - -e registry_password=${{ secrets.GITHUB_TOKEN }} \ - -e awx_image=${{ github.repository }} \ - -e awx_version=${{ github.event.inputs.version }} \ - -e ansible_python_interpreter=$(which python3) \ - -e push=yes \ - -e awx_official=yes + cp ../awx-logos/awx/ui/client/assets/* awx/ui/public/static/media/ - - name: 
Log in to GHCR
-        run: |
-          echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
+      - name: Setup node and npm
+        uses: actions/setup-node@v2
+        with:
+          node-version: '16.13.1'
 
-      - name: Log in to Quay
+      - name: Prebuild UI for awx image (to speed up build process)
+        working-directory: awx
         run: |
-          echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
+          sudo apt-get install gettext
+          make ui-release
+          make ui-next
+
+      - name: Set build env variables
+        run: |
+          echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
+          echo "COMPOSE_TAG=${{ github.event.inputs.version }}" >> $GITHUB_ENV
+          echo "VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
+          echo "AWX_TEST_VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
+          echo "AWX_TEST_IMAGE=ghcr.io/${OWNER,,}/awx" >> $GITHUB_ENV
+          echo "AWX_EE_TEST_IMAGE=ghcr.io/${OWNER,,}/awx-ee:${{ github.event.inputs.version }}" >> $GITHUB_ENV
+          echo "AWX_OPERATOR_TEST_IMAGE=ghcr.io/${OWNER,,}/awx-operator:${{ github.event.inputs.operator_version }}" >> $GITHUB_ENV
+        env:
+          OWNER: ${{ github.repository_owner }}
+
+      - name: Build and stage AWX
+        working-directory: awx
+        env:
+          DOCKER_BUILDX_PUSH: true
+          HEADLESS: false
+          PLATFORMS: linux/amd64,linux/arm64
+        run: |
+          make awx-kube-buildx
 
       - name: tag awx-ee:latest with version input
         run: |
-          docker pull quay.io/ansible/awx-ee:latest
-          docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
-          docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
+          docker buildx imagetools create \
+            quay.io/ansible/awx-ee:latest \
+            --tag ${AWX_EE_TEST_IMAGE}
 
-      - name: Build and stage awx-operator
+      - name: Stage awx-operator image
         working-directory: awx-operator
         run: |
-          BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }} \
-            --build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
-          IMAGE_TAG_BASE=ghcr.io/${{ github.repository_owner }}/awx-operator \
-          VERSION=${{ github.event.inputs.operator_version }} make docker-build docker-push
+          BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }} \
+            --build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
+          IMG=${AWX_OPERATOR_TEST_IMAGE} \
+          make docker-buildx
+
+      - name: Pulling images for test deployment with awx-operator
+        # awx operator molecule tests expect to kind-load images, but buildx exports images to the registry and not to the local daemon
+        run: |
+          docker pull ${AWX_OPERATOR_TEST_IMAGE}
+          docker pull ${AWX_EE_TEST_IMAGE}
+          docker pull ${AWX_TEST_IMAGE}:${AWX_TEST_VERSION}
 
       - name: Run test deployment with awx-operator
         working-directory: awx-operator
@@ -116,10 +148,6 @@
           sudo rm -f $(which kustomize)
           make kustomize
           KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule test -s kind
-        env:
-          AWX_TEST_IMAGE: ${{ github.repository }}
-          AWX_TEST_VERSION: ${{ github.event.inputs.version }}
-          AWX_EE_TEST_IMAGE: ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
 
       - name: Create draft release for AWX
         working-directory: awx
diff --git a/.gitignore b/.gitignore
index b41763166c..6176ad6aed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,6 +46,11 @@ tools/docker-compose/overrides/
 tools/docker-compose-minikube/_sources
 tools/docker-compose/keycloak.awx.realm.json
 
+!tools/docker-compose/editable_dependencies
+tools/docker-compose/editable_dependencies/* 
+!tools/docker-compose/editable_dependencies/README.md +!tools/docker-compose/editable_dependencies/install.sh + # Tower setup playbook testing setup/test/roles/postgresql **/provision_docker @@ -169,3 +174,6 @@ awx/ui_next/build # Docs build stuff docs/docsite/build/ _readthedocs/ + +# Pyenv +.python-version diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000000..6228912256 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,113 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "run_ws_heartbeat", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["run_ws_heartbeat"], + "django": true, + "preLaunchTask": "stop awx-ws-heartbeat", + "postDebugTask": "start awx-ws-heartbeat" + }, + { + "name": "run_cache_clear", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["run_cache_clear"], + "django": true, + "preLaunchTask": "stop awx-cache-clear", + "postDebugTask": "start awx-cache-clear" + }, + { + "name": "run_callback_receiver", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["run_callback_receiver"], + "django": true, + "preLaunchTask": "stop awx-receiver", + "postDebugTask": "start awx-receiver" + }, + { + "name": "run_dispatcher", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["run_dispatcher"], + "django": true, + "preLaunchTask": "stop awx-dispatcher", + "postDebugTask": "start awx-dispatcher" + }, + { + "name": "run_rsyslog_configurer", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["run_rsyslog_configurer"], + "django": true, + "preLaunchTask": "stop awx-rsyslog-configurer", + "postDebugTask": "start awx-rsyslog-configurer" + }, + { + "name": "run_cache_clear", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["run_cache_clear"], + "django": true, + "preLaunchTask": "stop awx-cache-clear", + "postDebugTask": "start awx-cache-clear" + }, + { + "name": "run_wsrelay", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["run_wsrelay"], + "django": true, + "preLaunchTask": "stop awx-wsrelay", + "postDebugTask": "start awx-wsrelay" + }, + { + "name": "daphne", + "type": "debugpy", + "request": "launch", + "program": "/var/lib/awx/venv/awx/bin/daphne", + "args": ["-b", "127.0.0.1", "-p", "8051", "awx.asgi:channel_layer"], + "django": true, + "preLaunchTask": "stop awx-daphne", + "postDebugTask": "start awx-daphne" + }, + { + "name": "runserver(uwsgi alternative)", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["runserver", "127.0.0.1:8052"], + "django": true, + "preLaunchTask": "stop awx-uwsgi", + "postDebugTask": "start awx-uwsgi" + }, + { + "name": "runserver_plus(uwsgi alternative)", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["runserver_plus", "127.0.0.1:8052"], + "django": true, + "preLaunchTask": "stop awx-uwsgi and install Werkzeug", + "postDebugTask": "start awx-uwsgi" + }, + { + "name": "shell_plus", + "type": "debugpy", + "request": "launch", + "program": "manage.py", + "args": ["shell_plus"], + "django": true, + }, + ] +} diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 0000000000..0b878663c3 --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,100 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "start awx-cache-clear", + "type": "shell", + "command": "supervisorctl start tower-processes:awx-cache-clear" + }, + 
{ + "label": "stop awx-cache-clear", + "type": "shell", + "command": "supervisorctl stop tower-processes:awx-cache-clear" + }, + { + "label": "start awx-daphne", + "type": "shell", + "command": "supervisorctl start tower-processes:awx-daphne" + }, + { + "label": "stop awx-daphne", + "type": "shell", + "command": "supervisorctl stop tower-processes:awx-daphne" + }, + { + "label": "start awx-dispatcher", + "type": "shell", + "command": "supervisorctl start tower-processes:awx-dispatcher" + }, + { + "label": "stop awx-dispatcher", + "type": "shell", + "command": "supervisorctl stop tower-processes:awx-dispatcher" + }, + { + "label": "start awx-receiver", + "type": "shell", + "command": "supervisorctl start tower-processes:awx-receiver" + }, + { + "label": "stop awx-receiver", + "type": "shell", + "command": "supervisorctl stop tower-processes:awx-receiver" + }, + { + "label": "start awx-rsyslog-configurer", + "type": "shell", + "command": "supervisorctl start tower-processes:awx-rsyslog-configurer" + }, + { + "label": "stop awx-rsyslog-configurer", + "type": "shell", + "command": "supervisorctl stop tower-processes:awx-rsyslog-configurer" + }, + { + "label": "start awx-rsyslogd", + "type": "shell", + "command": "supervisorctl start tower-processes:awx-rsyslogd" + }, + { + "label": "stop awx-rsyslogd", + "type": "shell", + "command": "supervisorctl stop tower-processes:awx-rsyslogd" + }, + { + "label": "start awx-uwsgi", + "type": "shell", + "command": "supervisorctl start tower-processes:awx-uwsgi" + }, + { + "label": "stop awx-uwsgi", + "type": "shell", + "command": "supervisorctl stop tower-processes:awx-uwsgi" + }, + { + "label": "stop awx-uwsgi and install Werkzeug", + "type": "shell", + "command": "pip install Werkzeug; supervisorctl stop tower-processes:awx-uwsgi" + }, + { + "label": "start awx-ws-heartbeat", + "type": "shell", + "command": "supervisorctl start tower-processes:awx-ws-heartbeat" + }, + { + "label": "stop awx-ws-heartbeat", + "type": "shell", + "command": "supervisorctl stop tower-processes:awx-ws-heartbeat" + }, + { + "label": "start awx-wsrelay", + "type": "shell", + "command": "supervisorctl start tower-processes:awx-wsrelay" + }, + { + "label": "stop awx-wsrelay", + "type": "shell", + "command": "supervisorctl stop tower-processes:awx-wsrelay" + } + ] +} diff --git a/MANIFEST.in b/MANIFEST.in index 09a5392c50..3db512ee13 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -22,7 +22,7 @@ recursive-exclude awx/settings local_settings.py* include tools/scripts/request_tower_configuration.sh include tools/scripts/request_tower_configuration.ps1 include tools/scripts/automation-controller-service -include tools/scripts/failure-event-handler +include tools/scripts/rsyslog-4xx-recovery include tools/scripts/awx-python include awx/playbooks/library/mkfifo.py include tools/sosreport/* diff --git a/Makefile b/Makefile index 1694c1994a..5aa49ee9ab 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ -include awx/ui_next/Makefile -PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q)) +PYTHON := $(notdir $(shell for i in python3.11 python3; do command -v $$i; done|sed 1q)) SHELL := bash -DOCKER_COMPOSE ?= docker-compose +DOCKER_COMPOSE ?= docker compose OFFICIAL ?= no NODE ?= node NPM_BIN ?= npm @@ -10,7 +10,7 @@ KIND_BIN ?= $(shell which kind) CHROMIUM_BIN=/tmp/chrome-linux/chrome GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) MANAGEMENT_COMMAND ?= awx-manage -VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py) +VERSION ?= $(shell 
$(PYTHON) tools/scripts/scm_version.py 2> /dev/null) # ansible-test requires semver compatable version, so we allow overrides to hack it COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3) @@ -47,6 +47,8 @@ VAULT ?= false VAULT_TLS ?= false # If set to true docker-compose will also start a tacacs+ instance TACACS ?= false +# If set to true docker-compose will install editable dependencies +EDITABLE_DEPENDENCIES ?= false VENV_BASE ?= /var/lib/awx/venv @@ -63,7 +65,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio # These should be upgraded in the AWX and Ansible venv before attempting # to install the actual requirements -VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4 +VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==69.0.2 setuptools_scm[toml]==8.0.4 wheel==0.42.0 NAME ?= awx @@ -75,6 +77,9 @@ SDIST_TAR_FILE ?= $(SDIST_TAR_NAME).tar.gz I18N_FLAG_FILE = .i18n_built +## PLATFORMS defines the target platforms for the manager image be build to provide support to multiple +PLATFORMS ?= linux/amd64,linux/arm64 # linux/ppc64le,linux/s390x + .PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \ develop refresh adduser migrate dbchange \ receiver test test_unit test_coverage coverage_html \ @@ -213,8 +218,6 @@ collectstatic: fi; \ $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1 -DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:* - uwsgi: collectstatic @if [ "$(VENV_BASE)" ]; then \ . $(VENV_BASE)/awx/bin/activate; \ @@ -222,7 +225,7 @@ uwsgi: collectstatic uwsgi /etc/tower/uwsgi.ini awx-autoreload: - @/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)" + @/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx daphne: @if [ "$(VENV_BASE)" ]; then \ @@ -302,7 +305,7 @@ swagger: reports @if [ "$(VENV_BASE)" ]; then \ . 
$(VENV_BASE)/awx/bin/activate; \ fi; \ - (set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs --release=$(VERSION_TARGET) | tee reports/$@.report) + (set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report) check: black @@ -532,15 +535,23 @@ docker-compose-sources: .git/hooks/pre-commit -e enable_vault=$(VAULT) \ -e vault_tls=$(VAULT_TLS) \ -e enable_tacacs=$(TACACS) \ - $(EXTRA_SOURCES_ANSIBLE_OPTS) + -e install_editable_dependencies=$(EDITABLE_DEPENDENCIES) \ + $(EXTRA_SOURCES_ANSIBLE_OPTS) docker-compose: awx/projects docker-compose-sources ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml; ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \ -e enable_vault=$(VAULT) \ - -e vault_tls=$(VAULT_TLS); + -e vault_tls=$(VAULT_TLS) \ + -e enable_ldap=$(LDAP); \ + $(MAKE) docker-compose-up + +docker-compose-up: $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans +docker-compose-down: + $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) down --remove-orphans + docker-compose-credential-plugins: awx/projects docker-compose-sources echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m" $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans @@ -585,12 +596,27 @@ docker-compose-build: Dockerfile.dev --build-arg BUILDKIT_INLINE_CACHE=1 \ --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) . + +.PHONY: docker-compose-buildx +## Build awx_devel image for docker compose development environment for multiple architectures +docker-compose-buildx: Dockerfile.dev + - docker buildx create --name docker-compose-buildx + docker buildx use docker-compose-buildx + - docker buildx build \ + --push \ + --build-arg BUILDKIT_INLINE_CACHE=1 \ + --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) \ + --platform=$(PLATFORMS) \ + --tag $(DEVEL_IMAGE_NAME) \ + -f Dockerfile.dev . 
+ - docker buildx rm docker-compose-buildx + docker-clean: -$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);) -$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);) docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean - docker volume rm -f tools_awx_db tools_vault_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q) + docker volume rm -f tools_var_lib_awx tools_awx_db tools_awx_db_15 tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q) docker-refresh: docker-clean docker-compose @@ -612,9 +638,6 @@ clean-elk: docker rm tools_elasticsearch_1 docker rm tools_kibana_1 -psql-container: - docker run -it --net tools_default --rm postgres:12 sh -c 'exec psql -h "postgres" -p "5432" -U postgres' - VERSION: @echo "awx: $(VERSION)" @@ -647,6 +670,21 @@ awx-kube-build: Dockerfile --build-arg HEADLESS=$(HEADLESS) \ -t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) . +## Build multi-arch awx image for deployment on Kubernetes environment. +awx-kube-buildx: Dockerfile + - docker buildx create --name awx-kube-buildx + docker buildx use awx-kube-buildx + - docker buildx build \ + --push \ + --build-arg VERSION=$(VERSION) \ + --build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \ + --build-arg HEADLESS=$(HEADLESS) \ + --platform=$(PLATFORMS) \ + --tag $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) \ + -f Dockerfile . + - docker buildx rm awx-kube-buildx + + .PHONY: Dockerfile.kube-dev ## Generate Docker.kube-dev for awx_kube_devel image Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2 @@ -663,6 +701,18 @@ awx-kube-dev-build: Dockerfile.kube-dev --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \ -t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) . +## Build and push multi-arch awx_kube_devel image for development on local Kubernetes environment. +awx-kube-dev-buildx: Dockerfile.kube-dev + - docker buildx create --name awx-kube-dev-buildx + docker buildx use awx-kube-dev-buildx + - docker buildx build \ + --push \ + --build-arg BUILDKIT_INLINE_CACHE=1 \ + --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \ + --platform=$(PLATFORMS) \ + --tag $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \ + -f Dockerfile.kube-dev . + - docker buildx rm awx-kube-dev-buildx kind-dev-load: awx-kube-dev-build $(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) diff --git a/README.md b/README.md index 395f943d21..99ee6b523d 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ AWX provides a web-based user interface, REST API, and task engine built on top To install AWX, please view the [Install guide](./INSTALL.md). -To learn more about using AWX, and Tower, view the [Tower docs site](http://docs.ansible.com/ansible-tower/index.html). +To learn more about using AWX, view the [AWX docs site](https://ansible.readthedocs.io/projects/awx/en/latest/). The AWX Project Frequently Asked Questions can be found [here](https://www.ansible.com/awx-project-faq). 
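For context on the Makefile changes above: the three new buildx targets (`docker-compose-buildx`, `awx-kube-buildx`, `awx-kube-dev-buildx`) all wrap the same sequence of creating a throwaway builder, running a multi-arch build with `--push`, and removing the builder. Because `--push` sends the multi-arch manifest straight to the registry, nothing lands in the local Docker image store. Below is a minimal sketch of that sequence outside of make; the builder name, owner, and tag are illustrative, while the real targets derive them from `DEV_DOCKER_TAG_BASE`, `COMPOSE_TAG`, and `PLATFORMS`:

```sh
# Create a dedicated builder so the default builder is left untouched.
docker buildx create --name example-builder
docker buildx use example-builder

# Build for every target platform and push the manifest list to the registry.
# --push is needed because multi-arch output cannot be loaded into the local daemon.
docker buildx build \
  --push \
  --platform linux/amd64,linux/arm64 \
  --tag ghcr.io/example-owner/awx:devel \
  -f Dockerfile .

# Remove the builder once the image is pushed.
docker buildx rm example-builder
```

This registry-only behavior is also why the stage workflow later has to `docker pull` the images it just built before the molecule/kind test deployment can use them.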
diff --git a/awx/__init__.py b/awx/__init__.py index 703e06daf1..a23703c940 100644 --- a/awx/__init__.py +++ b/awx/__init__.py @@ -154,10 +154,12 @@ def manage(): from django.conf import settings from django.core.management import execute_from_command_line - # enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1 + # enforce the postgres version is a minimum of 12 (we need this for partitioning); if not, then terminate program with exit code of 1 + # In the future if we require a feature of a version of postgres > 12 this should be updated to reflect that. + # The return of connection.pg_version is something like 12013 if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development': if (connection.pg_version // 10000) < 12: - sys.stderr.write("Postgres version 12 is required\n") + sys.stderr.write("At a minimum, postgres version 12 is required\n") sys.exit(1) if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'): # pragma: no cover diff --git a/awx/api/conf.py b/awx/api/conf.py index 0697f40c56..72aaf3eec3 100644 --- a/awx/api/conf.py +++ b/awx/api/conf.py @@ -93,6 +93,7 @@ register( default='', label=_('Login redirect override URL'), help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.'), + warning_text=_('Changing the redirect URL could impact the ability to login if local authentication is also disabled.'), category=_('Authentication'), category_slug='authentication', ) diff --git a/awx/api/filters.py b/awx/api/filters.py deleted file mode 100644 index 6169dc548a..0000000000 --- a/awx/api/filters.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. - -# Python -import re -import json -from functools import reduce - -# Django -from django.core.exceptions import FieldError, ValidationError, FieldDoesNotExist -from django.db import models -from django.db.models import Q, CharField, IntegerField, BooleanField, TextField, JSONField -from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey -from django.db.models.functions import Cast -from django.contrib.contenttypes.models import ContentType -from django.contrib.contenttypes.fields import GenericForeignKey -from django.utils.encoding import force_str -from django.utils.translation import gettext_lazy as _ - -# Django REST Framework -from rest_framework.exceptions import ParseError, PermissionDenied -from rest_framework.filters import BaseFilterBackend - -# AWX -from awx.main.utils import get_type_for_model, to_python_boolean -from awx.main.utils.db import get_all_field_names - - -class TypeFilterBackend(BaseFilterBackend): - """ - Filter on type field now returned with all objects. 
- """ - - def filter_queryset(self, request, queryset, view): - try: - types = None - for key, value in request.query_params.items(): - if key == 'type': - if ',' in value: - types = value.split(',') - else: - types = (value,) - if types: - types_map = {} - for ct in ContentType.objects.filter(Q(app_label='main') | Q(app_label='auth', model='user')): - ct_model = ct.model_class() - if not ct_model: - continue - ct_type = get_type_for_model(ct_model) - types_map[ct_type] = ct.pk - model = queryset.model - model_type = get_type_for_model(model) - if 'polymorphic_ctype' in get_all_field_names(model): - types_pks = set([v for k, v in types_map.items() if k in types]) - queryset = queryset.filter(polymorphic_ctype_id__in=types_pks) - elif model_type in types: - queryset = queryset - else: - queryset = queryset.none() - return queryset - except FieldError as e: - # Return a 400 for invalid field names. - raise ParseError(*e.args) - - -def get_fields_from_path(model, path): - """ - Given a Django ORM lookup path (possibly over multiple models) - Returns the fields in the line, and also the revised lookup path - ex., given - model=Organization - path='project__timeout' - returns tuple of fields traversed as well and a corrected path, - for special cases we do substitutions - ([], 'project__timeout') - """ - # Store of all the fields used to detect repeats - field_list = [] - new_parts = [] - for name in path.split('__'): - if model is None: - raise ParseError(_('No related model for field {}.').format(name)) - # HACK: Make project and inventory source filtering by old field names work for backwards compatibility. - if model._meta.object_name in ('Project', 'InventorySource'): - name = {'current_update': 'current_job', 'last_update': 'last_job', 'last_update_failed': 'last_job_failed', 'last_updated': 'last_job_run'}.get( - name, name - ) - - if name == 'type' and 'polymorphic_ctype' in get_all_field_names(model): - name = 'polymorphic_ctype' - new_parts.append('polymorphic_ctype__model') - else: - new_parts.append(name) - - if name in getattr(model, 'PASSWORD_FIELDS', ()): - raise PermissionDenied(_('Filtering on password fields is not allowed.')) - elif name == 'pk': - field = model._meta.pk - else: - name_alt = name.replace("_", "") - if name_alt in model._meta.fields_map.keys(): - field = model._meta.fields_map[name_alt] - new_parts.pop() - new_parts.append(name_alt) - else: - field = model._meta.get_field(name) - if isinstance(field, ForeignObjectRel) and getattr(field.field, '__prevent_search__', False): - raise PermissionDenied(_('Filtering on %s is not allowed.' % name)) - elif getattr(field, '__prevent_search__', False): - raise PermissionDenied(_('Filtering on %s is not allowed.' % name)) - if field in field_list: - # Field traversed twice, could create infinite JOINs, DoSing Tower - raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name)) - field_list.append(field) - model = getattr(field, 'related_model', None) - - return field_list, '__'.join(new_parts) - - -def get_field_from_path(model, path): - """ - Given a Django ORM lookup path (possibly over multiple models) - Returns the last field in the line, and the revised lookup path - ex. - (, 'project__timeout') - """ - field_list, new_path = get_fields_from_path(model, path) - return (field_list[-1], new_path) - - -class FieldLookupBackend(BaseFilterBackend): - """ - Filter using field lookups provided via query string parameters. 
- """ - - RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate', 'limit') - - SUPPORTED_LOOKUPS = ( - 'exact', - 'iexact', - 'contains', - 'icontains', - 'startswith', - 'istartswith', - 'endswith', - 'iendswith', - 'regex', - 'iregex', - 'gt', - 'gte', - 'lt', - 'lte', - 'in', - 'isnull', - 'search', - ) - - # A list of fields that we know can be filtered on without the possibility - # of introducing duplicates - NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField) - - def get_fields_from_lookup(self, model, lookup): - if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS: - path, suffix = lookup.rsplit('__', 1) - else: - path = lookup - suffix = 'exact' - - if not path: - raise ParseError(_('Query string field name not provided.')) - - # FIXME: Could build up a list of models used across relationships, use - # those lookups combined with request.user.get_queryset(Model) to make - # sure user cannot query using objects he could not view. - field_list, new_path = get_fields_from_path(model, path) - - new_lookup = new_path - new_lookup = '__'.join([new_path, suffix]) - return field_list, new_lookup - - def get_field_from_lookup(self, model, lookup): - '''Method to match return type of single field, if needed.''' - field_list, new_lookup = self.get_fields_from_lookup(model, lookup) - return (field_list[-1], new_lookup) - - def to_python_related(self, value): - value = force_str(value) - if value.lower() in ('none', 'null'): - return None - else: - return int(value) - - def value_to_python_for_field(self, field, value): - if isinstance(field, models.BooleanField): - return to_python_boolean(value) - elif isinstance(field, (ForeignObjectRel, ManyToManyField, GenericForeignKey, ForeignKey)): - try: - return self.to_python_related(value) - except ValueError: - raise ParseError(_('Invalid {field_name} id: {field_id}').format(field_name=getattr(field, 'name', 'related field'), field_id=value)) - else: - return field.to_python(value) - - def value_to_python(self, model, lookup, value): - try: - lookup.encode("ascii") - except UnicodeEncodeError: - raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup) - - field_list, new_lookup = self.get_fields_from_lookup(model, lookup) - field = field_list[-1] - - needs_distinct = not all(isinstance(f, self.NO_DUPLICATES_ALLOW_LIST) for f in field_list) - - # Type names are stored without underscores internally, but are presented and - # and serialized over the API containing underscores so we remove `_` - # for polymorphic_ctype__model lookups. 
- if new_lookup.startswith('polymorphic_ctype__model'): - value = value.replace('_', '') - elif new_lookup.endswith('__isnull'): - value = to_python_boolean(value) - elif new_lookup.endswith('__in'): - items = [] - if not value: - raise ValueError('cannot provide empty value for __in') - for item in value.split(','): - items.append(self.value_to_python_for_field(field, item)) - value = items - elif new_lookup.endswith('__regex') or new_lookup.endswith('__iregex'): - try: - re.compile(value) - except re.error as e: - raise ValueError(e.args[0]) - elif new_lookup.endswith('__iexact'): - if not isinstance(field, (CharField, TextField)): - raise ValueError(f'{field.name} is not a text field and cannot be filtered by case-insensitive search') - elif new_lookup.endswith('__search'): - related_model = getattr(field, 'related_model', None) - if not related_model: - raise ValueError('%s is not searchable' % new_lookup[:-8]) - new_lookups = [] - for rm_field in related_model._meta.fields: - if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'): - new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name)) - return value, new_lookups, needs_distinct - else: - if isinstance(field, JSONField): - new_lookup = new_lookup.replace(field.name, f'{field.name}_as_txt') - value = self.value_to_python_for_field(field, value) - return value, new_lookup, needs_distinct - - def filter_queryset(self, request, queryset, view): - try: - # Apply filters specified via query_params. Each entry in the lists - # below is (negate, field, value). - and_filters = [] - or_filters = [] - chain_filters = [] - role_filters = [] - search_filters = {} - needs_distinct = False - # Can only have two values: 'AND', 'OR' - # If 'AND' is used, an item must satisfy all conditions to show up in the results. - # If 'OR' is used, an item just needs to satisfy one condition to appear in results. - search_filter_relation = 'OR' - for key, values in request.query_params.lists(): - if key in self.RESERVED_NAMES: - continue - - # HACK: make `created` available via API for the Django User ORM model - # so it keep compatibility with other objects which exposes the `created` attr. - if queryset.model._meta.object_name == 'User' and key.startswith('created'): - key = key.replace('created', 'date_joined') - - # HACK: Make job event filtering by host name mostly work even - # when not capturing job event hosts M2M. - if queryset.model._meta.object_name == 'JobEvent' and key.startswith('hosts__name'): - key = key.replace('hosts__name', 'or__host__name') - or_filters.append((False, 'host__name__isnull', True)) - - # Custom __int filter suffix (internal use only). - q_int = False - if key.endswith('__int'): - key = key[:-5] - q_int = True - - # RBAC filtering - if key == 'role_level': - role_filters.append(values[0]) - continue - - # Search across related objects. - if key.endswith('__search'): - if values and ',' in values[0]: - search_filter_relation = 'AND' - values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values]) - for value in values: - search_value, new_keys, _ = self.value_to_python(queryset.model, key, force_str(value)) - assert isinstance(new_keys, list) - search_filters[search_value] = new_keys - # by definition, search *only* joins across relations, - # so it _always_ needs a .distinct() - needs_distinct = True - continue - - # Custom chain__ and or__ filters, mutually exclusive (both can - # precede not__). 
- q_chain = False - q_or = False - if key.startswith('chain__'): - key = key[7:] - q_chain = True - elif key.startswith('or__'): - key = key[4:] - q_or = True - - # Custom not__ filter prefix. - q_not = False - if key.startswith('not__'): - key = key[5:] - q_not = True - - # Convert value(s) to python and add to the appropriate list. - for value in values: - if q_int: - value = int(value) - value, new_key, distinct = self.value_to_python(queryset.model, key, value) - if distinct: - needs_distinct = True - if '_as_txt' in new_key: - fname = next(item for item in new_key.split('__') if item.endswith('_as_txt')) - queryset = queryset.annotate(**{fname: Cast(fname[:-7], output_field=TextField())}) - if q_chain: - chain_filters.append((q_not, new_key, value)) - elif q_or: - or_filters.append((q_not, new_key, value)) - else: - and_filters.append((q_not, new_key, value)) - - # Now build Q objects for database query filter. - if and_filters or or_filters or chain_filters or role_filters or search_filters: - args = [] - for n, k, v in and_filters: - if n: - args.append(~Q(**{k: v})) - else: - args.append(Q(**{k: v})) - for role_name in role_filters: - if not hasattr(queryset.model, 'accessible_pk_qs'): - raise ParseError(_('Cannot apply role_level filter to this list because its model does not use roles for access control.')) - args.append(Q(pk__in=queryset.model.accessible_pk_qs(request.user, role_name))) - if or_filters: - q = Q() - for n, k, v in or_filters: - if n: - q |= ~Q(**{k: v}) - else: - q |= Q(**{k: v}) - args.append(q) - if search_filters and search_filter_relation == 'OR': - q = Q() - for term, constrains in search_filters.items(): - for constrain in constrains: - q |= Q(**{constrain: term}) - args.append(q) - elif search_filters and search_filter_relation == 'AND': - for term, constrains in search_filters.items(): - q_chain = Q() - for constrain in constrains: - q_chain |= Q(**{constrain: term}) - queryset = queryset.filter(q_chain) - for n, k, v in chain_filters: - if n: - q = ~Q(**{k: v}) - else: - q = Q(**{k: v}) - queryset = queryset.filter(q) - queryset = queryset.filter(*args) - if needs_distinct: - queryset = queryset.distinct() - return queryset - except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e: - raise ParseError(e.args[0]) - except ValidationError as e: - raise ParseError(json.dumps(e.messages, ensure_ascii=False)) - - -class OrderByBackend(BaseFilterBackend): - """ - Filter to apply ordering based on query string parameters. - """ - - def filter_queryset(self, request, queryset, view): - try: - order_by = None - for key, value in request.query_params.items(): - if key in ('order', 'order_by'): - order_by = value - if ',' in value: - order_by = value.split(',') - else: - order_by = (value,) - default_order_by = self.get_default_ordering(view) - # glue the order by and default order by together so that the default is the backup option - order_by = list(order_by or []) + list(default_order_by or []) - if order_by: - order_by = self._validate_ordering_fields(queryset.model, order_by) - # Special handling of the type field for ordering. In this - # case, we're not sorting exactly on the type field, but - # given the limited number of views with multiple types, - # sorting on polymorphic_ctype.model is effectively the same. 
- new_order_by = [] - if 'polymorphic_ctype' in get_all_field_names(queryset.model): - for field in order_by: - if field == 'type': - new_order_by.append('polymorphic_ctype__model') - elif field == '-type': - new_order_by.append('-polymorphic_ctype__model') - else: - new_order_by.append(field) - else: - for field in order_by: - if field not in ('type', '-type'): - new_order_by.append(field) - queryset = queryset.order_by(*new_order_by) - return queryset - except FieldError as e: - # Return a 400 for invalid field names. - raise ParseError(*e.args) - - def get_default_ordering(self, view): - ordering = getattr(view, 'ordering', None) - if isinstance(ordering, str): - return (ordering,) - return ordering - - def _validate_ordering_fields(self, model, order_by): - for field_name in order_by: - # strip off the negation prefix `-` if it exists - prefix = '' - path = field_name - if field_name[0] == '-': - prefix = field_name[0] - path = field_name[1:] - try: - field, new_path = get_field_from_path(model, path) - new_path = '{}{}'.format(prefix, new_path) - except (FieldError, FieldDoesNotExist) as e: - raise ParseError(e.args[0]) - yield new_path diff --git a/awx/api/generics.py b/awx/api/generics.py index 0c16a3790f..7c7fda877e 100644 --- a/awx/api/generics.py +++ b/awx/api/generics.py @@ -30,12 +30,17 @@ from rest_framework.permissions import IsAuthenticated from rest_framework.renderers import StaticHTMLRenderer from rest_framework.negotiation import DefaultContentNegotiation +# django-ansible-base +from ansible_base.rest_filters.rest_framework.field_lookup_backend import FieldLookupBackend +from ansible_base.lib.utils.models import get_all_field_names +from ansible_base.rbac.models import RoleEvaluation, RoleDefinition +from ansible_base.rbac.permission_registry import permission_registry + # AWX -from awx.api.filters import FieldLookupBackend from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate +from awx.main.models.rbac import give_creator_permissions from awx.main.access import optimize_queryset from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version -from awx.main.utils.db import get_all_field_names from awx.main.utils.licensing import server_product_name from awx.main.views import ApiErrorView from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer @@ -90,7 +95,7 @@ class LoggedLoginView(auth_views.LoginView): ret = super(LoggedLoginView, self).post(request, *args, **kwargs) if request.user.is_authenticated: logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None)))) - ret.set_cookie('userLoggedIn', 'true') + ret.set_cookie('userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False)) ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid')) return ret @@ -106,7 +111,7 @@ class LoggedLogoutView(auth_views.LogoutView): original_user = getattr(request, 'user', None) ret = super(LoggedLogoutView, self).dispatch(request, *args, **kwargs) current_user = getattr(request, 'user', None) - ret.set_cookie('userLoggedIn', 'false') + ret.set_cookie('userLoggedIn', 'false', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False)) if (not current_user or not getattr(current_user, 'pk', True)) and current_user != original_user: logger.info("User {} logged 
out.".format(original_user.username)) return ret @@ -471,7 +476,11 @@ class ListAPIView(generics.ListAPIView, GenericAPIView): class ListCreateAPIView(ListAPIView, generics.ListCreateAPIView): # Base class for a list view that allows creating new objects. - pass + def perform_create(self, serializer): + super().perform_create(serializer) + if serializer.Meta.model in permission_registry.all_registered_models: + if self.request and self.request.user: + give_creator_permissions(self.request.user, serializer.instance) class ParentMixin(object): @@ -791,6 +800,7 @@ class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, DestroyAPIView): class ResourceAccessList(ParentMixin, ListAPIView): + deprecated = True serializer_class = ResourceAccessListElementSerializer ordering = ('username',) @@ -798,6 +808,15 @@ class ResourceAccessList(ParentMixin, ListAPIView): obj = self.get_parent_object() content_type = ContentType.objects.get_for_model(obj) + + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + ancestors = set(RoleEvaluation.objects.filter(content_type_id=content_type.id, object_id=obj.id).values_list('role_id', flat=True)) + qs = User.objects.filter(has_roles__in=ancestors) | User.objects.filter(is_superuser=True) + auditor_role = RoleDefinition.objects.filter(name="System Auditor").first() + if auditor_role: + qs |= User.objects.filter(role_assignments__role_definition=auditor_role) + return qs.distinct() + roles = set(Role.objects.filter(content_type=content_type, object_id=obj.id)) ancestors = set() @@ -957,7 +976,7 @@ class CopyAPIView(GenericAPIView): None, None, self.model, obj, request.user, create_kwargs=create_kwargs, copy_name=serializer.validated_data.get('name', '') ) if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all(): - new_obj.admin_role.members.add(request.user) + give_creator_permissions(request.user, new_obj) if sub_objs: permission_check_func = None if hasattr(type(self), 'deep_copy_permission_check_func'): diff --git a/awx/api/metadata.py b/awx/api/metadata.py index 4218946890..5adb7d8f12 100644 --- a/awx/api/metadata.py +++ b/awx/api/metadata.py @@ -36,11 +36,13 @@ class Metadata(metadata.SimpleMetadata): field_info = OrderedDict() field_info['type'] = self.label_lookup[field] field_info['required'] = getattr(field, 'required', False) + field_info['hidden'] = getattr(field, 'hidden', False) text_attrs = [ 'read_only', 'label', 'help_text', + 'warning_text', 'min_length', 'max_length', 'min_value', diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 392a068d06..75844e9d84 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -6,7 +6,7 @@ import copy import json import logging import re -from collections import OrderedDict +from collections import Counter, OrderedDict from datetime import timedelta from uuid import uuid4 @@ -43,9 +43,14 @@ from rest_framework.utils.serializer_helpers import ReturnList # Django-Polymorphic from polymorphic.models import PolymorphicModel +# django-ansible-base +from ansible_base.lib.utils.models import get_type_for_model +from ansible_base.rbac.models import RoleEvaluation, ObjectRole +from ansible_base.rbac import permission_registry + # AWX from awx.main.access import get_user_capabilities -from awx.main.constants import ACTIVE_STATES, CENSOR_VALUE +from awx.main.constants import ACTIVE_STATES, CENSOR_VALUE, org_role_to_permission from awx.main.models import ( ActivityStream, AdHocCommand, @@ -80,6 +85,7 @@ from awx.main.models import ( Project, ProjectUpdate, 
ProjectUpdateEvent, + ReceptorAddress, RefreshToken, Role, Schedule, @@ -99,10 +105,9 @@ from awx.main.models import ( CLOUD_INVENTORY_SOURCES, ) from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES -from awx.main.models.rbac import get_roles_on_resource, role_summary_fields_generator +from awx.main.models.rbac import role_summary_fields_generator, give_creator_permissions, get_role_codenames, to_permissions, get_role_from_object_role from awx.main.fields import ImplicitRoleField from awx.main.utils import ( - get_type_for_model, get_model_for_type, camelcase_to_underscore, getattrd, @@ -189,6 +194,7 @@ SUMMARIZABLE_FK_FIELDS = { 'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'), 'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'), 'credential_type': DEFAULT_SUMMARY_FIELDS, + 'resource': ('ansible_id', 'resource_type'), } @@ -635,7 +641,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl exclusions = self.get_validation_exclusions(self.instance) obj = self.instance or self.Meta.model() for k, v in attrs.items(): - if k not in exclusions: + if k not in exclusions and k != 'canonical_address_port': setattr(obj, k, v) obj.full_clean(exclude=exclusions) # full_clean may modify values on the instance; copy those changes @@ -2201,6 +2207,99 @@ class BulkHostCreateSerializer(serializers.Serializer): return return_data +class BulkHostDeleteSerializer(serializers.Serializer): + hosts = serializers.ListField( + allow_empty=False, + max_length=100000, + write_only=True, + help_text=_('List of hosts ids to be deleted, e.g. [105, 130, 131, 200]'), + ) + + class Meta: + model = Host + fields = ('hosts',) + + def validate(self, attrs): + request = self.context.get('request', None) + max_hosts = settings.BULK_HOST_MAX_DELETE + # Validating the number of hosts to be deleted + if len(attrs['hosts']) > max_hosts: + raise serializers.ValidationError( + { + "ERROR": 'Number of hosts exceeds system setting BULK_HOST_MAX_DELETE', + "BULK_HOST_MAX_DELETE": max_hosts, + "Hosts_count": len(attrs['hosts']), + } + ) + + # Getting list of all host objects, filtered by the list of the hosts to delete + attrs['host_qs'] = Host.objects.get_queryset().filter(pk__in=attrs['hosts']).only('id', 'inventory_id', 'name') + + # Converting the queryset data in a dict. to reduce the number of queries when + # manipulating the data + attrs['hosts_data'] = attrs['host_qs'].values() + + if len(attrs['host_qs']) == 0: + error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in attrs['hosts']} + raise serializers.ValidationError({'hosts': error_hosts}) + + if len(attrs['host_qs']) < len(attrs['hosts']): + hosts_exists = [host['id'] for host in attrs['hosts_data']] + failed_hosts = list(set(attrs['hosts']).difference(hosts_exists)) + error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in failed_hosts} + raise serializers.ValidationError({'hosts': error_hosts}) + + # Getting all inventories that the hosts can be in + inv_list = list(set([host['inventory_id'] for host in attrs['hosts_data']])) + + # Checking that the user have permission to all inventories + errors = dict() + for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list): + if request and not request.user.is_superuser: + if request.user not in inv.admin_role: + errors[inv.name] = "Lack permissions to delete hosts from this inventory." 
+        if errors != {}:
+            raise PermissionDenied({"inventories": errors})
+
+        # check the inventory type only if the user has permission to it.
+        errors = dict()
+        for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list):
+            if inv.kind != '':
+                errors[inv.name] = "Hosts can only be deleted from manual inventories."
+        if errors != {}:
+            raise serializers.ValidationError({"inventories": errors})
+        attrs['inventories'] = inv_list
+        return attrs
+
+    def delete(self, validated_data):
+        result = {"hosts": dict()}
+        changes = {'deleted_hosts': dict()}
+        for inventory in validated_data['inventories']:
+            changes['deleted_hosts'][inventory] = list()
+
+        for host in validated_data['hosts_data']:
+            result["hosts"][host["id"]] = f"The host {host['name']} was deleted"
+            changes['deleted_hosts'][host["inventory_id"]].append({"host_id": host["id"], "host_name": host["name"]})
+
+        try:
+            validated_data['host_qs'].delete()
+        except Exception as e:
+            raise serializers.ValidationError({"detail": _(f"cannot delete hosts, host deletion error {e}")})
+
+        request = self.context.get('request', None)
+
+        for inventory in validated_data['inventories']:
+            activity_entry = ActivityStream.objects.create(
+                operation='update',
+                object1='inventory',
+                changes=json.dumps(changes['deleted_hosts'][inventory]),
+                actor=request.user,
+            )
+            activity_entry.inventory.add(inventory)
+
+        return result
+
+
 class GroupTreeSerializer(GroupSerializer):
     children = serializers.SerializerMethodField()
@@ -2664,6 +2763,30 @@ class ResourceAccessListElementSerializer(UserSerializer):
         if 'summary_fields' not in ret:
             ret['summary_fields'] = {}

+        team_content_type = ContentType.objects.get_for_model(Team)
+        content_type = ContentType.objects.get_for_model(obj)
+
+        reversed_org_map = {}
+        for k, v in org_role_to_permission.items():
+            reversed_org_map[v] = k
+        reversed_role_map = {}
+        for k, v in to_permissions.items():
+            reversed_role_map[v] = k
+
+        def get_roles_from_perms(perm_list):
+            """given a list of permission codenames return a list of role names"""
+            role_names = set()
+            for codename in perm_list:
+                action = codename.split('_', 1)[0]
+                if action in reversed_role_map:
+                    role_names.add(reversed_role_map[action])
+                elif codename in reversed_org_map:
+                    if isinstance(obj, Organization):
+                        role_names.add(reversed_org_map[codename])
+            if 'view_organization' not in role_names:
+                role_names.add('read_role')
+            return list(role_names)
+
         def format_role_perm(role):
             role_dict = {'id': role.id, 'name': role.name, 'description': role.description}
             try:
@@ -2679,13 +2802,21 @@ class ResourceAccessListElementSerializer(UserSerializer):
             else:
                 # Singleton roles should not be managed from this view, as per copy/edit rework spec
                 role_dict['user_capabilities'] = {'unattach': False}
-            return {'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
+
+            model_name = content_type.model
+            if isinstance(obj, Organization):
+                descendant_perms = [codename for codename in get_role_codenames(role) if codename.endswith(model_name) or codename.startswith('add_')]
+            else:
+                descendant_perms = [codename for codename in get_role_codenames(role) if codename.endswith(model_name)]
+
+            return {'role': role_dict, 'descendant_roles': get_roles_from_perms(descendant_perms)}

         def format_team_role_perm(naive_team_role, permissive_role_ids):
             ret = []
+            team = naive_team_role.content_object
             team_role = naive_team_role
             if naive_team_role.role_field == 'admin_role':
-                team_role = naive_team_role.content_object.member_role
+                team_role = team.member_role
             for role
in team_role.children.filter(id__in=permissive_role_ids).all(): role_dict = { 'id': role.id, @@ -2705,13 +2836,87 @@ class ResourceAccessListElementSerializer(UserSerializer): else: # Singleton roles should not be managed from this view, as per copy/edit rework spec role_dict['user_capabilities'] = {'unattach': False} - ret.append({'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)}) + + descendant_perms = list( + RoleEvaluation.objects.filter(role__in=team.has_roles.all(), object_id=obj.id, content_type_id=content_type.id) + .values_list('codename', flat=True) + .distinct() + ) + + ret.append({'role': role_dict, 'descendant_roles': get_roles_from_perms(descendant_perms)}) return ret - team_content_type = ContentType.objects.get_for_model(Team) - content_type = ContentType.objects.get_for_model(obj) + gfk_kwargs = dict(content_type_id=content_type.id, object_id=obj.id) + direct_permissive_role_ids = Role.objects.filter(**gfk_kwargs).values_list('id', flat=True) + + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + ret['summary_fields']['direct_access'] = [] + ret['summary_fields']['indirect_access'] = [] + + new_roles_seen = set() + all_team_roles = set() + all_permissive_role_ids = set() + for evaluation in RoleEvaluation.objects.filter(role__in=user.has_roles.all(), **gfk_kwargs).prefetch_related('role'): + new_role = evaluation.role + if new_role.id in new_roles_seen: + continue + new_roles_seen.add(new_role.id) + old_role = get_role_from_object_role(new_role) + all_permissive_role_ids.add(old_role.id) + + if int(new_role.object_id) == obj.id and new_role.content_type_id == content_type.id: + ret['summary_fields']['direct_access'].append(format_role_perm(old_role)) + elif new_role.content_type_id == team_content_type.id: + all_team_roles.add(old_role) + else: + ret['summary_fields']['indirect_access'].append(format_role_perm(old_role)) + + # Lazy role creation gives us a big problem, where some intermediate roles are not easy to find + # like when a team has indirect permission, so here we get all roles the users teams have + # these contribute to all potential permission-granting roles of the object + user_teams_qs = permission_registry.team_model.objects.filter(member_roles__in=ObjectRole.objects.filter(users=user)) + team_obj_roles = ObjectRole.objects.filter(teams__in=user_teams_qs) + for evaluation in RoleEvaluation.objects.filter(role__in=team_obj_roles, **gfk_kwargs).prefetch_related('role'): + new_role = evaluation.role + if new_role.id in new_roles_seen: + continue + new_roles_seen.add(new_role.id) + old_role = get_role_from_object_role(new_role) + all_permissive_role_ids.add(old_role.id) + + # In DAB RBAC, superuser is strictly a user flag, and global roles are not in the RoleEvaluation table + if user.is_superuser: + ret['summary_fields'].setdefault('indirect_access', []) + all_role_names = [field.name for field in obj._meta.get_fields() if isinstance(field, ImplicitRoleField)] + ret['summary_fields']['indirect_access'].append( + { + "role": { + "id": None, + "name": _("System Administrator"), + "description": _("Can manage all aspects of the system"), + "user_capabilities": {"unattach": False}, + }, + "descendant_roles": all_role_names, + } + ) + elif user.is_system_auditor: + ret['summary_fields'].setdefault('indirect_access', []) + ret['summary_fields']['indirect_access'].append( + { + "role": { + "id": None, + "name": _("System Auditor"), + "description": _("Can view all aspects of the system"), + "user_capabilities": {"unattach": False}, + }, + 
"descendant_roles": ["read_role"], + } + ) + + ret['summary_fields']['direct_access'].extend([y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in all_team_roles) for y in x]) + + return ret - direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True) all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True) direct_access_roles = user.roles.filter(id__in=direct_permissive_role_ids).all() @@ -2980,7 +3185,7 @@ class CredentialSerializerCreate(CredentialSerializer): credential = super(CredentialSerializerCreate, self).create(validated_data) if user: - credential.admin_role.members.add(user) + give_creator_permissions(user, credential) if team: if not credential.organization or team.organization.id != credential.organization.id: raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")}) @@ -5074,16 +5279,21 @@ class NotificationTemplateSerializer(BaseSerializer): body = messages[event].get('body', {}) if body: try: - rendered_body = ( - sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub()) - ) - potential_body = json.loads(rendered_body) - if not isinstance(potential_body, dict): - error_list.append( - _("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__)) - ) - except json.JSONDecodeError as exc: - error_list.append(_("Webhook body for '{}' is not a valid json dictionary ({}).".format(event, exc))) + sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub()) + + # https://github.com/ansible/awx/issues/14410 + + # When rendering something such as "{{ job.id }}" + # the return type is not a dict, unlike "{{ job_metadata }}" which is a dict + + # potential_body = json.loads(rendered_body) + + # if not isinstance(potential_body, dict): + # error_list.append( + # _("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__)) + # ) + except Exception as exc: + error_list.append(_("Webhook body for '{}' is not valid. 
The following gave an error ({}).".format(event, exc))) if error_list: raise serializers.ValidationError(error_list) @@ -5356,17 +5566,25 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria class InstanceLinkSerializer(BaseSerializer): class Meta: model = InstanceLink - fields = ('id', 'url', 'related', 'source', 'target', 'link_state') + fields = ('id', 'related', 'source', 'target', 'target_full_address', 'link_state') source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all()) - target = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all()) + + target = serializers.SerializerMethodField() + target_full_address = serializers.SerializerMethodField() def get_related(self, obj): res = super(InstanceLinkSerializer, self).get_related(obj) res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id}) - res['target_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.target.id}) + res['target_address'] = self.reverse('api:receptor_address_detail', kwargs={'pk': obj.target.id}) return res + def get_target(self, obj): + return obj.target.instance.hostname + + def get_target_full_address(self, obj): + return obj.target.get_full_address() + class InstanceNodeSerializer(BaseSerializer): class Meta: @@ -5374,6 +5592,29 @@ class InstanceNodeSerializer(BaseSerializer): fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled') +class ReceptorAddressSerializer(BaseSerializer): + full_address = serializers.SerializerMethodField() + + class Meta: + model = ReceptorAddress + fields = ( + 'id', + 'url', + 'address', + 'port', + 'protocol', + 'websocket_path', + 'is_internal', + 'canonical', + 'instance', + 'peers_from_control_nodes', + 'full_address', + ) + + def get_full_address(self, obj): + return obj.get_full_address() + + class InstanceSerializer(BaseSerializer): show_capabilities = ['edit'] @@ -5382,11 +5623,17 @@ class InstanceSerializer(BaseSerializer): jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True) jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True) health_check_pending = serializers.SerializerMethodField() - peers = serializers.SlugRelatedField(many=True, required=False, slug_field="hostname", queryset=Instance.objects.all()) + peers = serializers.PrimaryKeyRelatedField( + help_text=_('Primary keys of receptor addresses to peer to.'), many=True, required=False, queryset=ReceptorAddress.objects.all() + ) + reverse_peers = serializers.SerializerMethodField() + listener_port = serializers.IntegerField(source='canonical_address_port', required=False, allow_null=True) + peers_from_control_nodes = serializers.BooleanField(source='canonical_address_peers_from_control_nodes', required=False) + protocol = serializers.SerializerMethodField() class Meta: model = Instance - read_only_fields = ('ip_address', 'uuid', 'version') + read_only_fields = ('ip_address', 'uuid', 'version', 'managed', 'reverse_peers') fields = ( 'id', 'hostname', @@ -5417,10 +5664,13 @@ class InstanceSerializer(BaseSerializer): 'managed_by_policy', 'node_type', 'node_state', + 'managed', 'ip_address', - 'listener_port', 'peers', + 'reverse_peers', + 'listener_port', 'peers_from_control_nodes', + 'protocol', ) extra_kwargs = { 'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION}, @@ -5442,16 
+5692,54 @@ class InstanceSerializer(BaseSerializer): def get_related(self, obj): res = super(InstanceSerializer, self).get_related(obj) + res['receptor_addresses'] = self.reverse('api:instance_receptor_addresses_list', kwargs={'pk': obj.pk}) res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk}) - res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk}) - if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP]: - res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk}) res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk}) + res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk}) + if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP] and not obj.managed: + res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk}) if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor: if obj.node_type == 'execution': res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk}) return res + def create_or_update(self, validated_data, obj=None, create=True): + # create a managed receptor address if listener port is defined + port = validated_data.pop('listener_port', -1) + peers_from_control_nodes = validated_data.pop('peers_from_control_nodes', -1) + + # delete the receptor address if the port is explicitly set to None + if obj and port == None: + obj.receptor_addresses.filter(address=obj.hostname).delete() + + if create: + instance = super(InstanceSerializer, self).create(validated_data) + else: + instance = super(InstanceSerializer, self).update(obj, validated_data) + instance.refresh_from_db() # instance canonical address lookup is deferred, so needs to be reloaded + + # only create or update if port is defined in validated_data or already exists in the + # canonical address + # this prevents creating a receptor address if peers_from_control_nodes is in + # validated_data but a port is not set + if (port != None and port != -1) or instance.canonical_address_port: + kwargs = {} + if port != -1: + kwargs['port'] = port + if peers_from_control_nodes != -1: + kwargs['peers_from_control_nodes'] = peers_from_control_nodes + if kwargs: + kwargs['canonical'] = True + instance.receptor_addresses.update_or_create(address=instance.hostname, defaults=kwargs) + + return instance + + def create(self, validated_data): + return self.create_or_update(validated_data, create=True) + + def update(self, obj, validated_data): + return self.create_or_update(validated_data, obj, create=False) + def get_summary_fields(self, obj): summary = super().get_summary_fields(obj) @@ -5461,6 +5749,16 @@ class InstanceSerializer(BaseSerializer): return summary + def get_reverse_peers(self, obj): + return Instance.objects.prefetch_related('peers').filter(peers__in=obj.receptor_addresses.all()).values_list('id', flat=True) + + def get_protocol(self, obj): + # note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization + for addr in obj.receptor_addresses.all(): + if addr.canonical: + return addr.protocol + return "" + def get_consumed_capacity(self, obj): return obj.consumed_capacity @@ -5474,47 +5772,20 @@ class InstanceSerializer(BaseSerializer): return obj.health_check_pending def validate(self, attrs): - def get_field_from_model_or_attrs(fd): - return attrs.get(fd, self.instance and 
getattr(self.instance, fd) or None) - - def check_peers_changed(): - ''' - return True if - - 'peers' in attrs - - instance peers matches peers in attrs - ''' - return self.instance and 'peers' in attrs and set(self.instance.peers.all()) != set(attrs['peers']) + # Oddly, using 'source' on a DRF field populates attrs with the source name, so we should rename it back + if 'canonical_address_port' in attrs: + attrs['listener_port'] = attrs.pop('canonical_address_port') + if 'canonical_address_peers_from_control_nodes' in attrs: + attrs['peers_from_control_nodes'] = attrs.pop('canonical_address_peers_from_control_nodes') if not self.instance and not settings.IS_K8S: raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift.")) - node_type = get_field_from_model_or_attrs("node_type") - peers_from_control_nodes = get_field_from_model_or_attrs("peers_from_control_nodes") - listener_port = get_field_from_model_or_attrs("listener_port") - peers = attrs.get('peers', []) - - if peers_from_control_nodes and node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP): - raise serializers.ValidationError(_("peers_from_control_nodes can only be enabled for execution or hop nodes.")) - - if node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]: - if check_peers_changed(): - raise serializers.ValidationError( - _("Setting peers manually for control nodes is not allowed. Enable peers_from_control_nodes on the hop and execution nodes instead.") - ) - - if not listener_port and peers_from_control_nodes: - raise serializers.ValidationError(_("Field listener_port must be a valid integer when peers_from_control_nodes is enabled.")) - - if not listener_port and self.instance and self.instance.peers_from.exists(): - raise serializers.ValidationError(_("Field listener_port must be a valid integer when other nodes peer to it.")) - - for peer in peers: - if peer.listener_port is None: - raise serializers.ValidationError(_("Field listener_port must be set on peer ") + peer.hostname + ".") - - if not settings.IS_K8S: - if check_peers_changed(): - raise serializers.ValidationError(_("Cannot change peers.")) + # cannot enable peers_from_control_nodes if listener_port is not set + if attrs.get('peers_from_control_nodes'): + port = attrs.get('listener_port', -1) # -1 denotes missing, None denotes explicit null + if (port is None) or (port == -1 and self.instance and self.instance.canonical_address is None): + raise serializers.ValidationError(_("Cannot enable peers_from_control_nodes if listener_port is not set.")) return super().validate(attrs) @@ -5534,8 +5805,8 @@ class InstanceSerializer(BaseSerializer): raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift.")) if value != Instance.States.DEPROVISIONING: raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state.")) - if self.instance.node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP): - raise serializers.ValidationError(_("Can only deprovision execution or hop nodes.")) + if self.instance.managed: + raise serializers.ValidationError(_("Cannot deprovision managed nodes.")) else: if value and value != Instance.States.INSTALLED: raise serializers.ValidationError(_("Can only create instances in the 'installed' state.")) @@ -5554,18 +5825,48 @@ class InstanceSerializer(BaseSerializer): def validate_listener_port(self, value): """ Cannot change listener port, unless going from none to integer, and vice versa + If instance is managed, cannot change 
listener port at all
         """
-        if value and self.instance and self.instance.listener_port and self.instance.listener_port != value:
-            raise serializers.ValidationError(_("Cannot change listener port."))
+        if self.instance:
+            canonical_address_port = self.instance.canonical_address_port
+            if value and canonical_address_port and canonical_address_port != value:
+                raise serializers.ValidationError(_("Cannot change listener port."))
+            if self.instance.managed and value != canonical_address_port:
+                raise serializers.ValidationError(_("Cannot change listener port for managed nodes."))
+        return value
+
+    def validate_peers(self, value):
+        # cannot peer to an instance more than once
+        peers_instances = Counter(p.instance_id for p in value)
+        if any(count > 1 for count in peers_instances.values()):
+            raise serializers.ValidationError(_("Cannot peer to the same instance more than once."))
+
+        if self.instance:
+            instance_addresses = set(self.instance.receptor_addresses.all())
+            setting_peers = set(value)
+            peers_changed = set(self.instance.peers.all()) != setting_peers
+
+            if not settings.IS_K8S and peers_changed:
+                raise serializers.ValidationError(_("Cannot change peers."))
+
+            if self.instance.managed and peers_changed:
+                raise serializers.ValidationError(_("Setting peers manually for managed nodes is not allowed."))
+
+            # cannot peer to self
+            if instance_addresses & setting_peers:
+                raise serializers.ValidationError(_("Instance cannot peer to its own address."))
+
+            # cannot peer to an instance that is already peered to this instance
+            if instance_addresses:
+                for p in setting_peers:
+                    if set(p.instance.peers.all()) & instance_addresses:
+                        raise serializers.ValidationError(_(f"Instance {p.instance.hostname} is already peered to this instance."))
         return value

     def validate_peers_from_control_nodes(self, value):
-        """
-        Can only enable for K8S based deployments
-        """
-        if value and not settings.IS_K8S:
-            raise serializers.ValidationError(_("Can only be enabled on Kubernetes or Openshift."))
+        if self.instance and self.instance.managed and self.instance.canonical_address_peers_from_control_nodes != value:
+            raise serializers.ValidationError(_("Cannot change peers_from_control_nodes for managed nodes."))
         return value
diff --git a/awx/api/templates/api/bulk_host_delete_view.md b/awx/api/templates/api/bulk_host_delete_view.md
new file mode 100644
index 0000000000..1fff2a7e3c
--- /dev/null
+++ b/awx/api/templates/api/bulk_host_delete_view.md
@@ -0,0 +1,22 @@
+# Bulk Host Delete
+
+This endpoint allows the client to delete multiple hosts from inventories.
+They may do this by providing a list of host IDs to be deleted.
+
+Example:
+
+    {
+        "hosts": [1, 2, 3, 4, 5]
+    }
+
+Return data:
+
+    {
+        "hosts": {
+            "1": "The host a1 was deleted",
+            "2": "The host a2 was deleted",
+            "3": "The host a3 was deleted",
+            "4": "The host a4 was deleted",
+            "5": "The host a5 was deleted",
+        }
+    }
\ No newline at end of file
diff --git a/awx/api/templates/instance_install_bundle/group_vars/all.yml b/awx/api/templates/instance_install_bundle/group_vars/all.yml
index 861572748c..7c7c815d67 100644
--- a/awx/api/templates/instance_install_bundle/group_vars/all.yml
+++ b/awx/api/templates/instance_install_bundle/group_vars/all.yml
@@ -17,19 +17,18 @@ custom_worksign_public_keyfile: receptor/work_public_key.pem
 custom_tls_certfile: receptor/tls/receptor.crt
 custom_tls_keyfile: receptor/tls/receptor.key
 custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
-receptor_protocol: 'tcp'
-{% if instance.listener_port %}
+{% if listener_port %}
+receptor_protocol: {{ listener_protocol }}
 receptor_listener: true
-receptor_port: {{ instance.listener_port }}
+receptor_port: {{ listener_port }}
 {% else %}
 receptor_listener: false
 {% endif %}
 {% if peers %}
 receptor_peers:
 {% for peer in peers %}
-  - host: {{ peer.host }}
-    port: {{ peer.port }}
-    protocol: tcp
+  - address: {{ peer.address }}
+    protocol: {{ peer.protocol }}
 {% endfor %}
 {% endif %}
 {% verbatim %}
diff --git a/awx/api/templates/instance_install_bundle/requirements.yml b/awx/api/templates/instance_install_bundle/requirements.yml
index 69dbf5dcb7..65df80b51d 100644
--- a/awx/api/templates/instance_install_bundle/requirements.yml
+++ b/awx/api/templates/instance_install_bundle/requirements.yml
@@ -1,4 +1,4 @@
 ---
 collections:
   - name: ansible.receptor
-    version: 2.0.2
+    version: 2.0.3
diff --git a/awx/api/urls/instance.py b/awx/api/urls/instance.py
index 0d4df1df45..84a3904657 100644
--- a/awx/api/urls/instance.py
+++ b/awx/api/urls/instance.py
@@ -10,6 +10,7 @@ from awx.api.views import (
     InstanceInstanceGroupsList,
     InstanceHealthCheck,
     InstancePeersList,
+    InstanceReceptorAddressesList,
 )
 from awx.api.views.instance_install_bundle import InstanceInstallBundle
@@ -21,6 +22,7 @@ urls = [
     re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'),
     re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'),
     re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'),
+    re_path(r'^(?P<pk>[0-9]+)/receptor_addresses/$', InstanceReceptorAddressesList.as_view(), name='instance_receptor_addresses_list'),
     re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'),
 ]
diff --git a/awx/api/urls/receptor_address.py b/awx/api/urls/receptor_address.py
new file mode 100644
index 0000000000..fe630f3da4
--- /dev/null
+++ b/awx/api/urls/receptor_address.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2017 Ansible, Inc.
+# All Rights Reserved.
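A client-side sketch of exercising the bulk delete endpoint documented above, once the `bulk/host_delete/` route (registered in `urls.py` below) is in place. The base URL and token are placeholder assumptions, not values from the patch:

```python
# Hypothetical client call; base URL and OAuth2 token are placeholders.
import requests

resp = requests.post(
    "https://awx.example.org/api/v2/bulk/host_delete/",
    headers={"Authorization": "Bearer <oauth2-token>"},
    json={"hosts": [1, 2, 3, 4, 5]},
)
print(resp.status_code)  # 201 on success, 400 with per-host errors otherwise
print(resp.json())       # {"hosts": {"1": "The host a1 was deleted", ...}}
```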
+
+from django.urls import re_path
+
+from awx.api.views import (
+    ReceptorAddressesList,
+    ReceptorAddressDetail,
+)
+
+
+urls = [
+    re_path(r'^$', ReceptorAddressesList.as_view(), name='receptor_addresses_list'),
+    re_path(r'^(?P<pk>[0-9]+)/$', ReceptorAddressDetail.as_view(), name='receptor_address_detail'),
+]
+
+__all__ = ['urls']
diff --git a/awx/api/urls/urls.py b/awx/api/urls/urls.py
index c74f9f97e6..c2218e5ed8 100644
--- a/awx/api/urls/urls.py
+++ b/awx/api/urls/urls.py
@@ -36,6 +36,7 @@ from awx.api.views import (
 from awx.api.views.bulk import (
     BulkView,
     BulkHostCreateView,
+    BulkHostDeleteView,
     BulkJobLaunchView,
 )
@@ -84,6 +85,7 @@ from .oauth2_root import urls as oauth2_root_urls
 from .workflow_approval_template import urls as workflow_approval_template_urls
 from .workflow_approval import urls as workflow_approval_urls
 from .analytics import urls as analytics_urls
+from .receptor_address import urls as receptor_address_urls

 v2_urls = [
     re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
@@ -152,7 +154,9 @@ v2_urls = [
     re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
     re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
     re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
+    re_path(r'^bulk/host_delete/$', BulkHostDeleteView.as_view(), name='bulk_host_delete'),
     re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
+    re_path(r'^receptor_addresses/', include(receptor_address_urls)),
 ]
diff --git a/awx/api/urls/webhooks.py b/awx/api/urls/webhooks.py
index b57ca135d8..bbbf1ebd2d 100644
--- a/awx/api/urls/webhooks.py
+++ b/awx/api/urls/webhooks.py
@@ -1,10 +1,11 @@
 from django.urls import re_path

-from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver
+from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver, BitbucketDcWebhookReceiver

 urlpatterns = [
     re_path(r'^webhook_key/$', WebhookKeyView.as_view(), name='webhook_key'),
     re_path(r'^github/$', GithubWebhookReceiver.as_view(), name='webhook_receiver_github'),
     re_path(r'^gitlab/$', GitlabWebhookReceiver.as_view(), name='webhook_receiver_gitlab'),
+    re_path(r'^bitbucket_dc/$', BitbucketDcWebhookReceiver.as_view(), name='webhook_receiver_bitbucket_dc'),
 ]
diff --git a/awx/api/versioning.py b/awx/api/versioning.py
index 9fc57ac71e..ff10d9875b 100644
--- a/awx/api/versioning.py
+++ b/awx/api/versioning.py
@@ -2,28 +2,21 @@
 # All Rights Reserved.
 from django.conf import settings
-from django.urls import NoReverseMatch
-from rest_framework.reverse import _reverse
+from rest_framework.reverse import reverse as drf_reverse
 from rest_framework.versioning import URLPathVersioning as BaseVersioning

-def drf_reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra):
-    """
-    Copy and monkey-patch `rest_framework.reverse.reverse` to prevent adding unwarranted
-    query string parameters.
- """ - scheme = getattr(request, 'versioning_scheme', None) - if scheme is not None: - try: - url = scheme.reverse(viewname, args, kwargs, request, format, **extra) - except NoReverseMatch: - # In case the versioning scheme reversal fails, fallback to the - # default implementation - url = _reverse(viewname, args, kwargs, request, format, **extra) - else: - url = _reverse(viewname, args, kwargs, request, format, **extra) +def is_optional_api_urlpattern_prefix_request(request): + if settings.OPTIONAL_API_URLPATTERN_PREFIX and request: + if request.path.startswith(f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}"): + return True + return False + +def transform_optional_api_urlpattern_prefix_url(request, url): + if is_optional_api_urlpattern_prefix_request(request): + url = url.replace('/api', f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}") return url diff --git a/awx/api/views/__init__.py b/awx/api/views/__init__.py index 221db21b00..9bc8bad286 100644 --- a/awx/api/views/__init__.py +++ b/awx/api/views/__init__.py @@ -60,6 +60,9 @@ from oauth2_provider.models import get_access_token_model import pytz from wsgiref.util import FileWrapper +# django-ansible-base +from ansible_base.rbac.models import RoleEvaluation, ObjectRole + # AWX from awx.main.tasks.system import send_notifications, update_inventory_computed_fields from awx.main.access import get_user_queryset @@ -87,6 +90,7 @@ from awx.api.generics import ( from awx.api.views.labels import LabelSubListCreateAttachDetachView from awx.api.versioning import reverse from awx.main import models +from awx.main.models.rbac import get_role_definition from awx.main.utils import ( camelcase_to_underscore, extract_ansible_vars, @@ -272,16 +276,24 @@ class DashboardJobsGraphView(APIView): success_query = user_unified_jobs.filter(status='successful') failed_query = user_unified_jobs.filter(status='failed') + canceled_query = user_unified_jobs.filter(status='canceled') + error_query = user_unified_jobs.filter(status='error') if job_type == 'inv_sync': success_query = success_query.filter(instance_of=models.InventoryUpdate) failed_query = failed_query.filter(instance_of=models.InventoryUpdate) + canceled_query = canceled_query.filter(instance_of=models.InventoryUpdate) + error_query = error_query.filter(instance_of=models.InventoryUpdate) elif job_type == 'playbook_run': success_query = success_query.filter(instance_of=models.Job) failed_query = failed_query.filter(instance_of=models.Job) + canceled_query = canceled_query.filter(instance_of=models.Job) + error_query = error_query.filter(instance_of=models.Job) elif job_type == 'scm_update': success_query = success_query.filter(instance_of=models.ProjectUpdate) failed_query = failed_query.filter(instance_of=models.ProjectUpdate) + canceled_query = canceled_query.filter(instance_of=models.ProjectUpdate) + error_query = error_query.filter(instance_of=models.ProjectUpdate) end = now() interval = 'day' @@ -297,10 +309,12 @@ class DashboardJobsGraphView(APIView): else: return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST) - dashboard_data = {"jobs": {"successful": [], "failed": []}} + dashboard_data = {"jobs": {"successful": [], "failed": [], "canceled": [], "error": []}} succ_list = dashboard_data['jobs']['successful'] fail_list = dashboard_data['jobs']['failed'] + canceled_list = dashboard_data['jobs']['canceled'] + error_list = dashboard_data['jobs']['error'] qs_s = ( success_query.filter(finished__range=(start, end)) @@ -318,6 +332,22 @@ class 
DashboardJobsGraphView(APIView): .annotate(agg=Count('id', distinct=True)) ) data_f = {item['d']: item['agg'] for item in qs_f} + qs_c = ( + canceled_query.filter(finished__range=(start, end)) + .annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo)) + .order_by() + .values('d') + .annotate(agg=Count('id', distinct=True)) + ) + data_c = {item['d']: item['agg'] for item in qs_c} + qs_e = ( + error_query.filter(finished__range=(start, end)) + .annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo)) + .order_by() + .values('d') + .annotate(agg=Count('id', distinct=True)) + ) + data_e = {item['d']: item['agg'] for item in qs_e} start_date = start.replace(hour=0, minute=0, second=0, microsecond=0) for d in itertools.count(): @@ -326,6 +356,8 @@ class DashboardJobsGraphView(APIView): break succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)]) fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)]) + canceled_list.append([time.mktime(date.timetuple()), data_c.get(date, 0)]) + error_list.append([time.mktime(date.timetuple()), data_e.get(date, 0)]) return Response(dashboard_data) @@ -337,12 +369,20 @@ class InstanceList(ListCreateAPIView): search_fields = ('hostname',) ordering = ('id',) + def get_queryset(self): + qs = super().get_queryset().prefetch_related('receptor_addresses') + return qs + class InstanceDetail(RetrieveUpdateAPIView): name = _("Instance Detail") model = models.Instance serializer_class = serializers.InstanceSerializer + def get_queryset(self): + qs = super().get_queryset().prefetch_related('receptor_addresses') + return qs + def update_raw_data(self, data): # these fields are only valid on creation of an instance, so they unwanted on detail view data.pop('node_type', None) @@ -375,13 +415,37 @@ class InstanceUnifiedJobsList(SubListAPIView): class InstancePeersList(SubListAPIView): - name = _("Instance Peers") + name = _("Peers") + model = models.ReceptorAddress + serializer_class = serializers.ReceptorAddressSerializer parent_model = models.Instance - model = models.Instance - serializer_class = serializers.InstanceSerializer parent_access = 'read' - search_fields = {'hostname'} relationship = 'peers' + search_fields = ('address',) + + +class InstanceReceptorAddressesList(SubListAPIView): + name = _("Receptor Addresses") + model = models.ReceptorAddress + parent_key = 'instance' + parent_model = models.Instance + serializer_class = serializers.ReceptorAddressSerializer + search_fields = ('address',) + + +class ReceptorAddressesList(ListAPIView): + name = _("Receptor Addresses") + model = models.ReceptorAddress + serializer_class = serializers.ReceptorAddressSerializer + search_fields = ('address',) + + +class ReceptorAddressDetail(RetrieveAPIView): + name = _("Receptor Address Detail") + model = models.ReceptorAddress + serializer_class = serializers.ReceptorAddressSerializer + parent_model = models.Instance + relationship = 'receptor_addresses' class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView): @@ -476,6 +540,7 @@ class InstanceGroupAccessList(ResourceAccessList): class InstanceGroupObjectRolesList(SubListAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.InstanceGroup @@ -664,6 +729,7 @@ class TeamUsersList(BaseUsersList): class TeamRolesList(SubListAttachDetachAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializerWithParentAccess metadata_class = RoleMetadata @@ -703,10 +769,12 @@ 
class TeamRolesList(SubListAttachDetachAPIView): class TeamObjectRolesList(SubListAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.Team search_fields = ('role_field', 'content_type__model') + deprecated = True def get_queryset(self): po = self.get_parent_object() @@ -724,8 +792,15 @@ class TeamProjectsList(SubListAPIView): self.check_parent_access(team) model_ct = ContentType.objects.get_for_model(self.model) parent_ct = ContentType.objects.get_for_model(self.parent_model) - proj_roles = models.Role.objects.filter(Q(ancestors__content_type=parent_ct) & Q(ancestors__object_id=team.pk), content_type=model_ct) - return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in proj_roles]) + + rd = get_role_definition(team.member_role) + role = ObjectRole.objects.filter(object_id=team.id, content_type=parent_ct, role_definition=rd).first() + if role is None: + # Team has no permissions, therefore team has no projects + return self.model.objects.none() + else: + project_qs = self.model.accessible_objects(self.request.user, 'read_role') + return project_qs.filter(id__in=RoleEvaluation.objects.filter(content_type_id=model_ct.id, role=role).values_list('object_id')) class TeamActivityStreamList(SubListAPIView): @@ -740,10 +815,23 @@ class TeamActivityStreamList(SubListAPIView): self.check_parent_access(parent) qs = self.request.user.get_queryset(self.model) + return qs.filter( Q(team=parent) - | Q(project__in=models.Project.accessible_objects(parent, 'read_role')) - | Q(credential__in=models.Credential.accessible_objects(parent, 'read_role')) + | Q( + project__in=RoleEvaluation.objects.filter( + role__in=parent.has_roles.all(), content_type_id=ContentType.objects.get_for_model(models.Project).id, codename='view_project' + ) + .values_list('object_id') + .distinct() + ) + | Q( + credential__in=RoleEvaluation.objects.filter( + role__in=parent.has_roles.all(), content_type_id=ContentType.objects.get_for_model(models.Credential).id, codename='view_credential' + ) + .values_list('object_id') + .distinct() + ) ) @@ -995,10 +1083,12 @@ class ProjectAccessList(ResourceAccessList): class ProjectObjectRolesList(SubListAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.Project search_fields = ('role_field', 'content_type__model') + deprecated = True def get_queryset(self): po = self.get_parent_object() @@ -1156,6 +1246,7 @@ class UserTeamsList(SubListAPIView): class UserRolesList(SubListAttachDetachAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializerWithParentAccess metadata_class = RoleMetadata @@ -1397,7 +1488,7 @@ class OrganizationCredentialList(SubListCreateAPIView): self.check_parent_access(organization) user_visible = models.Credential.accessible_objects(self.request.user, 'read_role').all() - org_set = models.Credential.accessible_objects(organization.admin_role, 'read_role').all() + org_set = models.Credential.objects.filter(organization=organization) if self.request.user.is_superuser or self.request.user.is_system_auditor: return org_set @@ -1430,10 +1521,12 @@ class CredentialAccessList(ResourceAccessList): class CredentialObjectRolesList(SubListAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.Credential search_fields = ('role_field', 'content_type__model') + deprecated = True def get_queryset(self): po = 
self.get_parent_object() @@ -2220,13 +2313,6 @@ class JobTemplateList(ListCreateAPIView): serializer_class = serializers.JobTemplateSerializer always_allow_superuser = False - def post(self, request, *args, **kwargs): - ret = super(JobTemplateList, self).post(request, *args, **kwargs) - if ret.status_code == 201: - job_template = models.JobTemplate.objects.get(id=ret.data['id']) - job_template.admin_role.members.add(request.user) - return ret - class JobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView): model = models.JobTemplate @@ -2772,10 +2858,12 @@ class JobTemplateAccessList(ResourceAccessList): class JobTemplateObjectRolesList(SubListAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.JobTemplate search_fields = ('role_field', 'content_type__model') + deprecated = True def get_queryset(self): po = self.get_parent_object() @@ -3158,10 +3246,12 @@ class WorkflowJobTemplateAccessList(ResourceAccessList): class WorkflowJobTemplateObjectRolesList(SubListAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.WorkflowJobTemplate search_fields = ('role_field', 'content_type__model') + deprecated = True def get_queryset(self): po = self.get_parent_object() @@ -4170,6 +4260,7 @@ class ActivityStreamDetail(RetrieveAPIView): class RoleList(ListAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer permission_classes = (IsAuthenticated,) @@ -4177,11 +4268,13 @@ class RoleList(ListAPIView): class RoleDetail(RetrieveAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer class RoleUsersList(SubListAttachDetachAPIView): + deprecated = True model = models.User serializer_class = serializers.UserSerializer parent_model = models.Role @@ -4216,6 +4309,7 @@ class RoleUsersList(SubListAttachDetachAPIView): class RoleTeamsList(SubListAttachDetachAPIView): + deprecated = True model = models.Team serializer_class = serializers.TeamSerializer parent_model = models.Role @@ -4260,10 +4354,12 @@ class RoleTeamsList(SubListAttachDetachAPIView): team.member_role.children.remove(role) else: team.member_role.children.add(role) + return Response(status=status.HTTP_204_NO_CONTENT) class RoleParentsList(SubListAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.Role @@ -4277,6 +4373,7 @@ class RoleParentsList(SubListAPIView): class RoleChildrenList(SubListAPIView): + deprecated = True model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.Role diff --git a/awx/api/views/analytics.py b/awx/api/views/analytics.py index 9f6066084f..b19acd7d15 100644 --- a/awx/api/views/analytics.py +++ b/awx/api/views/analytics.py @@ -48,23 +48,23 @@ class AnalyticsRootView(APIView): def get(self, request, format=None): data = OrderedDict() - data['authorized'] = reverse('api:analytics_authorized') - data['reports'] = reverse('api:analytics_reports_list') - data['report_options'] = reverse('api:analytics_report_options_list') - data['adoption_rate'] = reverse('api:analytics_adoption_rate') - data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options') - data['event_explorer'] = reverse('api:analytics_event_explorer') - data['event_explorer_options'] = reverse('api:analytics_event_explorer_options') - data['host_explorer'] = reverse('api:analytics_host_explorer') - 
data['host_explorer_options'] = reverse('api:analytics_host_explorer_options') - data['job_explorer'] = reverse('api:analytics_job_explorer') - data['job_explorer_options'] = reverse('api:analytics_job_explorer_options') - data['probe_templates'] = reverse('api:analytics_probe_templates_explorer') - data['probe_templates_options'] = reverse('api:analytics_probe_templates_options') - data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer') - data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options') - data['roi_templates'] = reverse('api:analytics_roi_templates_explorer') - data['roi_templates_options'] = reverse('api:analytics_roi_templates_options') + data['authorized'] = reverse('api:analytics_authorized', request=request) + data['reports'] = reverse('api:analytics_reports_list', request=request) + data['report_options'] = reverse('api:analytics_report_options_list', request=request) + data['adoption_rate'] = reverse('api:analytics_adoption_rate', request=request) + data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options', request=request) + data['event_explorer'] = reverse('api:analytics_event_explorer', request=request) + data['event_explorer_options'] = reverse('api:analytics_event_explorer_options', request=request) + data['host_explorer'] = reverse('api:analytics_host_explorer', request=request) + data['host_explorer_options'] = reverse('api:analytics_host_explorer_options', request=request) + data['job_explorer'] = reverse('api:analytics_job_explorer', request=request) + data['job_explorer_options'] = reverse('api:analytics_job_explorer_options', request=request) + data['probe_templates'] = reverse('api:analytics_probe_templates_explorer', request=request) + data['probe_templates_options'] = reverse('api:analytics_probe_templates_options', request=request) + data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer', request=request) + data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options', request=request) + data['roi_templates'] = reverse('api:analytics_roi_templates_explorer', request=request) + data['roi_templates_options'] = reverse('api:analytics_roi_templates_options', request=request) return Response(data) diff --git a/awx/api/views/bulk.py b/awx/api/views/bulk.py index f8d52354ce..a78dc43a37 100644 --- a/awx/api/views/bulk.py +++ b/awx/api/views/bulk.py @@ -34,6 +34,7 @@ class BulkView(APIView): '''List top level resources''' data = OrderedDict() data['host_create'] = reverse('api:bulk_host_create', request=request) + data['host_delete'] = reverse('api:bulk_host_delete', request=request) data['job_launch'] = reverse('api:bulk_job_launch', request=request) return Response(data) @@ -72,3 +73,20 @@ class BulkHostCreateView(GenericAPIView): result = serializer.create(serializer.validated_data) return Response(result, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + +class BulkHostDeleteView(GenericAPIView): + permission_classes = [IsAuthenticated] + model = Host + serializer_class = serializers.BulkHostDeleteSerializer + allowed_methods = ['GET', 'POST', 'OPTIONS'] + + def get(self, request): + return Response({"detail": "Bulk delete hosts with this endpoint"}, status=status.HTTP_200_OK) + + def post(self, request): + serializer = serializers.BulkHostDeleteSerializer(data=request.data, context={'request': request}) + if serializer.is_valid(): + 
result = serializer.delete(serializer.validated_data) + return Response(result, status=status.HTTP_201_CREATED) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) diff --git a/awx/api/views/instance_install_bundle.py b/awx/api/views/instance_install_bundle.py index 9ae7f7c460..6e4d802ed0 100644 --- a/awx/api/views/instance_install_bundle.py +++ b/awx/api/views/instance_install_bundle.py @@ -124,10 +124,19 @@ def generate_inventory_yml(instance_obj): def generate_group_vars_all_yml(instance_obj): + # get peers peers = [] - for instance in instance_obj.peers.all(): - peers.append(dict(host=instance.hostname, port=instance.listener_port)) - all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj, peers=peers)) + for addr in instance_obj.peers.select_related('instance'): + peers.append(dict(address=addr.get_full_address(), protocol=addr.protocol)) + context = dict(instance=instance_obj, peers=peers) + + canonical_addr = instance_obj.canonical_address + if canonical_addr: + context['listener_port'] = canonical_addr.port + protocol = canonical_addr.protocol if canonical_addr.protocol != 'wss' else 'ws' + context['listener_protocol'] = protocol + + all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=context) # convert consecutive newlines with a single newline return re.sub(r'\n+', '\n', all_yaml) diff --git a/awx/api/views/inventory.py b/awx/api/views/inventory.py index 4085cf9bff..fb4f8e482e 100644 --- a/awx/api/views/inventory.py +++ b/awx/api/views/inventory.py @@ -152,6 +152,7 @@ class InventoryObjectRolesList(SubListAPIView): serializer_class = RoleSerializer parent_model = Inventory search_fields = ('role_field', 'content_type__model') + deprecated = True def get_queryset(self): po = self.get_parent_object() diff --git a/awx/api/views/mesh_visualizer.py b/awx/api/views/mesh_visualizer.py index d09dab0732..e768989729 100644 --- a/awx/api/views/mesh_visualizer.py +++ b/awx/api/views/mesh_visualizer.py @@ -17,7 +17,7 @@ class MeshVisualizer(APIView): def get(self, request, format=None): data = { 'nodes': InstanceNodeSerializer(Instance.objects.all(), many=True).data, - 'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source'), many=True).data, + 'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target__instance', 'source'), many=True).data, } return Response(data) diff --git a/awx/api/views/organization.py b/awx/api/views/organization.py index fc8610d347..b82f4b3a4b 100644 --- a/awx/api/views/organization.py +++ b/awx/api/views/organization.py @@ -226,6 +226,7 @@ class OrganizationObjectRolesList(SubListAPIView): serializer_class = RoleSerializer parent_model = Organization search_fields = ('role_field', 'content_type__model') + deprecated = True def get_queryset(self): po = self.get_parent_object() diff --git a/awx/api/views/root.py b/awx/api/views/root.py index 3a9a910e1c..a9f973244e 100644 --- a/awx/api/views/root.py +++ b/awx/api/views/root.py @@ -13,6 +13,7 @@ from django.utils.decorators import method_decorator from django.views.decorators.csrf import ensure_csrf_cookie from django.template.loader import render_to_string from django.utils.translation import gettext_lazy as _ +from django.urls import reverse as django_reverse from rest_framework.permissions import AllowAny, IsAuthenticated from rest_framework.response import Response @@ -27,7 +28,7 @@ from awx.main.analytics import all_collectors from awx.main.ha import 
is_ha_environment from awx.main.utils import get_awx_version, get_custom_venv_choices from awx.main.utils.licensing import validate_entitlement_manifest -from awx.api.versioning import reverse, drf_reverse +from awx.api.versioning import URLPathVersioning, is_optional_api_urlpattern_prefix_request, reverse, drf_reverse from awx.main.constants import PRIVILEGE_ESCALATION_METHODS from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate from awx.main.utils import set_environ @@ -39,19 +40,19 @@ logger = logging.getLogger('awx.api.views.root') class ApiRootView(APIView): permission_classes = (AllowAny,) name = _('REST API') - versioning_class = None + versioning_class = URLPathVersioning swagger_topic = 'Versioning' @method_decorator(ensure_csrf_cookie) def get(self, request, format=None): '''List supported API versions''' - - v2 = reverse('api:api_v2_root_view', kwargs={'version': 'v2'}) + v2 = reverse('api:api_v2_root_view', request=request, kwargs={'version': 'v2'}) data = OrderedDict() data['description'] = _('AWX REST API') data['current_version'] = v2 data['available_versions'] = dict(v2=v2) - data['oauth2'] = drf_reverse('api:oauth_authorization_root_view') + if not is_optional_api_urlpattern_prefix_request(request): + data['oauth2'] = drf_reverse('api:oauth_authorization_root_view') data['custom_logo'] = settings.CUSTOM_LOGO data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO data['login_redirect_override'] = settings.LOGIN_REDIRECT_OVERRIDE @@ -84,6 +85,7 @@ class ApiVersionRootView(APIView): data['ping'] = reverse('api:api_v2_ping_view', request=request) data['instances'] = reverse('api:instance_list', request=request) data['instance_groups'] = reverse('api:instance_group_list', request=request) + data['receptor_addresses'] = reverse('api:receptor_addresses_list', request=request) data['config'] = reverse('api:api_v2_config_view', request=request) data['settings'] = reverse('api:setting_category_list', request=request) data['me'] = reverse('api:user_me_list', request=request) @@ -129,6 +131,10 @@ class ApiVersionRootView(APIView): data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request) data['bulk'] = reverse('api:bulk', request=request) data['analytics'] = reverse('api:analytics_root_view', request=request) + data['service_index'] = django_reverse('service-index-root') + data['role_definitions'] = django_reverse('roledefinition-list') + data['role_user_assignments'] = django_reverse('roleuserassignment-list') + data['role_team_assignments'] = django_reverse('roleteamassignment-list') return Response(data) diff --git a/awx/api/views/webhooks.py b/awx/api/views/webhooks.py index a1d3e27203..c0fa81380e 100644 --- a/awx/api/views/webhooks.py +++ b/awx/api/views/webhooks.py @@ -1,4 +1,4 @@ -from hashlib import sha1 +from hashlib import sha1, sha256 import hmac import logging import urllib.parse @@ -99,14 +99,31 @@ class WebhookReceiverBase(APIView): def get_signature(self): raise NotImplementedError + def must_check_signature(self): + return True + + def is_ignored_request(self): + return False + def check_signature(self, obj): if not obj.webhook_key: raise PermissionDenied + if not self.must_check_signature(): + logger.debug("skipping signature validation") + return - mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha1) - logger.debug("header signature: %s", self.get_signature()) + hash_alg, expected_digest = self.get_signature() + if hash_alg == 'sha1': + mac = 
hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha1) + elif hash_alg == 'sha256': + mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha256) + else: + logger.debug("Unsupported signature type, supported: sha1, sha256, received: {}".format(hash_alg)) + raise PermissionDenied + + logger.debug("header signature: %s", expected_digest) logger.debug("calculated signature: %s", force_bytes(mac.hexdigest())) - if not hmac.compare_digest(force_bytes(mac.hexdigest()), self.get_signature()): + if not hmac.compare_digest(force_bytes(mac.hexdigest()), expected_digest): raise PermissionDenied @csrf_exempt @@ -118,6 +135,10 @@ class WebhookReceiverBase(APIView): obj = self.get_object() self.check_signature(obj) + if self.is_ignored_request(): + # This was an ignored request type (e.g. ping), don't act on it + return Response({'message': _("Webhook ignored")}, status=status.HTTP_200_OK) + event_type = self.get_event_type() event_guid = self.get_event_guid() event_ref = self.get_event_ref() @@ -186,7 +207,7 @@ class GithubWebhookReceiver(WebhookReceiverBase): if hash_alg != 'sha1': logger.debug("Unsupported signature type, expected: sha1, received: {}".format(hash_alg)) raise PermissionDenied - return force_bytes(signature) + return hash_alg, force_bytes(signature) class GitlabWebhookReceiver(WebhookReceiverBase): @@ -214,15 +235,73 @@ class GitlabWebhookReceiver(WebhookReceiverBase): return "{}://{}/api/v4/projects/{}/statuses/{}".format(parsed.scheme, parsed.netloc, project['id'], self.get_event_ref()) - def get_signature(self): - return force_bytes(self.request.META.get('HTTP_X_GITLAB_TOKEN') or '') - def check_signature(self, obj): if not obj.webhook_key: raise PermissionDenied + token_from_request = force_bytes(self.request.META.get('HTTP_X_GITLAB_TOKEN') or '') + # GitLab only returns the secret token, not an hmac hash. Use # the hmac `compare_digest` helper function to prevent timing # analysis by attackers. 
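Before the Bitbucket receiver introduced just below: a sender-side sketch of the signature scheme that `check_signature()` above verifies. The key and payload are example values; the `<alg>=<hexdigest>` header format is what `get_signature()` later splits on `'='`:

```python
# Sender-side view of the webhook signature the receiver recomputes.
import hmac
from hashlib import sha256

webhook_key = b"shared-secret-configured-in-awx"  # example value
body = b'{"eventKey": "repo:refs_changed"}'       # example payload

digest = hmac.new(webhook_key, msg=body, digestmod=sha256).hexdigest()
headers = {"X-Hub-Signature": f"sha256={digest}"}
# The receiver recomputes the digest over the raw request body and compares
# with hmac.compare_digest() to avoid leaking timing information.
```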
-        if not hmac.compare_digest(force_bytes(obj.webhook_key), self.get_signature()):
+        if not hmac.compare_digest(force_bytes(obj.webhook_key), token_from_request):
             raise PermissionDenied
+
+
+class BitbucketDcWebhookReceiver(WebhookReceiverBase):
+    service = 'bitbucket_dc'
+
+    ref_keys = {
+        'repo:refs_changed': 'changes.0.toHash',
+        'mirror:repo_synchronized': 'changes.0.toHash',
+        'pr:opened': 'pullRequest.toRef.latestCommit',
+        'pr:from_ref_updated': 'pullRequest.toRef.latestCommit',
+        'pr:modified': 'pullRequest.toRef.latestCommit',
+    }
+
+    def get_event_type(self):
+        return self.request.META.get('HTTP_X_EVENT_KEY')
+
+    def get_event_guid(self):
+        return self.request.META.get('HTTP_X_REQUEST_ID')
+
+    def get_event_status_api(self):
+        # https://<bitbucket-server>/rest/build-status/1.0/commits/<commit-id>
+        if self.get_event_type() not in self.ref_keys.keys():
+            return
+        if self.get_event_ref() is None:
+            return
+        any_url = None
+        if 'actor' in self.request.data:
+            any_url = self.request.data['actor'].get('links', {}).get('self')
+        if any_url is None and 'repository' in self.request.data:
+            any_url = self.request.data['repository'].get('links', {}).get('self')
+        if any_url is None:
+            return
+        any_url = any_url[0].get('href')
+        if any_url is None:
+            return
+        parsed = urllib.parse.urlparse(any_url)
+
+        return "{}://{}/rest/build-status/1.0/commits/{}".format(parsed.scheme, parsed.netloc, self.get_event_ref())
+
+    def is_ignored_request(self):
+        return self.get_event_type() not in [
+            'repo:refs_changed',
+            'mirror:repo_synchronized',
+            'pr:opened',
+            'pr:from_ref_updated',
+            'pr:modified',
+        ]
+
+    def must_check_signature(self):
+        # Bitbucket does not sign ping requests...
+        return self.get_event_type() != 'diagnostics:ping'
+
+    def get_signature(self):
+        header_sig = self.request.META.get('HTTP_X_HUB_SIGNATURE')
+        if not header_sig:
+            logger.debug("Expected signature missing from header key HTTP_X_HUB_SIGNATURE")
+            raise PermissionDenied
+        hash_alg, signature = header_sig.split('=')
+        return hash_alg, force_bytes(signature)
diff --git a/awx/conf/conf.py b/awx/conf/conf.py
index 019bd1d068..69aa5d3515 100644
--- a/awx/conf/conf.py
+++ b/awx/conf/conf.py
@@ -55,6 +55,7 @@ register(
     # Optional; category_slug will be slugified version of category if not
     # explicitly provided.
     category_slug='cows',
+    hidden=True,
 )
diff --git a/awx/conf/models.py b/awx/conf/models.py
index 25cf0cd584..e4442fcecf 100644
--- a/awx/conf/models.py
+++ b/awx/conf/models.py
@@ -7,8 +7,10 @@ import json
 # Django
 from django.db import models
 
+from ansible_base.lib.utils.models import prevent_search
+
 # AWX
-from awx.main.models.base import CreatedModifiedModel, prevent_search
+from awx.main.models.base import CreatedModifiedModel
 from awx.main.utils import encrypt_field
 from awx.conf import settings_registry
diff --git a/awx/conf/registry.py b/awx/conf/registry.py
index da056e99db..fdce19b305 100644
--- a/awx/conf/registry.py
+++ b/awx/conf/registry.py
@@ -127,6 +127,8 @@ class SettingsRegistry(object):
         encrypted = bool(field_kwargs.pop('encrypted', False))
         defined_in_file = bool(field_kwargs.pop('defined_in_file', False))
         unit = field_kwargs.pop('unit', None)
+        hidden = field_kwargs.pop('hidden', False)
+        warning_text = field_kwargs.pop('warning_text', None)
         if getattr(field_kwargs.get('child', None), 'source', None) is not None:
             field_kwargs['child'].source = None
         field_instance = field_class(**field_kwargs)
@@ -134,12 +136,14 @@ class SettingsRegistry(object):
         field_instance.category = category
         field_instance.depends_on = depends_on
         field_instance.unit = unit
+        field_instance.hidden = hidden
         if placeholder is not empty:
             field_instance.placeholder = placeholder
         field_instance.defined_in_file = defined_in_file
         if field_instance.defined_in_file:
             field_instance.help_text = str(_('This value has been set manually in a settings file.')) + '\n\n' + str(field_instance.help_text)
         field_instance.encrypted = encrypted
+        field_instance.warning_text = warning_text
         original_field_instance = field_instance
         if field_class != original_field_class:
             original_field_instance = original_field_class(**field_kwargs)
diff --git a/awx/conf/settings.py b/awx/conf/settings.py
index 7d9ca68b37..dfe2e50602 100644
--- a/awx/conf/settings.py
+++ b/awx/conf/settings.py
@@ -1,6 +1,7 @@
 # Python
 import contextlib
 import logging
+import psycopg
 import threading
 import time
 import os
@@ -13,7 +14,7 @@ from django.conf import settings, UserSettingsHolder
 from django.core.cache import cache as django_cache
 from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
 from django.db import transaction, connection
-from django.db.utils import Error as DBError, ProgrammingError
+from django.db.utils import DatabaseError, ProgrammingError
 from django.utils.functional import cached_property
 
 # Django REST Framework
@@ -80,18 +81,26 @@ def _ctit_db_wrapper(trans_safe=False):
             logger.debug('Obtaining database settings in spite of broken transaction.')
             transaction.set_rollback(False)
         yield
-    except DBError as exc:
+    except ProgrammingError as e:
+        # Exception raised for programming errors
+        # Examples may be table not found or already exists,
+        # syntax error in the SQL statement, wrong number of parameters specified, etc.
+        # This generally means we can't fetch Tower configuration
+        # because the database hasn't actually finished migrating yet;
+        # this is usually a sign that a service in a container (such as ws_broadcast)
+        # has come up *before* the database has finished migrating, and
+        # especially that the conf.settings table doesn't exist yet
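The new DatabaseError branch just below reads the five-character SQLSTATE off the underlying psycopg exception, which Django chains onto __cause__, and resolves it to a named error class via psycopg.errors.lookup. A sketch of that introspection pattern, run inside a configured Django context (the failing query and table name are hypothetical):

import psycopg
from django.db import connection
from django.db.utils import DatabaseError

try:
    with connection.cursor() as cursor:
        cursor.execute('SELECT 1 FROM missing_table')  # hypothetical failing query
except DatabaseError as e:
    cause = e.__cause__  # Django keeps the original driver exception here
    if cause is not None and hasattr(cause, 'sqlstate'):
        # lookup() maps a SQLSTATE code to the matching psycopg error class,
        # e.g. '42P01' -> UndefinedTable
        print(cause.sqlstate, psycopg.errors.lookup(cause.sqlstate).__name__)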
if trans_safe: - level = logger.warning - if isinstance(exc, ProgrammingError): - if 'relation' in str(exc) and 'does not exist' in str(exc): - # this generally means we can't fetch Tower configuration - # because the database hasn't actually finished migrating yet; - # this is usually a sign that a service in a container (such as ws_broadcast) - # has come up *before* the database has finished migrating, and - # especially that the conf.settings table doesn't exist yet - level = logger.debug - level(f'Database settings are not available, using defaults. error: {str(exc)}') + logger.debug(f'Database settings are not available, using defaults. error: {str(e)}') + else: + logger.exception('Error modifying something related to database settings.') + except DatabaseError as e: + if trans_safe: + cause = e.__cause__ + if cause and hasattr(cause, 'sqlstate'): + sqlstate = cause.sqlstate + sqlstate_str = psycopg.errors.lookup(sqlstate) + logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str)) else: logger.exception('Error modifying something related to database settings.') finally: diff --git a/awx/main/access.py b/awx/main/access.py index 9e40d1fb50..f89d05cd2b 100644 --- a/awx/main/access.py +++ b/awx/main/access.py @@ -20,11 +20,15 @@ from rest_framework.exceptions import ParseError, PermissionDenied # Django OAuth Toolkit from awx.main.models.oauth import OAuth2Application, OAuth2AccessToken +# django-ansible-base +from ansible_base.lib.utils.validation import to_python_boolean +from ansible_base.rbac.models import RoleEvaluation +from ansible_base.rbac import permission_registry + # AWX from awx.main.utils import ( get_object_or_400, get_pk_from_dict, - to_python_boolean, get_licenser, ) from awx.main.models import ( @@ -56,6 +60,7 @@ from awx.main.models import ( Project, ProjectUpdate, ProjectUpdateEvent, + ReceptorAddress, Role, Schedule, SystemJob, @@ -70,8 +75,6 @@ from awx.main.models import ( WorkflowJobTemplateNode, WorkflowApproval, WorkflowApprovalTemplate, - ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, - ROLE_SINGLETON_SYSTEM_AUDITOR, ) from awx.main.models.mixins import ResourceMixin @@ -262,7 +265,11 @@ class BaseAccess(object): return self.can_change(obj, data) def can_delete(self, obj): - return self.user.is_superuser + if self.user.is_superuser: + return True + if obj._meta.model_name in [cls._meta.model_name for cls in permission_registry.all_registered_models]: + return self.user.has_obj_perm(obj, 'delete') + return False def can_copy(self, obj): return self.can_add({'reference_obj': obj}) @@ -637,7 +644,10 @@ class UserAccess(BaseAccess): """ model = User - prefetch_related = ('profile',) + prefetch_related = ( + 'profile', + 'resource', + ) def filtered_queryset(self): if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()): @@ -646,9 +656,7 @@ class UserAccess(BaseAccess): qs = ( User.objects.filter(pk__in=Organization.accessible_objects(self.user, 'read_role').values('member_role__members')) | User.objects.filter(pk=self.user.id) - | User.objects.filter( - pk__in=Role.objects.filter(singleton_name__in=[ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR]).values('members') - ) + | User.objects.filter(is_superuser=True) ).distinct() return qs @@ -706,6 +714,15 @@ class UserAccess(BaseAccess): if not allow_orphans: # in these cases only superusers can modify orphan users return False + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + # Permission granted if the user has 
all permissions that the target user has + target_perms = set( + RoleEvaluation.objects.filter(role__in=obj.has_roles.all()).values_list('object_id', 'content_type_id', 'codename').distinct() + ) + user_perms = set( + RoleEvaluation.objects.filter(role__in=self.user.has_roles.all()).values_list('object_id', 'content_type_id', 'codename').distinct() + ) + return not (target_perms - user_perms) return not obj.roles.all().exclude(ancestors__in=self.user.roles.all()).exists() else: return self.is_all_org_admin(obj) @@ -833,6 +850,7 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess): prefetch_related = ( 'created_by', 'modified_by', + 'resource', # dab_resource_registry ) # organization admin_role is not a parent of organization auditor_role notification_attach_roles = ['admin_role', 'auditor_role'] @@ -943,9 +961,6 @@ class InventoryAccess(BaseAccess): def can_update(self, obj): return self.user in obj.update_role - def can_delete(self, obj): - return self.can_admin(obj, None) - def can_run_ad_hoc_commands(self, obj): return self.user in obj.adhoc_role @@ -1301,6 +1316,7 @@ class TeamAccess(BaseAccess): 'created_by', 'modified_by', 'organization', + 'resource', # dab_resource_registry ) def filtered_queryset(self): @@ -1398,8 +1414,12 @@ class ExecutionEnvironmentAccess(BaseAccess): def can_change(self, obj, data): if obj and obj.organization_id is None: raise PermissionDenied - if self.user not in obj.organization.execution_environment_admin_role: - raise PermissionDenied + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + if not self.user.has_obj_perm(obj, 'change'): + raise PermissionDenied + else: + if self.user not in obj.organization.execution_environment_admin_role: + raise PermissionDenied if data and 'organization' in data: new_org = get_object_from_data('organization', Organization, data, obj=obj) if not new_org or self.user not in new_org.execution_environment_admin_role: @@ -2429,6 +2449,29 @@ class InventoryUpdateEventAccess(BaseAccess): return False +class ReceptorAddressAccess(BaseAccess): + """ + I can see receptor address records whenever I can access the instance + """ + + model = ReceptorAddress + + def filtered_queryset(self): + return self.model.objects.filter(Q(instance__in=Instance.accessible_pk_qs(self.user, 'read_role'))) + + @check_superuser + def can_add(self, data): + return False + + @check_superuser + def can_change(self, obj, data): + return False + + @check_superuser + def can_delete(self, obj): + return False + + class SystemJobEventAccess(BaseAccess): """ I can only see manage System Jobs events if I'm a super user @@ -2562,6 +2605,8 @@ class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess): if not JobLaunchConfigAccess(self.user).can_add(data): return False if not data: + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + return self.user.has_roles.filter(permission_partials__codename__in=['execute_jobtemplate', 'update_project', 'update_inventory']).exists() return Role.objects.filter(role_field__in=['update_role', 'execute_role'], ancestors__in=self.user.roles.all()).exists() return self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role', mandatory=True) @@ -2590,6 +2635,8 @@ class NotificationTemplateAccess(BaseAccess): prefetch_related = ('created_by', 'modified_by', 'organization') def filtered_queryset(self): + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + return self.model.access_qs(self.user, 'view') return self.model.objects.filter( 
Q(organization__in=Organization.accessible_objects(self.user, 'notification_admin_role')) | Q(organization__in=self.user.auditor_of_organizations) ).distinct() @@ -2758,7 +2805,7 @@ class ActivityStreamAccess(BaseAccess): | Q(notification_template__organization__in=auditing_orgs) | Q(notification__notification_template__organization__in=auditing_orgs) | Q(label__organization__in=auditing_orgs) - | Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else []) + | Q(role__in=Role.visible_roles(self.user) if auditing_orgs else []) ) project_set = Project.accessible_pk_qs(self.user, 'read_role') @@ -2815,13 +2862,10 @@ class RoleAccess(BaseAccess): def filtered_queryset(self): result = Role.visible_roles(self.user) - # Sanity check: is the requesting user an orphaned non-admin/auditor? - # if yes, make system admin/auditor mandatorily visible. - if not self.user.is_superuser and not self.user.is_system_auditor and not self.user.organizations.exists(): - mandatories = ('system_administrator', 'system_auditor') - super_qs = Role.objects.filter(singleton_name__in=mandatories) - result = result | super_qs - return result + # Make system admin/auditor mandatorily visible. + mandatories = ('system_administrator', 'system_auditor') + super_qs = Role.objects.filter(singleton_name__in=mandatories) + return result | super_qs def can_add(self, obj, data): # Unsupported for now diff --git a/awx/main/analytics/analytics_tasks.py b/awx/main/analytics/analytics_tasks.py index 3072577466..6aa08ab9a4 100644 --- a/awx/main/analytics/analytics_tasks.py +++ b/awx/main/analytics/analytics_tasks.py @@ -2,7 +2,7 @@ import logging # AWX -from awx.main.analytics.subsystem_metrics import Metrics +from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics from awx.main.dispatch.publish import task from awx.main.dispatch import get_task_queuename @@ -11,4 +11,5 @@ logger = logging.getLogger('awx.main.scheduler') @task(queue=get_task_queuename) def send_subsystem_metrics(): - Metrics().send_metrics() + DispatcherMetrics().send_metrics() + CallbackReceiverMetrics().send_metrics() diff --git a/awx/main/analytics/collectors.py b/awx/main/analytics/collectors.py index c080bb5af9..979ebfffa9 100644 --- a/awx/main/analytics/collectors.py +++ b/awx/main/analytics/collectors.py @@ -419,7 +419,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create resolved_action, resolved_role, -- '-' operator listed here: - -- https://www.postgresql.org/docs/12/functions-json.html + -- https://www.postgresql.org/docs/15/functions-json.html -- note that operator is only supported by jsonb objects -- https://www.postgresql.org/docs/current/datatype-json.html (CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats, diff --git a/awx/main/analytics/subsystem_metrics.py b/awx/main/analytics/subsystem_metrics.py index 9b93b98bda..2662a257fc 100644 --- a/awx/main/analytics/subsystem_metrics.py +++ b/awx/main/analytics/subsystem_metrics.py @@ -1,10 +1,15 @@ +import itertools import redis import json import time import logging +import prometheus_client +from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily +from prometheus_client.registry import CollectorRegistry from django.conf import settings -from django.apps import apps +from django.http import HttpRequest +from rest_framework.request import Request from awx.main.consumers import emit_channel_notification from awx.main.utils import 
is_testing
@@ -13,6 +18,30 @@
 root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
 
 logger = logging.getLogger('awx.main.analytics')
 
 
+class MetricsNamespace:
+    def __init__(self, namespace):
+        self._namespace = namespace
+
+
+class MetricsServerSettings(MetricsNamespace):
+    def port(self):
+        return settings.METRICS_SUBSYSTEM_CONFIG['server'][self._namespace]['port']
+
+
+class MetricsServer(MetricsServerSettings):
+    def __init__(self, namespace, registry):
+        MetricsNamespace.__init__(self, namespace)
+        self._registry = registry
+
+    def start(self):
+        try:
+            # TODO: addr for ipv6 ?
+            prometheus_client.start_http_server(self.port(), addr='localhost', registry=self._registry)
+        except Exception:
+            logger.error(f"MetricsServer failed to start for service '{self._namespace}'.")
+            raise
+
+
 class BaseM:
     def __init__(self, field, help_text):
         self.field = field
@@ -148,76 +177,40 @@ class HistogramM(BaseM):
         return output_text
 
 
-class Metrics:
-    def __init__(self, auto_pipe_execute=False, instance_name=None):
+class Metrics(MetricsNamespace):
+    # metric name, help_text
+    METRICSLIST = []
+    _METRICSLIST = [
+        FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
+        IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
+        FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
+    ]
+
+    def __init__(self, namespace, auto_pipe_execute=False, instance_name=None, metrics_have_changed=True, **kwargs):
+        MetricsNamespace.__init__(self, namespace)
+
         self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline()
         self.conn = redis.Redis.from_url(settings.BROKER_URL)
         self.last_pipe_execute = time.time()
         # track if metrics have been modified since last saved to redis
         # start with True so that we get an initial save to redis
-        self.metrics_have_changed = True
+        self.metrics_have_changed = metrics_have_changed
         self.pipe_execute_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SAVE_TO_REDIS
         self.send_metrics_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SEND_METRICS
         # auto pipe execute will commit transaction of metric data to redis
         # at a regular interval (pipe_execute_interval).
If set to False, # the calling function should call .pipe_execute() explicitly self.auto_pipe_execute = auto_pipe_execute - Instance = apps.get_model('main', 'Instance') if instance_name: self.instance_name = instance_name elif is_testing(): self.instance_name = "awx_testing" else: - self.instance_name = Instance.objects.my_hostname() + self.instance_name = settings.CLUSTER_HOST_ID # Same as Instance.objects.my_hostname() BUT we do not need to import Instance - # metric name, help_text - METRICSLIST = [ - SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'), - IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'), - IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'), - IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'), - FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'), - IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'), - IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'), - HistogramM( - 'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS - ), - SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'), - FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'), - IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'), - FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'), - SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'), - SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'), - SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'), - SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'), - SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'), - IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'), - SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'), - SetIntM('task_manager_tasks_started', 'Number of tasks started'), - SetIntM('task_manager_running_processed', 'Number of running tasks processed'), - SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'), - SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'), - SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'), - SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'), - SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'), - SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'), - IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'), - SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'), - SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'), - SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'), - 
IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'), - SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'), - SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'), - SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'), - # dispatcher subsystem metrics - SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'), - SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'), - SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'), - SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'), - ] # turn metric list into dictionary with the metric name as a key self.METRICS = {} - for m in METRICSLIST: + for m in itertools.chain(self.METRICSLIST, self._METRICSLIST): self.METRICS[m.field] = m # track last time metrics were sent to other nodes @@ -230,7 +223,7 @@ class Metrics: m.reset_value(self.conn) self.metrics_have_changed = True self.conn.delete(root_key + "_lock") - for m in self.conn.scan_iter(root_key + '_instance_*'): + for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'): self.conn.delete(m) def inc(self, field, value): @@ -297,7 +290,7 @@ class Metrics: def send_metrics(self): # more than one thread could be calling this at the same time, so should # acquire redis lock before sending metrics - lock = self.conn.lock(root_key + '_lock') + lock = self.conn.lock(root_key + '-' + self._namespace + '_lock') if not lock.acquire(blocking=False): return try: @@ -307,9 +300,10 @@ class Metrics: payload = { 'instance': self.instance_name, 'metrics': serialized_metrics, + 'metrics_namespace': self._namespace, } # store the serialized data locally as well, so that load_other_metrics will read it - self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics) + self.conn.set(root_key + '-' + self._namespace + '_instance_' + self.instance_name, serialized_metrics) emit_channel_notification("metrics", payload) self.previous_send_metrics.set(current_time) @@ -331,14 +325,14 @@ class Metrics: instances_filter = request.query_params.getlist("node") # get a sorted list of instance names instance_names = [self.instance_name] - for m in self.conn.scan_iter(root_key + '_instance_*'): + for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'): instance_names.append(m.decode('UTF-8').split('_instance_')[1]) instance_names.sort() # load data, including data from the this local instance instance_data = {} for instance in instance_names: if len(instances_filter) == 0 or instance in instances_filter: - instance_data_from_redis = self.conn.get(root_key + '_instance_' + instance) + instance_data_from_redis = self.conn.get(root_key + '-' + self._namespace + '_instance_' + instance) # data from other instances may not be available. That is OK. 
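Because each service now keeps its metrics under its own namespace, every redis key and lock in the hunks above gains a '-<namespace>' segment after the root prefix. Sketched with illustrative values (root_key comes from SUBSYSTEM_METRICS_REDIS_KEY_PREFIX):

root_key = 'awx_metrics'  # illustrative value of the settings prefix

def instance_key(namespace: str, instance: str) -> str:
    # e.g. 'awx_metrics-callback_receiver_instance_awx-1'
    return root_key + '-' + namespace + '_instance_' + instance

def lock_key(namespace: str) -> str:
    # one send_metrics lock per namespace, so the dispatcher and the
    # callback receiver no longer contend for a single shared lock
    return root_key + '-' + namespace + '_lock'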
if instance_data_from_redis: instance_data[instance] = json.loads(instance_data_from_redis.decode('UTF-8')) @@ -357,6 +351,120 @@ class Metrics: return output_text +class DispatcherMetrics(Metrics): + METRICSLIST = [ + SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'), + SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'), + SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'), + SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'), + SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'), + IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'), + SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'), + SetIntM('task_manager_tasks_started', 'Number of tasks started'), + SetIntM('task_manager_running_processed', 'Number of running tasks processed'), + SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'), + SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'), + SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'), + SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'), + SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'), + SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'), + IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'), + SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'), + SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'), + SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'), + IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'), + SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'), + SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'), + SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'), + # dispatcher subsystem metrics + SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'), + SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'), + SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'), + SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'), + ] + + def __init__(self, *args, **kwargs): + super().__init__(settings.METRICS_SERVICE_DISPATCHER, *args, **kwargs) + + +class CallbackReceiverMetrics(Metrics): + METRICSLIST = [ + SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'), + IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'), + IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'), + IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'), + FloatM('callback_receiver_events_insert_db_seconds', 'Total 
time spent saving events to database'),
+        IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
+        IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
+        HistogramM(
+            'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
+        ),
+        SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
+    ]
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, *args, **kwargs)
+
 
 def metrics(request):
-    m = Metrics()
-    return m.generate_metrics(request)
+    output_text = ''
+    for m in [DispatcherMetrics(), CallbackReceiverMetrics()]:
+        output_text += m.generate_metrics(request)
+    return output_text
+
+
+class CustomToPrometheusMetricsCollector(prometheus_client.registry.Collector):
+    """
+    Takes the metric data from redis -> our custom metric fields -> prometheus
+    library metric fields.
+
+    The plan is to get rid of the use of redis, our custom metric fields, and
+    to switch fully to the prometheus library. At that point, this translation
+    code will be deleted.
+    """
+
+    def __init__(self, metrics_obj, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._metrics = metrics_obj
+
+    def collect(self):
+        my_hostname = settings.CLUSTER_HOST_ID
+
+        instance_data = self._metrics.load_other_metrics(Request(HttpRequest()))
+        if not instance_data:
+            logger.debug(f"No metric data found in redis for metric namespace '{self._metrics._namespace}'")
+            return None
+
+        host_metrics = instance_data.get(my_hostname)
+        for _, metric in self._metrics.METRICS.items():
+            entry = host_metrics.get(metric.field)
+            if not entry:
+                logger.debug(f"{self._metrics._namespace} metric '{metric.field}' not found in redis data payload {json.dumps(instance_data, indent=2)}")
+                continue
+            if isinstance(metric, HistogramM):
+                buckets = list(zip(metric.buckets, entry['counts']))
+                buckets = [[str(i[0]), str(i[1])] for i in buckets]
+                yield HistogramMetricFamily(metric.field, metric.help_text, buckets=buckets, sum_value=entry['sum'])
+            else:
+                yield GaugeMetricFamily(metric.field, metric.help_text, value=entry)
+
+
+class CallbackReceiverMetricsServer(MetricsServer):
+    def __init__(self):
+        registry = CollectorRegistry(auto_describe=True)
+        registry.register(CustomToPrometheusMetricsCollector(CallbackReceiverMetrics(metrics_have_changed=False)))
+        super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, registry)
+
+
+class DispatcherMetricsServer(MetricsServer):
+    def __init__(self):
+        registry = CollectorRegistry(auto_describe=True)
+        registry.register(CustomToPrometheusMetricsCollector(DispatcherMetrics(metrics_have_changed=False)))
+        super().__init__(settings.METRICS_SERVICE_DISPATCHER, registry)
+
+
+class WebsocketsMetricsServer(MetricsServer):
+    def __init__(self):
+        registry = CollectorRegistry(auto_describe=True)
+        # registry.register()
+        super().__init__(settings.METRICS_SERVICE_WEBSOCKETS, registry)
diff --git a/awx/main/apps.py b/awx/main/apps.py
index 7db39ca088..099caea96a 100644
--- a/awx/main/apps.py
+++ b/awx/main/apps.py
@@ -1,7 +1,40 @@
 from django.apps import AppConfig
 from django.utils.translation import gettext_lazy as _
 
+from awx.main.utils.named_url_graph import _customize_graph, generate_graph
+from awx.conf import register, fields
+
 
 class MainConfig(AppConfig):
     name = 'awx.main'
     verbose_name = _('Main')
+
+    def
load_named_url_feature(self): + models = [m for m in self.get_models() if hasattr(m, 'get_absolute_url')] + generate_graph(models) + _customize_graph() + register( + 'NAMED_URL_FORMATS', + field_class=fields.DictField, + read_only=True, + label=_('Formats of all available named urls'), + help_text=_('Read-only list of key-value pairs that shows the standard format of all available named URLs.'), + category=_('Named URL'), + category_slug='named-url', + ) + register( + 'NAMED_URL_GRAPH_NODES', + field_class=fields.DictField, + read_only=True, + label=_('List of all named url graph nodes.'), + help_text=_( + 'Read-only list of key-value pairs that exposes named URL graph topology.' + ' Use this list to programmatically generate named URLs for resources' + ), + category=_('Named URL'), + category_slug='named-url', + ) + + def ready(self): + super().ready() + + self.load_named_url_feature() diff --git a/awx/main/conf.py b/awx/main/conf.py index e11bb15ae4..b05c4e70c9 100644 --- a/awx/main/conf.py +++ b/awx/main/conf.py @@ -92,6 +92,7 @@ register( ), category=_('System'), category_slug='system', + required=False, ) register( @@ -774,6 +775,7 @@ register( allow_null=True, category=_('System'), category_slug='system', + required=False, ) register( 'AUTOMATION_ANALYTICS_LAST_ENTRIES', @@ -815,6 +817,7 @@ register( help_text=_('Max jobs to allow bulk jobs to launch'), category=_('Bulk Actions'), category_slug='bulk', + hidden=True, ) register( @@ -825,6 +828,18 @@ register( help_text=_('Max number of hosts to allow to be created in a single bulk action'), category=_('Bulk Actions'), category_slug='bulk', + hidden=True, +) + +register( + 'BULK_HOST_MAX_DELETE', + field_class=fields.IntegerField, + default=250, + label=_('Max number of hosts to allow to be deleted in a single bulk action'), + help_text=_('Max number of hosts to allow to be deleted in a single bulk action'), + category=_('Bulk Actions'), + category_slug='bulk', + hidden=True, ) register( @@ -835,6 +850,7 @@ register( help_text=_('Enable preview of new user interface.'), category=_('System'), category_slug='system', + hidden=True, ) register( diff --git a/awx/main/constants.py b/awx/main/constants.py index 66666f875f..115b062604 100644 --- a/awx/main/constants.py +++ b/awx/main/constants.py @@ -14,7 +14,7 @@ __all__ = [ 'STANDARD_INVENTORY_UPDATE_ENV', ] -CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights') +CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'terraform') PRIVILEGE_ESCALATION_METHODS = [ ('sudo', _('Sudo')), ('su', _('Su')), @@ -114,3 +114,28 @@ SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS = 'unique_managed_hosts' # Shared prefetch to use for creating a queryset for the purpose of writing or saving facts HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id') + +# Data for RBAC compatibility layer +role_name_to_perm_mapping = { + 'adhoc_role': ['adhoc_'], + 'approval_role': ['approve_'], + 'auditor_role': ['audit_'], + 'admin_role': ['change_', 'add_', 'delete_'], + 'execute_role': ['execute_'], + 'read_role': ['view_'], + 'update_role': ['update_'], + 'member_role': ['member_'], + 'use_role': ['use_'], +} + +org_role_to_permission = { + 'notification_admin_role': 'add_notificationtemplate', + 'project_admin_role': 'add_project', + 'execute_role': 'execute_jobtemplate', + 'inventory_admin_role': 'add_inventory', + 'credential_admin_role': 'add_credential', + 
'workflow_admin_role': 'add_workflowjobtemplate', + 'job_template_admin_role': 'change_jobtemplate', # TODO: this doesnt really work, solution not clear + 'execution_environment_admin_role': 'add_executionenvironment', + 'auditor_role': 'view_project', # TODO: also doesnt really work +} diff --git a/awx/main/consumers.py b/awx/main/consumers.py index f856ca915e..ccaf46e0d9 100644 --- a/awx/main/consumers.py +++ b/awx/main/consumers.py @@ -106,7 +106,7 @@ class RelayConsumer(AsyncJsonWebsocketConsumer): if group == "metrics": message = json.loads(message['text']) conn = redis.Redis.from_url(settings.BROKER_URL) - conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics']) + conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "-" + message['metrics_namespace'] + "_instance_" + message['instance'], message['metrics']) else: await self.channel_layer.group_send(group, message) diff --git a/awx/main/credential_plugins/aim.py b/awx/main/credential_plugins/aim.py index 048bd1b324..2476042b5f 100644 --- a/awx/main/credential_plugins/aim.py +++ b/awx/main/credential_plugins/aim.py @@ -58,7 +58,7 @@ aim_inputs = { 'id': 'object_property', 'label': _('Object Property'), 'type': 'string', - 'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'), + 'help_text': _('The property of the object to return. Available properties: Username, Password and Address.'), }, { 'id': 'reason', @@ -111,8 +111,12 @@ def aim_backend(**kwargs): object_property = 'Content' elif object_property.lower() == 'username': object_property = 'UserName' + elif object_property.lower() == 'password': + object_property = 'Content' + elif object_property.lower() == 'address': + object_property = 'Address' elif object_property not in res: - raise KeyError('Property {} not found in object'.format(object_property)) + raise KeyError('Property {} not found in object, available properties: Username, Password and Address'.format(object_property)) else: object_property = object_property.capitalize() diff --git a/awx/main/credential_plugins/azure_kv.py b/awx/main/credential_plugins/azure_kv.py index eecfde65b1..8910a0726d 100644 --- a/awx/main/credential_plugins/azure_kv.py +++ b/awx/main/credential_plugins/azure_kv.py @@ -1,9 +1,10 @@ +from azure.keyvault.secrets import SecretClient +from azure.identity import ClientSecretCredential +from msrestazure import azure_cloud + from .plugin import CredentialPlugin from django.utils.translation import gettext_lazy as _ -from azure.keyvault import KeyVaultClient, KeyVaultAuthentication -from azure.common.credentials import ServicePrincipalCredentials -from msrestazure import azure_cloud # https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py @@ -54,22 +55,9 @@ azure_keyvault_inputs = { def azure_keyvault_backend(**kwargs): - url = kwargs['url'] - [cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)] - - def auth_callback(server, resource, scope): - credentials = ServicePrincipalCredentials( - url=url, - client_id=kwargs['client'], - secret=kwargs['secret'], - tenant=kwargs['tenant'], - resource=f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}", - ) - token = credentials.token - return token['token_type'], token['access_token'] - - kv = KeyVaultClient(KeyVaultAuthentication(auth_callback)) - return kv.get_secret(url, kwargs['secret_field'], kwargs.get('secret_version', '')).value + csc = 
ClientSecretCredential(tenant_id=kwargs['tenant'], client_id=kwargs['client'], client_secret=kwargs['secret']) + kv = SecretClient(credential=csc, vault_url=kwargs['url']) + return kv.get_secret(name=kwargs['secret_field'], version=kwargs.get('secret_version', '')).value azure_keyvault_plugin = CredentialPlugin('Microsoft Azure Key Vault', inputs=azure_keyvault_inputs, backend=azure_keyvault_backend) diff --git a/awx/main/credential_plugins/hashivault.py b/awx/main/credential_plugins/hashivault.py index 62aabade61..f3dcd53b5d 100644 --- a/awx/main/credential_plugins/hashivault.py +++ b/awx/main/credential_plugins/hashivault.py @@ -87,6 +87,20 @@ base_inputs = { ' see https://www.vaultproject.io/docs/auth/kubernetes#configuration' ), }, + { + 'id': 'username', + 'label': _('Username'), + 'type': 'string', + 'secret': False, + 'help_text': _('Username for user authentication.'), + }, + { + 'id': 'password', + 'label': _('Password'), + 'type': 'string', + 'secret': True, + 'help_text': _('Password for user authentication.'), + }, { 'id': 'default_auth_path', 'label': _('Path to Auth'), @@ -185,9 +199,10 @@ hashi_ssh_inputs['required'].extend(['public_key', 'role']) def handle_auth(**kwargs): token = None - if kwargs.get('token'): token = kwargs['token'] + elif kwargs.get('username') and kwargs.get('password'): + token = method_auth(**kwargs, auth_param=userpass_auth(**kwargs)) elif kwargs.get('role_id') and kwargs.get('secret_id'): token = method_auth(**kwargs, auth_param=approle_auth(**kwargs)) elif kwargs.get('kubernetes_role'): @@ -195,11 +210,14 @@ def handle_auth(**kwargs): elif kwargs.get('client_cert_public') and kwargs.get('client_cert_private'): token = method_auth(**kwargs, auth_param=client_cert_auth(**kwargs)) else: - raise Exception('Either a token or AppRole, Kubernetes, or TLS authentication parameters must be set') - + raise Exception('Token, Username/Password, AppRole, Kubernetes, or TLS authentication parameters must be set') return token +def userpass_auth(**kwargs): + return {'username': kwargs['username'], 'password': kwargs['password']} + + def approle_auth(**kwargs): return {'role_id': kwargs['role_id'], 'secret_id': kwargs['secret_id']} @@ -227,11 +245,14 @@ def method_auth(**kwargs): cacert = kwargs.get('cacert', None) sess = requests.Session() + sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5)) # Namespace support if kwargs.get('namespace'): sess.headers['X-Vault-Namespace'] = kwargs['namespace'] request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/') + if kwargs['auth_param'].get('username'): + request_url = request_url + '/' + (kwargs['username']) with CertFiles(cacert) as cert: request_kwargs['verify'] = cert # TLS client certificate support @@ -263,6 +284,7 @@ def kv_backend(**kwargs): } sess = requests.Session() + sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5)) sess.headers['Authorization'] = 'Bearer {}'.format(token) # Compatibility header for older installs of Hashicorp Vault sess.headers['X-Vault-Token'] = token @@ -333,6 +355,7 @@ def ssh_backend(**kwargs): request_kwargs['json']['valid_principals'] = kwargs['valid_principals'] sess = requests.Session() + sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5)) sess.headers['Authorization'] = 'Bearer {}'.format(token) if kwargs.get('namespace'): sess.headers['X-Vault-Namespace'] = kwargs['namespace'] diff --git a/awx/main/dispatch/__init__.py b/awx/main/dispatch/__init__.py index d5349e02f5..5a6606ebdc 100644 --- a/awx/main/dispatch/__init__.py +++ 
b/awx/main/dispatch/__init__.py @@ -1,6 +1,7 @@ import os import psycopg import select +from copy import deepcopy from contextlib import contextmanager @@ -93,6 +94,26 @@ class PubSub(object): self.conn.close() +def create_listener_connection(): + conf = deepcopy(settings.DATABASES['default']) + conf['OPTIONS'] = deepcopy(conf.get('OPTIONS', {})) + # Modify the application name to distinguish from other connections the process might use + conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener') + + # Apply overrides specifically for the listener connection + for k, v in settings.LISTENER_DATABASES.get('default', {}).items(): + conf[k] = v + for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items(): + conf['OPTIONS'][k] = v + + # Allow password-less authentication + if 'PASSWORD' in conf: + conf['OPTIONS']['password'] = conf.pop('PASSWORD') + + connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} port={conf['PORT']}" + return psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS']) + + @contextmanager def pg_bus_conn(new_connection=False, select_timeout=None): ''' @@ -106,12 +127,7 @@ def pg_bus_conn(new_connection=False, select_timeout=None): ''' if new_connection: - conf = settings.DATABASES['default'].copy() - conf['OPTIONS'] = conf.get('OPTIONS', {}).copy() - # Modify the application name to distinguish from other connections the process might use - conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener') - connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} password={conf['PASSWORD']} port={conf['PORT']}" - conn = psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS']) + conn = create_listener_connection() else: if pg_connection.connection is None: pg_connection.connect() diff --git a/awx/main/dispatch/worker/base.py b/awx/main/dispatch/worker/base.py index 2ff8752f06..264205a8ed 100644 --- a/awx/main/dispatch/worker/base.py +++ b/awx/main/dispatch/worker/base.py @@ -162,13 +162,13 @@ class AWXConsumerRedis(AWXConsumerBase): class AWXConsumerPG(AWXConsumerBase): def __init__(self, *args, schedule=None, **kwargs): super().__init__(*args, **kwargs) - self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE + self.pg_max_wait = getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) # if no successful loops have ran since startup, then we should fail right away self.pg_is_down = True # set so that we fail if we get database errors on startup init_time = time.time() self.pg_down_time = init_time - self.pg_max_wait # allow no grace period self.last_cleanup = init_time - self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False) + self.subsystem_metrics = s_metrics.DispatcherMetrics(auto_pipe_execute=False) self.last_metrics_gather = init_time self.listen_cumulative_time = 0.0 if schedule: @@ -214,7 +214,10 @@ class AWXConsumerPG(AWXConsumerBase): # bypasses pg_notify for scheduled tasks self.dispatch_task(body) - self.pg_is_down = False + if self.pg_is_down: + logger.info('Dispatcher listener connection established') + self.pg_is_down = False + self.listen_start = time.time() return self.scheduler.time_until_next_run() @@ -256,6 +259,12 @@ class AWXConsumerPG(AWXConsumerBase): current_downtime = time.time() - self.pg_down_time if current_downtime > self.pg_max_wait: logger.exception(f"Postgres event consumer has not recovered in 
{current_downtime} s, exiting") + # Sending QUIT to multiprocess queue to signal workers to exit + for worker in self.pool.workers: + try: + worker.quit() + except Exception: + logger.exception(f"Error sending QUIT to worker {worker}") raise # Wait for a second before next attempt, but still listen for any shutdown signals for i in range(10): @@ -267,6 +276,12 @@ class AWXConsumerPG(AWXConsumerBase): except Exception: # Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata logger.exception('Encountered unhandled error in dispatcher main loop') + # Sending QUIT to multiprocess queue to signal workers to exit + for worker in self.pool.workers: + try: + worker.quit() + except Exception: + logger.exception(f"Error sending QUIT to worker {worker}") raise diff --git a/awx/main/dispatch/worker/callback.py b/awx/main/dispatch/worker/callback.py index 8dca8dd894..199302c76c 100644 --- a/awx/main/dispatch/worker/callback.py +++ b/awx/main/dispatch/worker/callback.py @@ -72,7 +72,7 @@ class CallbackBrokerWorker(BaseWorker): def __init__(self): self.buff = {} self.redis = redis.Redis.from_url(settings.BROKER_URL) - self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False) + self.subsystem_metrics = s_metrics.CallbackReceiverMetrics(auto_pipe_execute=False) self.queue_pop = 0 self.queue_name = settings.CALLBACK_QUEUE self.prof = AWXProfiler("CallbackBrokerWorker") diff --git a/awx/main/fields.py b/awx/main/fields.py index f3b99e2e70..c6d01f2795 100644 --- a/awx/main/fields.py +++ b/awx/main/fields.py @@ -5,6 +5,7 @@ import copy import json import re +import sys import urllib.parse from jinja2 import sandbox, StrictUndefined @@ -406,11 +407,13 @@ class SmartFilterField(models.TextField): # https://docs.python.org/2/library/stdtypes.html#truth-value-testing if not value: return None - value = urllib.parse.unquote(value) - try: - SmartFilter().query_from_string(value) - except RuntimeError as e: - raise models.base.ValidationError(e) + # avoid doing too much during migrations + if 'migrate' not in sys.argv: + value = urllib.parse.unquote(value) + try: + SmartFilter().query_from_string(value) + except RuntimeError as e: + raise models.base.ValidationError(e) return super(SmartFilterField, self).get_prep_value(value) diff --git a/awx/main/management/commands/add_receptor_address.py b/awx/main/management/commands/add_receptor_address.py new file mode 100644 index 0000000000..7ac7ac8be7 --- /dev/null +++ b/awx/main/management/commands/add_receptor_address.py @@ -0,0 +1,53 @@ +# Copyright (c) 2015 Ansible, Inc. +# All Rights Reserved + +from django.core.management.base import BaseCommand + +from awx.main.models import Instance, ReceptorAddress + + +def add_address(**kwargs): + try: + instance = Instance.objects.get(hostname=kwargs.pop('instance')) + kwargs['instance'] = instance + + if kwargs.get('canonical') and instance.receptor_addresses.filter(canonical=True).exclude(address=kwargs['address']).exists(): + print(f"Instance {instance.hostname} already has a canonical address, skipping") + return False + # if ReceptorAddress already exists with address, just update + # otherwise, create new ReceptorAddress + addr, _ = ReceptorAddress.objects.update_or_create(address=kwargs.pop('address'), defaults=kwargs) + print(f"Successfully added receptor address {addr.get_full_address()}") + return True + except Exception as e: + print(f"Error adding receptor address: {e}") + return False + + +class Command(BaseCommand): + """ + Internal controller command. 
+ Register receptor address to an already-registered instance. + """ + + help = "Add receptor address to an instance." + + def add_arguments(self, parser): + parser.add_argument('--instance', dest='instance', required=True, type=str, help="Instance hostname this address is added to") + parser.add_argument('--address', dest='address', required=True, type=str, help="Receptor address") + parser.add_argument('--port', dest='port', type=int, help="Receptor listener port") + parser.add_argument('--websocket_path', dest='websocket_path', type=str, default="", help="Path for websockets") + parser.add_argument('--is_internal', action='store_true', help="If true, address only resolvable within the Kubernetes cluster") + parser.add_argument('--protocol', type=str, default='tcp', choices=['tcp', 'ws', 'wss'], help="Protocol to use for the Receptor listener") + parser.add_argument('--canonical', action='store_true', help="If true, address is the canonical address for the instance") + parser.add_argument('--peers_from_control_nodes', action='store_true', help="If true, control nodes will peer to this address") + + def handle(self, **options): + address_options = { + k: options[k] + for k in ('instance', 'address', 'port', 'websocket_path', 'is_internal', 'protocol', 'peers_from_control_nodes', 'canonical') + if options[k] + } + changed = add_address(**address_options) + if changed: + print("(changed: True)") diff --git a/awx/main/management/commands/dump_auth_config.py b/awx/main/management/commands/dump_auth_config.py new file mode 100644 index 0000000000..ce8b778486 --- /dev/null +++ b/awx/main/management/commands/dump_auth_config.py @@ -0,0 +1,179 @@ +import json +import os +import sys +import re + +from typing import Any +from django.core.management.base import BaseCommand +from django.conf import settings +from awx.conf import settings_registry + + +class Command(BaseCommand): + help = 'Dump the current auth configuration in django_ansible_base.authenticator format, currently supports LDAP and SAML' + + DAB_SAML_AUTHENTICATOR_KEYS = { + "SP_ENTITY_ID": True, + "SP_PUBLIC_CERT": True, + "SP_PRIVATE_KEY": True, + "ORG_INFO": True, + "TECHNICAL_CONTACT": True, + "SUPPORT_CONTACT": True, + "SP_EXTRA": False, + "SECURITY_CONFIG": False, + "EXTRA_DATA": False, + "ENABLED_IDPS": True, + "CALLBACK_URL": False, + } + + DAB_LDAP_AUTHENTICATOR_KEYS = { + "SERVER_URI": True, + "BIND_DN": False, + "BIND_PASSWORD": False, + "CONNECTION_OPTIONS": False, + "GROUP_TYPE": True, + "GROUP_TYPE_PARAMS": True, + "GROUP_SEARCH": False, + "START_TLS": False, + "USER_DN_TEMPLATE": True, + "USER_ATTR_MAP": True, + "USER_SEARCH": False, + } + + def get_awx_ldap_settings(self) -> dict[str, dict[str, Any]]: + awx_ldap_settings = {} + + for awx_ldap_setting in settings_registry.get_registered_settings(category_slug='ldap'): + key = awx_ldap_setting.removeprefix("AUTH_LDAP_") + value = getattr(settings, awx_ldap_setting, None) + awx_ldap_settings[key] = value + + grouped_settings = {} + + for key, value in awx_ldap_settings.items(): + match = re.search(r'(\d+)', key) + index = int(match.group()) if match else 0 + new_key = re.sub(r'\d+_', '', key) + + if index not in grouped_settings: + grouped_settings[index] = {} + + grouped_settings[index][new_key] = value + if new_key == "GROUP_TYPE" and value: + grouped_settings[index][new_key] = type(value).__name__ + + if new_key == "SERVER_URI" and value: + value = value.split(", ") + + return grouped_settings + + def is_enabled(self, settings, keys): + for key, required in keys.items(): 
+ if required and not settings.get(key): + return False + return True + + def get_awx_saml_settings(self) -> dict[str, Any]: + awx_saml_settings = {} + for awx_saml_setting in settings_registry.get_registered_settings(category_slug='saml'): + awx_saml_settings[awx_saml_setting.removeprefix("SOCIAL_AUTH_SAML_")] = getattr(settings, awx_saml_setting, None) + + return awx_saml_settings + + def format_config_data(self, enabled, awx_settings, type, keys, name): + config = { + "type": f"awx.authentication.authenticator_plugins.{type}", + "name": name, + "enabled": enabled, + "create_objects": True, + "users_unique": False, + "remove_users": True, + "configuration": {}, + } + for k in keys: + v = awx_settings.get(k) + config["configuration"].update({k: v}) + + if type == "saml": + idp_to_key_mapping = { + "url": "IDP_URL", + "x509cert": "IDP_X509_CERT", + "entity_id": "IDP_ENTITY_ID", + "attr_email": "IDP_ATTR_EMAIL", + "attr_groups": "IDP_GROUPS", + "attr_username": "IDP_ATTR_USERNAME", + "attr_last_name": "IDP_ATTR_LAST_NAME", + "attr_first_name": "IDP_ATTR_FIRST_NAME", + "attr_user_permanent_id": "IDP_ATTR_USER_PERMANENT_ID", + } + for idp_name in awx_settings.get("ENABLED_IDPS", {}): + for key in idp_to_key_mapping: + value = awx_settings["ENABLED_IDPS"][idp_name].get(key) + if value is not None: + config["name"] = idp_name + config["configuration"].update({idp_to_key_mapping[key]: value}) + + return config + + def add_arguments(self, parser): + parser.add_argument( + "output_file", + nargs="?", + type=str, + default=None, + help="Output JSON file path", + ) + + def handle(self, *args, **options): + try: + data = [] + + # dump SAML settings + awx_saml_settings = self.get_awx_saml_settings() + awx_saml_enabled = self.is_enabled(awx_saml_settings, self.DAB_SAML_AUTHENTICATOR_KEYS) + if awx_saml_enabled: + awx_saml_name = awx_saml_settings["ENABLED_IDPS"] + data.append( + self.format_config_data( + awx_saml_enabled, + awx_saml_settings, + "saml", + self.DAB_SAML_AUTHENTICATOR_KEYS, + awx_saml_name, + ) + ) + + # dump LDAP settings + awx_ldap_group_settings = self.get_awx_ldap_settings() + for awx_ldap_name, awx_ldap_settings in enumerate(awx_ldap_group_settings.values()): + enabled = self.is_enabled(awx_ldap_settings, self.DAB_LDAP_AUTHENTICATOR_KEYS) + if enabled: + data.append( + self.format_config_data( + enabled, + awx_ldap_settings, + "ldap", + self.DAB_LDAP_AUTHENTICATOR_KEYS, + str(awx_ldap_name), + ) + ) + + # write to file if requested + if options["output_file"]: + # Define the path for the output JSON file + output_file = options["output_file"] + + # Ensure the directory exists + os.makedirs(os.path.dirname(output_file), exist_ok=True) + + # Write data to the JSON file + with open(output_file, "w") as f: + json.dump(data, f, indent=4) + + self.stdout.write(self.style.SUCCESS(f"Auth config data dumped to {output_file}")) + else: + self.stdout.write(json.dumps(data, indent=4)) + + except Exception as e: + self.stdout.write(self.style.ERROR(f"An error occurred: {str(e)}")) + sys.exit(1) diff --git a/awx/main/management/commands/list_instances.py b/awx/main/management/commands/list_instances.py index a434e2299e..b2bbcfea29 100644 --- a/awx/main/management/commands/list_instances.py +++ b/awx/main/management/commands/list_instances.py @@ -55,7 +55,7 @@ class Command(BaseCommand): capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else '' version = f" version={x.version or '?'}" if x.node_type != 'hop' else '' - heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if 
x.capacity or x.node_type == 'hop' else '' + heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.last_seen else '' print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}{end_color}') print() diff --git a/awx/main/management/commands/provision_instance.py b/awx/main/management/commands/provision_instance.py index e7f8063d61..5a60328d96 100644 --- a/awx/main/management/commands/provision_instance.py +++ b/awx/main/management/commands/provision_instance.py @@ -25,20 +25,17 @@ class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning") - parser.add_argument('--listener_port', dest='listener_port', type=int, help="Receptor listener port") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type") parser.add_argument('--uuid', type=str, help="Instance UUID") - def _register_hostname(self, hostname, node_type, uuid, listener_port): + def _register_hostname(self, hostname, node_type, uuid): if not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values from settings only intended for use in K8s installs') from awx.main.management.commands.register_queue import RegisterQueue - (changed, instance) = Instance.objects.register( - ip_address=os.environ.get('MY_POD_IP'), listener_port=listener_port, node_type='control', node_uuid=settings.SYSTEM_UUID - ) + (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', node_uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, @@ -51,16 +48,17 @@ class Command(BaseCommand): max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS, ).register() else: - (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid, listener_port=listener_port) + (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid) if changed: print("Successfully registered instance {}".format(hostname)) else: print("Instance already registered {}".format(instance.hostname)) + self.changed = changed @transaction.atomic def handle(self, **options): self.changed = False - self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'), options.get('listener_port')) + self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid')) if self.changed: print("(changed: True)") diff --git a/awx/main/management/commands/register_peers.py b/awx/main/management/commands/register_peers.py index 078edb08c7..7142f4b663 100644 --- a/awx/main/management/commands/register_peers.py +++ b/awx/main/management/commands/register_peers.py @@ -1,9 +1,7 @@ -import warnings - from django.core.management.base import BaseCommand, CommandError from django.db import transaction -from awx.main.models import Instance, InstanceLink +from awx.main.models import Instance, InstanceLink, ReceptorAddress class Command(BaseCommand): @@ -28,7 +26,9 @@ class Command(BaseCommand): def handle(self, **options): # provides a mapping of hostname to Instance objects - nodes = Instance.objects.in_bulk(field_name='hostname') + nodes = Instance.objects.all().in_bulk(field_name='hostname') + # provides a mapping of address to 
ReceptorAddress objects + addresses = ReceptorAddress.objects.all().in_bulk(field_name='address') if options['source'] not in nodes: raise CommandError(f"Host {options['source']} is not a registered instance.") @@ -39,6 +39,14 @@ class Command(BaseCommand): if options['exact'] is not None and options['disconnect']: raise CommandError("The option --disconnect may not be used with --exact.") + # make sure each target has a receptor address + peers = options['peers'] or [] + disconnect = options['disconnect'] or [] + exact = options['exact'] or [] + for peer in peers + disconnect + exact: + if peer not in addresses: + raise CommandError(f"Peer {peer} does not have a receptor address.") + # No 1-cycles for collection in ('peers', 'disconnect', 'exact'): if options[collection] is not None and options['source'] in options[collection]: @@ -47,9 +55,12 @@ class Command(BaseCommand): # No 2-cycles if options['peers'] or options['exact'] is not None: peers = set(options['peers'] or options['exact']) - incoming = set(InstanceLink.objects.filter(target=nodes[options['source']]).values_list('source__hostname', flat=True)) + if options['source'] in addresses: + incoming = set(InstanceLink.objects.filter(target=addresses[options['source']]).values_list('source__hostname', flat=True)) + else: + incoming = set() if peers & incoming: - warnings.warn(f"Source node {options['source']} should not link to nodes already peering to it: {peers & incoming}.") + raise CommandError(f"Source node {options['source']} should not link to nodes already peering to it: {peers & incoming}.") if options['peers']: missing_peers = set(options['peers']) - set(nodes) @@ -60,7 +71,7 @@ class Command(BaseCommand): results = 0 for target in options['peers']: _, created = InstanceLink.objects.update_or_create( - source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED} + source=nodes[options['source']], target=addresses[target], defaults={'link_state': InstanceLink.States.ESTABLISHED} ) if created: results += 1 @@ -70,9 +81,9 @@ class Command(BaseCommand): if options['disconnect']: results = 0 for target in options['disconnect']: - if target not in nodes: # Be permissive, the node might have already been de-registered. + if target not in addresses: # Be permissive, the node might have already been de-registered. 
continue
-            n, _ = InstanceLink.objects.filter(source=nodes[options['source']], target=nodes[target]).delete()
+            n, _ = InstanceLink.objects.filter(source=nodes[options['source']], target=addresses[target]).delete()
             results += n
         print(f"{results} peer links removed from the database.")

@@ -81,11 +92,11 @@ class Command(BaseCommand):
             additions = 0
             with transaction.atomic():
                 peers = set(options['exact'])
-                links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__hostname', flat=True))
-                removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__hostname__in=links - peers).delete()
+                links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__address', flat=True))
+                removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__instance__hostname__in=links - peers).delete()
                 for target in peers - links:
                     _, created = InstanceLink.objects.update_or_create(
-                        source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
+                        source=nodes[options['source']], target=addresses[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
                     )
                     if created:
                         additions += 1
diff --git a/awx/main/management/commands/remove_receptor_address.py b/awx/main/management/commands/remove_receptor_address.py
new file mode 100644
index 0000000000..de7426a53f
--- /dev/null
+++ b/awx/main/management/commands/remove_receptor_address.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2015 Ansible, Inc.
+# All Rights Reserved
+
+from django.core.management.base import BaseCommand
+
+from awx.main.models import ReceptorAddress
+
+
+class Command(BaseCommand):
+    """
+    Internal controller command.
+    Delete a receptor address.
+    """
+
+    help = "Remove a receptor address from an instance."
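+    # Usage sketch (the address below is illustrative, not from this change):
+    #   awx-manage remove_receptor_address --address exec1.example.org
+    # Prints "(changed: True)" when a matching ReceptorAddress row was
+    # deleted, otherwise reports that nothing was removed.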
+ + def add_arguments(self, parser): + parser.add_argument('--address', dest='address', type=str, help="Receptor address to remove") + + def handle(self, **options): + deleted = ReceptorAddress.objects.filter(address=options['address']).delete() + if deleted[0]: + print(f"Successfully removed {options['address']}") + print("(changed: True)") + else: + print(f"Did not remove {options['address']}, not found") diff --git a/awx/main/management/commands/run_callback_receiver.py b/awx/main/management/commands/run_callback_receiver.py index cb3ab781b5..6ab2158cae 100644 --- a/awx/main/management/commands/run_callback_receiver.py +++ b/awx/main/management/commands/run_callback_receiver.py @@ -3,6 +3,7 @@ from django.conf import settings from django.core.management.base import BaseCommand +from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer from awx.main.dispatch.control import Control from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker @@ -25,6 +26,9 @@ class Command(BaseCommand): print(Control('callback_receiver').status()) return consumer = None + + CallbackReceiverMetricsServer().start() + try: consumer = AWXConsumerRedis( 'callback_receiver', diff --git a/awx/main/management/commands/run_dispatcher.py b/awx/main/management/commands/run_dispatcher.py index 111b5ab0e1..28d954abff 100644 --- a/awx/main/management/commands/run_dispatcher.py +++ b/awx/main/management/commands/run_dispatcher.py @@ -10,6 +10,7 @@ from awx.main.dispatch import get_task_queuename from awx.main.dispatch.control import Control from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker +from awx.main.analytics.subsystem_metrics import DispatcherMetricsServer logger = logging.getLogger('awx.main.dispatch') @@ -62,6 +63,8 @@ class Command(BaseCommand): consumer = None + DispatcherMetricsServer().start() + try: queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()] consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE) diff --git a/awx/main/management/commands/run_wsrelay.py b/awx/main/management/commands/run_wsrelay.py index 90edafdfc5..a3165cd669 100644 --- a/awx/main/management/commands/run_wsrelay.py +++ b/awx/main/management/commands/run_wsrelay.py @@ -16,6 +16,7 @@ from awx.main.analytics.broadcast_websocket import ( RelayWebsocketStatsManager, safe_name, ) +from awx.main.analytics.subsystem_metrics import WebsocketsMetricsServer from awx.main.wsrelay import WebSocketRelayManager @@ -163,8 +164,15 @@ class Command(BaseCommand): return - try: - websocket_relay_manager = WebSocketRelayManager() - asyncio.run(websocket_relay_manager.run()) - except KeyboardInterrupt: - logger.info('Terminating Websocket Relayer') + WebsocketsMetricsServer().start() + websocket_relay_manager = WebSocketRelayManager() + + while True: + try: + asyncio.run(websocket_relay_manager.run()) + except KeyboardInterrupt: + logger.info('Shutting down Websocket Relayer') + break + except Exception as e: + logger.exception('Error in Websocket Relayer, exception: {}. 
Restarting in 10 seconds'.format(e))
+                time.sleep(10)
diff --git a/awx/main/managers.py b/awx/main/managers.py
index 747f9d4467..c501d7b0d3 100644
--- a/awx/main/managers.py
+++ b/awx/main/managers.py
@@ -115,7 +115,14 @@ class InstanceManager(models.Manager):
                 return node[0]
         raise RuntimeError("No instance found with the current cluster host id")

-    def register(self, node_uuid=None, hostname=None, ip_address="", listener_port=None, node_type='hybrid', defaults=None):
+    def register(
+        self,
+        node_uuid=None,
+        hostname=None,
+        ip_address="",
+        node_type='hybrid',
+        defaults=None,
+    ):
         if not hostname:
             hostname = settings.CLUSTER_HOST_ID

@@ -161,9 +168,6 @@ class InstanceManager(models.Manager):
         if instance.node_type != node_type:
             instance.node_type = node_type
             update_fields.append('node_type')
-        if instance.listener_port != listener_port:
-            instance.listener_port = listener_port
-            update_fields.append('listener_port')
         if update_fields:
             instance.save(update_fields=update_fields)
         return (True, instance)
@@ -174,11 +178,13 @@
         create_defaults = {
             'node_state': Instance.States.INSTALLED,
             'capacity': 0,
+            'managed': True,
         }
         if defaults is not None:
             create_defaults.update(defaults)
         uuid_option = {'uuid': node_uuid if node_uuid is not None else uuid.uuid4()}
         if node_type == 'execution' and 'version' not in create_defaults:
             create_defaults['version'] = RECEPTOR_PENDING
-        instance = self.create(hostname=hostname, ip_address=ip_address, listener_port=listener_port, node_type=node_type, **create_defaults, **uuid_option)
+        instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
+
         return (True, instance)
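Since `register()` no longer accepts `listener_port`, a listening address is now modeled as a separate `ReceptorAddress` row. A minimal sketch of the new calling pattern; the hostname, port, and flag values are illustrative, not taken from this change:

```python
from awx.main.models import Instance, ReceptorAddress

# register() stays idempotent and returns a (changed, instance) tuple
changed, instance = Instance.objects.register(hostname='exec1.example.org', node_type='execution')

# what used to be Instance.listener_port now lives on ReceptorAddress
ReceptorAddress.objects.get_or_create(
    instance=instance,
    address=instance.hostname,
    defaults={'port': 27199, 'protocol': 'tcp', 'canonical': True},
)
```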
diff --git a/awx/main/middleware.py b/awx/main/middleware.py
index 7299e2589e..d485ce45f7 100644
--- a/awx/main/middleware.py
+++ b/awx/main/middleware.py
@@ -1,25 +1,25 @@
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.

+import functools
 import logging
 import threading
 import time
 import urllib.parse
+from pathlib import Path

 from django.conf import settings
 from django.contrib.auth import logout
-from django.contrib.auth.models import User
-from django.db.migrations.executor import MigrationExecutor
+from django.db.migrations.recorder import MigrationRecorder
 from django.db import connection
 from django.shortcuts import redirect
-from django.apps import apps
 from django.utils.deprecation import MiddlewareMixin
-from django.utils.translation import gettext_lazy as _
 from django.urls import reverse, resolve

-from awx.main.utils.named_url_graph import generate_graph, GraphNode
-from awx.conf import fields, register
+from awx.main import migrations
 from awx.main.utils.profiling import AWXProfiler
+from awx.main.utils.common import memoize
+from awx.urls import get_urlpatterns

 logger = logging.getLogger('awx.main.middleware')

@@ -97,49 +97,7 @@ class DisableLocalAuthMiddleware(MiddlewareMixin):
                 logout(request)


-def _customize_graph():
-    from awx.main.models import Instance, Schedule, UnifiedJobTemplate
-
-    for model in [Schedule, UnifiedJobTemplate]:
-        if model in settings.NAMED_URL_GRAPH:
-            settings.NAMED_URL_GRAPH[model].remove_bindings()
-            settings.NAMED_URL_GRAPH.pop(model)
-    if User not in settings.NAMED_URL_GRAPH:
-        settings.NAMED_URL_GRAPH[User] = GraphNode(User, ['username'], [])
-        settings.NAMED_URL_GRAPH[User].add_bindings()
-    if Instance not in settings.NAMED_URL_GRAPH:
-        settings.NAMED_URL_GRAPH[Instance] = GraphNode(Instance, ['hostname'], [])
-        settings.NAMED_URL_GRAPH[Instance].add_bindings()
-
-
 class URLModificationMiddleware(MiddlewareMixin):
-    def __init__(self, get_response):
-        models = [m for m in apps.get_app_config('main').get_models() if hasattr(m, 'get_absolute_url')]
-        generate_graph(models)
-        _customize_graph()
-        register(
-            'NAMED_URL_FORMATS',
-            field_class=fields.DictField,
-            read_only=True,
-            label=_('Formats of all available named urls'),
-            help_text=_('Read-only list of key-value pairs that shows the standard format of all available named URLs.'),
-            category=_('Named URL'),
-            category_slug='named-url',
-        )
-        register(
-            'NAMED_URL_GRAPH_NODES',
-            field_class=fields.DictField,
-            read_only=True,
-            label=_('List of all named url graph nodes.'),
-            help_text=_(
-                'Read-only list of key-value pairs that exposes named URL graph topology.'
-                ' Use this list to programmatically generate named URLs for resources'
-            ),
-            category=_('Named URL'),
-            category_slug='named-url',
-        )
-        super().__init__(get_response)
-
     @staticmethod
     def _hijack_for_old_jt_name(node, kwargs, named_url):
         try:
@@ -198,9 +156,46 @@ class URLModificationMiddleware(MiddlewareMixin):
         request.path_info = new_path


+@memoize(ttl=20)
+def is_migrating():
+    latest_number = 0
+    latest_name = ''
+    for migration_path in Path(migrations.__path__[0]).glob('[0-9]*.py'):
+        try:
+            migration_number = int(migration_path.name.split('_', 1)[0])
+        except ValueError:
+            continue
+        if migration_number > latest_number:
+            latest_number = migration_number
+            latest_name = migration_path.name[: -len('.py')]
+    return not MigrationRecorder(connection).migration_qs.filter(app='main', name=latest_name).exists()
+
+
 class MigrationRanCheckMiddleware(MiddlewareMixin):
     def process_request(self, request):
-        executor = MigrationExecutor(connection)
-        plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
-        if bool(plan) and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
+        if is_migrating() and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
             return redirect(reverse("ui:migrations_notran"))
+
+
+class OptionalURLPrefixPath(MiddlewareMixin):
+    @functools.lru_cache
+    def _url_optional(self, prefix):
+        # Relevant Django code path https://github.com/django/django/blob/stable/4.2.x/django/core/handlers/base.py#L300
+        #
+        # resolve_request(request)
+        #   get_resolver(request.urlconf)
+        #     _get_cached_resolver(request.urlconf) <-- cached via @functools.cache
+        #
+        # Django will attempt to cache the value(s) of request.urlconf
+        # Being hashable is a prerequisite for being cacheable.
+        # tuple() is hashable; list() is not.
+        # Hence the tuple(list()) wrap.
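+        # e.g. a plain list here would raise "TypeError: unhashable type: 'list'"
+        # the first time Django tried to memoize the resolver for this urlconf.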
+ return tuple(get_urlpatterns(prefix=prefix)) + + def process_request(self, request): + prefix = settings.OPTIONAL_API_URLPATTERN_PREFIX + + if request.path.startswith(f"/api/{prefix}"): + request.urlconf = self._url_optional(prefix) + else: + request.urlconf = 'awx.urls' diff --git a/awx/main/migrations/0188_add_bitbucket_dc_webhook.py b/awx/main/migrations/0188_add_bitbucket_dc_webhook.py new file mode 100644 index 0000000000..ae067b2cbe --- /dev/null +++ b/awx/main/migrations/0188_add_bitbucket_dc_webhook.py @@ -0,0 +1,52 @@ +# Generated by Django 4.2.6 on 2023-11-16 21:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ('main', '0187_hop_nodes'), + ] + + operations = [ + migrations.AlterField( + model_name='job', + name='webhook_service', + field=models.CharField( + blank=True, + choices=[('github', 'GitHub'), ('gitlab', 'GitLab'), ('bitbucket_dc', 'BitBucket DataCenter')], + help_text='Service that webhook requests will be accepted from', + max_length=16, + ), + ), + migrations.AlterField( + model_name='jobtemplate', + name='webhook_service', + field=models.CharField( + blank=True, + choices=[('github', 'GitHub'), ('gitlab', 'GitLab'), ('bitbucket_dc', 'BitBucket DataCenter')], + help_text='Service that webhook requests will be accepted from', + max_length=16, + ), + ), + migrations.AlterField( + model_name='workflowjob', + name='webhook_service', + field=models.CharField( + blank=True, + choices=[('github', 'GitHub'), ('gitlab', 'GitLab'), ('bitbucket_dc', 'BitBucket DataCenter')], + help_text='Service that webhook requests will be accepted from', + max_length=16, + ), + ), + migrations.AlterField( + model_name='workflowjobtemplate', + name='webhook_service', + field=models.CharField( + blank=True, + choices=[('github', 'GitHub'), ('gitlab', 'GitLab'), ('bitbucket_dc', 'BitBucket DataCenter')], + help_text='Service that webhook requests will be accepted from', + max_length=16, + ), + ), + ] diff --git a/awx/main/migrations/0189_inbound_hop_nodes.py b/awx/main/migrations/0189_inbound_hop_nodes.py new file mode 100644 index 0000000000..aaaaff9aec --- /dev/null +++ b/awx/main/migrations/0189_inbound_hop_nodes.py @@ -0,0 +1,150 @@ +# Generated by Django 4.2.6 on 2024-01-19 19:24 + +import django.core.validators +from django.db import migrations, models +import django.db.models.deletion + + +def create_receptor_addresses(apps, schema_editor): + """ + If listener_port was defined on an instance, create a receptor address for it + """ + Instance = apps.get_model('main', 'Instance') + ReceptorAddress = apps.get_model('main', 'ReceptorAddress') + for instance in Instance.objects.exclude(listener_port=None): + ReceptorAddress.objects.create( + instance=instance, + address=instance.hostname, + port=instance.listener_port, + peers_from_control_nodes=instance.peers_from_control_nodes, + protocol='tcp', + is_internal=False, + canonical=True, + ) + + +def link_to_receptor_addresses(apps, schema_editor): + """ + Modify each InstanceLink to point to the newly created + ReceptorAddresses, using the new target field + """ + InstanceLink = apps.get_model('main', 'InstanceLink') + for link in InstanceLink.objects.all(): + link.target = link.target_old.receptor_addresses.get() + link.save() + + +class Migration(migrations.Migration): + dependencies = [ + ('main', '0188_add_bitbucket_dc_webhook'), + ] + + operations = [ + migrations.CreateModel( + name='ReceptorAddress', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, 
serialize=False, verbose_name='ID')), + ('address', models.CharField(help_text='Routable address for this instance.', max_length=255)), + ( + 'port', + models.IntegerField( + default=27199, + help_text='Port for the address.', + validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(65535)], + ), + ), + ('websocket_path', models.CharField(blank=True, default='', help_text='Websocket path.', max_length=255)), + ( + 'protocol', + models.CharField( + choices=[('tcp', 'TCP'), ('ws', 'WS'), ('wss', 'WSS')], + default='tcp', + help_text="Protocol to use for the Receptor listener, 'tcp', 'wss', or 'ws'.", + max_length=10, + ), + ), + ('is_internal', models.BooleanField(default=False, help_text='If True, only routable within the Kubernetes cluster.')), + ('canonical', models.BooleanField(default=False, help_text='If True, this address is the canonical address for the instance.')), + ( + 'peers_from_control_nodes', + models.BooleanField(default=False, help_text='If True, control plane cluster nodes should automatically peer to it.'), + ), + ], + ), + migrations.RemoveConstraint( + model_name='instancelink', + name='source_and_target_can_not_be_equal', + ), + migrations.RenameField( + model_name='instancelink', + old_name='target', + new_name='target_old', + ), + migrations.AlterUniqueTogether( + name='instancelink', + unique_together=set(), + ), + migrations.AddField( + model_name='instance', + name='managed', + field=models.BooleanField(default=False, editable=False, help_text='If True, this instance is managed by the control plane.'), + ), + migrations.AlterField( + model_name='instancelink', + name='source', + field=models.ForeignKey(help_text='The source instance of this peer link.', on_delete=django.db.models.deletion.CASCADE, to='main.instance'), + ), + migrations.AddField( + model_name='receptoraddress', + name='instance', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='receptor_addresses', to='main.instance'), + ), + migrations.AddField( + model_name='activitystream', + name='receptor_address', + field=models.ManyToManyField(blank=True, to='main.receptoraddress'), + ), + migrations.AddConstraint( + model_name='receptoraddress', + constraint=models.UniqueConstraint(fields=('address',), name='unique_receptor_address', violation_error_message='Receptor address must be unique.'), + ), + migrations.AddField( + model_name='instancelink', + name='target', + field=models.ForeignKey( + help_text='The target receptor address of this peer link.', null=True, on_delete=django.db.models.deletion.CASCADE, to='main.receptoraddress' + ), + ), + migrations.RunPython(create_receptor_addresses), + migrations.RunPython(link_to_receptor_addresses), + migrations.RemoveField( + model_name='instance', + name='peers_from_control_nodes', + ), + migrations.RemoveField( + model_name='instance', + name='listener_port', + ), + migrations.RemoveField( + model_name='instancelink', + name='target_old', + ), + migrations.AlterField( + model_name='instance', + name='peers', + field=models.ManyToManyField(related_name='peers_from', through='main.InstanceLink', to='main.receptoraddress'), + ), + migrations.AlterField( + model_name='instancelink', + name='target', + field=models.ForeignKey( + help_text='The target receptor address of this peer link.', on_delete=django.db.models.deletion.CASCADE, to='main.receptoraddress' + ), + ), + migrations.AddConstraint( + model_name='instancelink', + constraint=models.UniqueConstraint( + fields=('source', 
'target'), name='unique_source_target', violation_error_message='Field source and target must be unique together.' + ), + ), + ] diff --git a/awx/main/migrations/0190_alter_inventorysource_source_and_more.py b/awx/main/migrations/0190_alter_inventorysource_source_and_more.py new file mode 100644 index 0000000000..0c1eb703ed --- /dev/null +++ b/awx/main/migrations/0190_alter_inventorysource_source_and_more.py @@ -0,0 +1,59 @@ +# Generated by Django 4.2.6 on 2024-02-15 20:51 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0189_inbound_hop_nodes'), + ] + + operations = [ + migrations.AlterField( + model_name='inventorysource', + name='source', + field=models.CharField( + choices=[ + ('file', 'File, Directory or Script'), + ('constructed', 'Template additional groups and hostvars at runtime'), + ('scm', 'Sourced from a Project'), + ('ec2', 'Amazon EC2'), + ('gce', 'Google Compute Engine'), + ('azure_rm', 'Microsoft Azure Resource Manager'), + ('vmware', 'VMware vCenter'), + ('satellite6', 'Red Hat Satellite 6'), + ('openstack', 'OpenStack'), + ('rhv', 'Red Hat Virtualization'), + ('controller', 'Red Hat Ansible Automation Platform'), + ('insights', 'Red Hat Insights'), + ('terraform', 'Terraform State'), + ], + default=None, + max_length=32, + ), + ), + migrations.AlterField( + model_name='inventoryupdate', + name='source', + field=models.CharField( + choices=[ + ('file', 'File, Directory or Script'), + ('constructed', 'Template additional groups and hostvars at runtime'), + ('scm', 'Sourced from a Project'), + ('ec2', 'Amazon EC2'), + ('gce', 'Google Compute Engine'), + ('azure_rm', 'Microsoft Azure Resource Manager'), + ('vmware', 'VMware vCenter'), + ('satellite6', 'Red Hat Satellite 6'), + ('openstack', 'OpenStack'), + ('rhv', 'Red Hat Virtualization'), + ('controller', 'Red Hat Ansible Automation Platform'), + ('insights', 'Red Hat Insights'), + ('terraform', 'Terraform State'), + ], + default=None, + max_length=32, + ), + ), + ] diff --git a/awx/main/migrations/0191_add_django_permissions.py b/awx/main/migrations/0191_add_django_permissions.py new file mode 100644 index 0000000000..a3074e45ab --- /dev/null +++ b/awx/main/migrations/0191_add_django_permissions.py @@ -0,0 +1,85 @@ +# Generated by Django 4.2.6 on 2023-11-13 20:10 + +from django.db import migrations + + +class Migration(migrations.Migration): + dependencies = [ + ('main', '0190_alter_inventorysource_source_and_more'), + ('dab_rbac', '__first__'), + ] + + operations = [ + # Add custom permissions for all special actions, like update, use, adhoc, and so on + migrations.AlterModelOptions( + name='credential', + options={'ordering': ('name',), 'permissions': [('use_credential', 'Can use credential in a job or related resource')]}, + ), + migrations.AlterModelOptions( + name='instancegroup', + options={'permissions': [('use_instancegroup', 'Can use instance group in a preference list of a resource')]}, + ), + migrations.AlterModelOptions( + name='inventory', + options={ + 'ordering': ('name',), + 'permissions': [ + ('use_inventory', 'Can use inventory in a job template'), + ('adhoc_inventory', 'Can run ad hoc commands'), + ('update_inventory', 'Can update inventory sources in inventory'), + ], + 'verbose_name_plural': 'inventories', + }, + ), + migrations.AlterModelOptions( + name='jobtemplate', + options={'ordering': ('name',), 'permissions': [('execute_jobtemplate', 'Can run this job template')]}, + ), + migrations.AlterModelOptions( + name='project', + 
options={ + 'ordering': ('id',), + 'permissions': [('update_project', 'Can run a project update'), ('use_project', 'Can use project in a job template')], + }, + ), + migrations.AlterModelOptions( + name='workflowjobtemplate', + options={ + 'permissions': [ + ('execute_workflowjobtemplate', 'Can run this workflow job template'), + ('approve_workflowjobtemplate', 'Can approve steps in this workflow job template'), + ] + }, + ), + migrations.AlterModelOptions( + name='organization', + options={ + 'default_permissions': ('change', 'delete', 'view'), + 'ordering': ('name',), + 'permissions': [ + ('member_organization', 'Basic participation permissions for organization'), + ('audit_organization', 'Audit everything inside the organization'), + ], + }, + ), + migrations.AlterModelOptions( + name='team', + options={'ordering': ('organization__name', 'name'), 'permissions': [('member_team', 'Inherit all roles assigned to this team')]}, + ), + # Remove add default permission for a few models + migrations.AlterModelOptions( + name='jobtemplate', + options={ + 'default_permissions': ('change', 'delete', 'view'), + 'ordering': ('name',), + 'permissions': [('execute_jobtemplate', 'Can run this job template')], + }, + ), + migrations.AlterModelOptions( + name='instancegroup', + options={ + 'default_permissions': ('change', 'delete', 'view'), + 'permissions': [('use_instancegroup', 'Can use instance group in a preference list of a resource')], + }, + ), + ] diff --git a/awx/main/migrations/0192_custom_roles.py b/awx/main/migrations/0192_custom_roles.py new file mode 100644 index 0000000000..c91823aa34 --- /dev/null +++ b/awx/main/migrations/0192_custom_roles.py @@ -0,0 +1,20 @@ +# Generated by Django 4.2.6 on 2023-11-21 02:06 + +from django.db import migrations + +from awx.main.migrations._dab_rbac import migrate_to_new_rbac, create_permissions_as_operation, setup_managed_role_definitions + + +class Migration(migrations.Migration): + dependencies = [ + ('main', '0191_add_django_permissions'), + ('dab_rbac', '__first__'), + ] + + operations = [ + # make sure permissions and content types have been created by now + # these normally run in a post_migrate signal but we need them for our logic + migrations.RunPython(create_permissions_as_operation, migrations.RunPython.noop), + migrations.RunPython(setup_managed_role_definitions, migrations.RunPython.noop), + migrations.RunPython(migrate_to_new_rbac, migrations.RunPython.noop), + ] diff --git a/awx/main/migrations/_dab_rbac.py b/awx/main/migrations/_dab_rbac.py new file mode 100644 index 0000000000..6e3c04882f --- /dev/null +++ b/awx/main/migrations/_dab_rbac.py @@ -0,0 +1,343 @@ +import json +import logging + +from django.apps import apps as global_apps +from django.db.models import ForeignKey +from django.conf import settings +from ansible_base.rbac.migrations._utils import give_permissions +from ansible_base.rbac.management import create_dab_permissions + +from awx.main.fields import ImplicitRoleField +from awx.main.constants import role_name_to_perm_mapping + +from ansible_base.rbac.permission_registry import permission_registry + + +logger = logging.getLogger('awx.main.migrations._dab_rbac') + + +def create_permissions_as_operation(apps, schema_editor): + create_dab_permissions(global_apps.get_app_config("main"), apps=apps) + + +""" +Data structures and methods for the migration of old Role model to ObjectRole +""" + +system_admin = ImplicitRoleField(name='system_administrator') +system_auditor = ImplicitRoleField(name='system_auditor') +system_admin.model 
= None
+system_auditor.model = None
+
+
+def resolve_parent_role(f, role_path):
+    """
+    Given a field and a path declared in parent_role from the field definition, like
+        execute_role = ImplicitRoleField(parent_role='admin_role')
+    This expects to be passed in (execute_role object, "admin_role")
+    It should return the admin_role from that object
+    """
+    if role_path == 'singleton:system_administrator':
+        return system_admin
+    elif role_path == 'singleton:system_auditor':
+        return system_auditor
+    else:
+        related_field = f
+        current_model = f.model
+        for related_field_name in role_path.split('.'):
+            related_field = current_model._meta.get_field(related_field_name)
+            if isinstance(related_field, ForeignKey) and not isinstance(related_field, ImplicitRoleField):
+                current_model = related_field.related_model
+        return related_field
+
+
+def build_role_map(apps):
+    """
+    For the old Role model, this builds and returns dictionaries (children, parents)
+    which give a global mapping of the ImplicitRoleField instances according to the graph
+    """
+    models = set(apps.get_app_config('main').get_models())
+
+    all_fields = set()
+    parents = {}
+    children = {}
+
+    all_fields.add(system_admin)
+    all_fields.add(system_auditor)
+
+    for cls in models:
+        for f in cls._meta.get_fields():
+            if isinstance(f, ImplicitRoleField):
+                all_fields.add(f)
+
+    for f in all_fields:
+        if f.parent_role is not None:
+            if isinstance(f.parent_role, str):
+                parent_roles = [f.parent_role]
+            else:
+                parent_roles = f.parent_role
+
+            # SPECIAL CASE: organization auditor_role is not a child of admin_role
+            # this makes no practical sense and conflicts with expected managed role
+            # so we put it in as a hack here
+            if f.name == 'auditor_role' and f.model._meta.model_name == 'organization':
+                parent_roles.append('admin_role')
+
+            parent_list = []
+            for rel_name in parent_roles:
+                parent_list.append(resolve_parent_role(f, rel_name))
+
+            parents[f] = parent_list
+
+    # build children lookup from parents lookup
+    for child_field, parent_list in parents.items():
+        for parent_field in parent_list:
+            children.setdefault(parent_field, [])
+            children[parent_field].append(child_field)
+
+    return (parents, children)
+
+
+def get_descendents(f, children_map):
+    """
+    Given ImplicitRoleField F and the children mapping, returns all descendents
+    of that field, as a set of other fields, including itself
+    """
+    ret = {f}
+    if f in children_map:
+        for child_field in children_map[f]:
+            ret.update(get_descendents(child_field, children_map))
+    return ret
+
+
+def get_permissions_for_role(role_field, children_map, apps):
+    Permission = apps.get_model('dab_rbac', 'DABPermission')
+    ContentType = apps.get_model('contenttypes', 'ContentType')
+
+    perm_list = []
+    for child_field in get_descendents(role_field, children_map):
+        if child_field.name in role_name_to_perm_mapping:
+            for perm_name in role_name_to_perm_mapping[child_field.name]:
+                if perm_name == 'add_' and role_field.model._meta.model_name != 'organization':
+                    continue  # only organizations can contain add permissions
+                perm = Permission.objects.filter(content_type=ContentType.objects.get_for_model(child_field.model), codename__startswith=perm_name).first()
+                if perm is not None and perm not in perm_list:
+                    perm_list.append(perm)
+
+    # special case for two models that have object roles but no organization roles in old system
+    if role_field.name == 'notification_admin_role' or (role_field.name == 'admin_role' and role_field.model._meta.model_name == 'organization'):
+        ct = ContentType.objects.get_for_model(apps.get_model('main', 'NotificationTemplate'))
+        perm_list.extend(list(Permission.objects.filter(content_type=ct)))
+    if role_field.name == 'execution_environment_admin_role' or (role_field.name == 'admin_role' and role_field.model._meta.model_name == 'organization'):
+        ct = ContentType.objects.get_for_model(apps.get_model('main', 'ExecutionEnvironment'))
+        perm_list.extend(list(Permission.objects.filter(content_type=ct)))
+
+    # more special cases for those same above special org-level roles
+    if role_field.name == 'auditor_role':
+        for codename in ('view_notificationtemplate', 'view_executionenvironment'):
+            perm_list.append(Permission.objects.get(codename=codename))
+
+    return perm_list
+
+
+def migrate_to_new_rbac(apps, schema_editor):
+    """
+    This method moves the assigned permissions from the old rbac.py models
+    to the new RoleDefinition and ObjectRole models
+    """
+    Role = apps.get_model('main', 'Role')
+    RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
+    RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
+    Permission = apps.get_model('dab_rbac', 'DABPermission')
+
+    # remove add permissions that are not valid for migrations from old versions
+    for perm_str in ('add_organization', 'add_jobtemplate'):
+        perm = Permission.objects.filter(codename=perm_str).first()
+        if perm:
+            perm.delete()
+
+    managed_definitions = dict()
+    for role_definition in RoleDefinition.objects.filter(managed=True):
+        permissions = frozenset(role_definition.permissions.values_list('id', flat=True))
+        managed_definitions[permissions] = role_definition
+
+    # Build map of old role model
+    parents, children = build_role_map(apps)
+
+    # NOTE: this import is expected to break at some point, and then just move the data here
+    from awx.main.models.rbac import role_descriptions
+
+    for role in Role.objects.prefetch_related('members', 'parents').iterator():
+        if role.singleton_name:
+            continue  # only bothering to migrate object roles
+
+        team_roles = []
+        for parent in role.parents.all():
+            if parent.id not in json.loads(role.implicit_parents):
+                team_roles.append(parent)
+
+        # we will not create any roles that do not have any users or teams
+        if not (role.members.all() or team_roles):
+            logger.debug(f'Skipping role {role.role_field} for {role.content_type.model}-{role.object_id} due to no members')
+            continue
+
+        # get a list of permissions that the old role would grant
+        object_cls = apps.get_model(f'main.{role.content_type.model}')
+        object = object_cls.objects.get(pk=role.object_id)  # WORKAROUND, role.content_object does not work in migrations
+        f = object._meta.get_field(role.role_field)  # should be ImplicitRoleField
+        perm_list = get_permissions_for_role(f, children, apps)
+
+        permissions = frozenset(perm.id for perm in perm_list)
+
+        # With the needed permissions established, obtain the RoleDefinition this will need, priorities:
+        # 1. If it exists as a managed RoleDefinition then obviously use that
+        # 2. If we already created this for a prior role, use that
+        # 3. Create a new RoleDefinition that lists those permissions
+        if permissions in managed_definitions:
+            role_definition = managed_definitions[permissions]
+        else:
+            action = role.role_field.rsplit('_', 1)[0]  # remove the _field ending of the name
+            role_definition_name = f'{role.content_type.model_class().__name__} {action.title()}'
+
+            description = role_descriptions[role.role_field]
+            if type(description) == dict:
+                if role.content_type.model in description:
+                    description = description.get(role.content_type.model)
+                else:
+                    description = description.get('default')
+            if '%s' in description:
+                description = description % role.content_type.model
+
+            role_definition, created = RoleDefinition.objects.get_or_create(
+                name=role_definition_name,
+                defaults={'description': description, 'content_type_id': role.content_type_id},
+            )
+
+            if created:
+                logger.info(f'Created custom Role Definition {role_definition_name}, pk={role_definition.pk}')
+                role_definition.permissions.set(perm_list)
+
+        # Create the object role and add users to it
+        give_permissions(
+            apps,
+            role_definition,
+            users=role.members.all(),
+            teams=[tr.object_id for tr in team_roles],
+            object_id=role.object_id,
+            content_type_id=role.content_type_id,
+        )
+
+    # Create new replacement system auditor role
+    new_system_auditor, created = RoleDefinition.objects.get_or_create(
+        name='System Auditor',
+        defaults={'description': 'Migrated singleton role giving read permission to everything', 'managed': True},
+    )
+    new_system_auditor.permissions.add(*list(Permission.objects.filter(codename__startswith='view')))
+
+    # migrate is_system_auditor flag, because it is no longer handled by a system role
+    old_system_auditor = Role.objects.filter(singleton_name='system_auditor').first()
+    if old_system_auditor:
+        # if the system auditor role is not present, this is a new install and no users should exist
+        ct = 0
+        for user in old_system_auditor.members.all():
+            RoleUserAssignment.objects.create(user=user, role_definition=new_system_auditor)
+            ct += 1
+        if ct:
+            logger.info(f'Migrated {ct} users to new system auditor flag')
+
+
+def get_or_create_managed(name, description, ct, permissions, RoleDefinition):
+    role_definition, created = RoleDefinition.objects.get_or_create(name=name, defaults={'managed': True, 'description': description, 'content_type': ct})
+    role_definition.permissions.set(list(permissions))
+
+    if not role_definition.managed:
+        role_definition.managed = True
+        role_definition.save(update_fields=['managed'])
+
+    if created:
+        logger.info(f'Created RoleDefinition {role_definition.name} pk={role_definition} with {len(permissions)} permissions')
+
+    return role_definition
+
+
+def setup_managed_role_definitions(apps, schema_editor):
+    """
+    Idempotent method to create or sync the managed role definitions
+    """
+    to_create = settings.ANSIBLE_BASE_ROLE_PRECREATE
+
+    ContentType = apps.get_model('contenttypes', 'ContentType')
+    Permission = apps.get_model('dab_rbac', 'DABPermission')
+    RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
+    Organization = apps.get_model(settings.ANSIBLE_BASE_ORGANIZATION_MODEL)
+    org_ct = ContentType.objects.get_for_model(Organization)
+    managed_role_definitions = []
+
+    org_perms = set()
+    for cls in permission_registry._registry:
+        ct = ContentType.objects.get_for_model(cls)
+        object_perms = set(Permission.objects.filter(content_type=ct))
+        # Special case for InstanceGroup which has an organization field, but is not an organization child object
+        if cls._meta.model_name != 'instancegroup':
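+            # every other registered model contributes its object permissions
+            # to the org-wide permission set used for the org_admin definition below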
+            org_perms.update(object_perms)
+
+        if 'object_admin' in to_create and cls != Organization:
+            indiv_perms = object_perms.copy()
+            add_perms = [perm for perm in indiv_perms if perm.codename.startswith('add_')]
+            if add_perms:
+                for perm in add_perms:
+                    indiv_perms.remove(perm)
+
+            managed_role_definitions.append(
+                get_or_create_managed(
+                    to_create['object_admin'].format(cls=cls), f'Has all permissions to a single {cls._meta.verbose_name}', ct, indiv_perms, RoleDefinition
+                )
+            )
+
+        if 'org_children' in to_create and cls != Organization:
+            org_child_perms = object_perms.copy()
+            org_child_perms.add(Permission.objects.get(codename='view_organization'))
+
+            managed_role_definitions.append(
+                get_or_create_managed(
+                    to_create['org_children'].format(cls=cls),
+                    f'Has all permissions to {cls._meta.verbose_name_plural} within an organization',
+                    org_ct,
+                    org_child_perms,
+                    RoleDefinition,
+                )
+            )
+
+        if 'special' in to_create:
+            special_perms = []
+            for perm in object_perms:
+                if perm.codename.split('_')[0] not in ('add', 'change', 'update', 'delete', 'view'):
+                    special_perms.append(perm)
+            for perm in special_perms:
+                action = perm.codename.split('_')[0]
+                view_perm = Permission.objects.get(content_type=ct, codename__startswith='view_')
+                managed_role_definitions.append(
+                    get_or_create_managed(
+                        to_create['special'].format(cls=cls, action=action.title()),
+                        f'Has {action} permissions to a single {cls._meta.verbose_name}',
+                        ct,
+                        [perm, view_perm],
+                        RoleDefinition,
+                    )
+                )
+
+    if 'org_admin' in to_create:
+        managed_role_definitions.append(
+            get_or_create_managed(
+                to_create['org_admin'].format(cls=Organization),
+                'Has all permissions to a single organization and all objects inside of it',
+                org_ct,
+                org_perms,
+                RoleDefinition,
+            )
+        )
+
+    unexpected_role_definitions = RoleDefinition.objects.filter(managed=True).exclude(pk__in=[rd.pk for rd in managed_role_definitions])
+    for role_definition in unexpected_role_definitions:
+        logger.info(f'Deleting old managed role definition {role_definition.name}, pk={role_definition.pk}')
+        role_definition.delete()
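The names passed to get_or_create_managed come from the ANSIBLE_BASE_ROLE_PRECREATE format-string templates. A rough sketch of how the .format() calls expand; the template strings here are assumptions for illustration, not necessarily the shipped defaults:

```python
# hypothetical templates with the shape setup_managed_role_definitions expects
to_create = {
    'object_admin': '{cls.__name__} Admin',
    'org_children': 'Organization {cls.__name__} Admin',
    'special': '{cls.__name__} {action}',
    'org_admin': 'Organization Admin',
}


class JobTemplate:  # stand-in for a registered model class
    pass


print(to_create['object_admin'].format(cls=JobTemplate))  # JobTemplate Admin
print(to_create['special'].format(cls=JobTemplate, action='Execute'))  # JobTemplate Execute
```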
diff --git a/awx/main/migrations/_inventory_source_vars.py b/awx/main/migrations/_inventory_source_vars.py
index 7660d63972..779655d73a 100644
--- a/awx/main/migrations/_inventory_source_vars.py
+++ b/awx/main/migrations/_inventory_source_vars.py
@@ -76,7 +76,7 @@ class azure_rm(PluginFileInjector):
         user_filters = []
         old_filterables = [
             ('resource_groups', 'resource_group'),
-            ('tags', 'tags')
+            ('tags', 'tags'),
             # locations / location would be an entry
             # but this would conflict with source_regions
         ]
diff --git a/awx/main/models/__init__.py b/awx/main/models/__init__.py
index 8803842fd0..dce3f371d0 100644
--- a/awx/main/models/__init__.py
+++ b/awx/main/models/__init__.py
@@ -1,17 +1,27 @@
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.

+import json
+
 # Django
 from django.conf import settings  # noqa
 from django.db import connection
 from django.db.models.signals import pre_delete  # noqa

+# django-ansible-base
+from ansible_base.resource_registry.fields import AnsibleResourceField
+from ansible_base.rbac import permission_registry
+from ansible_base.rbac.models import RoleDefinition, RoleUserAssignment
+from ansible_base.lib.utils.models import prevent_search
+from ansible_base.lib.utils.models import user_summary_fields
+
 # AWX
-from awx.main.models.base import BaseModel, PrimordialModel, prevent_search, accepts_json, CLOUD_INVENTORY_SOURCES, VERBOSITY_CHOICES  # noqa
+from awx.main.models.base import BaseModel, PrimordialModel, accepts_json, CLOUD_INVENTORY_SOURCES, VERBOSITY_CHOICES  # noqa
 from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate, StdoutMaxBytesExceeded  # noqa
 from awx.main.models.organization import Organization, Profile, Team, UserSessionMembership  # noqa
 from awx.main.models.credential import Credential, CredentialType, CredentialInputSource, ManagedCredentialType, build_safe_env  # noqa
 from awx.main.models.projects import Project, ProjectUpdate  # noqa
+from awx.main.models.receptor_address import ReceptorAddress  # noqa
 from awx.main.models.inventory import (  # noqa
     CustomInventoryScript,
     Group,
@@ -96,6 +106,8 @@ from awx.main.access import get_user_queryset, check_user_access, check_user_acc
 User.add_to_class('get_queryset', get_user_queryset)
 User.add_to_class('can_access', check_user_access)
 User.add_to_class('can_access_with_errors', check_user_access_with_errors)
+User.add_to_class('resource', AnsibleResourceField(primary_key_field="id"))
+User.add_to_class('summary_fields', user_summary_fields)


 def convert_jsonfields():
@@ -188,11 +200,21 @@ User.add_to_class('auditor_of_organizations', user_get_auditor_of_organizations)
 User.add_to_class('created', created)


+def get_system_auditor_role():
+    rd, created = RoleDefinition.objects.get_or_create(
+        name='System Auditor', defaults={'description': 'Migrated singleton role giving read permission to everything'}
+    )
+    if created:
+        rd.permissions.add(*list(permission_registry.permission_qs.filter(codename__startswith='view')))
+    return rd
+
+
 @property
 def user_is_system_auditor(user):
     if not hasattr(user, '_is_system_auditor'):
         if user.pk:
-            user._is_system_auditor = user.roles.filter(singleton_name='system_auditor', role_field='system_auditor').exists()
+            rd = get_system_auditor_role()
+            user._is_system_auditor = RoleUserAssignment.objects.filter(user=user, role_definition=rd).exists()
         else:
             # Odd case where user is unsaved, this should never be relied on
             return False
@@ -206,17 +228,17 @@ def user_is_system_auditor(user, tf):
         # time they've logged in, and we've just created the new User in this
         # request), we need one to set up the system auditor role
         user.save()
-    if tf:
-        role = Role.singleton('system_auditor')
-        # must check if member to not duplicate activity stream
-        if user not in role.members.all():
-            role.members.add(user)
-        user._is_system_auditor = True
-    else:
-        role = Role.singleton('system_auditor')
-        if user in role.members.all():
-            role.members.remove(user)
-        user._is_system_auditor = False
+    rd = get_system_auditor_role()
+    assignment = RoleUserAssignment.objects.filter(user=user, role_definition=rd).first()
+    prior_value = bool(assignment)
+    if prior_value != bool(tf):
+        if assignment:
+            assignment.delete()
+        else:
+            rd.give_global_permission(user)
+        user._is_system_auditor = bool(tf)
+        entry = ActivityStream.objects.create(changes=json.dumps({"is_system_auditor": [prior_value, bool(tf)]}), object1='user', operation='update')
+        entry.user.add(user)


 User.add_to_class('is_system_auditor', user_is_system_auditor)

@@ -284,6 +306,10 @@ activity_stream_registrar.connect(WorkflowApprovalTemplate)
 activity_stream_registrar.connect(OAuth2Application)
 activity_stream_registrar.connect(OAuth2AccessToken)

+# Register models
+permission_registry.register(Project, Team, WorkflowJobTemplate, JobTemplate, Inventory, Organization, Credential, NotificationTemplate, ExecutionEnvironment)
+permission_registry.register(InstanceGroup, parent_field_name=None)  # Not part of an organization
+
 # prevent API filtering on certain Django-supplied sensitive fields
 prevent_search(User._meta.get_field('password'))
 prevent_search(OAuth2AccessToken._meta.get_field('token'))
diff --git a/awx/main/models/activity_stream.py b/awx/main/models/activity_stream.py
index 7bce0c4fe0..2dccf3158f 100644
--- a/awx/main/models/activity_stream.py
+++ b/awx/main/models/activity_stream.py
@@ -77,6 +77,7 @@ class ActivityStream(models.Model):
     notification_template = models.ManyToManyField("NotificationTemplate", blank=True)
     notification = models.ManyToManyField("Notification", blank=True)
     label = models.ManyToManyField("Label", blank=True)
+    receptor_address = models.ManyToManyField("ReceptorAddress", blank=True)
     role = models.ManyToManyField("Role", blank=True)
     instance = models.ManyToManyField("Instance", blank=True)
     instance_group = models.ManyToManyField("InstanceGroup", blank=True)
diff --git a/awx/main/models/ad_hoc_commands.py b/awx/main/models/ad_hoc_commands.py
index 3b71119031..0583e07c04 100644
--- a/awx/main/models/ad_hoc_commands.py
+++ b/awx/main/models/ad_hoc_commands.py
@@ -12,9 +12,11 @@ from django.utils.text import Truncator
 from django.utils.translation import gettext_lazy as _
 from django.core.exceptions import ValidationError

+from ansible_base.lib.utils.models import prevent_search
+
 # AWX
 from awx.api.versioning import reverse
-from awx.main.models.base import prevent_search, AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
+from awx.main.models.base import AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
 from awx.main.models.events import AdHocCommandEvent, UnpartitionedAdHocCommandEvent
 from awx.main.models.unified_jobs import UnifiedJob
 from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate
diff --git a/awx/main/models/base.py b/awx/main/models/base.py
index 0ef5b244f2..1d80923ee2 100644
--- a/awx/main/models/base.py
+++ b/awx/main/models/base.py
@@ -7,6 +7,9 @@ from django.core.exceptions import ValidationError, ObjectDoesNotExist
 from django.utils.translation import gettext_lazy as _
 from django.utils.timezone import now

+# django-ansible-base
+from ansible_base.lib.utils.models import get_type_for_model
+
 # Django-CRUM
 from crum import get_current_user

@@ -15,7 +18,6 @@ from awx.main.utils import encrypt_field, parse_yaml_or_json
 from awx.main.constants import CLOUD_PROVIDERS

 __all__ = [
-    'prevent_search',
     'VarsDictProperty',
     'BaseModel',
     'CreatedModifiedModel',
@@ -140,6 +142,23 @@ class BaseModel(models.Model):
             self.save(update_fields=update_fields)
         return update_fields

+    def summary_fields(self):
+        """
+        This exists for use by django-ansible-base, which has standard patterns that differ from AWX.
+        We enable views from DAB, and for those views to list summary_fields for AWX models,
+        the models need to provide this method.
+        """
+        from
awx.api.serializers import SUMMARIZABLE_FK_FIELDS + + model_name = get_type_for_model(self) + related_fields = SUMMARIZABLE_FK_FIELDS.get(model_name, {}) + summary_data = {} + for field_name in related_fields: + fval = getattr(self, field_name, None) + if fval is not None: + summary_data[field_name] = fval + return summary_data + class CreatedModifiedModel(BaseModel): """ @@ -384,23 +403,6 @@ class NotificationFieldsModel(BaseModel): notification_templates_started = models.ManyToManyField("NotificationTemplate", blank=True, related_name='%(class)s_notification_templates_for_started') -def prevent_search(relation): - """ - Used to mark a model field or relation as "restricted from filtering" - e.g., - - class AuthToken(BaseModel): - user = prevent_search(models.ForeignKey(...)) - sensitive_data = prevent_search(models.CharField(...)) - - The flag set by this function is used by - `awx.api.filters.FieldLookupBackend` to block fields and relations that - should not be searchable/filterable via search query params - """ - setattr(relation, '__prevent_search__', True) - return relation - - def accepts_json(relation): """ Used to mark a model field as allowing JSON e.g,. JobTemplate.extra_vars diff --git a/awx/main/models/credential/__init__.py b/awx/main/models/credential/__init__.py index 5de77ff62d..12b09095c2 100644 --- a/awx/main/models/credential/__init__.py +++ b/awx/main/models/credential/__init__.py @@ -83,6 +83,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin): app_label = 'main' ordering = ('name',) unique_together = ('organization', 'name', 'credential_type') + permissions = [('use_credential', 'Can use credential in a job or related resource')] PASSWORD_FIELDS = ['inputs'] FIELDS_TO_PRESERVE_AT_COPY = ['input_sources'] @@ -953,6 +954,25 @@ ManagedCredentialType( }, ) +ManagedCredentialType( + namespace='bitbucket_dc_token', + kind='token', + name=gettext_noop('Bitbucket Data Center HTTP Access Token'), + managed=True, + inputs={ + 'fields': [ + { + 'id': 'token', + 'label': gettext_noop('Token'), + 'type': 'string', + 'secret': True, + 'help_text': gettext_noop('This token needs to come from your user settings in Bitbucket'), + } + ], + 'required': ['token'], + }, +) + ManagedCredentialType( namespace='insights', kind='insights', @@ -1197,6 +1217,26 @@ ManagedCredentialType( }, ) +ManagedCredentialType( + namespace='terraform', + kind='cloud', + name=gettext_noop('Terraform backend configuration'), + managed=True, + inputs={ + 'fields': [ + { + 'id': 'configuration', + 'label': gettext_noop('Backend configuration'), + 'type': 'string', + 'secret': True, + 'multiline': True, + 'help_text': gettext_noop('Terraform backend config as Hashicorp configuration language.'), + }, + ], + 'required': ['configuration'], + }, +) + class CredentialInputSource(PrimordialModel): class Meta: diff --git a/awx/main/models/credential/injectors.py b/awx/main/models/credential/injectors.py index 93c1a37d8b..585bd9597d 100644 --- a/awx/main/models/credential/injectors.py +++ b/awx/main/models/credential/injectors.py @@ -122,3 +122,11 @@ def kubernetes_bearer_token(cred, env, private_data_dir): env['K8S_AUTH_SSL_CA_CERT'] = to_container_path(path, private_data_dir) else: env['K8S_AUTH_VERIFY_SSL'] = 'False' + + +def terraform(cred, env, private_data_dir): + handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env')) + with os.fdopen(handle, 'w') as f: + os.chmod(path, stat.S_IRUSR | stat.S_IWUSR) + f.write(cred.get_input('configuration')) + 
env['TF_BACKEND_CONFIG_FILE'] = to_container_path(path, private_data_dir) diff --git a/awx/main/models/events.py b/awx/main/models/events.py index b499155f8e..a794152c05 100644 --- a/awx/main/models/events.py +++ b/awx/main/models/events.py @@ -124,8 +124,6 @@ class BasePlaybookEvent(CreatedModifiedModel): 'parent_uuid', 'start_line', 'end_line', - 'host_id', - 'host_name', 'verbosity', ] WRAPUP_EVENT = 'playbook_on_stats' @@ -473,7 +471,7 @@ class JobEvent(BasePlaybookEvent): An event/message logged from the callback when running a job. """ - VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created'] + VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created', 'host_id', 'host_name'] JOB_REFERENCE = 'job_id' objects = DeferJobCreatedManager() diff --git a/awx/main/models/ha.py b/awx/main/models/ha.py index 5d370d24c9..8c8c9f919b 100644 --- a/awx/main/models/ha.py +++ b/awx/main/models/ha.py @@ -5,7 +5,7 @@ from decimal import Decimal import logging import os -from django.core.validators import MinValueValidator, MaxValueValidator +from django.core.validators import MinValueValidator from django.db import models, connection from django.db.models.signals import post_save, post_delete from django.dispatch import receiver @@ -17,6 +17,8 @@ from django.db.models import Sum, Q import redis from solo.models import SingletonModel +from ansible_base.lib.utils.models import prevent_search + # AWX from awx import __version__ as awx_application_version from awx.main.utils import is_testing @@ -24,7 +26,7 @@ from awx.api.versioning import reverse from awx.main.fields import ImplicitRoleField from awx.main.managers import InstanceManager, UUID_DEFAULT from awx.main.constants import JOB_FOLDER_PREFIX -from awx.main.models.base import BaseModel, HasEditsMixin, prevent_search +from awx.main.models.base import BaseModel, HasEditsMixin from awx.main.models.rbac import ( ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR, @@ -32,6 +34,7 @@ from awx.main.models.rbac import ( from awx.main.models.unified_jobs import UnifiedJob from awx.main.utils.common import get_corrected_cpu, get_cpu_effective_capacity, get_corrected_memory, get_mem_effective_capacity from awx.main.models.mixins import RelatedJobsMixin, ResourceMixin +from awx.main.models.receptor_address import ReceptorAddress # ansible-runner from ansible_runner.utils.capacity import get_cpu_count, get_mem_in_bytes @@ -62,8 +65,19 @@ class HasPolicyEditsMixin(HasEditsMixin): class InstanceLink(BaseModel): - source = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='+') - target = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='reverse_peers') + class Meta: + ordering = ("id",) + # add constraint for source and target to be unique together + constraints = [ + models.UniqueConstraint( + fields=["source", "target"], + name="unique_source_target", + violation_error_message=_("Field source and target must be unique together."), + ) + ] + + source = models.ForeignKey('Instance', on_delete=models.CASCADE, help_text=_("The source instance of this peer link.")) + target = models.ForeignKey('ReceptorAddress', on_delete=models.CASCADE, help_text=_("The target receptor address of this peer link.")) class States(models.TextChoices): ADDING = 'adding', _('Adding') @@ -74,11 +88,6 @@ class InstanceLink(BaseModel): choices=States.choices, default=States.ADDING, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.") ) - 
class Meta: - unique_together = ('source', 'target') - ordering = ("id",) - constraints = [models.CheckConstraint(check=~models.Q(source=models.F('target')), name='source_and_target_can_not_be_equal')] - class Instance(HasPolicyEditsMixin, BaseModel): """A model representing an AWX instance running against this database.""" @@ -108,6 +117,7 @@ class Instance(HasPolicyEditsMixin, BaseModel): default="", max_length=50, ) + # Auto-fields, implementation is different from BaseModel created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) @@ -183,16 +193,9 @@ class Instance(HasPolicyEditsMixin, BaseModel): node_state = models.CharField( choices=States.choices, default=States.READY, max_length=16, help_text=_("Indicates the current life cycle stage of this instance.") ) - listener_port = models.PositiveIntegerField( - blank=True, - null=True, - default=None, - validators=[MinValueValidator(1024), MaxValueValidator(65535)], - help_text=_("Port that Receptor will listen for incoming connections on."), - ) - peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'), related_name='peers_from') - peers_from_control_nodes = models.BooleanField(default=False, help_text=_("If True, control plane cluster nodes should automatically peer to it.")) + managed = models.BooleanField(help_text=_("If True, this instance is managed by the control plane."), default=False, editable=False) + peers = models.ManyToManyField('ReceptorAddress', through=InstanceLink, through_fields=('source', 'target'), related_name='peers_from') POLICY_FIELDS = frozenset(('managed_by_policy', 'hostname', 'capacity_adjustment')) @@ -239,6 +242,26 @@ class Instance(HasPolicyEditsMixin, BaseModel): return True return self.health_check_started > self.last_health_check + @property + def canonical_address(self): + return self.receptor_addresses.filter(canonical=True).first() + + @property + def canonical_address_port(self): + # note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization + for addr in self.receptor_addresses.all(): + if addr.canonical: + return addr.port + return None + + @property + def canonical_address_peers_from_control_nodes(self): + # note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization + for addr in self.receptor_addresses.all(): + if addr.canonical: + return addr.peers_from_control_nodes + return False + def get_cleanup_task_kwargs(self, **kwargs): """ Produce options to use for the command: ansible-runner worker cleanup @@ -462,6 +485,9 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin, ResourceMi class Meta: app_label = 'main' + permissions = [('use_instancegroup', 'Can use instance group in a preference list of a resource')] + # Since this has no direct organization field only superuser can add, so remove add permission + default_permissions = ('change', 'delete', 'view') def set_default_policy_fields(self): self.policy_instance_list = [] @@ -499,6 +525,35 @@ def schedule_write_receptor_config(broadcast=True): write_receptor_config() # just run locally +@receiver(post_save, sender=ReceptorAddress) +def receptor_address_saved(sender, instance, **kwargs): + from awx.main.signals import disable_activity_stream + + address = instance + + control_instances = set(Instance.objects.filter(node_type__in=[Instance.Types.CONTROL, Instance.Types.HYBRID])) + if address.peers_from_control_nodes: + # if 
control_instances is not a subset of current peers of address, then + # that means we need to add some InstanceLinks + if not control_instances <= set(address.peers_from.all()): + with disable_activity_stream(): + for control_instance in control_instances: + InstanceLink.objects.update_or_create(source=control_instance, target=address) + schedule_write_receptor_config() + else: + if address.peers_from.exists(): + with disable_activity_stream(): + address.peers_from.remove(*control_instances) + schedule_write_receptor_config() + + +@receiver(post_delete, sender=ReceptorAddress) +def receptor_address_deleted(sender, instance, **kwargs): + address = instance + if address.peers_from_control_nodes: + schedule_write_receptor_config() + + @receiver(post_save, sender=Instance) def on_instance_saved(sender, instance, created=False, raw=False, **kwargs): ''' @@ -509,11 +564,14 @@ def on_instance_saved(sender, instance, created=False, raw=False, **kwargs): 2. a node changes its value of peers_from_control_nodes 3. a new control node comes online and has instances to peer to ''' + from awx.main.signals import disable_activity_stream + if created and settings.IS_K8S and instance.node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]: - inst = Instance.objects.filter(peers_from_control_nodes=True) - if set(instance.peers.all()) != set(inst): - instance.peers.set(inst) - schedule_write_receptor_config(broadcast=False) + peers_addresses = ReceptorAddress.objects.filter(peers_from_control_nodes=True) + if peers_addresses.exists(): + with disable_activity_stream(): + instance.peers.add(*peers_addresses) + schedule_write_receptor_config(broadcast=False) if settings.IS_K8S and instance.node_type in [Instance.Types.HOP, Instance.Types.EXECUTION]: if instance.node_state == Instance.States.DEPROVISIONING: @@ -522,16 +580,6 @@ def on_instance_saved(sender, instance, created=False, raw=False, **kwargs): # wait for jobs on the node to complete, then delete the # node and kick off write_receptor_config connection.on_commit(lambda: remove_deprovisioned_node.apply_async([instance.hostname])) - else: - control_instances = set(Instance.objects.filter(node_type__in=[Instance.Types.CONTROL, Instance.Types.HYBRID])) - if instance.peers_from_control_nodes: - if (control_instances & set(instance.peers_from.all())) != set(control_instances): - instance.peers_from.add(*control_instances) - schedule_write_receptor_config() # keep method separate to make pytest mocking easier - else: - if set(control_instances) & set(instance.peers_from.all()): - instance.peers_from.remove(*control_instances) - schedule_write_receptor_config() if created or instance.has_policy_changes(): schedule_policy_task() @@ -546,8 +594,6 @@ def on_instance_group_deleted(sender, instance, using, **kwargs): @receiver(post_delete, sender=Instance) def on_instance_deleted(sender, instance, using, **kwargs): schedule_policy_task() - if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION, Instance.Types.HOP) and instance.peers_from_control_nodes: - schedule_write_receptor_config() class UnifiedJobTemplateInstanceGroupMembership(models.Model): diff --git a/awx/main/models/inventory.py b/awx/main/models/inventory.py index 7cf7f0710e..e4310f08ff 100644 --- a/awx/main/models/inventory.py +++ b/awx/main/models/inventory.py @@ -25,6 +25,8 @@ from django.db.models import Q # REST Framework from rest_framework.exceptions import ParseError +from ansible_base.lib.utils.models import prevent_search + # AWX from awx.api.versioning import reverse from 
awx.main.constants import CLOUD_PROVIDERS @@ -35,7 +37,7 @@ from awx.main.fields import ( OrderedManyToManyField, ) from awx.main.managers import HostManager, HostMetricActiveManager -from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json +from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, accepts_json from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate from awx.main.models.mixins import ( @@ -87,6 +89,11 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin): verbose_name_plural = _('inventories') unique_together = [('name', 'organization')] ordering = ('name',) + permissions = [ + ('use_inventory', 'Can use inventory in a job template'), + ('adhoc_inventory', 'Can run ad hoc commands'), + ('update_inventory', 'Can update inventory sources in inventory'), + ] organization = models.ForeignKey( 'Organization', @@ -923,6 +930,7 @@ class InventorySourceOptions(BaseModel): ('rhv', _('Red Hat Virtualization')), ('controller', _('Red Hat Ansible Automation Platform')), ('insights', _('Red Hat Insights')), + ('terraform', _('Terraform State')), ] # From the options of the Django management base command @@ -1397,7 +1405,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin, return selected_groups -class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin): +class CustomInventoryScript(CommonModelNameNotUnique): class Meta: app_label = 'main' ordering = ('name',) @@ -1628,6 +1636,20 @@ class satellite6(PluginFileInjector): return ret +class terraform(PluginFileInjector): + plugin_name = 'terraform_state' + base_injector = 'managed' + namespace = 'cloud' + collection = 'terraform' + use_fqcn = True + + def inventory_as_dict(self, inventory_update, private_data_dir): + env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, None) + ret = super().inventory_as_dict(inventory_update, private_data_dir) + ret['backend_config_files'] = env["TF_BACKEND_CONFIG_FILE"] + return ret + + class controller(PluginFileInjector): plugin_name = 'tower' # TODO: relying on routing for now, update after EEs pick up revised collection base_injector = 'template' diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index edbc3de0bd..551dd631d9 100644 --- a/awx/main/models/jobs.py +++ b/awx/main/models/jobs.py @@ -20,13 +20,14 @@ from django.core.exceptions import FieldDoesNotExist # REST Framework from rest_framework.exceptions import ParseError +from ansible_base.lib.utils.models import prevent_search + # AWX from awx.api.versioning import reverse from awx.main.constants import HOST_FACTS_FIELDS from awx.main.models.base import ( BaseModel, CreatedModifiedModel, - prevent_search, accepts_json, JOB_TYPE_CHOICES, NEW_JOB_TYPE_CHOICES, @@ -204,6 +205,9 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour class Meta: app_label = 'main' ordering = ('name',) + permissions = [('execute_jobtemplate', 'Can run this job template')] + # Remove add permission, ability to add comes from use permission for inventory, project, credentials + default_permissions = ('change', 'delete', 'view') job_type = models.CharField( max_length=64, diff --git a/awx/main/models/mixins.py b/awx/main/models/mixins.py index 8f7ed9ade3..5df78e15b6 100644 --- 
a/awx/main/models/mixins.py +++ b/awx/main/models/mixins.py @@ -9,7 +9,6 @@ import requests # Django from django.apps import apps from django.conf import settings -from django.contrib.auth.models import User # noqa from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ValidationError from django.db import models @@ -17,15 +16,17 @@ from django.db.models.query import QuerySet from django.utils.crypto import get_random_string from django.utils.translation import gettext_lazy as _ +from ansible_base.lib.utils.models import prevent_search + # AWX -from awx.main.models.base import prevent_search -from awx.main.models.rbac import Role, RoleAncestorEntry + +from awx.main.models.rbac import Role, RoleAncestorEntry, to_permissions from awx.main.utils import parse_yaml_or_json, get_custom_venv_choices, get_licenser, polymorphic from awx.main.utils.execution_environments import get_default_execution_environment from awx.main.utils.encryption import decrypt_value, get_encryption_key, is_encrypted from awx.main.utils.polymorphic import build_polymorphic_ctypes_map from awx.main.fields import AskForField -from awx.main.constants import ACTIVE_STATES +from awx.main.constants import ACTIVE_STATES, org_role_to_permission logger = logging.getLogger('awx.main.models.mixins') @@ -64,13 +65,24 @@ class ResourceMixin(models.Model): @staticmethod def _accessible_pk_qs(cls, accessor, role_field, content_types=None): + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + if cls._meta.model_name == 'organization' and role_field in org_role_to_permission: + # Organization roles can not use the DAB RBAC shortcuts; + # something like Organization.access_qs(user, 'change_jobtemplate') is needed, + # not just Organization.access_qs(user, 'change') + if accessor.is_superuser: + return cls.objects.values_list('id') + + codename = org_role_to_permission[role_field] + + return cls.access_ids_qs(accessor, codename, content_types=content_types) + return cls.access_ids_qs(accessor, to_permissions[role_field], content_types=content_types) if accessor._meta.model_name == 'user': ancestor_roles = accessor.roles.all() elif type(accessor) == Role: ancestor_roles = [accessor] else: - accessor_type = ContentType.objects.get_for_model(accessor) - ancestor_roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id) + raise RuntimeError(f'Role filters are only valid for users and roles, received {accessor}') if content_types is None: ct_kwarg = dict(content_type_id=ContentType.objects.get_for_model(cls).id) @@ -528,7 +540,6 @@ class CustomVirtualEnvMixin(models.Model): class RelatedJobsMixin(object): - """ This method is intended to be overwritten. 
Called by get_active_jobs() @@ -563,6 +574,7 @@ class WebhookTemplateMixin(models.Model): SERVICES = [ ('github', "GitHub"), ('gitlab', "GitLab"), + ('bitbucket_dc', "BitBucket DataCenter"), ] webhook_service = models.CharField(max_length=16, choices=SERVICES, blank=True, help_text=_('Service that webhook requests will be accepted from')) @@ -623,6 +635,7 @@ class WebhookMixin(models.Model): service_header = { 'github': ('Authorization', 'token {}'), 'gitlab': ('PRIVATE-TOKEN', '{}'), + 'bitbucket_dc': ('Authorization', 'Bearer {}'), } service_statuses = { 'github': { @@ -640,6 +653,14 @@ class WebhookMixin(models.Model): 'error': 'failed', # GitLab doesn't have an 'error' status distinct from 'failed' :( 'canceled': 'canceled', }, + 'bitbucket_dc': { + 'pending': 'INPROGRESS', # Bitbucket DC doesn't have any other statuses distinct from INPROGRESS, SUCCESSFUL, FAILED :( + 'running': 'INPROGRESS', + 'successful': 'SUCCESSFUL', + 'failed': 'FAILED', + 'error': 'FAILED', + 'canceled': 'FAILED', + }, } statuses = service_statuses[self.webhook_service] @@ -648,11 +669,18 @@ class WebhookMixin(models.Model): return try: license_type = get_licenser().validate().get('license_type') - data = { - 'state': statuses[status], - 'context': 'ansible/awx' if license_type == 'open' else 'ansible/tower', - 'target_url': self.get_ui_url(), - } + if self.webhook_service == 'bitbucket_dc': + data = { + 'state': statuses[status], + 'key': 'ansible/awx' if license_type == 'open' else 'ansible/tower', + 'url': self.get_ui_url(), + } + else: + data = { + 'state': statuses[status], + 'context': 'ansible/awx' if license_type == 'open' else 'ansible/tower', + 'target_url': self.get_ui_url(), + } k, v = service_header[self.webhook_service] headers = {k: v.format(self.webhook_credential.get_input('token')), 'Content-Type': 'application/json'} response = requests.post(status_api, data=json.dumps(data), headers=headers, timeout=30) diff --git a/awx/main/models/notifications.py b/awx/main/models/notifications.py index ef8304b6a6..da03a7dd47 100644 --- a/awx/main/models/notifications.py +++ b/awx/main/models/notifications.py @@ -5,6 +5,7 @@ from copy import deepcopy import datetime import logging import json +import traceback from django.db import models from django.conf import settings @@ -15,9 +16,11 @@ from django.utils.encoding import smart_str, force_str from jinja2 import sandbox, ChainableUndefined from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError +from ansible_base.lib.utils.models import prevent_search + # AWX from awx.api.versioning import reverse -from awx.main.models.base import CommonModelNameNotUnique, CreatedModifiedModel, prevent_search +from awx.main.models.base import CommonModelNameNotUnique, CreatedModifiedModel from awx.main.utils import encrypt_field, decrypt_field, set_environ from awx.main.notifications.email_backend import CustomEmailBackend from awx.main.notifications.slack_backend import SlackBackend @@ -482,14 +485,29 @@ class JobNotificationMixin(object): if msg_template: try: msg = env.from_string(msg_template).render(**context) - except (TemplateSyntaxError, UndefinedError, SecurityError): - msg = '' + except (TemplateSyntaxError, UndefinedError, SecurityError) as e: + msg = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__)).replace('\n', '\r\n')]) if body_template: try: body = env.from_string(body_template).render(**context) - except (TemplateSyntaxError, UndefinedError, SecurityError): - body = '' + except 
(TemplateSyntaxError, UndefinedError, SecurityError) as e: + body = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__)).replace('\n', '\r\n')]) + + # https://datatracker.ietf.org/doc/html/rfc2822#section-2.2 + # The body should contain at least 2 CRLFs; some clients will render + # the email incorrectly if the body is blank, so we check for that here + + if len(body.strip().splitlines()) < 1: + # blank body + body = '\r\n'.join( + [ + "The template rendering returned a blank body.", + "Please check the template.", + "Refer to https://github.com/ansible/awx/issues/13983", + "for further information.", + ] + ) return (msg, body) diff --git a/awx/main/models/organization.py b/awx/main/models/organization.py index 5e90c51ace..939595ea9e 100644 --- a/awx/main/models/organization.py +++ b/awx/main/models/organization.py @@ -10,6 +10,8 @@ from django.contrib.sessions.models import Session from django.utils.timezone import now as tz_now from django.utils.translation import gettext_lazy as _ +# django-ansible-base +from ansible_base.resource_registry.fields import AnsibleResourceField # AWX from awx.api.versioning import reverse @@ -33,6 +35,12 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi class Meta: app_label = 'main' ordering = ('name',) + permissions = [ + ('member_organization', 'Basic participation permissions for organization'), + ('audit_organization', 'Audit everything inside the organization'), + ] + # Remove add permission; only a superuser can add + default_permissions = ('change', 'delete', 'view') instance_groups = OrderedManyToManyField('InstanceGroup', blank=True, through='OrganizationInstanceGroupMembership') galaxy_credentials = OrderedManyToManyField( @@ -103,6 +111,7 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi approval_role = ImplicitRoleField( parent_role='admin_role', ) + resource = AnsibleResourceField(primary_key_field="id") def get_absolute_url(self, request=None): return reverse('api:organization_detail', kwargs={'pk': self.pk}, request=request) @@ -134,6 +143,7 @@ class Team(CommonModelNameNotUnique, ResourceMixin): app_label = 'main' unique_together = [('organization', 'name')] ordering = ('organization__name', 'name') + permissions = [('member_team', 'Inherit all roles assigned to this team')] organization = models.ForeignKey( 'Organization', @@ -151,6 +161,7 @@ class Team(CommonModelNameNotUnique, ResourceMixin): read_role = ImplicitRoleField( parent_role=['organization.auditor_role', 'member_role'], ) + resource = AnsibleResourceField(primary_key_field="id") def get_absolute_url(self, request=None): return reverse('api:team_detail', kwargs={'pk': self.pk}, request=request) diff --git a/awx/main/models/projects.py b/awx/main/models/projects.py index a22973dd62..0a571194b0 100644 --- a/awx/main/models/projects.py +++ b/awx/main/models/projects.py @@ -259,6 +259,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn class Meta: app_label = 'main' ordering = ('id',) + permissions = [('update_project', 'Can run a project update'), ('use_project', 'Can use project in a job template')] default_environment = models.ForeignKey( 'ExecutionEnvironment', diff --git a/awx/main/models/rbac.py b/awx/main/models/rbac.py index a210a56cb1..c3cdeb5f6b 100644 --- a/awx/main/models/rbac.py +++ b/awx/main/models/rbac.py @@ -7,20 +7,31 @@ import threading import contextlib import re +# django-rest-framework +from rest_framework.serializers import ValidationError + # 
Django from django.db import models, transaction, connection +from django.db.models.signals import m2m_changed +from django.contrib.auth import get_user_model from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.fields import GenericForeignKey from django.utils.translation import gettext_lazy as _ +from django.apps import apps +from django.conf import settings + +# Ansible_base app +from ansible_base.rbac.models import RoleDefinition +from ansible_base.lib.utils.models import get_type_for_model # AWX from awx.api.versioning import reverse -from django.contrib.auth.models import User # noqa +from awx.main.migrations._dab_rbac import build_role_map, get_permissions_for_role +from awx.main.constants import role_name_to_perm_mapping, org_role_to_permission __all__ = [ 'Role', 'batch_role_ancestor_rebuilding', - 'get_roles_on_resource', 'ROLE_SINGLETON_SYSTEM_ADMINISTRATOR', 'ROLE_SINGLETON_SYSTEM_AUDITOR', 'role_summary_fields_generator', @@ -77,6 +88,11 @@ role_descriptions = { } +to_permissions = {} +for k, v in role_name_to_perm_mapping.items(): + to_permissions[k] = v[0].strip('_') + + tls = threading.local() # thread local storage @@ -88,10 +104,8 @@ def check_singleton(func): """ def wrapper(*args, **kwargs): - sys_admin = Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR) - sys_audit = Role.singleton(ROLE_SINGLETON_SYSTEM_AUDITOR) user = args[0] - if user in sys_admin or user in sys_audit: + if user.is_superuser or user.is_system_auditor: if len(args) == 2: return args[1] return Role.objects.all() @@ -171,15 +185,27 @@ class Role(models.Model): def __contains__(self, accessor): if accessor._meta.model_name == 'user': + if accessor.is_superuser: + return True + if self.role_field == 'system_administrator': + return accessor.is_superuser + elif self.role_field == 'system_auditor': + return accessor.is_system_auditor + elif self.role_field in ('read_role', 'auditor_role') and accessor.is_system_auditor: + return True + + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + if self.content_object and self.content_object._meta.model_name == 'organization' and self.role_field in org_role_to_permission: + codename = org_role_to_permission[self.role_field] + + return accessor.has_obj_perm(self.content_object, codename) + + if self.role_field not in to_permissions: + raise Exception(f'{self.role_field} evaluated but not a translatable permission') + return accessor.has_obj_perm(self.content_object, to_permissions[self.role_field]) return self.ancestors.filter(members=accessor).exists() - elif accessor.__class__.__name__ == 'Team': - return self.ancestors.filter(pk=accessor.member_role.id).exists() - elif type(accessor) == Role: - return self.ancestors.filter(pk=accessor.pk).exists() else: - accessor_type = ContentType.objects.get_for_model(accessor) - roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id) - return self.ancestors.filter(pk__in=roles).exists() + raise RuntimeError(f'Role evaluations only valid for users, received {accessor}') @property def name(self): @@ -288,6 +314,9 @@ class Role(models.Model): # # + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + return + if len(additions) == 0 and len(removals) == 0: return @@ -420,6 +449,12 @@ class Role(models.Model): in their organization, but some of those roles descend from organization admin_role, but not auditor_role. 
""" + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + from ansible_base.rbac.models import RoleEvaluation + + q = RoleEvaluation.objects.filter(role__in=user.has_roles.all()).values_list('object_id', 'content_type_id').query + return roles_qs.extra(where=[f'(object_id,content_type_id) in ({q})']) + return roles_qs.filter( id__in=RoleAncestorEntry.objects.filter( descendent__in=RoleAncestorEntry.objects.filter(ancestor_id__in=list(user.roles.values_list('id', flat=True))).values_list( @@ -442,6 +477,13 @@ class Role(models.Model): return self.singleton_name in [ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR] +class AncestorManager(models.Manager): + def get_queryset(self): + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + raise RuntimeError('The old RBAC system has been disabled, this should never be called') + return super(AncestorManager, self).get_queryset() + + class RoleAncestorEntry(models.Model): class Meta: app_label = 'main' @@ -459,30 +501,7 @@ class RoleAncestorEntry(models.Model): content_type_id = models.PositiveIntegerField(null=False) object_id = models.PositiveIntegerField(null=False) - -def get_roles_on_resource(resource, accessor): - """ - Returns a string list of the roles a accessor has for a given resource. - An accessor can be either a User, Role, or an arbitrary resource that - contains one or more Roles associated with it. - """ - - if type(accessor) == User: - roles = accessor.roles.all() - elif type(accessor) == Role: - roles = [accessor] - else: - accessor_type = ContentType.objects.get_for_model(accessor) - roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id) - - return [ - role_field - for role_field in RoleAncestorEntry.objects.filter( - ancestor__in=roles, content_type_id=ContentType.objects.get_for_model(resource).id, object_id=resource.id - ) - .values_list('role_field', flat=True) - .distinct() - ] + objects = AncestorManager() def role_summary_fields_generator(content_object, role_field): @@ -512,3 +531,168 @@ def role_summary_fields_generator(content_object, role_field): summary['name'] = role_names[role_field] summary['id'] = getattr(content_object, '{}_id'.format(role_field)) return summary + + +# ----------------- Custom Role Compatibility ------------------------- +# The following are methods to connect this (old) RBAC system to the new +# system which allows custom roles +# this follows the ORM interface layer documented in docs/rbac.md +def get_role_codenames(role): + obj = role.content_object + if obj is None: + return + f = obj._meta.get_field(role.role_field) + parents, children = build_role_map(apps) + return [perm.codename for perm in get_permissions_for_role(f, children, apps)] + + +def get_role_definition(role): + """Given a old-style role, this gives a role definition in the new RBAC system for it""" + obj = role.content_object + if obj is None: + return + f = obj._meta.get_field(role.role_field) + action_name = f.name.rsplit("_", 1)[0] + rd_name = f'{type(obj).__name__} {action_name.title()} Compat' + perm_list = get_role_codenames(role) + defaults = {'content_type_id': role.content_type_id} + try: + rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults) + except ValidationError: + # This is a tricky case - practically speaking, users should not be allowed to create team roles + # or roles that include the team member permission. 
+ # If we need to create this for compatibility purposes then we will create it as a managed non-editable role + defaults['managed'] = True + rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults) + return rd + + +def get_role_from_object_role(object_role): + """ + Given an object role from the new system, return the corresponding role from the old system. + This reverses the naming from get_role_definition, and the ANSIBLE_BASE_ROLE_PRECREATE setting. + """ + rd = object_role.role_definition + if rd.name.endswith(' Compat'): + model_name, role_name, _ = rd.name.split() + role_name = role_name.lower() + role_name += '_role' + elif rd.name.endswith(' Admin') and rd.name.count(' ') == 2: + # cases like "Organization Project Admin" + model_name, target_model_name, role_name = rd.name.split() + role_name = role_name.lower() + model_cls = apps.get_model('main', target_model_name) + target_model_name = get_type_for_model(model_cls) + if target_model_name == 'notification_template': + target_model_name = 'notification' # total exception + role_name = f'{target_model_name}_admin_role' + elif rd.name.endswith(' Admin'): + # cases like "Project Admin" + role_name = 'admin_role' + else: + model_name, role_name = rd.name.split() + role_name = role_name.lower() + role_name += '_role' + return getattr(object_role.content_object, role_name) + + +def give_or_remove_permission(role, actor, giving=True): + obj = role.content_object + if obj is None: + return + rd = get_role_definition(role) + rd.give_or_remove_permission(actor, obj, giving=giving) + + +class SyncEnabled(threading.local): + def __init__(self): + self.enabled = True + + +rbac_sync_enabled = SyncEnabled() + + +@contextlib.contextmanager +def disable_rbac_sync(): + try: + previous_value = rbac_sync_enabled.enabled + rbac_sync_enabled.enabled = False + yield + finally: + rbac_sync_enabled.enabled = previous_value + + +def give_creator_permissions(user, obj): + assignment = RoleDefinition.objects.give_creator_permissions(user, obj) + if assignment: + with disable_rbac_sync(): + old_role = get_role_from_object_role(assignment.object_role) + old_role.members.add(user) + + +def sync_members_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs): + if action.startswith('pre_'): + return + if not rbac_sync_enabled.enabled: + return + + if action == 'post_add': + is_giving = True + elif action == 'post_remove': + is_giving = False + elif action == 'post_clear': + raise RuntimeError('Clearing of role members not supported') + + if reverse: + user = instance + else: + role = instance + + for user_or_role_id in pk_set: + if reverse: + role = Role.objects.get(pk=user_or_role_id) + else: + user = get_user_model().objects.get(pk=user_or_role_id) + give_or_remove_permission(role, user, giving=is_giving) + + +def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs): + if action.startswith('pre_'): + return + + if action == 'post_add': + is_giving = True + elif action == 'post_remove': + is_giving = False + elif action == 'post_clear': + raise RuntimeError('Clearing of role members not supported') + + if reverse: + parent_role = instance + else: + child_role = instance + + for role_id in pk_set: + if reverse: + child_role = Role.objects.get(id=role_id) + else: + parent_role = Role.objects.get(id=role_id) + + # We want to avoid running this when it is triggered from implicit_parents management; + # we only want to do anything if we know for sure this is a non-implicit 
team role + if parent_role.role_field == 'member_role' and parent_role.content_type.model == 'team': + # Team internal parents are member_role->read_role and admin_role->member_role + # for the same object, this parenting will also be implicit_parents management + # do nothing for internal parents, but OTHER teams may still be assigned permissions to a team + if (child_role.content_type_id == parent_role.content_type_id) and (child_role.object_id == parent_role.object_id): + return + + from awx.main.models.organization import Team + + team = Team.objects.get(pk=parent_role.object_id) + give_or_remove_permission(child_role, team, giving=is_giving) + + +m2m_changed.connect(sync_members_to_new_rbac, Role.members.through) +m2m_changed.connect(sync_parents_to_new_rbac, Role.parents.through) diff --git a/awx/main/models/receptor_address.py b/awx/main/models/receptor_address.py new file mode 100644 index 0000000000..1fa66bd647 --- /dev/null +++ b/awx/main/models/receptor_address.py @@ -0,0 +1,67 @@ +from django.db import models +from django.core.validators import MinValueValidator, MaxValueValidator +from django.utils.translation import gettext_lazy as _ +from awx.api.versioning import reverse + + +class Protocols(models.TextChoices): + TCP = 'tcp', 'TCP' + WS = 'ws', 'WS' + WSS = 'wss', 'WSS' + + +class ReceptorAddress(models.Model): + class Meta: + app_label = 'main' + constraints = [ + models.UniqueConstraint( + fields=["address"], + name="unique_receptor_address", + violation_error_message=_("Receptor address must be unique."), + ) + ] + + address = models.CharField(help_text=_("Routable address for this instance."), max_length=255) + port = models.IntegerField(help_text=_("Port for the address."), default=27199, validators=[MinValueValidator(0), MaxValueValidator(65535)]) + websocket_path = models.CharField(help_text=_("Websocket path."), max_length=255, default="", blank=True) + protocol = models.CharField( + help_text=_("Protocol to use for the Receptor listener, 'tcp', 'wss', or 'ws'."), max_length=10, default=Protocols.TCP, choices=Protocols.choices + ) + is_internal = models.BooleanField(help_text=_("If True, only routable within the Kubernetes cluster."), default=False) + canonical = models.BooleanField(help_text=_("If True, this address is the canonical address for the instance."), default=False) + peers_from_control_nodes = models.BooleanField(help_text=_("If True, control plane cluster nodes should automatically peer to it."), default=False) + instance = models.ForeignKey( + 'Instance', + related_name='receptor_addresses', + on_delete=models.CASCADE, + null=False, + ) + + def __str__(self): + return self.get_full_address() + + def get_full_address(self): + scheme = "" + path = "" + port = "" + if self.protocol == "ws": + scheme = "wss://" + + if self.protocol == "ws" and self.websocket_path: + path = f"/{self.websocket_path}" + + if self.port: + port = f":{self.port}" + + return f"{scheme}{self.address}{port}{path}" + + def get_peer_type(self): + if self.protocol == 'tcp': + return 'tcp-peer' + elif self.protocol in ['ws', 'wss']: + return 'ws-peer' + else: + return None + + def get_absolute_url(self, request=None): + return reverse('api:receptor_address_detail', kwargs={'pk': self.pk}, request=request) diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index 6ba605c0d4..305ca29073 100644 --- a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -30,19 +30,21 @@ from rest_framework.exceptions import ParseError # Django-Polymorphic 
from polymorphic.models import PolymorphicModel +from ansible_base.lib.utils.models import prevent_search, get_type_for_model + # AWX -from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search +from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel from awx.main.dispatch import get_task_queuename from awx.main.dispatch.control import Control as ControlDispatcher from awx.main.registrar import activity_stream_registrar -from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin +from awx.main.models.mixins import TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin +from awx.main.models.rbac import to_permissions from awx.main.utils.common import ( camelcase_to_underscore, get_model_for_type, _inventory_updates, copy_model_by_class, copy_m2m_relationships, - get_type_for_model, parse_yaml_or_json, getattr_dne, ScheduleDependencyManager, @@ -209,7 +211,15 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn # do not use this if in a subclass if cls != UnifiedJobTemplate: return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field) - return ResourceMixin._accessible_pk_qs(cls, accessor, role_field, content_types=cls._submodels_with_roles()) + from ansible_base.rbac.models import RoleEvaluation + + action = to_permissions[role_field] + + return ( + RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__startswith=action, content_type_id__in=cls._submodels_with_roles()) + .values_list('object_id') + .distinct() + ) def _perform_unique_checks(self, unique_checks): # Handle the list of unique fields returned above. Replace with an @@ -1598,7 +1608,8 @@ class UnifiedJob( extra["controller_node"] = self.controller_node or "NOT_SET" elif state == "execution_node_chosen": extra["execution_node"] = self.execution_node or "NOT_SET" - logger_job_lifecycle.info(msg, extra=extra) + + logger_job_lifecycle.info(f"{msg} {json.dumps(extra)}") @property def launched_by(self): diff --git a/awx/main/models/workflow.py b/awx/main/models/workflow.py index 3038db2967..0451daf5bd 100644 --- a/awx/main/models/workflow.py +++ b/awx/main/models/workflow.py @@ -23,9 +23,11 @@ from crum import get_current_user from jinja2 import sandbox from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError +from ansible_base.lib.utils.models import prevent_search + # AWX from awx.api.versioning import reverse -from awx.main.models import prevent_search, accepts_json, UnifiedJobTemplate, UnifiedJob +from awx.main.models import accepts_json, UnifiedJobTemplate, UnifiedJob from awx.main.models.notifications import NotificationTemplate, JobNotificationMixin from awx.main.models.base import CreatedModifiedModel, VarsDictProperty from awx.main.models.rbac import ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR @@ -465,6 +467,10 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl class Meta: app_label = 'main' + permissions = [ + ('execute_workflowjobtemplate', 'Can run this workflow job template'), + ('approve_workflowjobtemplate', 'Can approve steps in this workflow job template'), + ] notification_templates_approvals = models.ManyToManyField( "NotificationTemplate", diff --git a/awx/main/notifications/custom_notification_base.py b/awx/main/notifications/custom_notification_base.py index f3a9e1e3c8..22d04f6511 100644 --- 
a/awx/main/notifications/custom_notification_base.py +++ b/awx/main/notifications/custom_notification_base.py @@ -1,5 +1,6 @@ # Copyright (c) 2019 Ansible, Inc. # All Rights Reserved. +# -*-coding:utf-8-*- class CustomNotificationBase(object): diff --git a/awx/main/notifications/twilio_backend.py b/awx/main/notifications/twilio_backend.py index 420c5ef74d..34fbff53d4 100644 --- a/awx/main/notifications/twilio_backend.py +++ b/awx/main/notifications/twilio_backend.py @@ -39,11 +39,15 @@ class TwilioBackend(AWXBaseEmailBackend, CustomNotificationBase): logger.error(smart_str(_("Exception connecting to Twilio: {}").format(e))) for m in messages: - try: - connection.messages.create(to=m.to, from_=m.from_email, body=m.subject) - sent_messages += 1 - except Exception as e: - logger.error(smart_str(_("Exception sending messages: {}").format(e))) - if not self.fail_silently: - raise + last_exc = None + for dest in m.to: + try: + logger.debug(smart_str(_("FROM: {} / TO: {}").format(m.from_email, dest))) + connection.messages.create(to=dest, from_=m.from_email, body=m.subject) + sent_messages += 1 + except Exception as e: + logger.error(smart_str(_("Exception sending messages: {}").format(e))) + last_exc = e + if not self.fail_silently and last_exc: + raise last_exc # a bare raise here would fail, since the except block has already exited return sent_messages diff --git a/awx/main/routing.py b/awx/main/routing.py index 9625b23176..bf2c06cfe3 100644 --- a/awx/main/routing.py +++ b/awx/main/routing.py @@ -4,13 +4,15 @@ import logging from django.conf import settings from django.urls import re_path -from channels.auth import AuthMiddlewareStack from channels.routing import ProtocolTypeRouter, URLRouter +from ansible_base.lib.channels.middleware import DrfAuthMiddlewareStack + from . import consumers logger = logging.getLogger('awx.main.routing') +_application = None class AWXProtocolTypeRouter(ProtocolTypeRouter): @@ -26,13 +28,91 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter): super().__init__(*args, **kwargs) +class MultipleURLRouterAdapter: + """ + Django channels doesn't nicely support Auth_1(urls_1), Auth_2(urls_2), ..., Auth_n(urls_n) + This class allows associating a websocket url with an auth + Ordering matters. The first matching url will be used. + """ + + def __init__(self, *auths): + self._auths = [a for a in auths] + + async def __call__(self, scope, receive, send): + """ + Loop through the list of passed in URLRouter's (they may or may not be wrapped by auth). + We know we have exhausted the list of URLRouter patterns when we get a + ValueError('No route found for path %s'). When that happens, move onto the next + URLRouter. + If the final URLRouter raises an error, re-raise it in the end. + + We know that we found a match when no error is raised, end the loop. 
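For example, given the wiring application_func() sets up below, a scope for 'websocket/relay/' is consumed by the first (relay-only) URLRouter, while 'api/websocket/' raises "No route found for path" there and falls through to the DrfAuthMiddlewareStack-wrapped router that serves external clients.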
+ """ + last_index = len(self._auths) - 1 + for i, auth in enumerate(self._auths): + try: + return await auth.__call__(scope, receive, send) + except ValueError as e: + if str(e).startswith('No route found for path'): + # Only surface the error if on the last URLRouter + if i == last_index: + raise + + websocket_urlpatterns = [ + re_path(r'api/websocket/$', consumers.EventConsumer.as_asgi()), re_path(r'websocket/$', consumers.EventConsumer.as_asgi()), +] +websocket_relay_urlpatterns = [ re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()), ] -application = AWXProtocolTypeRouter( - { - 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)), - } -) + +def application_func(cls=AWXProtocolTypeRouter) -> ProtocolTypeRouter: + return cls( + { + 'websocket': MultipleURLRouterAdapter( + URLRouter(websocket_relay_urlpatterns), + DrfAuthMiddlewareStack(URLRouter(websocket_urlpatterns)), + ) + } + ) + + +def __getattr__(name: str) -> ProtocolTypeRouter: + """ + Defer instantiating application. + For testing, we just need it to NOT run on import. + + https://peps.python.org/pep-0562/#specification + + Normally, someone would get application from this module via: + from awx.main.routing import application + + and do something with the application: + application.do_something() + + What does the callstack look like when the import runs? + ... + awx.main.routing.__getattribute__(...) # <-- we don't define this so NOOP as far as we are concerned + if '__getattr__' in awx.main.routing.__dict__: # <-- this triggers the function we are in + return awx.main.routing.__dict__.__getattr__("application") + + Why isn't this function simply implemented as: + def __getattr__(name): + if not _application: + _application = application_func() + return _application + + It could. I manually tested it and it passes test_routing.py. + + But my understanding after reading the PEP-0562 specification link above is that + performance would be a bit worse due to the extra __getattribute__ calls when + we reference non-global variables. + """ + if name == "application": + globs = globals() + if not globs['_application']: + globs['_application'] = application_func() + return globs['_application'] + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/awx/main/scheduler/task_manager.py b/awx/main/scheduler/task_manager.py index 441c4e921b..7e72d4cb8e 100644 --- a/awx/main/scheduler/task_manager.py +++ b/awx/main/scheduler/task_manager.py @@ -17,6 +17,8 @@ from django.utils.timezone import now as tz_now from django.conf import settings from django.contrib.contenttypes.models import ContentType +from ansible_base.lib.utils.models import get_type_for_model + # AWX from awx.main.dispatch.reaper import reap_job from awx.main.models import ( @@ -34,7 +36,6 @@ from awx.main.models import ( from awx.main.scheduler.dag_workflow import WorkflowDAG from awx.main.utils.pglock import advisory_lock from awx.main.utils import ( - get_type_for_model, ScheduleTaskManager, ScheduleWorkflowManager, ) @@ -67,7 +68,7 @@ class TaskBase: # initialize each metric to 0 and force metric_has_changed to true. This # ensures each task manager metric will be overridden when pipe_execute # is called later. 
- self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False) + self.subsystem_metrics = s_metrics.DispatcherMetrics(auto_pipe_execute=False) self.start_time = time.time() # We want to avoid calling settings in loops, so cache these settings at init time @@ -104,7 +105,7 @@ class TaskBase: try: # increment task_manager_schedule_calls regardless if the other # metrics are recorded - s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1) + s_metrics.DispatcherMetrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1) # Only record metrics if the last time recording was more # than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago. # Prevents a short-duration task manager that runs directly after a diff --git a/awx/main/signals.py b/awx/main/signals.py index 58ab17c95e..e2fb00a907 100644 --- a/awx/main/signals.py +++ b/awx/main/signals.py @@ -126,6 +126,8 @@ def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwarg def sync_superuser_status_to_rbac(instance, **kwargs): 'When the is_superuser flag is changed on a user, reflect that in the membership of the System Admnistrator role' + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + return update_fields = kwargs.get('update_fields', None) if update_fields and 'is_superuser' not in update_fields: return @@ -137,6 +139,8 @@ def sync_superuser_status_to_rbac(instance, **kwargs): def sync_rbac_to_superuser_status(instance, sender, **kwargs): 'When the is_superuser flag is false but a user has the System Admin role, update the database to reflect that' + if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: + return if kwargs['action'] in ['post_add', 'post_remove', 'post_clear']: new_status_value = bool(kwargs['action'] == 'post_add') if hasattr(instance, 'singleton_name'): # duck typing, role.members.add() vs user.roles.add() diff --git a/awx/main/tasks/callback.py b/awx/main/tasks/callback.py index dd172efd83..069bc408c9 100644 --- a/awx/main/tasks/callback.py +++ b/awx/main/tasks/callback.py @@ -29,7 +29,7 @@ class RunnerCallback: self.safe_env = {} self.event_ct = 0 self.model = model - self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5) + self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5) self.wrapup_event_dispatched = False self.artifacts_processed = False self.extra_update_fields = {} @@ -95,17 +95,17 @@ class RunnerCallback: if self.parent_workflow_job_id: event_data['workflow_job_id'] = self.parent_workflow_job_id event_data['job_created'] = self.job_created - if self.host_map: - host = event_data.get('event_data', {}).get('host', '').strip() - if host: - event_data['host_name'] = host - if host in self.host_map: - event_data['host_id'] = self.host_map[host] - else: - event_data['host_name'] = '' - event_data['host_id'] = '' - if event_data.get('event') == 'playbook_on_stats': - event_data['host_map'] = self.host_map + + host = event_data.get('event_data', {}).get('host', '').strip() + if host: + event_data['host_name'] = host + if host in self.host_map: + event_data['host_id'] = self.host_map[host] + else: + event_data['host_name'] = '' + event_data['host_id'] = '' + if event_data.get('event') == 'playbook_on_stats': + event_data['host_map'] = self.host_map if isinstance(self, RunnerCallbackForProjectUpdate): # need a better way to have this check. 
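# A minimal sketch (hypothetical helper, not part of the patch) of the enrichment the
# RunnerCallback hunk above now performs unconditionally: resolve an event's host
# against the job's host_map, and attach the whole map to the final playbook_on_stats event.
def enrich_event(event_data: dict, host_map: dict) -> dict:
    host = event_data.get('event_data', {}).get('host', '').strip()
    if host:
        event_data['host_name'] = host
        if host in host_map:
            event_data['host_id'] = host_map[host]
    else:
        event_data['host_name'] = ''
        event_data['host_id'] = ''
    if event_data.get('event') == 'playbook_on_stats':
        event_data['host_map'] = host_map
    return event_data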
diff --git a/awx/main/tasks/jobs.py b/awx/main/tasks/jobs.py index d244baa534..0a9e7f5975 100644 --- a/awx/main/tasks/jobs.py +++ b/awx/main/tasks/jobs.py @@ -114,7 +114,7 @@ class BaseTask(object): def __init__(self): self.cleanup_paths = [] - self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5) + self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5) self.runner_callback = self.callback_class(model=self.model) def update_model(self, pk, _attempt=0, **updates): diff --git a/awx/main/tasks/receptor.py b/awx/main/tasks/receptor.py index 7cd79c85e7..121b5502d9 100644 --- a/awx/main/tasks/receptor.py +++ b/awx/main/tasks/receptor.py @@ -27,7 +27,7 @@ from awx.main.utils.common import ( ) from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER from awx.main.tasks.signals import signal_state, signal_callback, SignalExit -from awx.main.models import Instance, InstanceLink, UnifiedJob +from awx.main.models import Instance, InstanceLink, UnifiedJob, ReceptorAddress from awx.main.dispatch import get_task_queuename from awx.main.dispatch.publish import task from awx.main.utils.pglock import advisory_lock @@ -49,6 +49,70 @@ class ReceptorConnectionType(Enum): STREAMTLS = 2 +""" +Translate receptorctl messages that come in over stdout into +structured messages. Currently, these are error messages. +""" + + +class ReceptorErrorBase: + _MESSAGE = 'Receptor Error' + + def __init__(self, node: str = 'N/A', state_name: str = 'N/A'): + self.node = node + self.state_name = state_name + + def __str__(self): + return f"{self.__class__.__name__} '{self._MESSAGE}' on node '{self.node}' with state '{self.state_name}'" + + +class WorkUnitError(ReceptorErrorBase): + _MESSAGE = 'unknown work unit ' + + def __init__(self, work_unit_id: str, *args, **kwargs): + super().__init__(*args, **kwargs) + self.work_unit_id = work_unit_id + + def __str__(self): + return f"{super().__str__()} work unit id '{self.work_unit_id}'" + + +class WorkUnitCancelError(WorkUnitError): + _MESSAGE = 'error cancelling remote unit: unknown work unit ' + + +class WorkUnitResultsError(WorkUnitError): + _MESSAGE = 'Failed to get results: unknown work unit ' + + +class UnknownError(ReceptorErrorBase): + _MESSAGE = 'Unknown receptor ctl error' + + def __init__(self, msg, *args, **kwargs): + super().__init__(*args, **kwargs) + self._MESSAGE = msg + + +class FuzzyError: + def __new__(cls, e: RuntimeError, node: str, state_name: str): + """ + At the time of writing this comment, all of the sub-class detection + is centralized in this class; it acts like a router. + Someone may find it better to push down the error detection logic into + each sub-class. 
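For example, a RuntimeError('unknown work unit qLL2JFNT') is matched by WorkUnitError._MESSAGE in the startswith() scan below and becomes WorkUnitError('qLL2JFNT', node=node, state_name=state_name); a message with no matching prefix falls back to UnknownError.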
+ """ + msg = e.args[0] + + common_startswith = (WorkUnitCancelError, WorkUnitResultsError, WorkUnitError) + + for klass in common_startswith: + if msg.startswith(klass._MESSAGE): + work_unit_id = msg[len(klass._MESSAGE) :] + return klass(work_unit_id, node=node, state_name=state_name) + + return UnknownError(msg, node=node, state_name=state_name) + + def read_receptor_config(): # for K8S deployments, getting a lock is necessary as another process # may be re-writing the config at this time @@ -185,6 +249,7 @@ def run_until_complete(node, timing_data=None, **kwargs): timing_data['transmit_timing'] = run_start - transmit_start run_timing = 0.0 stdout = '' + state_name = 'local var never set' try: resultfile = receptor_ctl.get_work_results(unit_id) @@ -205,13 +270,33 @@ def run_until_complete(node, timing_data=None, **kwargs): stdout = resultfile.read() stdout = str(stdout, encoding='utf-8') + except RuntimeError as e: + receptor_e = FuzzyError(e, node, state_name) + if type(receptor_e) in ( + WorkUnitError, + WorkUnitResultsError, + ): + logger.warning(f'While consuming job results: {receptor_e}') + else: + raise finally: if settings.RECEPTOR_RELEASE_WORK: - res = receptor_ctl.simple_command(f"work release {unit_id}") - if res != {'released': unit_id}: - logger.warning(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}') + try: + res = receptor_ctl.simple_command(f"work release {unit_id}") - receptor_ctl.close() + if res != {'released': unit_id}: + logger.warning(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}') + + receptor_ctl.close() + except RuntimeError as e: + receptor_e = FuzzyError(e, node, state_name) + if type(receptor_e) in ( + WorkUnitError, + WorkUnitCancelError, + ): + logger.warning(f"While releasing work: {receptor_e}") + else: + logger.error(f"While releasing work: {receptor_e}") if state_name.lower() == 'failed': work_detail = status.get('Detail', '') @@ -275,7 +360,7 @@ def _convert_args_to_cli(vargs): args = ['cleanup'] for option in ('exclude_strings', 'remove_images'): if vargs.get(option): - args.append('--{}={}'.format(option.replace('_', '-'), ' '.join(vargs.get(option)))) + args.append('--{}="{}"'.format(option.replace('_', '-'), ' '.join(vargs.get(option)))) for option in ('file_pattern', 'image_prune', 'process_isolation_executable', 'grace_period'): if vargs.get(option) is True: args.append('--{}'.format(option.replace('_', '-'))) @@ -676,36 +761,44 @@ RECEPTOR_CONFIG_STARTER = ( ) -def should_update_config(instances): +def should_update_config(new_config): ''' checks that the list of instances matches the list of tcp-peers in the config ''' - current_config = read_receptor_config() # this gets receptor conf lock - current_peers = [] - for config_entry in current_config: - for key, value in config_entry.items(): - if key.endswith('-peer'): - current_peers.append(value['address']) - intended_peers = [f"{i.hostname}:{i.listener_port}" for i in instances] - logger.debug(f"Peers current {current_peers} intended {intended_peers}") - if set(current_peers) == set(intended_peers): - return False # config file is already update to date - return True + current_config = read_receptor_config() # this gets receptor conf lock + for config_entry in current_config: + if config_entry not in new_config: + logger.warning(f"{config_entry} should not be in receptor config. 
Updating.") + return True + for config_entry in new_config: + if config_entry not in current_config: + logger.warning(f"{config_entry} missing from receptor config. Updating.") + return True + + return False def generate_config_data(): # returns two values # receptor config - based on current database peers # should_update - If True, receptor_config differs from the receptor conf file on disk - instances = Instance.objects.filter(node_type__in=(Instance.Types.EXECUTION, Instance.Types.HOP), peers_from_control_nodes=True) + addresses = ReceptorAddress.objects.filter(peers_from_control_nodes=True) receptor_config = list(RECEPTOR_CONFIG_STARTER) - for instance in instances: - peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}} - receptor_config.append(peer) - should_update = should_update_config(instances) + for address in addresses: + if address.get_peer_type(): + peer = { + f'{address.get_peer_type()}': { + 'address': f'{address.get_full_address()}', + 'tls': 'tlsclient', + } + } + receptor_config.append(peer) + else: + logger.warning(f"Receptor address {address} has unsupported peer type, skipping.") + should_update = should_update_config(receptor_config) return receptor_config, should_update @@ -747,14 +840,13 @@ def write_receptor_config(): with lock: with open(__RECEPTOR_CONF, 'w') as file: yaml.dump(receptor_config, file, default_flow_style=False) - reload_receptor() @task(queue=get_task_queuename) def remove_deprovisioned_node(hostname): InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING) - InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING) + InstanceLink.objects.filter(target__instance__hostname=hostname).update(link_state=InstanceLink.States.REMOVING) node_jobs = UnifiedJob.objects.filter( execution_node=hostname, diff --git a/awx/main/tasks/system.py b/awx/main/tasks/system.py index b32ae7b5e5..bca9d16c05 100644 --- a/awx/main/tasks/system.py +++ b/awx/main/tasks/system.py @@ -6,6 +6,7 @@ import itertools import json import logging import os +import psycopg from io import StringIO from contextlib import redirect_stdout import shutil @@ -62,7 +63,7 @@ from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanu from awx.main.consumers import emit_channel_notification from awx.main import analytics from awx.conf import settings_registry -from awx.main.analytics.subsystem_metrics import Metrics +from awx.main.analytics.subsystem_metrics import DispatcherMetrics from rest_framework.exceptions import PermissionDenied @@ -113,7 +114,7 @@ def dispatch_startup(): cluster_node_heartbeat() reaper.startup_reaping() reaper.reap_waiting(grace_period=0) - m = Metrics() + m = DispatcherMetrics() m.reset_values() @@ -416,7 +417,7 @@ def handle_removed_image(remove_images=None): @task(queue=get_task_queuename) def cleanup_images_and_files(): - _cleanup_images_and_files() + _cleanup_images_and_files(image_prune=True) @task(queue=get_task_queuename) @@ -495,7 +496,7 @@ def inspect_established_receptor_connections(mesh_status): update_links = [] for link in all_links: if link.link_state != InstanceLink.States.REMOVING: - if link.target.hostname in active_receptor_conns.get(link.source.hostname, {}): + if link.target.instance.hostname in active_receptor_conns.get(link.source.hostname, {}): if link.link_state is not InstanceLink.States.ESTABLISHED: link.link_state = InstanceLink.States.ESTABLISHED update_links.append(link) @@ -630,10 
+631,18 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None): logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen)) except DatabaseError as e: - if 'did not affect any rows' in str(e): - logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname)) + cause = e.__cause__ + if cause and hasattr(cause, 'sqlstate'): + sqlstate = cause.sqlstate + sqlstate_str = psycopg.errors.lookup(sqlstate) + logger.debug('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str)) + + if sqlstate_str is psycopg.errors.NoData: # lookup() returns the exception class for the SQLSTATE code + logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname)) + else: + logger.exception("Error marking {} as lost.".format(other_inst.hostname)) else: - logger.exception('Error marking {} as lost'.format(other_inst.hostname)) + logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname)) # Run local reaper if worker_tasks is not None: @@ -788,10 +797,19 @@ def update_inventory_computed_fields(inventory_id): try: i.update_computed_fields() except DatabaseError as e: + # https://github.com/django/django/blob/eff21d8e7a1cb297aedf1c702668b590a1b618f3/django/db/models/base.py#L1105 + # django raises DatabaseError("Forced update did not affect any rows.") + + # if sqlstate is set then a real database error occurred, so re-raise it; + # otherwise this is Django's forced-update error and the task is a duplicate + cause = e.__cause__ + if cause and hasattr(cause, 'sqlstate'): + sqlstate = cause.sqlstate + sqlstate_str = psycopg.errors.lookup(sqlstate) + logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str)) + raise + + # otherwise: the duplicate-task case + logger.debug('Exiting duplicate update_inventory_computed_fields task.') def update_smart_memberships_for_inventory(smart_inventory): diff --git a/awx/main/tests/data/ansible_utils/playbooks/valid/hello_world.yml b/awx/main/tests/data/ansible_utils/playbooks/valid/hello_world.yml index 80d56debc4..7aff8dbf9e 100644 --- a/awx/main/tests/data/ansible_utils/playbooks/valid/hello_world.yml +++ b/awx/main/tests/data/ansible_utils/playbooks/valid/hello_world.yml @@ -3,5 +3,5 @@ hosts: all tasks: - name: Hello Message - debug: + ansible.builtin.debug: msg: "Hello World!" diff --git a/awx/main/tests/data/inventory/plugins/terraform/env.json b/awx/main/tests/data/inventory/plugins/terraform/env.json new file mode 100644 index 0000000000..c68086c8d7 --- /dev/null +++ b/awx/main/tests/data/inventory/plugins/terraform/env.json @@ -0,0 +1,3 @@ +{ + "TF_BACKEND_CONFIG_FILE": "{{ file_reference }}" +} diff --git a/awx/main/tests/docs/conftest.py b/awx/main/tests/docs/conftest.py index bd0cf1c99f..7ec4273627 100644 --- a/awx/main/tests/docs/conftest.py +++ b/awx/main/tests/docs/conftest.py @@ -1,13 +1,8 @@ from awx.main.tests.functional.conftest import * # noqa +import os +import pytest -def pytest_addoption(parser): - parser.addoption("--release", action="store", help="a release version number, e.g., 3.3.0") - - -def pytest_generate_tests(metafunc): - # This is called for every test. Only get/set command line arguments - # if the argument is specified in the list of test "fixturenames". 
- option_value = metafunc.config.option.release - if 'release' in metafunc.fixturenames and option_value is not None: - metafunc.parametrize("release", [option_value]) +@pytest.fixture() +def release(): + return os.environ.get('VERSION_TARGET', '') diff --git a/awx/main/tests/docs/test_swagger_generation.py b/awx/main/tests/docs/test_swagger_generation.py index b480e4562e..f05435c1e3 100644 --- a/awx/main/tests/docs/test_swagger_generation.py +++ b/awx/main/tests/docs/test_swagger_generation.py @@ -99,7 +99,7 @@ class TestSwaggerGeneration: # The number of API endpoints changes over time, but let's just check # for a reasonable number here; if this test starts failing, raise/lower the bounds paths = JSON['paths'] - assert 250 < len(paths) < 375 + assert 250 < len(paths) < 400 assert set(list(paths['/api/'].keys())) == set(['get', 'parameters']) assert set(list(paths['/api/v2/'].keys())) == set(['get', 'parameters']) assert set(list(sorted(paths['/api/v2/credentials/'].keys()))) == set(['get', 'post', 'parameters']) diff --git a/awx/main/tests/functional/analytics/test_metrics.py b/awx/main/tests/functional/analytics/test_metrics.py index 6192d4e9bd..4295da0a6e 100644 --- a/awx/main/tests/functional/analytics/test_metrics.py +++ b/awx/main/tests/functional/analytics/test_metrics.py @@ -4,7 +4,6 @@ from prometheus_client.parser import text_string_to_metric_families from awx.main import models from awx.main.analytics.metrics import metrics from awx.api.versioning import reverse -from awx.main.models.rbac import Role EXPECTED_VALUES = { 'awx_system_info': 1.0, @@ -66,7 +65,6 @@ def test_metrics_permissions(get, admin, org_admin, alice, bob, organization): organization.auditor_role.members.add(bob) assert get(get_metrics_view_db_only(), user=bob).status_code == 403 - Role.singleton('system_auditor').members.add(bob) bob.is_system_auditor = True assert get(get_metrics_view_db_only(), user=bob).status_code == 200 diff --git a/awx/main/tests/functional/api/test_auth.py b/awx/main/tests/functional/api/test_auth.py index d9ac588de3..7ecfe9de95 100644 --- a/awx/main/tests/functional/api/test_auth.py +++ b/awx/main/tests/functional/api/test_auth.py @@ -6,7 +6,7 @@ from django.test import Client from rest_framework.test import APIRequestFactory from awx.api.generics import LoggedLoginView -from awx.api.versioning import drf_reverse +from rest_framework.reverse import reverse as drf_reverse @pytest.mark.django_db diff --git a/awx/main/tests/functional/api/test_credential.py b/awx/main/tests/functional/api/test_credential.py index 0cfac1506e..f958894702 100644 --- a/awx/main/tests/functional/api/test_credential.py +++ b/awx/main/tests/functional/api/test_credential.py @@ -385,10 +385,9 @@ def test_list_created_org_credentials(post, get, organization, org_admin, org_me @pytest.mark.django_db def test_list_cannot_order_by_encrypted_field(post, get, organization, org_admin, credentialtype_ssh, order_by): for i, password in enumerate(('abc', 'def', 'xyz')): - response = post(reverse('api:credential_list'), {'organization': organization.id, 'name': 'C%d' % i, 'password': password}, org_admin) + post(reverse('api:credential_list'), {'organization': organization.id, 'name': 'C%d' % i, 'password': password}, org_admin, expect=400) - response = get(reverse('api:credential_list'), org_admin, QUERY_STRING='order_by=%s' % order_by, status=400) - assert response.status_code == 400 + get(reverse('api:credential_list'), org_admin, QUERY_STRING='order_by=%s' % order_by, expect=400) @pytest.mark.django_db @@ -399,8 
+398,7 @@ def test_inputs_cannot_contain_extra_fields(get, post, organization, admin, cred 'credential_type': credentialtype_ssh.pk, 'inputs': {'invalid_field': 'foo'}, } - response = post(reverse('api:credential_list'), params, admin) - assert response.status_code == 400 + response = post(reverse('api:credential_list'), params, admin, expect=400) assert "'invalid_field' was unexpected" in response.data['inputs'][0] diff --git a/awx/main/tests/functional/api/test_instance_peers.py b/awx/main/tests/functional/api/test_instance_peers.py index 93af6bf5a2..1ce6f843bd 100644 --- a/awx/main/tests/functional/api/test_instance_peers.py +++ b/awx/main/tests/functional/api/test_instance_peers.py @@ -1,19 +1,16 @@ import pytest import yaml -import itertools from unittest import mock -from django.db.utils import IntegrityError - from awx.api.versioning import reverse -from awx.main.models import Instance +from awx.main.models import Instance, ReceptorAddress from awx.api.views.instance_install_bundle import generate_group_vars_all_yml def has_peer(group_vars, peer): peers = group_vars.get('receptor_peers', []) for p in peers: - if f"{p['host']}:{p['port']}" == peer: + if p['address'] == peer: return True return False @@ -24,119 +21,314 @@ class TestPeers: def configure_settings(self, settings): settings.IS_K8S = True - @pytest.mark.parametrize('node_type', ['control', 'hybrid']) - def test_prevent_peering_to_self(self, node_type): + @pytest.mark.parametrize('node_type', ['hop', 'execution']) + def test_peering_to_self(self, node_type, admin_user, patch): """ cannot peer to self """ - control_instance = Instance.objects.create(hostname='abc', node_type=node_type) - with pytest.raises(IntegrityError): - control_instance.peers.add(control_instance) + instance = Instance.objects.create(hostname='abc', node_type=node_type) + addr = ReceptorAddress.objects.create(instance=instance, address='abc', canonical=True) + resp = patch( + url=reverse('api:instance_detail', kwargs={'pk': instance.pk}), + data={"hostname": "abc", "node_type": node_type, "peers": [addr.id]}, + user=admin_user, + expect=400, + ) + assert 'Instance cannot peer to its own address.' in str(resp.data) @pytest.mark.parametrize('node_type', ['control', 'hybrid', 'hop', 'execution']) def test_creating_node(self, node_type, admin_user, post): """ can only add hop and execution nodes via API """ - post( + resp = post( url=reverse('api:instance_list'), data={"hostname": "abc", "node_type": node_type}, user=admin_user, expect=400 if node_type in ['control', 'hybrid'] else 201, ) + if resp.status_code == 400: + assert 'Can only create execution or hop nodes.' in str(resp.data) def test_changing_node_type(self, admin_user, patch): """ cannot change node type """ hop = Instance.objects.create(hostname='abc', node_type="hop") - patch( + resp = patch( url=reverse('api:instance_detail', kwargs={'pk': hop.pk}), data={"node_type": "execution"}, user=admin_user, expect=400, ) + assert 'Cannot change node type.' 
in str(resp.data) - @pytest.mark.parametrize('node_type', ['hop', 'execution']) - def test_listener_port_null(self, node_type, admin_user, post): - """ - listener_port can be None - """ - post( - url=reverse('api:instance_list'), - data={"hostname": "abc", "node_type": node_type, "listener_port": None}, + @pytest.mark.parametrize( + 'payload_port, payload_peers_from, initial_port, initial_peers_from', + [ + (-1, -1, None, None), + (-1, -1, 27199, False), + (-1, -1, 27199, True), + (None, -1, None, None), + (None, False, None, None), + (-1, False, None, None), + (27199, True, 27199, True), + (27199, False, 27199, False), + (27199, -1, 27199, True), + (27199, -1, 27199, False), + (-1, True, 27199, True), + (-1, False, 27199, False), + ], + ) + def test_no_op(self, payload_port, payload_peers_from, initial_port, initial_peers_from, admin_user, patch): + node = Instance.objects.create(hostname='abc', node_type='hop') + if initial_port is not None: + ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node) + + assert ReceptorAddress.objects.filter(instance=node).count() == 1 + else: + assert ReceptorAddress.objects.filter(instance=node).count() == 0 + + data = {'enabled': True} # Just to have something to post. + if payload_port != -1: + data['listener_port'] = payload_port + if payload_peers_from != -1: + data['peers_from_control_nodes'] = payload_peers_from + + patch( + url=reverse('api:instance_detail', kwargs={'pk': node.pk}), + data=data, user=admin_user, - expect=201, + expect=200, ) - @pytest.mark.parametrize('node_type, allowed', [('control', False), ('hybrid', False), ('hop', True), ('execution', True)]) - def test_peers_from_control_nodes_allowed(self, node_type, allowed, post, admin_user): - """ - only hop and execution nodes can have peers_from_control_nodes set to True - """ - post( - url=reverse('api:instance_list'), - data={"hostname": "abc", "peers_from_control_nodes": True, "node_type": node_type, "listener_port": 6789}, + assert ReceptorAddress.objects.filter(instance=node).count() == (0 if initial_port is None else 1) + if initial_port is not None: + ra = ReceptorAddress.objects.get(instance=node, canonical=True) + assert ra.port == initial_port + assert ra.peers_from_control_nodes == initial_peers_from + + @pytest.mark.parametrize( + 'payload_port, payload_peers_from', + [ + (27199, True), + (27199, False), + (27199, -1), + ], + ) + def test_creates_canonical_address(self, payload_port, payload_peers_from, admin_user, patch): + node = Instance.objects.create(hostname='abc', node_type='hop') + assert ReceptorAddress.objects.filter(instance=node).count() == 0 + + data = {'enabled': True} # Just to have something to post. 
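Since the `-1` sentinel recurs in every canonical-address test in this hunk: it means "leave this key out of the PATCH body entirely," which the API treats differently from sending an explicit `None` (JSON null). A standalone restatement of the idiom, where `build_patch_payload` is a hypothetical helper and not part of this diff:

```python
OMITTED = -1  # sentinel used by the parametrized tests: omit the key entirely


def build_patch_payload(listener_port, peers_from_control_nodes):
    data = {'enabled': True}  # the PATCH body must never be empty
    if listener_port != OMITTED:
        data['listener_port'] = listener_port
    if peers_from_control_nodes != OMITTED:
        data['peers_from_control_nodes'] = peers_from_control_nodes
    return data


# omitting both keys yields a no-op payload; an explicit None is a real value
assert build_patch_payload(OMITTED, OMITTED) == {'enabled': True}
assert build_patch_payload(None, OMITTED) == {'enabled': True, 'listener_port': None}
```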
+ if payload_port != -1: + data['listener_port'] = payload_port + if payload_peers_from != -1: + data['peers_from_control_nodes'] = payload_peers_from + + patch( + url=reverse('api:instance_detail', kwargs={'pk': node.pk}), + data=data, user=admin_user, - expect=201 if allowed else 400, + expect=200, ) - def test_listener_port_is_required(self, admin_user, post): - """ - if adding instance to peers list, that instance must have listener_port set - """ - Instance.objects.create(hostname='abc', node_type="hop", listener_port=None) - post( - url=reverse('api:instance_list'), - data={"hostname": "ex", "peers_from_control_nodes": False, "node_type": "execution", "listener_port": None, "peers": ["abc"]}, + assert ReceptorAddress.objects.filter(instance=node).count() == 1 + ra = ReceptorAddress.objects.get(instance=node, canonical=True) + assert ra.port == payload_port + assert ra.peers_from_control_nodes == (payload_peers_from if payload_peers_from != -1 else False) + + @pytest.mark.parametrize( + 'payload_port, payload_peers_from, initial_port, initial_peers_from', + [ + (None, False, 27199, True), + (None, -1, 27199, True), + (None, False, 27199, False), + (None, -1, 27199, False), + ], + ) + def test_deletes_canonical_address(self, payload_port, payload_peers_from, initial_port, initial_peers_from, admin_user, patch): + node = Instance.objects.create(hostname='abc', node_type='hop') + ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node) + + assert ReceptorAddress.objects.filter(instance=node).count() == 1 + + data = {'enabled': True} # Just to have something to post. + if payload_port != -1: + data['listener_port'] = payload_port + if payload_peers_from != -1: + data['peers_from_control_nodes'] = payload_peers_from + + patch( + url=reverse('api:instance_detail', kwargs={'pk': node.pk}), + data=data, + user=admin_user, + expect=200, + ) + + assert ReceptorAddress.objects.filter(instance=node).count() == 0 + + @pytest.mark.parametrize( + 'payload_port, payload_peers_from, initial_port, initial_peers_from', + [ + (27199, True, 27199, False), + (27199, False, 27199, True), + (-1, True, 27199, False), + (-1, False, 27199, True), + ], + ) + def test_updates_canonical_address(self, payload_port, payload_peers_from, initial_port, initial_peers_from, admin_user, patch): + node = Instance.objects.create(hostname='abc', node_type='hop') + ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node) + + assert ReceptorAddress.objects.filter(instance=node).count() == 1 + + data = {'enabled': True} # Just to have something to post. 
+ if payload_port != -1: + data['listener_port'] = payload_port + if payload_peers_from != -1: + data['peers_from_control_nodes'] = payload_peers_from + + patch( + url=reverse('api:instance_detail', kwargs={'pk': node.pk}), + data=data, + user=admin_user, + expect=200, + ) + + assert ReceptorAddress.objects.filter(instance=node).count() == 1 + ra = ReceptorAddress.objects.get(instance=node, canonical=True) + assert ra.port == initial_port # At the present time, changing ports is not allowed + assert ra.peers_from_control_nodes == payload_peers_from + + @pytest.mark.parametrize( + 'payload_port, payload_peers_from, initial_port, initial_peers_from, error_msg', + [ + (-1, True, None, None, "Cannot enable peers_from_control_nodes"), + (None, True, None, None, "Cannot enable peers_from_control_nodes"), + (None, True, 21799, True, "Cannot enable peers_from_control_nodes"), + (None, True, 21799, False, "Cannot enable peers_from_control_nodes"), + (21800, -1, 21799, True, "Cannot change listener port"), + (21800, True, 21799, True, "Cannot change listener port"), + (21800, False, 21799, True, "Cannot change listener port"), + (21800, -1, 21799, False, "Cannot change listener port"), + (21800, True, 21799, False, "Cannot change listener port"), + (21800, False, 21799, False, "Cannot change listener port"), + ], + ) + def test_canonical_address_validation_error(self, payload_port, payload_peers_from, initial_port, initial_peers_from, error_msg, admin_user, patch): + node = Instance.objects.create(hostname='abc', node_type='hop') + if initial_port is not None: + ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node) + + assert ReceptorAddress.objects.filter(instance=node).count() == 1 + else: + assert ReceptorAddress.objects.filter(instance=node).count() == 0 + + data = {'enabled': True} # Just to have something to post. + if payload_port != -1: + data['listener_port'] = payload_port + if payload_peers_from != -1: + data['peers_from_control_nodes'] = payload_peers_from + + resp = patch( + url=reverse('api:instance_detail', kwargs={'pk': node.pk}), + data=data, user=admin_user, expect=400, ) - def test_peers_from_control_nodes_listener_port_enabled(self, admin_user, post): + assert error_msg in str(resp.data) + + def test_changing_managed_listener_port(self, admin_user, patch): """ - if peers_from_control_nodes is True, listener_port must an integer - Assert that all other combinations are allowed + if instance is managed, cannot change listener port at all """ - for index, item in enumerate(itertools.product(['hop', 'execution'], [True, False], [None, 6789])): - node_type, peers_from, listener_port = item - # only disallowed case is when peers_from is True and listener port is None - disallowed = peers_from and not listener_port - post( - url=reverse('api:instance_list'), - data={"hostname": f"abc{index}", "peers_from_control_nodes": peers_from, "node_type": node_type, "listener_port": listener_port}, - user=admin_user, - expect=400 if disallowed else 201, - ) + hop = Instance.objects.create(hostname='abc', node_type="hop", managed=True) + resp = patch( + url=reverse('api:instance_detail', kwargs={'pk': hop.pk}), + data={"listener_port": 5678}, + user=admin_user, + expect=400, # cannot set port + ) + assert 'Cannot change listener port for managed nodes.' 
in str(resp.data)
+        ReceptorAddress.objects.create(instance=hop, address='hop', port=27199, canonical=True)
+        resp = patch(
+            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
+            data={"listener_port": None},
+            user=admin_user,
+            expect=400,  # cannot unset port
+        )
+        assert 'Cannot change listener port for managed nodes.' in str(resp.data)
+
+    def test_bidirectional_peering(self, admin_user, patch):
+        """
+        cannot peer to a node that is already peered to it
+        if A -> B, then disallow B -> A
+        """
+        hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
+        hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', canonical=True)
+        hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
+        hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
+        hop1.peers.add(hop2addr)
+        resp = patch(
+            url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
+            data={"peers": [hop1addr.id]},
+            user=admin_user,
+            expect=400,
+        )
+        assert 'Instance hop1 is already peered to this instance.' in str(resp.data)
+
+    def test_multiple_peers_same_instance(self, admin_user, patch):
+        """
+        cannot peer to more than one address of the same instance
+        """
+        hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
+        hop1addr1 = ReceptorAddress.objects.create(instance=hop1, address='hop1', canonical=True)
+        hop1addr2 = ReceptorAddress.objects.create(instance=hop1, address='hop1alternate')
+        hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
+        resp = patch(
+            url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
+            data={"peers": [hop1addr1.id, hop1addr2.id]},
+            user=admin_user,
+            expect=400,
+        )
+        assert 'Cannot peer to the same instance more than once.' in str(resp.data)
 
     @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
-    def test_disallow_modifying_peers_control_nodes(self, node_type, admin_user, patch):
+    def test_changing_peers_control_nodes(self, node_type, admin_user, patch):
         """
         for control nodes, peers field should not be modified directly via patch.
         """
-        control = Instance.objects.create(hostname='abc', node_type=node_type)
-        hop1 = Instance.objects.create(hostname='hop1', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
-        hop2 = Instance.objects.create(hostname='hop2', node_type='hop', peers_from_control_nodes=False, listener_port=6789)
-        assert [hop1] == list(control.peers.all())  # only hop1 should be peered
-        patch(
+        control = Instance.objects.create(hostname='abc', node_type=node_type, managed=True)
+        hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
+        hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=True, canonical=True)
+        hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
+        hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
+        assert [hop1addr] == list(control.peers.all())  # only hop1addr should be peered
+        resp = patch(
             url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
-            data={"peers": ["hop2"]},
+            data={"peers": [hop2addr.id]},
             user=admin_user,
-            expect=400,  # cannot add peers directly
+            expect=400,  # cannot add peers manually
         )
+        assert 'Setting peers manually for managed nodes is not allowed.'
in str(resp.data) + patch( url=reverse('api:instance_detail', kwargs={'pk': control.pk}), - data={"peers": ["hop1"]}, + data={"peers": [hop1addr.id]}, user=admin_user, expect=200, # patching with current peers list should be okay ) - patch( + resp = patch( url=reverse('api:instance_detail', kwargs={'pk': control.pk}), data={"peers": []}, user=admin_user, expect=400, # cannot remove peers directly ) + assert 'Setting peers manually for managed nodes is not allowed.' in str(resp.data) + patch( url=reverse('api:instance_detail', kwargs={'pk': control.pk}), data={}, @@ -148,23 +340,25 @@ class TestPeers: url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}), data={"peers_from_control_nodes": True}, user=admin_user, - expect=200, # patching without data should be fine too + expect=200, ) - assert {hop1, hop2} == set(control.peers.all()) # hop1 and hop2 should now be peered from control node + assert {hop1addr, hop2addr} == set(control.peers.all()) # hop1 and hop2 should now be peered from control node - def test_disallow_changing_hostname(self, admin_user, patch): + def test_changing_hostname(self, admin_user, patch): """ cannot change hostname """ hop = Instance.objects.create(hostname='hop', node_type='hop') - patch( + resp = patch( url=reverse('api:instance_detail', kwargs={'pk': hop.pk}), data={"hostname": "hop2"}, user=admin_user, expect=400, ) - def test_disallow_changing_node_state(self, admin_user, patch): + assert 'Cannot change hostname.' in str(resp.data) + + def test_changing_node_state(self, admin_user, patch): """ only allow setting to deprovisioning """ @@ -175,12 +369,54 @@ class TestPeers: user=admin_user, expect=200, ) - patch( + resp = patch( url=reverse('api:instance_detail', kwargs={'pk': hop.pk}), data={"node_state": "ready"}, user=admin_user, expect=400, ) + assert "Can only change instances to the 'deprovisioning' state." in str(resp.data) + + def test_changing_managed_node_state(self, admin_user, patch): + """ + cannot change node state of managed node + """ + hop = Instance.objects.create(hostname='hop', node_type='hop', managed=True) + resp = patch( + url=reverse('api:instance_detail', kwargs={'pk': hop.pk}), + data={"node_state": "deprovisioning"}, + user=admin_user, + expect=400, + ) + + assert 'Cannot deprovision managed nodes.' in str(resp.data) + + def test_changing_managed_peers_from_control_nodes(self, admin_user, patch): + """ + cannot change peers_from_control_nodes of managed node + """ + hop = Instance.objects.create(hostname='hop', node_type='hop', managed=True) + ReceptorAddress.objects.create(instance=hop, address='hop', peers_from_control_nodes=True, canonical=True) + resp = patch( + url=reverse('api:instance_detail', kwargs={'pk': hop.pk}), + data={"peers_from_control_nodes": False}, + user=admin_user, + expect=400, + ) + + assert 'Cannot change peers_from_control_nodes for managed nodes.' in str(resp.data) + + hop.peers_from_control_nodes = False + hop.save() + + resp = patch( + url=reverse('api:instance_detail', kwargs={'pk': hop.pk}), + data={"peers_from_control_nodes": False}, + user=admin_user, + expect=400, + ) + + assert 'Cannot change peers_from_control_nodes for managed nodes.' 
in str(resp.data) @pytest.mark.parametrize('node_type', ['control', 'hybrid']) def test_control_node_automatically_peers(self, node_type): @@ -191,9 +427,10 @@ class TestPeers: peer to hop should be removed if hop is deleted """ - hop = Instance.objects.create(hostname='hop', node_type='hop', peers_from_control_nodes=True, listener_port=6789) + hop = Instance.objects.create(hostname='hop', node_type='hop') + hopaddr = ReceptorAddress.objects.create(instance=hop, address='hop', peers_from_control_nodes=True, canonical=True) control = Instance.objects.create(hostname='abc', node_type=node_type) - assert hop in control.peers.all() + assert hopaddr in control.peers.all() hop.delete() assert not control.peers.exists() @@ -203,26 +440,50 @@ class TestPeers: if a new node comes online, other peer relationships should remain intact """ - hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True) - hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False) - hop1.peers.add(hop2) + hop1 = Instance.objects.create(hostname='hop1', node_type='hop') + hop2 = Instance.objects.create(hostname='hop2', node_type='hop') + hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True) + hop1.peers.add(hop2addr) # a control node is added - Instance.objects.create(hostname='control', node_type=node_type, listener_port=None) + Instance.objects.create(hostname='control', node_type=node_type) assert hop1.peers.exists() - def test_group_vars(self, get, admin_user): + def test_reverse_peers(self, admin_user, get): + """ + if hop1 peers to hop2, hop1 should + be in hop2's reverse_peers list + """ + hop1 = Instance.objects.create(hostname='hop1', node_type='hop') + hop2 = Instance.objects.create(hostname='hop2', node_type='hop') + hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True) + hop1.peers.add(hop2addr) + + resp = get( + url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}), + user=admin_user, + expect=200, + ) + + assert hop1.pk in resp.data['reverse_peers'] + + def test_group_vars(self): """ control > hop1 > hop2 < execution """ - control = Instance.objects.create(hostname='control', node_type='control', listener_port=None) - hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True) - hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False) - execution = Instance.objects.create(hostname='execution', node_type='execution', listener_port=6789) + control = Instance.objects.create(hostname='control', node_type='control') + hop1 = Instance.objects.create(hostname='hop1', node_type='hop') + ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=True, port=6789, canonical=True) - execution.peers.add(hop2) - hop1.peers.add(hop2) + hop2 = Instance.objects.create(hostname='hop2', node_type='hop') + hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', peers_from_control_nodes=False, port=6789, canonical=True) + + execution = Instance.objects.create(hostname='execution', node_type='execution') + ReceptorAddress.objects.create(instance=execution, address='execution', peers_from_control_nodes=False, port=6789, canonical=True) + + execution.peers.add(hop2addr) + hop1.peers.add(hop2addr) control_vars = yaml.safe_load(generate_group_vars_all_yml(control)) hop1_vars = 
yaml.safe_load(generate_group_vars_all_yml(hop1)) @@ -265,13 +526,15 @@ class TestPeers: control = Instance.objects.create(hostname='control1', node_type='control') write_method.assert_not_called() - # new hop node with peers_from_control_nodes False (no) - hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=False) + # new address with peers_from_control_nodes False (no) + hop1 = Instance.objects.create(hostname='hop1', node_type='hop') + hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=False, canonical=True) hop1.delete() write_method.assert_not_called() - # new hop node with peers_from_control_nodes True (yes) - hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True) + # new address with peers_from_control_nodes True (yes) + hop1 = Instance.objects.create(hostname='hop1', node_type='hop') + hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=True, canonical=True) write_method.assert_called() write_method.reset_mock() @@ -280,20 +543,21 @@ class TestPeers: write_method.assert_called() write_method.reset_mock() - # new hop node with peers_from_control_nodes False and peered to another hop node (no) - hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False) - hop2.peers.add(hop1) + # new address with peers_from_control_nodes False and peered to another hop node (no) + hop2 = Instance.objects.create(hostname='hop2', node_type='hop') + ReceptorAddress.objects.create(instance=hop2, address='hop2', peers_from_control_nodes=False, canonical=True) + hop2.peers.add(hop1addr) hop2.delete() write_method.assert_not_called() # changing peers_from_control_nodes to False (yes) - hop1.peers_from_control_nodes = False - hop1.save() + hop1addr.peers_from_control_nodes = False + hop1addr.save() write_method.assert_called() write_method.reset_mock() - # deleting hop node that has peers_from_control_nodes to False (no) - hop1.delete() + # deleting address that has peers_from_control_nodes to False (no) + hop1.delete() # cascade deletes to hop1addr write_method.assert_not_called() # deleting control nodes (no) @@ -315,8 +579,8 @@ class TestPeers: # not peered, so config file should not be updated for i in range(3): - Instance.objects.create(hostname=f"exNo-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=False) - + inst = Instance.objects.create(hostname=f"exNo-{i}", node_type='execution') + ReceptorAddress.objects.create(instance=inst, address=f"exNo-{i}", port=6789, peers_from_control_nodes=False, canonical=True) _, should_update = generate_config_data() assert not should_update @@ -324,11 +588,13 @@ class TestPeers: expected_peers = [] for i in range(3): expected_peers.append(f"hop-{i}:6789") - Instance.objects.create(hostname=f"hop-{i}", node_type='hop', listener_port=6789, peers_from_control_nodes=True) + inst = Instance.objects.create(hostname=f"hop-{i}", node_type='hop') + ReceptorAddress.objects.create(instance=inst, address=f"hop-{i}", port=6789, peers_from_control_nodes=True, canonical=True) for i in range(3): expected_peers.append(f"exYes-{i}:6789") - Instance.objects.create(hostname=f"exYes-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=True) + inst = Instance.objects.create(hostname=f"exYes-{i}", node_type='execution') + ReceptorAddress.objects.create(instance=inst, 
address=f"exYes-{i}", port=6789, peers_from_control_nodes=True, canonical=True) new_config, should_update = generate_config_data() assert should_update diff --git a/awx/main/tests/functional/api/test_oauth.py b/awx/main/tests/functional/api/test_oauth.py index 4387f06b9c..e95d2cdc4a 100644 --- a/awx/main/tests/functional/api/test_oauth.py +++ b/awx/main/tests/functional/api/test_oauth.py @@ -8,8 +8,10 @@ from django.db import connection from django.test.utils import override_settings from django.utils.encoding import smart_str, smart_bytes +from rest_framework.reverse import reverse as drf_reverse + from awx.main.utils.encryption import decrypt_value, get_encryption_key -from awx.api.versioning import reverse, drf_reverse +from awx.api.versioning import reverse from awx.main.models.oauth import OAuth2Application as Application, OAuth2AccessToken as AccessToken from awx.main.tests.functional import immediate_on_commit from awx.sso.models import UserEnterpriseAuth diff --git a/awx/main/tests/functional/api/test_role.py b/awx/main/tests/functional/api/test_role.py index cec31d9d7e..68ce8855fe 100644 --- a/awx/main/tests/functional/api/test_role.py +++ b/awx/main/tests/functional/api/test_role.py @@ -3,17 +3,6 @@ import pytest from awx.api.versioning import reverse -@pytest.mark.django_db -def test_admin_visible_to_orphaned_users(get, alice): - names = set() - - response = get(reverse('api:role_list'), user=alice) - for item in response.data['results']: - names.add(item['name']) - assert 'System Auditor' in names - assert 'System Administrator' in names - - @pytest.mark.django_db @pytest.mark.parametrize('role,code', [('member_role', 400), ('admin_role', 400), ('inventory_admin_role', 204)]) @pytest.mark.parametrize('reversed', [True, False]) diff --git a/awx/main/tests/functional/api/test_survey_spec.py b/awx/main/tests/functional/api/test_survey_spec.py index cbb22b3bdc..ec20806f6b 100644 --- a/awx/main/tests/functional/api/test_survey_spec.py +++ b/awx/main/tests/functional/api/test_survey_spec.py @@ -2,12 +2,12 @@ from unittest import mock import pytest import json +from ansible_base.lib.utils.models import get_type_for_model from awx.api.versioning import reverse from awx.main.models.jobs import JobTemplate, Job from awx.main.models.activity_stream import ActivityStream from awx.main.access import JobTemplateAccess -from awx.main.utils.common import get_type_for_model @pytest.fixture diff --git a/awx/main/tests/functional/conftest.py b/awx/main/tests/functional/conftest.py index d65c80e96c..8c68bd91ee 100644 --- a/awx/main/tests/functional/conftest.py +++ b/awx/main/tests/functional/conftest.py @@ -3,15 +3,19 @@ import pytest from unittest import mock import urllib.parse from unittest.mock import PropertyMock +import importlib # Django from django.urls import resolve from django.http import Http404 +from django.apps import apps from django.core.handlers.exception import response_for_exception from django.contrib.auth.models import User from django.core.serializers.json import DjangoJSONEncoder from django.db.backends.sqlite3.base import SQLiteCursorWrapper +from django.db.models.signals import post_migrate + # AWX from awx.main.models.projects import Project from awx.main.models.ha import Instance @@ -28,7 +32,6 @@ from awx.main.models.organization import ( Organization, Team, ) -from awx.main.models.rbac import Role from awx.main.models.notifications import NotificationTemplate, Notification from awx.main.models.events import ( JobEvent, @@ -41,10 +44,19 @@ from awx.main.models.workflow 
import WorkflowJobTemplate
 from awx.main.models.ad_hoc_commands import AdHocCommand
 from awx.main.models.oauth import OAuth2Application as Application
 from awx.main.models.execution_environments import ExecutionEnvironment
+from awx.main.utils import is_testing
 
 __SWAGGER_REQUESTS__ = {}
 
 
+# HACK: the dab_resource_registry app creates its ServiceID row in a migration, which checks/tests do not run, so re-create it on post_migrate
+dab_rr_initial = importlib.import_module('ansible_base.resource_registry.migrations.0001_initial')
+
+
+if is_testing():
+    post_migrate.connect(lambda **kwargs: dab_rr_initial.create_service_id(apps, None))
+
+
 @pytest.fixture(scope="session")
 def swagger_autogen(requests=__SWAGGER_REQUESTS__):
     return requests
@@ -421,7 +433,7 @@ def admin(user):
 
 
 @pytest.fixture
 def system_auditor(user):
     u = user('an-auditor', False)
-    Role.singleton('system_auditor').members.add(u)
+    u.is_system_auditor = True
     return u
diff --git a/awx/main/tests/functional/dab_rbac/conftest.py b/awx/main/tests/functional/dab_rbac/conftest.py
new file mode 100644
index 0000000000..2e37b7f751
--- /dev/null
+++ b/awx/main/tests/functional/dab_rbac/conftest.py
@@ -0,0 +1,10 @@
+import pytest
+from django.apps import apps
+
+from awx.main.migrations._dab_rbac import setup_managed_role_definitions
+
+
+@pytest.fixture
+def managed_roles():
+    "Run the migration script to pre-create managed role definitions"
+    setup_managed_role_definitions(apps, None)
diff --git a/awx/main/tests/functional/dab_rbac/test_access_list.py b/awx/main/tests/functional/dab_rbac/test_access_list.py
new file mode 100644
index 0000000000..2a88b5b18f
--- /dev/null
+++ b/awx/main/tests/functional/dab_rbac/test_access_list.py
@@ -0,0 +1,111 @@
+import pytest
+
+from awx.main.models import User
+from awx.api.versioning import reverse
+
+
+@pytest.mark.django_db
+def test_access_list_superuser(get, admin_user, inventory):
+    url = reverse('api:inventory_access_list', kwargs={'pk': inventory.id})
+
+    response = get(url, user=admin_user, expect=200)
+    by_username = {}
+    for entry in response.data['results']:
+        by_username[entry['username']] = entry
+    assert 'admin' in by_username
+
+    assert len(by_username['admin']['summary_fields']['indirect_access']) == 1
+    assert len(by_username['admin']['summary_fields']['direct_access']) == 0
+    access_entry = by_username['admin']['summary_fields']['indirect_access'][0]
+    assert sorted(access_entry['descendant_roles']) == sorted(['adhoc_role', 'use_role', 'update_role', 'read_role', 'admin_role'])
+
+
+@pytest.mark.django_db
+def test_access_list_system_auditor(get, admin_user, inventory):
+    sys_auditor = User.objects.create(username='sys-aud')
+    sys_auditor.is_system_auditor = True
+    assert sys_auditor.is_system_auditor
+    url = reverse('api:inventory_access_list', kwargs={'pk': inventory.id})
+
+    response = get(url, user=admin_user, expect=200)
+    by_username = {}
+    for entry in response.data['results']:
+        by_username[entry['username']] = entry
+    assert 'sys-aud' in by_username
+
+    assert len(by_username['sys-aud']['summary_fields']['indirect_access']) == 1
+    assert len(by_username['sys-aud']['summary_fields']['direct_access']) == 0
+    access_entry = by_username['sys-aud']['summary_fields']['indirect_access'][0]
+    assert access_entry['descendant_roles'] == ['read_role']
+
+
+@pytest.mark.django_db
+def test_access_list_direct_access(get, admin_user, inventory):
+    u1 = User.objects.create(username='u1')
+
+    inventory.admin_role.members.add(u1)
+
+    url = reverse('api:inventory_access_list', kwargs={'pk': inventory.id})
+    response = get(url,
user=admin_user, expect=200) + by_username = {} + for entry in response.data['results']: + by_username[entry['username']] = entry + assert 'u1' in by_username + + assert len(by_username['u1']['summary_fields']['direct_access']) == 1 + assert len(by_username['u1']['summary_fields']['indirect_access']) == 0 + access_entry = by_username['u1']['summary_fields']['direct_access'][0] + assert sorted(access_entry['descendant_roles']) == sorted(['adhoc_role', 'use_role', 'update_role', 'read_role', 'admin_role']) + + +@pytest.mark.django_db +def test_access_list_organization_access(get, admin_user, inventory): + u2 = User.objects.create(username='u2') + + inventory.organization.inventory_admin_role.members.add(u2) + + # User has indirect access to the inventory + url = reverse('api:inventory_access_list', kwargs={'pk': inventory.id}) + response = get(url, user=admin_user, expect=200) + by_username = {} + for entry in response.data['results']: + by_username[entry['username']] = entry + assert 'u2' in by_username + + assert len(by_username['u2']['summary_fields']['indirect_access']) == 1 + assert len(by_username['u2']['summary_fields']['direct_access']) == 0 + access_entry = by_username['u2']['summary_fields']['indirect_access'][0] + assert sorted(access_entry['descendant_roles']) == sorted(['adhoc_role', 'use_role', 'update_role', 'read_role', 'admin_role']) + + # Test that user shows up in the organization access list with direct access of expected roles + url = reverse('api:organization_access_list', kwargs={'pk': inventory.organization_id}) + response = get(url, user=admin_user, expect=200) + by_username = {} + for entry in response.data['results']: + by_username[entry['username']] = entry + assert 'u2' in by_username + + assert len(by_username['u2']['summary_fields']['direct_access']) == 1 + assert len(by_username['u2']['summary_fields']['indirect_access']) == 0 + access_entry = by_username['u2']['summary_fields']['direct_access'][0] + assert sorted(access_entry['descendant_roles']) == sorted(['inventory_admin_role', 'read_role']) + + +@pytest.mark.django_db +def test_team_indirect_access(get, team, admin_user, inventory): + u1 = User.objects.create(username='u1') + team.member_role.members.add(u1) + + inventory.organization.inventory_admin_role.parents.add(team.member_role) + + url = reverse('api:inventory_access_list', kwargs={'pk': inventory.id}) + response = get(url, user=admin_user, expect=200) + by_username = {} + for entry in response.data['results']: + by_username[entry['username']] = entry + assert 'u1' in by_username + + assert len(by_username['u1']['summary_fields']['direct_access']) == 1 + assert len(by_username['u1']['summary_fields']['indirect_access']) == 0 + access_entry = by_username['u1']['summary_fields']['direct_access'][0] + assert sorted(access_entry['descendant_roles']) == sorted(['adhoc_role', 'use_role', 'update_role', 'read_role', 'admin_role']) diff --git a/awx/main/tests/functional/dab_rbac/test_dab_migration.py b/awx/main/tests/functional/dab_rbac/test_dab_migration.py new file mode 100644 index 0000000000..34639774db --- /dev/null +++ b/awx/main/tests/functional/dab_rbac/test_dab_migration.py @@ -0,0 +1,45 @@ +import pytest +from django.apps import apps +from django.test.utils import override_settings + +from awx.main.migrations._dab_rbac import setup_managed_role_definitions + +from ansible_base.rbac.models import RoleDefinition + +INVENTORY_OBJ_PERMISSIONS = ['view_inventory', 'adhoc_inventory', 'use_inventory', 'change_inventory', 'delete_inventory', 
'update_inventory'] + + +@pytest.mark.django_db +def test_managed_definitions_precreate(): + with override_settings( + ANSIBLE_BASE_ROLE_PRECREATE={ + 'object_admin': '{cls._meta.model_name}-admin', + 'org_admin': 'organization-admin', + 'org_children': 'organization-{cls._meta.model_name}-admin', + 'special': '{cls._meta.model_name}-{action}', + } + ): + setup_managed_role_definitions(apps, None) + rd = RoleDefinition.objects.get(name='inventory-admin') + assert rd.managed is True + # add permissions do not go in the object-level admin + assert set(rd.permissions.values_list('codename', flat=True)) == set(INVENTORY_OBJ_PERMISSIONS) + + # test org-level object admin permissions + rd = RoleDefinition.objects.get(name='organization-inventory-admin') + assert rd.managed is True + assert set(rd.permissions.values_list('codename', flat=True)) == set(['add_inventory', 'view_organization'] + INVENTORY_OBJ_PERMISSIONS) + + +@pytest.mark.django_db +def test_managed_definitions_custom_obj_admin_name(): + with override_settings( + ANSIBLE_BASE_ROLE_PRECREATE={ + 'object_admin': 'foo-{cls._meta.model_name}-foo', + } + ): + setup_managed_role_definitions(apps, None) + rd = RoleDefinition.objects.get(name='foo-inventory-foo') + assert rd.managed is True + # add permissions do not go in the object-level admin + assert set(rd.permissions.values_list('codename', flat=True)) == set(INVENTORY_OBJ_PERMISSIONS) diff --git a/awx/main/tests/functional/dab_rbac/test_dab_rbac_api.py b/awx/main/tests/functional/dab_rbac/test_dab_rbac_api.py new file mode 100644 index 0000000000..293f37c1f9 --- /dev/null +++ b/awx/main/tests/functional/dab_rbac/test_dab_rbac_api.py @@ -0,0 +1,75 @@ +import pytest + +from django.contrib.contenttypes.models import ContentType +from django.urls import reverse as django_reverse + +from awx.api.versioning import reverse +from awx.main.models import JobTemplate, Inventory, Organization + +from ansible_base.rbac.models import RoleDefinition + + +@pytest.mark.django_db +def test_managed_roles_created(managed_roles): + "Managed RoleDefinitions are created in post_migration signal, we expect to see them here" + for cls in (JobTemplate, Inventory): + ct = ContentType.objects.get_for_model(cls) + rds = list(RoleDefinition.objects.filter(content_type=ct)) + assert len(rds) > 1 + assert f'{cls.__name__} Admin' in [rd.name for rd in rds] + for rd in rds: + assert rd.managed is True + + +@pytest.mark.django_db +def test_custom_read_role(admin_user, post, managed_roles): + rd_url = django_reverse('roledefinition-list') + resp = post( + url=rd_url, data={"name": "read role made for test", "content_type": "awx.inventory", "permissions": ['view_inventory']}, user=admin_user, expect=201 + ) + rd_id = resp.data['id'] + rd = RoleDefinition.objects.get(id=rd_id) + assert rd.content_type == ContentType.objects.get_for_model(Inventory) + + +@pytest.mark.django_db +def test_custom_system_roles_prohibited(admin_user, post): + rd_url = django_reverse('roledefinition-list') + resp = post(url=rd_url, data={"name": "read role made for test", "content_type": None, "permissions": ['view_inventory']}, user=admin_user, expect=400) + assert 'System-wide roles are not enabled' in str(resp.data) + + +@pytest.mark.django_db +def test_assign_managed_role(admin_user, alice, rando, inventory, post, managed_roles): + rd = RoleDefinition.objects.get(name='Inventory Admin') + rd.give_permission(alice, inventory) + # Now that alice has full permissions to the inventory, she will give rando permission + url = 
django_reverse('roleuserassignment-list')
+    post(url=url, data={"user": rando.id, "role_definition": rd.id, "object_id": inventory.id}, user=alice, expect=201)
+    assert rando.has_obj_perm(inventory, 'change') is True
+
+
+@pytest.mark.django_db
+def test_assign_custom_delete_role(admin_user, rando, inventory, delete, patch):
+    rd, _ = RoleDefinition.objects.get_or_create(
+        name='inventory-delete', permissions=['delete_inventory', 'view_inventory'], content_type=ContentType.objects.get_for_model(Inventory)
+    )
+    rd.give_permission(rando, inventory)
+    inv_id = inventory.pk
+    inv_url = reverse('api:inventory_detail', kwargs={'pk': inv_id})
+    patch(url=inv_url, data={"description": "new"}, user=rando, expect=403)
+    delete(url=inv_url, user=rando, expect=202)
+    assert Inventory.objects.get(id=inv_id).pending_deletion
+
+
+@pytest.mark.django_db
+def test_assign_custom_add_role(admin_user, rando, organization, post, managed_roles):
+    rd, _ = RoleDefinition.objects.get_or_create(
+        name='inventory-add', permissions=['add_inventory', 'view_organization'], content_type=ContentType.objects.get_for_model(Organization)
+    )
+    rd.give_permission(rando, organization)
+    url = reverse('api:inventory_list')
+    r = post(url=url, data={'name': 'abc', 'organization': organization.id}, user=rando, expect=201)
+    inv_id = r.data['id']
+    inventory = Inventory.objects.get(id=inv_id)
+    assert rando.has_obj_perm(inventory, 'change')
diff --git a/awx/main/tests/functional/dab_rbac/test_translation_layer.py b/awx/main/tests/functional/dab_rbac/test_translation_layer.py
new file mode 100644
index 0000000000..2829599252
--- /dev/null
+++ b/awx/main/tests/functional/dab_rbac/test_translation_layer.py
@@ -0,0 +1,106 @@
+from unittest import mock
+
+import pytest
+
+from awx.main.models.rbac import get_role_from_object_role, give_creator_permissions
+from awx.main.models import User, Organization, WorkflowJobTemplate, WorkflowJobTemplateNode, Team
+from awx.api.versioning import reverse
+
+from ansible_base.rbac.models import RoleUserAssignment
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+    'role_name',
+    ['execution_environment_admin_role', 'project_admin_role', 'admin_role', 'auditor_role', 'read_role', 'execute_role', 'notification_admin_role'],
+)
+def test_round_trip_roles(organization, rando, role_name, managed_roles):
+    """
+    Make an assignment with the old-style role,
+    get the equivalent new role,
+    then get the old role again
+    """
+    getattr(organization, role_name).members.add(rando)
+    assignment = RoleUserAssignment.objects.get(user=rando)
+    old_role = get_role_from_object_role(assignment.object_role)
+    assert old_role.id == getattr(organization, role_name).id
+
+
+@pytest.mark.django_db
+def test_organization_level_permissions(organization, inventory, managed_roles):
+    u1 = User.objects.create(username='alice')
+    u2 = User.objects.create(username='bob')
+
+    organization.inventory_admin_role.members.add(u1)
+    organization.workflow_admin_role.members.add(u2)
+
+    assert u1 in inventory.admin_role
+    assert u1 in organization.inventory_admin_role
+    assert u2 in organization.workflow_admin_role
+
+    assert u2 not in organization.inventory_admin_role
+    assert u1 not in organization.workflow_admin_role
+    assert not (set(u1.has_roles.all()) & set(u2.has_roles.all()))  # the users have no roles in common
+
+    # Old style
+    assert set(Organization.accessible_objects(u1, 'inventory_admin_role')) == set([organization])
+    assert set(Organization.accessible_objects(u2,
'inventory_admin_role')) == set() + assert set(Organization.accessible_objects(u1, 'workflow_admin_role')) == set() + assert set(Organization.accessible_objects(u2, 'workflow_admin_role')) == set([organization]) + + # New style + assert set(Organization.access_qs(u1, 'add_inventory')) == set([organization]) + assert set(Organization.access_qs(u1, 'change_inventory')) == set([organization]) + assert set(Organization.access_qs(u2, 'add_inventory')) == set() + assert set(Organization.access_qs(u1, 'add_workflowjobtemplate')) == set() + assert set(Organization.access_qs(u2, 'add_workflowjobtemplate')) == set([organization]) + + +@pytest.mark.django_db +def test_organization_execute_role(organization, rando, managed_roles): + organization.execute_role.members.add(rando) + assert rando in organization.execute_role + assert set(Organization.accessible_objects(rando, 'execute_role')) == set([organization]) + + +@pytest.mark.django_db +def test_workflow_approval_list(get, post, admin_user, managed_roles): + workflow_job_template = WorkflowJobTemplate.objects.create() + approval_node = WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template) + url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'}) + post(url, {'name': 'URL Test', 'description': 'An approval', 'timeout': 0}, user=admin_user) + approval_node.refresh_from_db() + approval_jt = approval_node.unified_job_template + approval_jt.create_unified_job() + + r = get(url=reverse('api:workflow_approval_list'), user=admin_user, expect=200) + assert r.data['count'] >= 1 + + +@pytest.mark.django_db +def test_creator_permission(rando, admin_user, inventory, managed_roles): + give_creator_permissions(rando, inventory) + assert rando in inventory.admin_role + assert rando in inventory.admin_role.members.all() + + +@pytest.mark.django_db +def test_team_team_read_role(rando, team, admin_user, post, managed_roles): + orgs = [Organization.objects.create(name=f'foo-{i}') for i in range(2)] + teams = [Team.objects.create(name=f'foo-{i}', organization=orgs[i]) for i in range(2)] + teams[1].member_role.members.add(rando) + + # give second team read permission to first team through the API for regression testing + url = reverse('api:role_teams_list', kwargs={'pk': teams[0].read_role.pk, 'version': 'v2'}) + post(url, {'id': teams[1].id}, user=admin_user) + + # user should be able to view the first team + assert rando in teams[0].read_role + + +@pytest.mark.django_db +def test_implicit_parents_no_assignments(organization): + """Through the normal course of creating models, we should not be changing DAB RBAC permissions""" + with mock.patch('awx.main.models.rbac.give_or_remove_permission') as mck: + Team.objects.create(name='random team', organization=organization) + mck.assert_not_called() diff --git a/awx/main/tests/functional/dab_resource_registry/test_ansible_id_display.py b/awx/main/tests/functional/dab_resource_registry/test_ansible_id_display.py new file mode 100644 index 0000000000..bf0d550262 --- /dev/null +++ b/awx/main/tests/functional/dab_resource_registry/test_ansible_id_display.py @@ -0,0 +1,39 @@ +import pytest + +from ansible_base.resource_registry.models import Resource + +from awx.api.versioning import reverse + + +def assert_has_resource(list_response, obj=None): + data = list_response.data + assert 'resource' in data['results'][0]['summary_fields'] + resource_data = data['results'][0]['summary_fields']['resource'] + assert resource_data['ansible_id'] + resource 
= Resource.objects.filter(ansible_id=resource_data['ansible_id']).first() + assert resource + assert resource.content_object + if obj: + objects = [Resource.objects.get(ansible_id=entry['summary_fields']['resource']['ansible_id']).content_object for entry in data['results']] + assert obj in objects + + +@pytest.mark.django_db +def test_organization_ansible_id(organization, admin_user, get): + url = reverse('api:organization_list') + response = get(url=url, user=admin_user, expect=200) + assert_has_resource(response, obj=organization) + + +@pytest.mark.django_db +def test_team_ansible_id(team, admin_user, get): + url = reverse('api:team_list') + response = get(url=url, user=admin_user, expect=200) + assert_has_resource(response, obj=team) + + +@pytest.mark.django_db +def test_user_ansible_id(rando, admin_user, get): + url = reverse('api:user_list') + response = get(url=url, user=admin_user, expect=200) + assert_has_resource(response, obj=rando) diff --git a/awx/main/tests/functional/models/test_activity_stream.py b/awx/main/tests/functional/models/test_activity_stream.py index f8ae40b540..8be052628d 100644 --- a/awx/main/tests/functional/models/test_activity_stream.py +++ b/awx/main/tests/functional/models/test_activity_stream.py @@ -104,11 +104,13 @@ class TestRolesAssociationEntries: else: assert len(entry_qs) == 1 # unfortunate, the original creation does _not_ set a real is_auditor field - assert 'is_system_auditor' not in json.loads(entry_qs[0].changes) + assert 'is_system_auditor' not in json.loads(entry_qs[0].changes) # NOTE: if this fails, see special note + # special note - if system auditor flag is moved to user model then we expect this assertion to be changed + # make sure that an extra entry is not created, expectation for count would change to 1 if value: - auditor_changes = json.loads(entry_qs[1].changes) - assert auditor_changes['object2'] == 'user' - assert auditor_changes['object2_pk'] == u.pk + entry = entry_qs[1] + assert json.loads(entry.changes) == {'is_system_auditor': [False, True]} + assert entry.object1 == 'user' def test_user_no_op_api(self, system_auditor): as_ct = ActivityStream.objects.count() diff --git a/awx/main/tests/functional/models/test_context_managers.py b/awx/main/tests/functional/models/test_context_managers.py index 9807d8a6e9..271f88b21f 100644 --- a/awx/main/tests/functional/models/test_context_managers.py +++ b/awx/main/tests/functional/models/test_context_managers.py @@ -1,7 +1,6 @@ import pytest # AWX context managers for testing -from awx.main.models.rbac import batch_role_ancestor_rebuilding from awx.main.signals import disable_activity_stream, disable_computed_fields, update_inventory_computed_fields # AWX models @@ -10,15 +9,6 @@ from awx.main.models import ActivityStream, Job from awx.main.tests.functional import immediate_on_commit -@pytest.mark.django_db -def test_rbac_batch_rebuilding(rando, organization): - with batch_role_ancestor_rebuilding(): - organization.admin_role.members.add(rando) - inventory = organization.inventories.create(name='test-inventory') - assert rando not in inventory.admin_role - assert rando in inventory.admin_role - - @pytest.mark.django_db def test_disable_activity_stream(): with disable_activity_stream(): diff --git a/awx/main/tests/functional/models/test_inventory.py b/awx/main/tests/functional/models/test_inventory.py index 27d2f46ec8..a07ef1b21c 100644 --- a/awx/main/tests/functional/models/test_inventory.py +++ b/awx/main/tests/functional/models/test_inventory.py @@ -193,6 +193,7 @@ class 
TestInventorySourceInjectors:
             ('satellite6', 'theforeman.foreman.foreman'),
             ('insights', 'redhatinsights.insights.insights'),
             ('controller', 'awx.awx.tower'),
+            ('terraform', 'cloud.terraform.terraform_state'),
         ],
     )
     def test_plugin_proper_names(self, source, proper_name):
diff --git a/awx/main/tests/functional/test_bulk.py b/awx/main/tests/functional/test_bulk.py
index d05bb7a1f8..6b166cdf2b 100644
--- a/awx/main/tests/functional/test_bulk.py
+++ b/awx/main/tests/functional/test_bulk.py
@@ -309,3 +309,137 @@ def test_bulk_job_set_all_prompt(job_template, organization, inventory, project,
     assert node[0].limit == 'kansas'
     assert node[0].skip_tags == 'foobar'
     assert node[0].job_tags == 'untagged'
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize('num_hosts, num_queries', [(1, 70), (10, 150), (25, 250)])
+def test_bulk_host_delete_num_queries(organization, inventory, post, get, user, num_hosts, num_queries, django_assert_max_num_queries):
+    '''
+    If I am a...
+    org admin
+    inventory admin at org level
+    admin of a particular inventory
+    superuser
+
+    Bulk Host delete should take under a certain number of queries
+    '''
+    users_list = setup_admin_users_list(organization, inventory, user)
+    for u in users_list:
+        hosts = [{'name': str(uuid4())} for i in range(num_hosts)]
+        with django_assert_max_num_queries(num_queries):
+            bulk_host_create_response = post(reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': hosts}, u, expect=201).data
+            assert len(bulk_host_create_response['hosts']) == len(hosts), f"unexpected number of hosts created for user {u}"
+            hosts_ids_created = get_inventory_hosts(get, inventory.id, u)
+            bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, u, expect=201).data
+            assert len(bulk_host_delete_response['hosts'].keys()) == len(hosts), f"unexpected number of hosts deleted for user {u}"
+
+
+@pytest.mark.django_db
+def test_bulk_host_delete_rbac(organization, inventory, post, get, user):
+    '''
+    If I am a...
+    org admin
+    inventory admin at org level
+    admin of a particular inventory
+    ... I can bulk delete hosts
+
+    Everyone else cannot
+    '''
+    admin_users_list = setup_admin_users_list(organization, inventory, user)
+    users_list = setup_non_admin_users_list(organization, inventory, user)
+
+    for indx, u in enumerate(admin_users_list):
+        bulk_host_create_response = post(
+            reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar-{indx}'}]}, u, expect=201
+        ).data
+        assert len(bulk_host_create_response['hosts']) == 1, f"unexpected number of hosts created for user {u}"
+        assert Host.objects.filter(inventory__id=inventory.id)[0].name == f'foobar-{indx}'
+        hosts_ids_created = get_inventory_hosts(get, inventory.id, u)
+        bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, u, expect=201).data
+        assert len(bulk_host_delete_response['hosts'].keys()) == 1, f"unexpected number of hosts deleted by user {u}"
+
+    for indx, create_u in enumerate(admin_users_list):
+        bulk_host_create_response = post(
+            reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar2-{indx}'}]}, create_u, expect=201
+        ).data
+        assert bulk_host_create_response['hosts'][0]['name'] == f'foobar2-{indx}'
+        hosts_ids_created = get_inventory_hosts(get, inventory.id, create_u)
+        for delete_u in users_list:
+            bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, delete_u, expect=403).data
+            assert "Lack permissions to delete hosts from this inventory." in bulk_host_delete_response['inventories'].values()
+
+
+@pytest.mark.django_db
+def test_bulk_host_delete_from_multiple_inv(organization, inventory, post, get, user):
+    '''
+    If I am inventory admin at org level
+
+    Bulk Host delete should be enabled only on my inventory
+    '''
+    num_hosts = 10
+    inventory.organization = organization
+
+    # Create second inventory
+    inv2 = organization.inventories.create(name="second-test-inv")
+    inv2.organization = organization
+    admin2_user = user('inventory2_admin', False)
+    inv2.admin_role.members.add(admin2_user)
+
+    admin_user = user('inventory_admin', False)
+    inventory.admin_role.members.add(admin_user)
+
+    organization.member_role.members.add(admin_user)
+    organization.member_role.members.add(admin2_user)
+
+    hosts = [{'name': str(uuid4())} for i in range(num_hosts)]
+    hosts2 = [{'name': str(uuid4())} for i in range(num_hosts)]
+
+    # create hosts in each of the inventories
+    bulk_host_create_response = post(reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': hosts}, admin_user, expect=201).data
+    assert len(bulk_host_create_response['hosts']) == len(hosts), f"unexpected number of hosts created for user {admin_user}"
+
+    bulk_host_create_response2 = post(reverse('api:bulk_host_create'), {'inventory': inv2.id, 'hosts': hosts2}, admin2_user, expect=201).data
+    assert len(bulk_host_create_response2['hosts']) == len(hosts), f"unexpected number of hosts created for user {admin2_user}"
+
+    # get all host ids from both inventories
+    hosts_ids_created = get_inventory_hosts(get, inventory.id, admin_user)
+    hosts_ids_created += get_inventory_hosts(get, inv2.id, admin2_user)
+
+    expected_error = "Lack permissions to delete hosts from this inventory."
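One subtlety in the loop just below: the `zip` pairs each admin with the name of the inventory they do *not* administer, because that is where the per-inventory 403 detail should appear. A plain-Python restatement with hypothetical names (not objects from the test):

```python
# admin1 administers inv1 and admin2 administers inv2; reversing the inventory
# list pairs each admin with the other admin's inventory.
inventory_names = ['inv1', 'inv2']
admins = ['admin1', 'admin2']

pairs = list(zip(reversed(inventory_names), admins))
assert pairs == [('inv2', 'admin1'), ('inv1', 'admin2')]
```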
+    # each admin tries to delete ALL hosts, including those in the inventory they do not administer
+    for inv_name, invadmin in zip([inv2.name, inventory.name], [admin_user, admin2_user]):
+        bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, invadmin, expect=403).data
+        result_message = bulk_host_delete_response['inventories'][inv_name]
+        assert result_message == expected_error, f"deleted hosts without permission by user {invadmin}"
+
+
+def setup_admin_users_list(organization, inventory, user):
+    inventory.organization = organization
+    inventory_admin = user('inventory_admin', False)
+    org_admin = user('org_admin', False)
+    org_inv_admin = user('org_inv_admin', False)
+    superuser = user('admin', True)
+    for u in [org_admin, org_inv_admin, inventory_admin]:
+        organization.member_role.members.add(u)
+    organization.admin_role.members.add(org_admin)
+    organization.inventory_admin_role.members.add(org_inv_admin)
+    inventory.admin_role.members.add(inventory_admin)
+    return [inventory_admin, org_inv_admin, superuser, org_admin]
+
+
+def setup_non_admin_users_list(organization, inventory, user):
+    inventory.organization = organization
+    auditor = user('auditor', False)
+    member = user('member', False)
+    use_inv_member = user('use_inv_member', False)
+    for u in [auditor, member, use_inv_member]:
+        organization.member_role.members.add(u)
+    inventory.use_role.members.add(use_inv_member)
+    organization.auditor_role.members.add(auditor)
+    return [auditor, member, use_inv_member]
+
+
+def get_inventory_hosts(get, inv_id, use_user):
+    data = get(reverse('api:inventory_hosts_list', kwargs={'pk': inv_id}), use_user, expect=200).data
+    results = [host['id'] for host in data['results']]
+    return results
diff --git a/awx/main/tests/functional/test_credential.py b/awx/main/tests/functional/test_credential.py
index d61f2e09ba..97cf2beb2d 100644
--- a/awx/main/tests/functional/test_credential.py
+++ b/awx/main/tests/functional/test_credential.py
@@ -81,6 +81,7 @@ def test_default_cred_types():
         'aws_secretsmanager_credential',
         'azure_kv',
         'azure_rm',
+        'bitbucket_dc_token',
         'centrify_vault_kv',
         'conjur',
         'controller',
@@ -100,6 +101,7 @@ def test_default_cred_types():
         'satellite6',
         'scm',
         'ssh',
+        'terraform',
         'thycotic_dsv',
         'thycotic_tss',
         'vault',
diff --git a/awx/main/tests/functional/test_credential_plugins.py b/awx/main/tests/functional/test_credential_plugins.py
index 9d199c31f5..3ee29e9ce3 100644
--- a/awx/main/tests/functional/test_credential_plugins.py
+++ b/awx/main/tests/functional/test_credential_plugins.py
@@ -60,6 +60,13 @@ def test_hashivault_client_cert_auth_no_role():
     assert res == expected_res
 
 
+def test_hashivault_userpass_auth():
+    kwargs = {'username': 'the_username', 'password': 'the_password'}
+    expected_res = {'username': 'the_username', 'password': 'the_password'}
+    res = hashivault.userpass_auth(**kwargs)
+    assert res == expected_res
+
+
 def test_hashivault_handle_auth_token():
     kwargs = {
         'token': 'the_token',
diff --git a/awx/main/tests/functional/test_fixture_factories.py b/awx/main/tests/functional/test_fixture_factories.py
index 1af7b66246..5792197177 100644
--- a/awx/main/tests/functional/test_fixture_factories.py
+++ b/awx/main/tests/functional/test_fixture_factories.py
@@ -50,13 +50,13 @@ def test_org_factory_roles(organization_factory):
         teams=['team1', 'team2'],
         users=['team1:foo', 'bar'],
         projects=['baz', 'bang'],
-        roles=['team2.member_role:foo', 'team1.admin_role:bar', 'team1.admin_role:team2.admin_role', 'baz.admin_role:foo'],
+        roles=['team2.member_role:foo', 'team1.admin_role:bar', 'team1.member_role:team2.admin_role',
'baz.admin_role:foo'], ) assert objects.users.bar in objects.teams.team2.admin_role assert objects.users.foo in objects.projects.baz.admin_role assert objects.users.foo in objects.teams.team1.member_role - assert objects.teams.team2.admin_role in objects.teams.team1.admin_role.children.all() + assert objects.teams.team2.admin_role in objects.teams.team1.member_role.children.all() @pytest.mark.django_db diff --git a/awx/main/tests/functional/test_inventory_source_injectors.py b/awx/main/tests/functional/test_inventory_source_injectors.py index 903a6c9875..80bc5429c1 100644 --- a/awx/main/tests/functional/test_inventory_source_injectors.py +++ b/awx/main/tests/functional/test_inventory_source_injectors.py @@ -107,6 +107,7 @@ def read_content(private_data_dir, raw_env, inventory_update): for filename in os.listdir(os.path.join(private_data_dir, subdir)): filename_list.append(os.path.join(subdir, filename)) filename_list = sorted(filename_list, key=lambda fn: inverse_env.get(os.path.join(private_data_dir, fn), [fn])[0]) + inventory_content = "" for filename in filename_list: if filename in ('args', 'project'): continue # Ansible runner @@ -130,6 +131,7 @@ def read_content(private_data_dir, raw_env, inventory_update): dir_contents[abs_file_path] = f.read() # Declare a reference to inventory plugin file if it exists if abs_file_path.endswith('.yml') and 'plugin: ' in dir_contents[abs_file_path]: + inventory_content = dir_contents[abs_file_path] referenced_paths.add(abs_file_path) # used as inventory file elif cache_file_regex.match(abs_file_path): file_aliases[abs_file_path] = 'cache_file' @@ -157,7 +159,11 @@ def read_content(private_data_dir, raw_env, inventory_update): content = {} for abs_file_path, file_content in dir_contents.items(): # assert that all files laid down are used - if abs_file_path not in referenced_paths and abs_file_path not in ignore_files: + if ( + abs_file_path not in referenced_paths + and to_container_path(abs_file_path, private_data_dir) not in inventory_content + and abs_file_path not in ignore_files + ): raise AssertionError( "File {} is not referenced. References and files:\n{}\n{}".format(abs_file_path, json.dumps(env, indent=4), json.dumps(dir_contents, indent=4)) ) diff --git a/awx/main/tests/functional/test_linkstate.py b/awx/main/tests/functional/test_linkstate.py new file mode 100644 index 0000000000..478883870a --- /dev/null +++ b/awx/main/tests/functional/test_linkstate.py @@ -0,0 +1,30 @@ +import pytest + +from awx.main.models import Instance, ReceptorAddress, InstanceLink +from awx.main.tasks.system import inspect_established_receptor_connections + + +@pytest.mark.django_db +class TestLinkState: + @pytest.fixture(autouse=True) + def configure_settings(self, settings): + settings.IS_K8S = True + + def test_inspect_established_receptor_connections(self): + ''' + Change link state from ADDING to ESTABLISHED + if the receptor status KnownConnectionCosts field + has an entry for the source and target node. 
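+
+        For example, a receptor status of
+        {"KnownConnectionCosts": {"hop1": {"hop2": 1}}} marks the
+        hop1 -> hop2 link ESTABLISHED, while an empty mapping leaves
+        it in ADDING.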
+ ''' + hop1 = Instance.objects.create(hostname='hop1') + hop2 = Instance.objects.create(hostname='hop2') + hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', port=5678) + InstanceLink.objects.create(source=hop1, target=hop2addr, link_state=InstanceLink.States.ADDING) + + # calling with empty KnownConnectionCosts should not change the link state + inspect_established_receptor_connections({"KnownConnectionCosts": {}}) + assert InstanceLink.objects.get(source=hop1, target=hop2addr).link_state == InstanceLink.States.ADDING + + mesh_state = {"KnownConnectionCosts": {"hop1": {"hop2": 1}}} + inspect_established_receptor_connections(mesh_state) + assert InstanceLink.objects.get(source=hop1, target=hop2addr).link_state == InstanceLink.States.ESTABLISHED diff --git a/awx/main/tests/functional/test_migrations.py b/awx/main/tests/functional/test_migrations.py index cd0889c208..ab877f603f 100644 --- a/awx/main/tests/functional/test_migrations.py +++ b/awx/main/tests/functional/test_migrations.py @@ -42,3 +42,29 @@ class TestMigrationSmoke: final_state = migrator.apply_tested_migration(final_migration) Instance = final_state.apps.get_model('main', 'Instance') assert Instance.objects.filter(hostname='foobar').count() == 1 + + def test_receptor_address(self, migrator): + old_state = migrator.apply_initial_migration(('main', '0188_add_bitbucket_dc_webhook')) + Instance = old_state.apps.get_model('main', 'Instance') + for i in range(3): + Instance.objects.create(hostname=f'foobar{i}', node_type='hop') + foo = Instance.objects.create(hostname='foo', node_type='execution', listener_port=1234) + bar = Instance.objects.create(hostname='bar', node_type='execution', listener_port=None) + bar.peers.add(foo) + + new_state = migrator.apply_tested_migration( + ('main', '0189_inbound_hop_nodes'), + ) + Instance = new_state.apps.get_model('main', 'Instance') + ReceptorAddress = new_state.apps.get_model('main', 'ReceptorAddress') + + # We can now test how our migration worked, new field is there: + assert ReceptorAddress.objects.filter(address='foo', port=1234).count() == 1 + assert not ReceptorAddress.objects.filter(address='bar').exists() + + bar = Instance.objects.get(hostname='bar') + fooaddr = ReceptorAddress.objects.get(address='foo') + + bar_peers = bar.peers.all() + assert len(bar_peers) == 1 + assert fooaddr in bar_peers diff --git a/awx/main/tests/functional/test_named_url.py b/awx/main/tests/functional/test_named_url.py index 884ecd7dc0..54e3b96edd 100644 --- a/awx/main/tests/functional/test_named_url.py +++ b/awx/main/tests/functional/test_named_url.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- -from unittest import mock - import pytest -from django.core.exceptions import ImproperlyConfigured from django.conf import settings from awx.api.versioning import reverse @@ -23,25 +20,6 @@ from awx.main.models import ( # noqa User, WorkflowJobTemplate, ) -from awx.conf import settings_registry - - -def setup_module(module): - # In real-world scenario, named url graph structure is populated by __init__ - # of URLModificationMiddleware. The way Django bootstraps ensures the initialization - # will happen *once and only once*, while the number of initialization is uncontrollable - # in unit test environment. So it is wrapped by try-except block to mute any - # unwanted exceptions. - try: - URLModificationMiddleware(mock.Mock()) - except ImproperlyConfigured: - pass - - -def teardown_module(module): - # settings_registry will be persistent states unless we explicitly clean them up. 
- settings_registry.unregister('NAMED_URL_FORMATS') - settings_registry.unregister('NAMED_URL_GRAPH_NODES') @pytest.mark.django_db diff --git a/awx/main/tests/functional/test_projects.py b/awx/main/tests/functional/test_projects.py index e4c23bf0f2..17eda7f58f 100644 --- a/awx/main/tests/functional/test_projects.py +++ b/awx/main/tests/functional/test_projects.py @@ -411,14 +411,14 @@ def test_project_delete(delete, organization, admin_user): @pytest.mark.parametrize( - 'order_by, expected_names, expected_ids', + 'order_by, expected_names', [ - ('name', ['alice project', 'bob project', 'shared project'], [1, 2, 3]), - ('-name', ['shared project', 'bob project', 'alice project'], [3, 2, 1]), + ('name', ['alice project', 'bob project', 'shared project']), + ('-name', ['shared project', 'bob project', 'alice project']), ], ) @pytest.mark.django_db -def test_project_list_ordering_by_name(get, order_by, expected_names, expected_ids, organization_factory): +def test_project_list_ordering_by_name(get, order_by, expected_names, organization_factory): 'ensure sorted order of project list is maintained correctly when the requested order is invalid or not applicable' objects = organization_factory( 'org1', @@ -426,13 +426,11 @@ def test_project_list_ordering_by_name(get, order_by, expected_names, expected_i superusers=['admin'], ) project_names = [] - project_ids = [] # TODO: ask for an order by here that doesn't apply results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results'] for x in range(len(results)): project_names.append(results[x]['name']) - project_ids.append(results[x]['id']) - assert project_names == expected_names and project_ids == expected_ids + assert project_names == expected_names @pytest.mark.parametrize('order_by', ('name', '-name')) @@ -450,7 +448,8 @@ def test_project_list_ordering_with_duplicate_names(get, order_by, organization_ for x in range(3): results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results'] project_ids[x] = [proj['id'] for proj in results] - assert project_ids[0] == project_ids[1] == project_ids[2] == [1, 2, 3, 4, 5] + assert project_ids[0] == project_ids[1] == project_ids[2] + assert project_ids[0] == sorted(project_ids[0]) @pytest.mark.django_db diff --git a/awx/main/tests/functional/test_rbac_api.py b/awx/main/tests/functional/test_rbac_api.py index b697ef3144..e1e76e981e 100644 --- a/awx/main/tests/functional/test_rbac_api.py +++ b/awx/main/tests/functional/test_rbac_api.py @@ -3,7 +3,9 @@ import pytest from django.db import transaction from awx.api.versioning import reverse -from awx.main.models.rbac import Role, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR +from awx.main.models.rbac import Role + +from django.test.utils import override_settings @pytest.fixture @@ -31,8 +33,6 @@ def test_get_roles_list_user(organization, inventory, team, get, user): 'Users can see all roles they have access to, but not all roles' this_user = user('user-test_get_roles_list_user') organization.member_role.members.add(this_user) - custom_role = Role.objects.create(role_field='custom_role-test_get_roles_list_user') - organization.member_role.children.add(custom_role) url = reverse('api:role_list') response = get(url, this_user) @@ -46,10 +46,8 @@ def test_get_roles_list_user(organization, inventory, team, get, user): for r in roles['results']: role_hash[r['id']] = r - assert Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR).id in role_hash assert 
organization.admin_role.id in role_hash assert organization.member_role.id in role_hash - assert custom_role.id in role_hash assert inventory.admin_role.id not in role_hash assert team.member_role.id not in role_hash @@ -57,7 +55,8 @@ def test_get_roles_list_user(organization, inventory, team, get, user): @pytest.mark.django_db def test_roles_visibility(get, organization, project, admin, alice, bob): - Role.singleton('system_auditor').members.add(alice) + alice.is_system_auditor = True + alice.save() assert get(reverse('api:role_list') + '?id=%d' % project.update_role.id, user=admin).data['count'] == 1 assert get(reverse('api:role_list') + '?id=%d' % project.update_role.id, user=alice).data['count'] == 1 assert get(reverse('api:role_list') + '?id=%d' % project.update_role.id, user=bob).data['count'] == 0 @@ -67,7 +66,8 @@ def test_roles_visibility(get, organization, project, admin, alice, bob): @pytest.mark.django_db def test_roles_filter_visibility(get, organization, project, admin, alice, bob): - Role.singleton('system_auditor').members.add(alice) + alice.is_system_auditor = True + alice.save() project.update_role.members.add(admin) assert get(reverse('api:user_roles_list', kwargs={'pk': admin.id}) + '?id=%d' % project.update_role.id, user=admin).data['count'] == 1 @@ -105,15 +105,6 @@ def test_cant_delete_role(delete, admin, inventory): # -@pytest.mark.django_db -def test_get_user_roles_list(get, admin): - url = reverse('api:user_roles_list', kwargs={'pk': admin.id}) - response = get(url, admin) - assert response.status_code == 200 - roles = response.data - assert roles['count'] > 0 # 'system_administrator' role if nothing else - - @pytest.mark.django_db def test_user_view_other_user_roles(organization, inventory, team, get, alice, bob): 'Users can see roles for other users, but only the roles that that user has access to see as well' @@ -141,7 +132,6 @@ def test_user_view_other_user_roles(organization, inventory, team, get, alice, b assert organization.admin_role.id in role_hash assert custom_role.id not in role_hash # doesn't show up in the user roles list, not an explicit grant - assert Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR).id not in role_hash assert inventory.admin_role.id not in role_hash assert team.member_role.id not in role_hash # alice can't see this @@ -197,6 +187,7 @@ def test_remove_role_from_user(role, post, admin): @pytest.mark.django_db +@override_settings(ANSIBLE_BASE_ALLOW_TEAM_ORG_ADMIN=True) def test_get_teams_roles_list(get, team, organization, admin): team.member_role.children.add(organization.admin_role) url = reverse('api:team_roles_list', kwargs={'pk': team.id}) diff --git a/awx/main/tests/functional/test_rbac_core.py b/awx/main/tests/functional/test_rbac_core.py deleted file mode 100644 index 7029bbe544..0000000000 --- a/awx/main/tests/functional/test_rbac_core.py +++ /dev/null @@ -1,213 +0,0 @@ -import pytest - -from awx.main.models import ( - Role, - Organization, - Project, -) -from awx.main.fields import update_role_parentage_for_instance - - -@pytest.mark.django_db -def test_auto_inheritance_by_children(organization, alice): - A = Role.objects.create() - B = Role.objects.create() - A.members.add(alice) - - assert alice not in organization.admin_role - assert Organization.accessible_objects(alice, 'admin_role').count() == 0 - A.children.add(B) - assert alice not in organization.admin_role - assert Organization.accessible_objects(alice, 'admin_role').count() == 0 - A.children.add(organization.admin_role) - assert alice in organization.admin_role 
- assert Organization.accessible_objects(alice, 'admin_role').count() == 1 - A.children.remove(organization.admin_role) - assert alice not in organization.admin_role - B.children.add(organization.admin_role) - assert alice in organization.admin_role - B.children.remove(organization.admin_role) - assert alice not in organization.admin_role - assert Organization.accessible_objects(alice, 'admin_role').count() == 0 - - # We've had the case where our pre/post save init handlers in our field descriptors - # end up creating a ton of role objects because of various not-so-obvious issues - assert Role.objects.count() < 50 - - -@pytest.mark.django_db -def test_auto_inheritance_by_parents(organization, alice): - A = Role.objects.create() - B = Role.objects.create() - A.members.add(alice) - - assert alice not in organization.admin_role - B.parents.add(A) - assert alice not in organization.admin_role - organization.admin_role.parents.add(A) - assert alice in organization.admin_role - organization.admin_role.parents.remove(A) - assert alice not in organization.admin_role - organization.admin_role.parents.add(B) - assert alice in organization.admin_role - organization.admin_role.parents.remove(B) - assert alice not in organization.admin_role - - -@pytest.mark.django_db -def test_accessible_objects(organization, alice, bob): - A = Role.objects.create() - A.members.add(alice) - B = Role.objects.create() - B.members.add(alice) - B.members.add(bob) - - assert Organization.accessible_objects(alice, 'admin_role').count() == 0 - assert Organization.accessible_objects(bob, 'admin_role').count() == 0 - A.children.add(organization.admin_role) - assert Organization.accessible_objects(alice, 'admin_role').count() == 1 - assert Organization.accessible_objects(bob, 'admin_role').count() == 0 - - -@pytest.mark.django_db -def test_team_symantics(organization, team, alice): - assert alice not in organization.auditor_role - team.member_role.children.add(organization.auditor_role) - assert alice not in organization.auditor_role - team.member_role.members.add(alice) - assert alice in organization.auditor_role - team.member_role.members.remove(alice) - assert alice not in organization.auditor_role - - -@pytest.mark.django_db -def test_auto_field_adjustments(organization, inventory, team, alice): - 'Ensures the auto role reparenting is working correctly through non m2m fields' - org2 = Organization.objects.create(name='Org 2', description='org 2') - org2.admin_role.members.add(alice) - assert alice not in inventory.admin_role - inventory.organization = org2 - inventory.save() - assert alice in inventory.admin_role - inventory.organization = organization - inventory.save() - assert alice not in inventory.admin_role - # assert False - - -@pytest.mark.django_db -def test_implicit_deletes(alice): - 'Ensures implicit resources and roles delete themselves' - delorg = Organization.objects.create(name='test-org') - child = Role.objects.create() - child.parents.add(delorg.admin_role) - delorg.admin_role.members.add(alice) - - admin_role_id = delorg.admin_role.id - auditor_role_id = delorg.auditor_role.id - - assert child.ancestors.count() > 1 - assert Role.objects.filter(id=admin_role_id).count() == 1 - assert Role.objects.filter(id=auditor_role_id).count() == 1 - n_alice_roles = alice.roles.count() - n_system_admin_children = Role.singleton('system_administrator').children.count() - - delorg.delete() - - assert Role.objects.filter(id=admin_role_id).count() == 0 - assert Role.objects.filter(id=auditor_role_id).count() == 0 - assert 
alice.roles.count() == (n_alice_roles - 1) - assert Role.singleton('system_administrator').children.count() == (n_system_admin_children - 1) - assert child.ancestors.count() == 1 - assert child.ancestors.all()[0] == child - - -@pytest.mark.django_db -def test_content_object(user): - 'Ensure our content_object stuf seems to be working' - - org = Organization.objects.create(name='test-org') - assert org.admin_role.content_object.id == org.id - - -@pytest.mark.django_db -def test_hierarchy_rebuilding_multi_path(): - 'Tests a subdtle cases around role hierarchy rebuilding when you have multiple paths to the same role of different length' - - X = Role.objects.create() - A = Role.objects.create() - B = Role.objects.create() - C = Role.objects.create() - D = Role.objects.create() - - A.children.add(B) - A.children.add(D) - B.children.add(C) - C.children.add(D) - - assert A.is_ancestor_of(D) - assert X.is_ancestor_of(D) is False - - X.children.add(A) - - assert X.is_ancestor_of(D) is True - - X.children.remove(A) - - # This can be the stickler, the rebuilder needs to ensure that D's role - # hierarchy is built after both A and C are updated. - assert X.is_ancestor_of(D) is False - - -@pytest.mark.django_db -def test_auto_parenting(): - org1 = Organization.objects.create(name='org1') - org2 = Organization.objects.create(name='org2') - - prj1 = Project.objects.create(name='prj1') - prj2 = Project.objects.create(name='prj2') - - assert org1.admin_role.is_ancestor_of(prj1.admin_role) is False - assert org1.admin_role.is_ancestor_of(prj2.admin_role) is False - assert org2.admin_role.is_ancestor_of(prj1.admin_role) is False - assert org2.admin_role.is_ancestor_of(prj2.admin_role) is False - - prj1.organization = org1 - prj1.save() - - assert org1.admin_role.is_ancestor_of(prj1.admin_role) - assert org1.admin_role.is_ancestor_of(prj2.admin_role) is False - assert org2.admin_role.is_ancestor_of(prj1.admin_role) is False - assert org2.admin_role.is_ancestor_of(prj2.admin_role) is False - - prj2.organization = org1 - prj2.save() - - assert org1.admin_role.is_ancestor_of(prj1.admin_role) - assert org1.admin_role.is_ancestor_of(prj2.admin_role) - assert org2.admin_role.is_ancestor_of(prj1.admin_role) is False - assert org2.admin_role.is_ancestor_of(prj2.admin_role) is False - - prj1.organization = org2 - prj1.save() - - assert org1.admin_role.is_ancestor_of(prj1.admin_role) is False - assert org1.admin_role.is_ancestor_of(prj2.admin_role) - assert org2.admin_role.is_ancestor_of(prj1.admin_role) - assert org2.admin_role.is_ancestor_of(prj2.admin_role) is False - - prj2.organization = org2 - prj2.save() - - assert org1.admin_role.is_ancestor_of(prj1.admin_role) is False - assert org1.admin_role.is_ancestor_of(prj2.admin_role) is False - assert org2.admin_role.is_ancestor_of(prj1.admin_role) - assert org2.admin_role.is_ancestor_of(prj2.admin_role) - - -@pytest.mark.django_db -def test_update_parents_keeps_teams(team, project): - project.update_role.parents.add(team.member_role) - assert team.member_role in project.update_role # test prep sanity check - update_role_parentage_for_instance(project) - assert team.member_role in project.update_role # actual assertion diff --git a/awx/main/tests/functional/test_rbac_job_templates.py b/awx/main/tests/functional/test_rbac_job_templates.py index bccec0a1c2..17e7ff3524 100644 --- a/awx/main/tests/functional/test_rbac_job_templates.py +++ b/awx/main/tests/functional/test_rbac_job_templates.py @@ -4,7 +4,7 @@ import pytest from awx.api.versioning import reverse from 
awx.main.access import BaseAccess, JobTemplateAccess, ScheduleAccess from awx.main.models.jobs import JobTemplate -from awx.main.models import Project, Organization, Inventory, Schedule, User +from awx.main.models import Project, Organization, Schedule @mock.patch.object(BaseAccess, 'check_license', return_value=None) @@ -177,7 +177,7 @@ def test_job_template_creator_access(project, organization, rando, post): jt_pk = response.data['id'] jt_obj = JobTemplate.objects.get(pk=jt_pk) # Creating a JT should place the creator in the admin role - assert rando in jt_obj.admin_role.members.all() + assert rando in jt_obj.admin_role @pytest.mark.django_db @@ -283,48 +283,3 @@ class TestProjectOrganization: assert org_admin not in jt.admin_role patch(url=jt.get_absolute_url(), data={'project': project.id}, user=admin_user, expect=200) assert org_admin in jt.admin_role - - def test_inventory_read_transfer_direct(self, patch): - orgs = [] - invs = [] - admins = [] - for i in range(2): - org = Organization.objects.create(name='org{}'.format(i)) - org_admin = User.objects.create(username='user{}'.format(i)) - inv = Inventory.objects.create(organization=org, name='inv{}'.format(i)) - org.auditor_role.members.add(org_admin) - - orgs.append(org) - admins.append(org_admin) - invs.append(inv) - - jt = JobTemplate.objects.create(name='foo', inventory=invs[0]) - assert admins[0] in jt.read_role - assert admins[1] not in jt.read_role - - jt.inventory = invs[1] - jt.save(update_fields=['inventory']) - assert admins[0] not in jt.read_role - assert admins[1] in jt.read_role - - def test_inventory_read_transfer_indirect(self, patch): - orgs = [] - admins = [] - for i in range(2): - org = Organization.objects.create(name='org{}'.format(i)) - org_admin = User.objects.create(username='user{}'.format(i)) - org.auditor_role.members.add(org_admin) - - orgs.append(org) - admins.append(org_admin) - - inv = Inventory.objects.create(organization=orgs[0], name='inv{}'.format(i)) - - jt = JobTemplate.objects.create(name='foo', inventory=inv) - assert admins[0] in jt.read_role - assert admins[1] not in jt.read_role - - inv.organization = orgs[1] - inv.save(update_fields=['organization']) - assert admins[0] not in jt.read_role - assert admins[1] in jt.read_role diff --git a/awx/main/tests/functional/test_rbac_migration.py b/awx/main/tests/functional/test_rbac_migration.py index 5f1b2633e8..8ee411ba1a 100644 --- a/awx/main/tests/functional/test_rbac_migration.py +++ b/awx/main/tests/functional/test_rbac_migration.py @@ -1,9 +1,7 @@ import pytest -from django.apps import apps - from awx.main.migrations import _rbac as rbac -from awx.main.models import UnifiedJobTemplate, InventorySource, Inventory, JobTemplate, Project, Organization, User +from awx.main.models import UnifiedJobTemplate, InventorySource, Inventory, JobTemplate, Project, Organization @pytest.mark.django_db @@ -49,27 +47,3 @@ def test_implied_organization_subquery_job_template(): assert jt.test_field is None else: assert jt.test_field == jt.project.organization_id - - -@pytest.mark.django_db -def test_give_explicit_inventory_permission(): - dual_admin = User.objects.create(username='alice') - inv_admin = User.objects.create(username='bob') - inv_org = Organization.objects.create(name='inv-org') - proj_org = Organization.objects.create(name='proj-org') - - inv_org.admin_role.members.add(inv_admin, dual_admin) - proj_org.admin_role.members.add(dual_admin) - - proj = Project.objects.create(name="test-proj", organization=proj_org) - inv = 
Inventory.objects.create(name='test-inv', organization=inv_org) - - jt = JobTemplate.objects.create(name='foo', project=proj, inventory=inv) - - assert dual_admin in jt.admin_role - - rbac.restore_inventory_admins(apps, None) - - assert inv_admin in jt.admin_role.members.all() - assert dual_admin not in jt.admin_role.members.all() - assert dual_admin in jt.admin_role diff --git a/awx/main/tests/functional/test_rbac_team.py b/awx/main/tests/functional/test_rbac_team.py index a18a69a94b..6c3e68c6c1 100644 --- a/awx/main/tests/functional/test_rbac_team.py +++ b/awx/main/tests/functional/test_rbac_team.py @@ -92,7 +92,7 @@ def test_team_accessible_by(team, user, project): u = user('team_member', False) team.member_role.children.add(project.use_role) - assert team in project.read_role + assert list(Project.accessible_objects(team, 'read_role')) == [project] assert u not in project.read_role team.member_role.members.add(u) diff --git a/awx/main/tests/functional/test_rbac_user.py b/awx/main/tests/functional/test_rbac_user.py index 54a1cd57fe..10ca851bbe 100644 --- a/awx/main/tests/functional/test_rbac_user.py +++ b/awx/main/tests/functional/test_rbac_user.py @@ -4,7 +4,7 @@ from unittest import mock from django.test import TransactionTestCase from awx.main.access import UserAccess, RoleAccess, TeamAccess -from awx.main.models import User, Organization, Inventory, Role +from awx.main.models import User, Organization, Inventory, get_system_auditor_role class TestSysAuditorTransactional(TransactionTestCase): @@ -18,7 +18,8 @@ class TestSysAuditorTransactional(TransactionTestCase): def test_auditor_caching(self): rando = self.rando() - with self.assertNumQueries(1): + get_system_auditor_role() # pre-create role, normally done by migrations + with self.assertNumQueries(2): v = rando.is_system_auditor assert not v with self.assertNumQueries(0): @@ -153,34 +154,3 @@ def test_org_admin_cannot_delete_member_attached_to_other_group(org_admin, org_m access = UserAccess(org_admin) other_org.member_role.members.add(org_member) assert not access.can_delete(org_member) - - -@pytest.mark.parametrize('reverse', (True, False)) -@pytest.mark.django_db -def test_consistency_of_is_superuser_flag(reverse): - users = [User.objects.create(username='rando_{}'.format(i)) for i in range(2)] - for u in users: - assert u.is_superuser is False - - system_admin = Role.singleton('system_administrator') - if reverse: - for u in users: - u.roles.add(system_admin) - else: - system_admin.members.add(*[u.id for u in users]) # like .add(42, 54) - - for u in users: - u.refresh_from_db() - assert u.is_superuser is True - - users[0].roles.clear() - for u in users: - u.refresh_from_db() - assert users[0].is_superuser is False - assert users[1].is_superuser is True - - system_admin.members.clear() - - for u in users: - u.refresh_from_db() - assert u.is_superuser is False diff --git a/awx/main/tests/functional/test_routing.py b/awx/main/tests/functional/test_routing.py new file mode 100644 index 0000000000..a9d758da2b --- /dev/null +++ b/awx/main/tests/functional/test_routing.py @@ -0,0 +1,90 @@ +import pytest + +from django.contrib.auth.models import AnonymousUser + +from channels.routing import ProtocolTypeRouter +from channels.testing.websocket import WebsocketCommunicator + + +from awx.main.consumers import WebsocketSecretAuthHelper + + +@pytest.fixture +def application(): + # code in routing hits the db on import because .. 
settings cache
+    from awx.main.routing import application_func
+
+    yield application_func(ProtocolTypeRouter)
+
+
+@pytest.fixture
+def websocket_server_generator(application):
+    def fn(endpoint):
+        return WebsocketCommunicator(application, endpoint)
+
+    return fn
+
+
+@pytest.mark.asyncio
+@pytest.mark.django_db
+class TestWebsocketRelay:
+    @pytest.fixture
+    def websocket_relay_secret_generator(self, settings):
+        def fn(secret, set_broadcast_websocket_secret=False):
+            # Pin the shared secret while constructing the auth header, then
+            # optionally restore the previous value so the server side no
+            # longer matches (used by test_wrong_secret below).
+            secret_backup = settings.BROADCAST_WEBSOCKET_SECRET
+            settings.BROADCAST_WEBSOCKET_SECRET = secret
+            res = ('secret'.encode('utf-8'), WebsocketSecretAuthHelper.construct_secret().encode('utf-8'))
+            if set_broadcast_websocket_secret is False:
+                settings.BROADCAST_WEBSOCKET_SECRET = secret_backup
+            return res
+
+        return fn
+
+    @pytest.fixture
+    def websocket_relay_secret(self, settings, websocket_relay_secret_generator):
+        return websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=True)
+
+    async def test_authorized(self, websocket_server_generator, websocket_relay_secret):
+        server = websocket_server_generator('/websocket/relay/')
+
+        server.scope['headers'] = (websocket_relay_secret,)
+        connected, _ = await server.connect()
+        assert connected is True
+
+    async def test_not_authorized(self, websocket_server_generator):
+        server = websocket_server_generator('/websocket/relay/')
+        connected, _ = await server.connect()
+        assert connected is False, "Connected to the relay websocket without auth; we expected the client to be denied."
+
+    async def test_wrong_secret(self, websocket_server_generator, websocket_relay_secret_generator):
+        server = websocket_server_generator('/websocket/relay/')
+
+        server.scope['headers'] = (websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=False),)
+        connected, _ = await server.connect()
+        assert connected is False
+
+
+@pytest.mark.asyncio
+@pytest.mark.django_db
+class TestWebsocketEventConsumer:
+    async def test_unauthorized_anonymous(self, websocket_server_generator):
+        server = websocket_server_generator('/websocket/')
+
+        server.scope['user'] = AnonymousUser()
+        connected, _ = await server.connect()
+        assert connected is False, "Anonymous user should NOT be allowed to login."
+
+    @pytest.mark.skip(reason="Ran out of coding time.")
+    async def test_authorized(self, websocket_server_generator, application, admin):
+        server = websocket_server_generator('/websocket/')
+
+        """
+        I ran out of time. Here is what I was thinking ...
+        Inject a valid session into the cookies in the header
+
+        server.scope['headers'] = (
+            (b'cookie', ...),
+        )
+        """
+        connected, _ = await server.connect()
+        assert connected is True, "User should be allowed in via cookie auth using a session key in the cookies"
diff --git a/awx/main/tests/functional/test_teams.py b/awx/main/tests/functional/test_teams.py
deleted file mode 100644
index eda57579ce..0000000000
--- a/awx/main/tests/functional/test_teams.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import pytest
-
-
-@pytest.mark.django_db()
-def test_admin_not_member(team):
-    """Test to ensure we don't add admin_role as a parent to team.member_role, as
-    this creates a cycle with organization administration, which we've decided
-    to remove support for
-
-    (2016-06-16) I think this might have been resolved. I'm asserting
-    this to be true in the mean time.
- """ - - assert team.admin_role.is_ancestor_of(team.member_role) is True diff --git a/awx/main/tests/settings_for_test.py b/awx/main/tests/settings_for_test.py index 373489de37..5634494c33 100644 --- a/awx/main/tests/settings_for_test.py +++ b/awx/main/tests/settings_for_test.py @@ -1,11 +1,6 @@ # Python -from unittest import mock import uuid -# patch python-ldap -with mock.patch('__main__.__builtins__.dir', return_value=[]): - import ldap # NOQA - # Load development settings for base variables. from awx.settings.development import * # NOQA diff --git a/awx/main/tests/unit/api/test_filters.py b/awx/main/tests/unit/api/test_filters.py index 7d6501a871..29c2e3a93d 100644 --- a/awx/main/tests/unit/api/test_filters.py +++ b/awx/main/tests/unit/api/test_filters.py @@ -3,15 +3,13 @@ import pytest # Django -from django.core.exceptions import FieldDoesNotExist +from rest_framework.exceptions import PermissionDenied -from rest_framework.exceptions import PermissionDenied, ParseError +from ansible_base.rest_filters.rest_framework.field_lookup_backend import FieldLookupBackend -from awx.api.filters import FieldLookupBackend, OrderByBackend, get_field_from_path from awx.main.models import ( AdHocCommand, ActivityStream, - Credential, Job, JobTemplate, SystemJob, @@ -20,88 +18,11 @@ from awx.main.models import ( WorkflowJob, WorkflowJobTemplate, WorkflowJobOptions, - InventorySource, - JobEvent, ) from awx.main.models.oauth import OAuth2Application from awx.main.models.jobs import JobOptions -def test_related(): - field_lookup = FieldLookupBackend() - lookup = '__'.join(['inventory', 'organization', 'pk']) - field, new_lookup = field_lookup.get_field_from_lookup(InventorySource, lookup) - print(field) - print(new_lookup) - - -def test_invalid_filter_key(): - field_lookup = FieldLookupBackend() - # FieldDoesNotExist is caught and converted to ParseError by filter_queryset - with pytest.raises(FieldDoesNotExist) as excinfo: - field_lookup.value_to_python(JobEvent, 'event_data.task_action', 'foo') - assert 'has no field named' in str(excinfo) - - -def test_invalid_field_hop(): - with pytest.raises(ParseError) as excinfo: - get_field_from_path(Credential, 'organization__description__user') - assert 'No related model for' in str(excinfo) - - -def test_invalid_order_by_key(): - field_order_by = OrderByBackend() - with pytest.raises(ParseError) as excinfo: - [f for f in field_order_by._validate_ordering_fields(JobEvent, ('event_data.task_action',))] - assert 'has no field named' in str(excinfo) - - -@pytest.mark.parametrize(u"empty_value", [u'', '']) -def test_empty_in(empty_value): - field_lookup = FieldLookupBackend() - with pytest.raises(ValueError) as excinfo: - field_lookup.value_to_python(JobTemplate, 'project__name__in', empty_value) - assert 'empty value for __in' in str(excinfo.value) - - -@pytest.mark.parametrize(u"valid_value", [u'foo', u'foo,']) -def test_valid_in(valid_value): - field_lookup = FieldLookupBackend() - value, new_lookup, _ = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value) - assert 'foo' in value - - -def test_invalid_field(): - invalid_field = u"ヽヾ" - field_lookup = FieldLookupBackend() - with pytest.raises(ValueError) as excinfo: - field_lookup.value_to_python(WorkflowJobTemplate, invalid_field, 'foo') - assert 'is not an allowed field name. Must be ascii encodable.' 
in str(excinfo.value) - - -def test_valid_iexact(): - field_lookup = FieldLookupBackend() - value, new_lookup, _ = field_lookup.value_to_python(JobTemplate, 'project__name__iexact', 'foo') - assert 'foo' in value - - -def test_invalid_iexact(): - field_lookup = FieldLookupBackend() - with pytest.raises(ValueError) as excinfo: - field_lookup.value_to_python(Job, 'id__iexact', '1') - assert 'is not a text field and cannot be filtered by case-insensitive search' in str(excinfo.value) - - -@pytest.mark.parametrize('lookup_suffix', ['', 'contains', 'startswith', 'in']) -@pytest.mark.parametrize('password_field', Credential.PASSWORD_FIELDS) -def test_filter_on_password_field(password_field, lookup_suffix): - field_lookup = FieldLookupBackend() - lookup = '__'.join(filter(None, [password_field, lookup_suffix])) - with pytest.raises(PermissionDenied) as excinfo: - field, new_lookup = field_lookup.get_field_from_lookup(Credential, lookup) - assert 'not allowed' in str(excinfo.value) - - @pytest.mark.parametrize( 'model, query', [ @@ -128,10 +49,3 @@ def test_filter_sensitive_fields_and_relations(model, query): with pytest.raises(PermissionDenied) as excinfo: field, new_lookup = field_lookup.get_field_from_lookup(model, query) assert 'not allowed' in str(excinfo.value) - - -def test_looping_filters_prohibited(): - field_lookup = FieldLookupBackend() - with pytest.raises(ParseError) as loop_exc: - field_lookup.get_field_from_lookup(Job, 'job_events__job__job_events') - assert 'job_events' in str(loop_exc.value) diff --git a/awx/main/tests/unit/commands/test_dump_auth_config.py b/awx/main/tests/unit/commands/test_dump_auth_config.py new file mode 100644 index 0000000000..96f6aeb865 --- /dev/null +++ b/awx/main/tests/unit/commands/test_dump_auth_config.py @@ -0,0 +1,122 @@ +from io import StringIO +import json +from django.core.management import call_command +from django.test import TestCase, override_settings + + +settings_dict = { + "SOCIAL_AUTH_SAML_SP_ENTITY_ID": "SP_ENTITY_ID", + "SOCIAL_AUTH_SAML_SP_PUBLIC_CERT": "SP_PUBLIC_CERT", + "SOCIAL_AUTH_SAML_SP_PRIVATE_KEY": "SP_PRIVATE_KEY", + "SOCIAL_AUTH_SAML_ORG_INFO": "ORG_INFO", + "SOCIAL_AUTH_SAML_TECHNICAL_CONTACT": "TECHNICAL_CONTACT", + "SOCIAL_AUTH_SAML_SUPPORT_CONTACT": "SUPPORT_CONTACT", + "SOCIAL_AUTH_SAML_SP_EXTRA": "SP_EXTRA", + "SOCIAL_AUTH_SAML_SECURITY_CONFIG": "SECURITY_CONFIG", + "SOCIAL_AUTH_SAML_EXTRA_DATA": "EXTRA_DATA", + "SOCIAL_AUTH_SAML_ENABLED_IDPS": { + "Keycloak": { + "attr_last_name": "last_name", + "attr_groups": "groups", + "attr_email": "email", + "attr_user_permanent_id": "name_id", + "attr_username": "username", + "entity_id": "https://example.com/auth/realms/awx", + "url": "https://example.com/auth/realms/awx/protocol/saml", + "x509cert": "-----BEGIN 
CERTIFICATE-----\nMIIDDjCCAfYCCQCPBeVvpo8+VzANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEMMAoGA1UECgwDYXd4MQ4w\nDAYDVQQDDAVsb2NhbDAeFw0yNDAxMTgxNDA4MzFaFw0yNTAxMTcxNDA4MzFaMEkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQwwCgYD\nVQQKDANhd3gxDjAMBgNVBAMMBWxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAzouj93oyFXsHEABdPESh3CYpp5QJJBM4TLYIIolk6PFOFIVwBuFY\nfExi5w7Hh4A42lPM6RkrT+u3h7LV39H9MRUfqygOSmaxICTOI0sU9ROHc44fWWzN\n756OP4B5zSiqG82q8X7nYVkcID+2F/3ekPLMOlWn53OrcdfKKDIcqavoTkQJefc2\nggXU3WgVCxGki/qCm+e5cZ1Cpl/ykSLOT8dWMEzDd12kin66zJ3KYz9F2Q5kQTh4\nKRAChnBBoEqzOfENHEAaHALiXOlVSy61VcLbtvskRMMwBtsydlnd9n/HGnktgrid\n3Ca0z5wBTHWjAOBvCKxKJuDa+jmyHEnpcQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB\nAQBXvmyPWgXhC26cHYJBgQqj57dZ+n7p00kM1J+27oDMjGmbmX+XIKXLWazw/rG3\ngDjw9MXI2tVCrQMX0ohjphaULXhb/VBUPDOiW+k7C6AB3nZySFRflcR3cM4f83zF\nMoBd0549h5Red4p72FeOKNJRTN8YO4ooH9YNh5g0FQkgqn7fV9w2CNlomeKIW9zP\nm8tjFw0cJUk2wEYBVl8O7ko5rgNlzhkLoZkMvJhKa99AQJA6MAdyoLl1lv56Kq4X\njk+mMEiz9SaInp+ILQ1uQxZEwuC7DoGRW76rV4Fnie6+DLft4WKZfX1497mx8NV3\noR0abutJaKnCj07dwRu4/EsK\n-----END CERTIFICATE-----", + "attr_first_name": "first_name", + } + }, + "SOCIAL_AUTH_SAML_CALLBACK_URL": "CALLBACK_URL", + "AUTH_LDAP_1_SERVER_URI": "SERVER_URI", + "AUTH_LDAP_1_BIND_DN": "BIND_DN", + "AUTH_LDAP_1_BIND_PASSWORD": "BIND_PASSWORD", + "AUTH_LDAP_1_GROUP_SEARCH": ["GROUP_SEARCH"], + "AUTH_LDAP_1_GROUP_TYPE": "string object", + "AUTH_LDAP_1_GROUP_TYPE_PARAMS": {"member_attr": "member", "name_attr": "cn"}, + "AUTH_LDAP_1_USER_DN_TEMPLATE": "USER_DN_TEMPLATE", + "AUTH_LDAP_1_USER_SEARCH": ["USER_SEARCH"], + "AUTH_LDAP_1_USER_ATTR_MAP": { + "email": "email", + "last_name": "last_name", + "first_name": "first_name", + }, + "AUTH_LDAP_1_CONNECTION_OPTIONS": {}, + "AUTH_LDAP_1_START_TLS": None, +} + + +@override_settings(**settings_dict) +class TestDumpAuthConfigCommand(TestCase): + def setUp(self): + super().setUp() + self.expected_config = [ + { + "type": "awx.authentication.authenticator_plugins.saml", + "name": "Keycloak", + "enabled": True, + "create_objects": True, + "users_unique": False, + "remove_users": True, + "configuration": { + "SP_ENTITY_ID": "SP_ENTITY_ID", + "SP_PUBLIC_CERT": "SP_PUBLIC_CERT", + "SP_PRIVATE_KEY": "SP_PRIVATE_KEY", + "ORG_INFO": "ORG_INFO", + "TECHNICAL_CONTACT": "TECHNICAL_CONTACT", + "SUPPORT_CONTACT": "SUPPORT_CONTACT", + "SP_EXTRA": "SP_EXTRA", + "SECURITY_CONFIG": "SECURITY_CONFIG", + "EXTRA_DATA": "EXTRA_DATA", + "ENABLED_IDPS": { + "Keycloak": { + "attr_last_name": "last_name", + "attr_groups": "groups", + "attr_email": "email", + "attr_user_permanent_id": "name_id", + "attr_username": "username", + "entity_id": "https://example.com/auth/realms/awx", + "url": "https://example.com/auth/realms/awx/protocol/saml", + "x509cert": "-----BEGIN 
CERTIFICATE-----\nMIIDDjCCAfYCCQCPBeVvpo8+VzANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEMMAoGA1UECgwDYXd4MQ4w\nDAYDVQQDDAVsb2NhbDAeFw0yNDAxMTgxNDA4MzFaFw0yNTAxMTcxNDA4MzFaMEkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQwwCgYD\nVQQKDANhd3gxDjAMBgNVBAMMBWxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAzouj93oyFXsHEABdPESh3CYpp5QJJBM4TLYIIolk6PFOFIVwBuFY\nfExi5w7Hh4A42lPM6RkrT+u3h7LV39H9MRUfqygOSmaxICTOI0sU9ROHc44fWWzN\n756OP4B5zSiqG82q8X7nYVkcID+2F/3ekPLMOlWn53OrcdfKKDIcqavoTkQJefc2\nggXU3WgVCxGki/qCm+e5cZ1Cpl/ykSLOT8dWMEzDd12kin66zJ3KYz9F2Q5kQTh4\nKRAChnBBoEqzOfENHEAaHALiXOlVSy61VcLbtvskRMMwBtsydlnd9n/HGnktgrid\n3Ca0z5wBTHWjAOBvCKxKJuDa+jmyHEnpcQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB\nAQBXvmyPWgXhC26cHYJBgQqj57dZ+n7p00kM1J+27oDMjGmbmX+XIKXLWazw/rG3\ngDjw9MXI2tVCrQMX0ohjphaULXhb/VBUPDOiW+k7C6AB3nZySFRflcR3cM4f83zF\nMoBd0549h5Red4p72FeOKNJRTN8YO4ooH9YNh5g0FQkgqn7fV9w2CNlomeKIW9zP\nm8tjFw0cJUk2wEYBVl8O7ko5rgNlzhkLoZkMvJhKa99AQJA6MAdyoLl1lv56Kq4X\njk+mMEiz9SaInp+ILQ1uQxZEwuC7DoGRW76rV4Fnie6+DLft4WKZfX1497mx8NV3\noR0abutJaKnCj07dwRu4/EsK\n-----END CERTIFICATE-----", + "attr_first_name": "first_name", + } + }, + "CALLBACK_URL": "CALLBACK_URL", + "IDP_URL": "https://example.com/auth/realms/awx/protocol/saml", + "IDP_X509_CERT": "-----BEGIN CERTIFICATE-----\nMIIDDjCCAfYCCQCPBeVvpo8+VzANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEMMAoGA1UECgwDYXd4MQ4w\nDAYDVQQDDAVsb2NhbDAeFw0yNDAxMTgxNDA4MzFaFw0yNTAxMTcxNDA4MzFaMEkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQwwCgYD\nVQQKDANhd3gxDjAMBgNVBAMMBWxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAzouj93oyFXsHEABdPESh3CYpp5QJJBM4TLYIIolk6PFOFIVwBuFY\nfExi5w7Hh4A42lPM6RkrT+u3h7LV39H9MRUfqygOSmaxICTOI0sU9ROHc44fWWzN\n756OP4B5zSiqG82q8X7nYVkcID+2F/3ekPLMOlWn53OrcdfKKDIcqavoTkQJefc2\nggXU3WgVCxGki/qCm+e5cZ1Cpl/ykSLOT8dWMEzDd12kin66zJ3KYz9F2Q5kQTh4\nKRAChnBBoEqzOfENHEAaHALiXOlVSy61VcLbtvskRMMwBtsydlnd9n/HGnktgrid\n3Ca0z5wBTHWjAOBvCKxKJuDa+jmyHEnpcQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB\nAQBXvmyPWgXhC26cHYJBgQqj57dZ+n7p00kM1J+27oDMjGmbmX+XIKXLWazw/rG3\ngDjw9MXI2tVCrQMX0ohjphaULXhb/VBUPDOiW+k7C6AB3nZySFRflcR3cM4f83zF\nMoBd0549h5Red4p72FeOKNJRTN8YO4ooH9YNh5g0FQkgqn7fV9w2CNlomeKIW9zP\nm8tjFw0cJUk2wEYBVl8O7ko5rgNlzhkLoZkMvJhKa99AQJA6MAdyoLl1lv56Kq4X\njk+mMEiz9SaInp+ILQ1uQxZEwuC7DoGRW76rV4Fnie6+DLft4WKZfX1497mx8NV3\noR0abutJaKnCj07dwRu4/EsK\n-----END CERTIFICATE-----", + "IDP_ENTITY_ID": "https://example.com/auth/realms/awx", + "IDP_ATTR_EMAIL": "email", + "IDP_GROUPS": "groups", + "IDP_ATTR_USERNAME": "username", + "IDP_ATTR_LAST_NAME": "last_name", + "IDP_ATTR_FIRST_NAME": "first_name", + "IDP_ATTR_USER_PERMANENT_ID": "name_id", + }, + }, + { + "type": "awx.authentication.authenticator_plugins.ldap", + "name": "1", + "enabled": True, + "create_objects": True, + "users_unique": False, + "remove_users": True, + "configuration": { + "SERVER_URI": "SERVER_URI", + "BIND_DN": "BIND_DN", + "BIND_PASSWORD": "BIND_PASSWORD", + "CONNECTION_OPTIONS": {}, + "GROUP_TYPE": "str", + "GROUP_TYPE_PARAMS": {"member_attr": "member", "name_attr": "cn"}, + "GROUP_SEARCH": ["GROUP_SEARCH"], + "START_TLS": None, + "USER_DN_TEMPLATE": "USER_DN_TEMPLATE", + "USER_ATTR_MAP": {"email": "email", "last_name": "last_name", "first_name": "first_name"}, + "USER_SEARCH": ["USER_SEARCH"], + }, + }, + ] + + def test_json_returned_from_cmd(self): + output = StringIO() + call_command("dump_auth_config", stdout=output) + assert json.loads(output.getvalue()) == self.expected_config diff --git 
a/awx/main/tests/unit/models/test_receptor_address.py b/awx/main/tests/unit/models/test_receptor_address.py
new file mode 100644
index 0000000000..f18e1a9018
--- /dev/null
+++ b/awx/main/tests/unit/models/test_receptor_address.py
@@ -0,0 +1,30 @@
+from awx.main.models import ReceptorAddress
+import pytest
+
+
+@pytest.mark.parametrize(
+    'address, protocol, port, websocket_path, expected',
+    [
+        ('foo', 'tcp', 27199, '', 'foo:27199'),
+        ('bar', 'ws', 6789, '', 'wss://bar:6789'),
+        ('mal', 'ws', 6789, 'path', 'wss://mal:6789/path'),
+        ('example.com', 'ws', 443, 'path', 'wss://example.com:443/path'),
+    ],
+)
+def test_get_full_address(address, protocol, port, websocket_path, expected):
+    receptor_address = ReceptorAddress(address=address, protocol=protocol, port=port, websocket_path=websocket_path)
+    assert receptor_address.get_full_address() == expected
+
+
+@pytest.mark.parametrize(
+    'protocol, expected',
+    [
+        ('tcp', 'tcp-peer'),
+        ('ws', 'ws-peer'),
+        ('wss', 'ws-peer'),
+        ('foo', None),
+    ],
+)
+def test_get_peer_type(protocol, expected):
+    receptor_address = ReceptorAddress(protocol=protocol)
+    assert receptor_address.get_peer_type() == expected
diff --git a/awx/main/tests/unit/tasks/test_system.py b/awx/main/tests/unit/tasks/test_system.py
new file mode 100644
index 0000000000..c567dc4833
--- /dev/null
+++ b/awx/main/tests/unit/tasks/test_system.py
@@ -0,0 +1,62 @@
+import pytest
+from unittest.mock import MagicMock, patch
+from awx.main.tasks.system import update_inventory_computed_fields
+from awx.main.models import Inventory
+from django.db import DatabaseError
+
+
+@pytest.fixture
+def mock_logger():
+    with patch("awx.main.tasks.system.logger") as logger:
+        yield logger
+
+
+@pytest.fixture
+def mock_inventory():
+    return MagicMock(spec=Inventory)
+
+
+def test_update_inventory_computed_fields_existing_inventory(mock_logger, mock_inventory):
+    # Mocking the Inventory.objects.filter method to return a non-empty queryset
+    with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
+        mock_filter.return_value.exists.return_value = True
+        mock_filter.return_value.__getitem__.return_value = mock_inventory
+
+        # Mocking the update_computed_fields method
+        with patch.object(mock_inventory, "update_computed_fields") as mock_update_computed_fields:
+            update_inventory_computed_fields(1)
+
+            # Assertions
+            mock_filter.assert_called_once_with(id=1)
+            mock_update_computed_fields.assert_called_once()
+
+
+def test_update_inventory_computed_fields_missing_inventory(mock_logger):
+    # Mocking the Inventory.objects.filter method to return an empty queryset
+    with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
+        mock_filter.return_value.exists.return_value = False
+
+        update_inventory_computed_fields(1)
+
+        # Assertions
+        mock_filter.assert_called_once_with(id=1)
+        mock_logger.error.assert_called_once_with("Update Inventory Computed Fields failed due to missing inventory: 1")
+
+
+def test_update_inventory_computed_fields_database_error_nosqlstate(mock_logger, mock_inventory):
+    # Mocking the Inventory.objects.filter method to return a non-empty queryset
+    with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
+        mock_filter.return_value.exists.return_value = True
+        mock_filter.return_value.__getitem__.return_value = mock_inventory
+
+        # Mocking the update_computed_fields method
+        with patch.object(mock_inventory, "update_computed_fields") as
mock_update_computed_fields: + # Simulating the update_computed_fields method to explicitly raise a DatabaseError + mock_update_computed_fields.side_effect = DatabaseError("Some error") + + update_inventory_computed_fields(1) + + # Assertions + mock_filter.assert_called_once_with(id=1) + mock_update_computed_fields.assert_called_once() + mock_inventory.update_computed_fields.assert_called_once() diff --git a/awx/main/tests/unit/test_tasks.py b/awx/main/tests/unit/test_tasks.py index aa1e63c906..703dc72f77 100644 --- a/awx/main/tests/unit/test_tasks.py +++ b/awx/main/tests/unit/test_tasks.py @@ -1085,6 +1085,27 @@ class TestJobCredentials(TestJobExecution): assert open(env['ANSIBLE_NET_SSH_KEYFILE'], 'r').read() == self.EXAMPLE_PRIVATE_KEY assert safe_env['ANSIBLE_NET_PASSWORD'] == HIDDEN_PASSWORD + def test_terraform_cloud_credentials(self, job, private_data_dir, mock_me): + terraform = CredentialType.defaults['terraform']() + hcl_config = ''' + backend "s3" { + bucket = "s3_sample_bucket" + key = "/tf_state/" + region = "us-east-1" + } + ''' + credential = Credential(pk=1, credential_type=terraform, inputs={'configuration': hcl_config}) + credential.inputs['configuration'] = encrypt_field(credential, 'configuration') + job.credentials.add(credential) + + env = {} + safe_env = {} + credential.credential_type.inject_credential(credential, env, safe_env, [], private_data_dir) + + local_path = to_host_path(env['TF_BACKEND_CONFIG_FILE'], private_data_dir) + config = open(local_path, 'r').read() + assert config == hcl_config + def test_custom_environment_injectors_with_jinja_syntax_error(self, private_data_dir, mock_me): some_cloud = CredentialType( kind='cloud', diff --git a/awx/main/tests/unit/utils/test_common.py b/awx/main/tests/unit/utils/test_common.py index cc8f65bf93..261741ca5b 100644 --- a/awx/main/tests/unit/utils/test_common.py +++ b/awx/main/tests/unit/utils/test_common.py @@ -12,6 +12,8 @@ from unittest import mock from rest_framework.exceptions import ParseError +from ansible_base.lib.utils.models import get_type_for_model + from awx.main.utils import common from awx.api.validators import HostnameRegexValidator @@ -106,7 +108,7 @@ TEST_MODELS = [ # Cases relied on for scheduler dependent jobs list @pytest.mark.parametrize('model,name', TEST_MODELS) def test_get_type_for_model(model, name): - assert common.get_type_for_model(model) == name + assert get_type_for_model(model) == name def test_get_model_for_invalid_type(): @@ -119,6 +121,10 @@ def test_get_model_for_valid_type(model_type, model_class): assert common.get_model_for_type(model_type) == model_class +def test_is_testing(): + assert common.is_testing() is True + + @pytest.mark.parametrize("model_type,model_class", [(name, cls) for cls, name in TEST_MODELS]) def test_get_capacity_type(model_type, model_class): if model_type in ('job', 'ad_hoc_command', 'inventory_update', 'job_template'): diff --git a/awx/main/tests/unit/utils/test_filters.py b/awx/main/tests/unit/utils/test_filters.py index cf07b87d65..83d9da0e4f 100644 --- a/awx/main/tests/unit/utils/test_filters.py +++ b/awx/main/tests/unit/utils/test_filters.py @@ -68,7 +68,9 @@ class mockHost: @mock.patch('awx.main.utils.filters.get_model', return_value=mockHost()) class TestSmartFilterQueryFromString: - @mock.patch('awx.api.filters.get_fields_from_path', lambda model, path: ([model], path)) # disable field filtering, because a__b isn't a real Host field + @mock.patch( + 'ansible_base.rest_filters.rest_framework.field_lookup_backend.get_fields_from_path', lambda model, 
path, **kwargs: ([model], path) + ) # disable field filtering, because a__b isn't a real Host field @pytest.mark.parametrize( "filter_string,q_expected", [ diff --git a/awx/main/tests/unit/utils/test_receptor.py b/awx/main/tests/unit/utils/test_receptor.py index 0a7e182070..b077e8a5db 100644 --- a/awx/main/tests/unit/utils/test_receptor.py +++ b/awx/main/tests/unit/utils/test_receptor.py @@ -3,7 +3,7 @@ from awx.main.tasks.receptor import _convert_args_to_cli def test_file_cleanup_scenario(): args = _convert_args_to_cli({'exclude_strings': ['awx_423_', 'awx_582_'], 'file_pattern': '/tmp/awx_*_*'}) - assert ' '.join(args) == 'cleanup --exclude-strings=awx_423_ awx_582_ --file-pattern=/tmp/awx_*_*' + assert ' '.join(args) == 'cleanup --exclude-strings="awx_423_ awx_582_" --file-pattern=/tmp/awx_*_*' def test_image_cleanup_scenario(): @@ -17,5 +17,5 @@ def test_image_cleanup_scenario(): } ) assert ( - ' '.join(args) == 'cleanup --remove-images=quay.invalid/foo/bar:latest quay.invalid/foo/bar:devel --image-prune --process-isolation-executable=podman' + ' '.join(args) == 'cleanup --remove-images="quay.invalid/foo/bar:latest quay.invalid/foo/bar:devel" --image-prune --process-isolation-executable=podman' ) diff --git a/awx/main/utils/common.py b/awx/main/utils/common.py index 9066707d4d..bdd7465b90 100644 --- a/awx/main/utils/common.py +++ b/awx/main/utils/common.py @@ -7,6 +7,7 @@ import json import yaml import logging import time +import psycopg import os import subprocess import re @@ -23,7 +24,7 @@ from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist from django.utils.dateparse import parse_datetime from django.utils.translation import gettext_lazy as _ from django.utils.functional import cached_property -from django.db import connection, transaction, ProgrammingError, IntegrityError +from django.db import connection, DatabaseError, transaction, ProgrammingError, IntegrityError from django.db.models.fields.related import ForeignObjectRel, ManyToManyField from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor from django.db.models.query import QuerySet @@ -52,12 +53,10 @@ __all__ = [ 'get_awx_http_client_headers', 'get_awx_version', 'update_scm_url', - 'get_type_for_model', 'get_model_for_type', 'copy_model_by_class', 'copy_m2m_relationships', 'prefetch_page_capabilities', - 'to_python_boolean', 'datetime_hook', 'ignore_inventory_computed_fields', 'ignore_inventory_group_removal', @@ -110,18 +109,6 @@ def get_object_or_400(klass, *args, **kwargs): raise ParseError(*e.args) -def to_python_boolean(value, allow_none=False): - value = str(value) - if value.lower() in ('true', '1', 't'): - return True - elif value.lower() in ('false', '0', 'f'): - return False - elif allow_none and value.lower() in ('none', 'null'): - return None - else: - raise ValueError(_(u'Unable to convert "%s" to boolean') % value) - - def datetime_hook(d): new_d = {} for key, value in d.items(): @@ -150,7 +137,7 @@ def underscore_to_camelcase(s): @functools.cache def is_testing(argv=None): '''Return True if running django or py.test unit tests.''' - if 'PYTEST_CURRENT_TEST' in os.environ.keys(): + if os.environ.get('DJANGO_SETTINGS_MODULE') == 'awx.main.tests.settings_for_test': return True argv = sys.argv if argv is None else argv if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]): @@ -569,14 +556,6 @@ def copy_m2m_relationships(obj1, obj2, fields, kwargs=None): dest_field.add(*list(src_field_value.all().values_list('id', 
flat=True))) -def get_type_for_model(model): - """ - Return type name for a given model class. - """ - opts = model._meta.concrete_model._meta - return camelcase_to_underscore(opts.object_name) - - def get_model_for_type(type_name): """ Return model class for a given type name. @@ -1177,11 +1156,25 @@ def create_partition(tblname, start=None): f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} ' f'FOR VALUES FROM (\'{start_timestamp}\') TO (\'{end_timestamp}\');' ) + except (ProgrammingError, IntegrityError) as e: - if 'already exists' in str(e): - logger.info(f'Caught known error due to partition creation race: {e}') - else: - raise + cause = e.__cause__ + if cause and hasattr(cause, 'sqlstate'): + sqlstate = cause.sqlstate + sqlstate_cls = psycopg.errors.lookup(sqlstate) + + if psycopg.errors.DuplicateTable == sqlstate_cls or psycopg.errors.UniqueViolation == sqlstate_cls: + logger.info(f'Caught known error due to partition creation race: {e}') + else: + logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_cls)) + raise + except DatabaseError as e: + cause = e.__cause__ + if cause and hasattr(cause, 'sqlstate'): + sqlstate = cause.sqlstate + sqlstate_str = psycopg.errors.lookup(sqlstate) + logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str)) + raise def cleanup_new_process(func): diff --git a/awx/main/utils/db.py b/awx/main/utils/db.py index 4117c5274c..8cc6aacce9 100644 --- a/awx/main/utils/db.py +++ b/awx/main/utils/db.py @@ -1,27 +1,10 @@ # Copyright (c) 2017 Ansible by Red Hat # All Rights Reserved. -from itertools import chain from awx.settings.application_name import set_application_name from django.conf import settings -def get_all_field_names(model): - # Implements compatibility with _meta.get_all_field_names - # See: https://docs.djangoproject.com/en/1.11/ref/models/meta/#migrating-from-the-old-api - return list( - set( - chain.from_iterable( - (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) - for field in model._meta.get_fields() - # For complete backwards compatibility, you may want to exclude - # GenericForeignKey from the results. 
- if not (field.many_to_one and field.related_model is None) - ) - ) - ) - - def set_connection_name(function): set_application_name(settings.DATABASES, settings.CLUSTER_HOST_ID, function=function) diff --git a/awx/main/utils/filters.py b/awx/main/utils/filters.py index 7f9724329b..6aa882a0c4 100644 --- a/awx/main/utils/filters.py +++ b/awx/main/utils/filters.py @@ -161,7 +161,7 @@ class SmartFilter(object): else: # detect loops and restrict access to sensitive fields # this import is intentional here to avoid a circular import - from awx.api.filters import FieldLookupBackend + from ansible_base.rest_filters.rest_framework.field_lookup_backend import FieldLookupBackend FieldLookupBackend().get_field_from_lookup(Host, k) kwargs[k] = v diff --git a/awx/main/utils/formatters.py b/awx/main/utils/formatters.py index 48edd56f65..5b6b5d785d 100644 --- a/awx/main/utils/formatters.py +++ b/awx/main/utils/formatters.py @@ -3,7 +3,6 @@ from copy import copy import json -import json_log_formatter import logging import traceback import socket @@ -15,15 +14,6 @@ from django.core.serializers.json import DjangoJSONEncoder from django.conf import settings -class JobLifeCycleFormatter(json_log_formatter.JSONFormatter): - def json_record(self, message: str, extra: dict, record: logging.LogRecord): - if 'time' not in extra: - extra['time'] = now() - if record.exc_info: - extra['exc_info'] = self.formatException(record.exc_info) - return extra - - class TimeFormatter(logging.Formatter): """ Custom log formatter used for inventory imports diff --git a/awx/main/utils/named_url_graph.py b/awx/main/utils/named_url_graph.py index 9d2c0a27c9..632064f0c1 100644 --- a/awx/main/utils/named_url_graph.py +++ b/awx/main/utils/named_url_graph.py @@ -5,7 +5,6 @@ from collections import deque # Django from django.db import models from django.conf import settings -from django.contrib.contenttypes.models import ContentType NAMED_URL_RES_DILIMITER = "++" @@ -245,6 +244,8 @@ def _generate_configurations(nodes): def _dfs(configuration, model, graph, dead_ends, new_deadends, parents): + from django.contrib.contenttypes.models import ContentType + parents.add(model) fields, fk_names = configuration[model][0][:], configuration[model][1][:] adj_list = [] @@ -306,3 +307,19 @@ def generate_graph(models): def reset_counters(): for node in settings.NAMED_URL_GRAPH.values(): node.counter = 0 + + +def _customize_graph(): + from django.contrib.auth.models import User + from awx.main.models import Instance, Schedule, UnifiedJobTemplate + + for model in [Schedule, UnifiedJobTemplate]: + if model in settings.NAMED_URL_GRAPH: + settings.NAMED_URL_GRAPH[model].remove_bindings() + settings.NAMED_URL_GRAPH.pop(model) + if User not in settings.NAMED_URL_GRAPH: + settings.NAMED_URL_GRAPH[User] = GraphNode(User, ['username'], []) + settings.NAMED_URL_GRAPH[User].add_bindings() + if Instance not in settings.NAMED_URL_GRAPH: + settings.NAMED_URL_GRAPH[Instance] = GraphNode(Instance, ['hostname'], []) + settings.NAMED_URL_GRAPH[Instance].add_bindings() diff --git a/awx/main/wsrelay.py b/awx/main/wsrelay.py index 1a5f727b5d..8a1a834295 100644 --- a/awx/main/wsrelay.py +++ b/awx/main/wsrelay.py @@ -2,6 +2,7 @@ import json import logging import asyncio from typing import Dict +from copy import deepcopy import ipaddress @@ -20,7 +21,6 @@ from awx.main.analytics.broadcast_websocket import ( RelayWebsocketStats, RelayWebsocketStatsManager, ) -import awx.main.analytics.subsystem_metrics as s_metrics logger = logging.getLogger('awx.main.wsrelay') @@ -54,7 +54,6 
@@ class WebsocketRelayConnection: self.protocol = protocol self.verify_ssl = verify_ssl self.channel_layer = None - self.subsystem_metrics = s_metrics.Metrics(instance_name=name) self.producers = dict() self.connected = False @@ -304,20 +303,38 @@ class WebSocketRelayManager(object): self.stats_mgr.start() # Set up a pg_notify consumer for allowing web nodes to "provision" and "deprovision" themselves gracefully. - database_conf = settings.DATABASES['default'] - async_conn = await psycopg.AsyncConnection.connect( - dbname=database_conf['NAME'], - host=database_conf['HOST'], - user=database_conf['USER'], - password=database_conf['PASSWORD'], - port=database_conf['PORT'], - **database_conf.get("OPTIONS", {}), - ) - await async_conn.set_autocommit(True) - event_loop.create_task(self.on_ws_heartbeat(async_conn)) + database_conf = deepcopy(settings.DATABASES['default']) + database_conf['OPTIONS'] = deepcopy(database_conf.get('OPTIONS', {})) + + for k, v in settings.LISTENER_DATABASES.get('default', {}).items(): + database_conf[k] = v + for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items(): + database_conf['OPTIONS'][k] = v + + if 'PASSWORD' in database_conf: + database_conf['OPTIONS']['password'] = database_conf.pop('PASSWORD') + + task = None # Establishes a websocket connection to /websocket/relay on all API servers while True: + if not task or task.done(): + try: + async_conn = await psycopg.AsyncConnection.connect( + dbname=database_conf['NAME'], + host=database_conf['HOST'], + user=database_conf['USER'], + port=database_conf['PORT'], + **database_conf.get("OPTIONS", {}), + ) + await async_conn.set_autocommit(True) + + task = event_loop.create_task(self.on_ws_heartbeat(async_conn), name="on_ws_heartbeat") + logger.info("Creating `on_ws_heartbeat` task in event loop.") + + except Exception as e: + logger.warning(f"Failed to connect to database for pg_notify: {e}") + future_remote_hosts = self.known_hosts.keys() current_remote_hosts = self.relay_connections.keys() deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts) @@ -341,7 +358,7 @@ class WebSocketRelayManager(object): if deleted_remote_hosts: logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list") - await asyncio.gather(self.cleanup_offline_host(h) for h in deleted_remote_hosts) + await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts]) if new_remote_hosts: logger.info(f"Adding {new_remote_hosts} to websocket broadcast list") diff --git a/awx/playbooks/project_update.yml b/awx/playbooks/project_update.yml index f1b9710f19..26a4891c14 100644 --- a/awx/playbooks/project_update.yml +++ b/awx/playbooks/project_update.yml @@ -216,42 +216,59 @@ - block: - name: Fetch galaxy roles from roles/requirements.(yml/yaml) ansible.builtin.command: - cmd: "ansible-galaxy role install -r {{ item }} {{ verbosity }}" + cmd: "ansible-galaxy role install -r {{ req_file }} {{ verbosity }}" register: galaxy_result - with_fileglob: - - "{{ project_path | quote }}/roles/requirements.yaml" - - "{{ project_path | quote }}/roles/requirements.yml" + vars: + req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}" + req_candidates: + files: + - "{{ project_path | quote }}/roles/requirements.yml" + - "{{ project_path | quote }}/roles/requirements.yaml" + skip: True changed_when: "'was installed successfully' in galaxy_result.stdout" - when: roles_enabled | bool + when: + - roles_enabled | bool + - req_file tags: - install_roles - name: 
Fetch galaxy collections from collections/requirements.(yml/yaml) ansible.builtin.command: - cmd: "ansible-galaxy collection install -r {{ item }} {{ verbosity }}" + cmd: "ansible-galaxy collection install -r {{ req_file }} {{ verbosity }}" register: galaxy_collection_result - with_fileglob: - - "{{ project_path | quote }}/collections/requirements.yaml" - - "{{ project_path | quote }}/collections/requirements.yml" + vars: + req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}" + req_candidates: + files: + - "{{ project_path | quote }}/collections/requirements.yml" + - "{{ project_path | quote }}/collections/requirements.yaml" + skip: True changed_when: "'Nothing to do.' not in galaxy_collection_result.stdout" when: - "ansible_version.full is version_compare('2.9', '>=')" - collections_enabled | bool + - req_file tags: - install_collections + # requirements.yml in project root can be either "old" (roles only) or "new" (collections+roles) format - name: Fetch galaxy roles and collections from requirements.(yml/yaml) ansible.builtin.command: - cmd: "ansible-galaxy install -r {{ item }} {{ verbosity }}" + cmd: "ansible-galaxy install -r {{ req_file }} {{ verbosity }}" register: galaxy_combined_result - with_fileglob: - - "{{ project_path | quote }}/requirements.yaml" - - "{{ project_path | quote }}/requirements.yml" + vars: + req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}" + req_candidates: + files: + - "{{ project_path | quote }}/requirements.yaml" + - "{{ project_path | quote }}/requirements.yml" + skip: True changed_when: "'Nothing to do.' not in galaxy_combined_result.stdout" when: - "ansible_version.full is version_compare('2.10', '>=')" - collections_enabled | bool - roles_enabled | bool + - req_file tags: - install_collections - install_roles diff --git a/awx/resource_api.py b/awx/resource_api.py new file mode 100644 index 0000000000..2009dfab8b --- /dev/null +++ b/awx/resource_api.py @@ -0,0 +1,22 @@ +from ansible_base.resource_registry.registry import ParentResource, ResourceConfig, ServiceAPIConfig, SharedResource +from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType + +from awx.main import models + + +class APIConfig(ServiceAPIConfig): + service_type = "awx" + + +RESOURCE_LIST = ( + ResourceConfig( + models.Organization, + shared_resource=SharedResource(serializer=OrganizationType, is_provider=False), + ), + ResourceConfig(models.User, shared_resource=SharedResource(serializer=UserType, is_provider=False), name_field="username"), + ResourceConfig( + models.Team, + shared_resource=SharedResource(serializer=TeamType, is_provider=False), + parent_resources=[ParentResource(model=models.Organization, field_name="organization")], + ), +) diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 0b5e000b40..751e419730 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -11,6 +11,7 @@ from datetime import timedelta # python-ldap import ldap +from split_settings.tools import include DEBUG = True @@ -36,6 +37,18 @@ DATABASES = { } } +# Special database overrides for dispatcher connections listening to pg_notify +LISTENER_DATABASES = { + 'default': { + 'OPTIONS': { + 'keepalives': 1, + 'keepalives_idle': 5, + 'keepalives_interval': 5, + 'keepalives_count': 5, + }, + } +} + # Whether or not the deployment is a K8S-based deployment # In K8S-based deployments, instances have zero capacity - all playbook # automation is intended to flow through defined Container 
Groups that @@ -131,6 +144,9 @@ BULK_JOB_MAX_LAUNCH = 100 # Maximum number of host that can be created in 1 bulk host create BULK_HOST_MAX_CREATE = 100 +# Maximum number of hosts that can be deleted in 1 bulk host delete +BULK_HOST_MAX_DELETE = 250 + SITE_ID = 1 # Make this unique, and don't share it with anybody. @@ -336,9 +352,13 @@ INSTALLED_APPS = [ 'awx.ui', 'awx.sso', 'solo', - 'ansible_base', + 'ansible_base.rest_filters', + 'ansible_base.jwt_consumer', + 'ansible_base.resource_registry', + 'ansible_base.rbac', ] + INTERNAL_IPS = ('127.0.0.1',) MAX_PAGE_SIZE = 200 @@ -346,17 +366,12 @@ REST_FRAMEWORK = { 'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination', 'PAGE_SIZE': 25, 'DEFAULT_AUTHENTICATION_CLASSES': ( + 'ansible_base.jwt_consumer.awx.auth.AwxJWTAuthentication', 'awx.api.authentication.LoggedOAuth2Authentication', 'awx.api.authentication.SessionAuthentication', 'awx.api.authentication.LoggedBasicAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ('awx.api.permissions.ModelAccessPermission',), - 'DEFAULT_FILTER_BACKENDS': ( - 'awx.api.filters.TypeFilterBackend', - 'awx.api.filters.FieldLookupBackend', - 'rest_framework.filters.SearchFilter', - 'awx.api.filters.OrderByBackend', - ), 'DEFAULT_PARSER_CLASSES': ('awx.api.parsers.JSONParser',), 'DEFAULT_RENDERER_CLASSES': ('awx.api.renderers.DefaultJSONRenderer', 'awx.api.renderers.BrowsableAPIRenderer'), 'DEFAULT_METADATA_CLASS': 'awx.api.metadata.Metadata', @@ -483,6 +498,12 @@ CACHES = {'default': {'BACKEND': 'awx.main.cache.AWXRedisCache', 'LOCATION': 'un SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy' SOCIAL_AUTH_STORAGE = 'social_django.models.DjangoStorage' SOCIAL_AUTH_USER_MODEL = 'auth.User' +ROLE_SINGLETON_USER_RELATIONSHIP = '' +ROLE_SINGLETON_TEAM_RELATIONSHIP = '' + +# We want to short-circuit RBAC methods to grant permission to system admins and auditors +ROLE_BYPASS_SUPERUSER_FLAGS = ['is_superuser'] +ROLE_BYPASS_ACTION_FLAGS = {'view': 'is_system_auditor'} _SOCIAL_AUTH_PIPELINE_BASE = ( 'social_core.pipeline.social_auth.social_details', @@ -745,6 +766,14 @@ SATELLITE6_INSTANCE_ID_VAR = 'foreman_id,foreman.id' INSIGHTS_INSTANCE_ID_VAR = 'insights_id' INSIGHTS_EXCLUDE_EMPTY_GROUPS = False +# ---------------- +# -- Terraform State -- +# ---------------- +# TERRAFORM_ENABLED_VAR = +# TERRAFORM_ENABLED_VALUE = +TERRAFORM_INSTANCE_ID_VAR = 'id' +TERRAFORM_EXCLUDE_EMPTY_GROUPS = True + # --------------------- # ----- Custom ----- # --------------------- @@ -827,7 +856,6 @@ LOGGING = { 'json': {'()': 'awx.main.utils.formatters.LogstashFormatter'}, 'timed_import': {'()': 'awx.main.utils.formatters.TimeFormatter', 'format': '%(relativeSeconds)9.3f %(levelname)-8s %(message)s'}, 'dispatcher': {'format': '%(asctime)s %(levelname)-8s [%(guid)s] %(name)s PID:%(process)d %(message)s'}, - 'job_lifecycle': {'()': 'awx.main.utils.formatters.JobLifeCycleFormatter'}, }, # Extended below based on install scenario. You probably don't want to add something directly here. # See 'handler_config' below.
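# A condensed sketch of how the LISTENER_DATABASES override added above is
# consumed by awx/main/wsrelay.py in this patch: the listener-specific keys
# are layered over a deep copy of DATABASES['default'], and PASSWORD is
# folded into OPTIONS['password'] since psycopg accepts it as a connection
# option. The helper name `listener_connection_conf` is illustrative only,
# not something the patch defines.
from copy import deepcopy

def listener_connection_conf(databases, listener_databases):
    conf = deepcopy(databases['default'])
    conf['OPTIONS'] = deepcopy(conf.get('OPTIONS', {}))
    overrides = listener_databases.get('default', {})
    for key, value in overrides.items():
        if key != 'OPTIONS':
            conf[key] = value
    # keepalives etc. from LISTENER_DATABASES win over the base OPTIONS
    conf['OPTIONS'].update(overrides.get('OPTIONS', {}))
    if 'PASSWORD' in conf:
        conf['OPTIONS']['password'] = conf.pop('PASSWORD')
    return conf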
@@ -852,6 +880,7 @@ LOGGING = { 'loggers': { 'django': {'handlers': ['console']}, 'django.request': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'WARNING'}, + 'ansible_base': {'handlers': ['console', 'file', 'tower_warnings']}, 'daphne': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'INFO'}, 'rest_framework.request': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'WARNING', 'propagate': False}, 'py.warnings': {'handlers': ['console']}, @@ -895,7 +924,7 @@ handler_config = { 'wsrelay': {'filename': 'wsrelay.log'}, 'task_system': {'filename': 'task_system.log'}, 'rbac_migrations': {'filename': 'tower_rbac_migrations.log'}, - 'job_lifecycle': {'filename': 'job_lifecycle.log', 'formatter': 'job_lifecycle'}, + 'job_lifecycle': {'filename': 'job_lifecycle.log'}, 'rsyslog_configurer': {'filename': 'rsyslog_configurer.log'}, 'cache_clear': {'filename': 'cache_clear.log'}, 'ws_heartbeat': {'filename': 'ws_heartbeat.log'}, @@ -981,6 +1010,7 @@ MIDDLEWARE = [ 'django.contrib.auth.middleware.AuthenticationMiddleware', 'awx.main.middleware.DisableLocalAuthMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', + 'awx.main.middleware.OptionalURLPrefixPath', 'awx.sso.middleware.SocialAuthMiddleware', 'crum.CurrentRequestUserMiddleware', 'awx.main.middleware.URLModificationMiddleware', @@ -1064,3 +1094,90 @@ CLEANUP_HOST_METRICS_HARD_THRESHOLD = 36 # months # Host metric summary monthly task - last time of run HOST_METRIC_SUMMARY_TASK_LAST_TS = None HOST_METRIC_SUMMARY_TASK_INTERVAL = 7 # days + + +# TODO: cmeyers, replace with register pattern +# The register pattern is particularly nice for this because we need +# to know the process to start the thread that will be the server. +# The registration location should be the same location as we would +# call MetricsServer.start() +# Note: if we don't get to this TODO, then at least create constants +# for the service strings below. +# TODO: cmeyers, break this out into a separate django app so other +# projects can take advantage.
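# A minimal sketch of reading the per-service metrics port from the
# METRICS_SUBSYSTEM_CONFIG setting added just below; `metrics_port` is an
# illustrative helper under that assumption, not something this patch defines.
from django.conf import settings

def metrics_port(service_name):
    # e.g. metrics_port(settings.METRICS_SERVICE_DISPATCHER) -> 8015
    return settings.METRICS_SUBSYSTEM_CONFIG['server'][service_name]['port']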
+ +METRICS_SERVICE_CALLBACK_RECEIVER = 'callback_receiver' +METRICS_SERVICE_DISPATCHER = 'dispatcher' +METRICS_SERVICE_WEBSOCKETS = 'websockets' + +METRICS_SUBSYSTEM_CONFIG = { + 'server': { + METRICS_SERVICE_CALLBACK_RECEIVER: { + 'port': 8014, + }, + METRICS_SERVICE_DISPATCHER: { + 'port': 8015, + }, + METRICS_SERVICE_WEBSOCKETS: { + 'port': 8016, + }, + } +} + + +# django-ansible-base +ANSIBLE_BASE_TEAM_MODEL = 'main.Team' +ANSIBLE_BASE_ORGANIZATION_MODEL = 'main.Organization' +ANSIBLE_BASE_RESOURCE_CONFIG_MODULE = 'awx.resource_api' +ANSIBLE_BASE_PERMISSION_MODEL = 'main.Permission' + +from ansible_base.lib import dynamic_config # noqa: E402 + +include(os.path.join(os.path.dirname(dynamic_config.__file__), 'dynamic_settings.py')) + +# Add a postfix to the API URL patterns +# example if set to '' API pattern will be /api +# example if set to 'controller' API pattern will be /api AND /api/controller +OPTIONAL_API_URLPATTERN_PREFIX = '' + +# Use AWX base view, to give 401 on unauthenticated requests +ANSIBLE_BASE_CUSTOM_VIEW_PARENT = 'awx.api.generics.APIView' + +# Settings for the ansible_base RBAC system + +# Only used internally, names of the managed RoleDefinitions to create +ANSIBLE_BASE_ROLE_PRECREATE = { + 'object_admin': '{cls.__name__} Admin', + 'org_admin': 'Organization Admin', + 'org_children': 'Organization {cls.__name__} Admin', + 'special': '{cls.__name__} {action}', +} + +# Name for auto-created roles that give users permissions to what they create +ANSIBLE_BASE_ROLE_CREATOR_NAME = '{cls.__name__} Creator' + +# Use the new Gateway RBAC system for evaluations? You should. We will remove the old system soon. +ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED = True + +# Permissions a user will get when creating a new item +ANSIBLE_BASE_CREATOR_DEFAULTS = ['change', 'delete', 'execute', 'use', 'adhoc', 'approve', 'update', 'view'] + +# This is a stopgap, will delete after resource registry integration +ANSIBLE_BASE_SERVICE_PREFIX = "awx" + +# Temporary, for old roles API compatibility, save child permissions at organization level +ANSIBLE_BASE_CACHE_PARENT_PERMISSIONS = True + +# Currently features are enabled to keep compatibility with old system, except custom roles +ANSIBLE_BASE_ALLOW_TEAM_ORG_ADMIN = False +# ANSIBLE_BASE_ALLOW_CUSTOM_ROLES = True +ANSIBLE_BASE_ALLOW_CUSTOM_TEAM_ROLES = False +ANSIBLE_BASE_ALLOW_SINGLETON_USER_ROLES = True +ANSIBLE_BASE_ALLOW_SINGLETON_TEAM_ROLES = False # System auditor has always been restricted to users +ANSIBLE_BASE_ALLOW_SINGLETON_ROLES_API = False # Do not allow creating user-defined system-wide roles + +# system username for django-ansible-base +SYSTEM_USERNAME = None diff --git a/awx/settings/development.py b/awx/settings/development.py index 25ec408ad9..68fa75ceb8 100644 --- a/awx/settings/development.py +++ b/awx/settings/development.py @@ -21,7 +21,7 @@ from split_settings.tools import optional, include from .defaults import * # NOQA # awx-manage shell_plus --notebook -NOTEBOOK_ARGUMENTS = ['--NotebookApp.token=', '--ip', '0.0.0.0', '--port', '8888', '--allow-root', '--no-browser'] +NOTEBOOK_ARGUMENTS = ['--NotebookApp.token=', '--ip', '0.0.0.0', '--port', '9888', '--allow-root', '--no-browser'] # print SQL queries in shell_plus SHELL_PLUS_PRINT_SQL = False @@ -72,6 +72,8 @@ AWX_CALLBACK_PROFILE = True # Allows user to trigger task managers directly for debugging and profiling purposes.
# Only works in combination with settings.SETTINGS_MODULE == 'awx.settings.development' AWX_DISABLE_TASK_MANAGERS = False + +# Needed for launching runserver in debug mode # ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!================================= # Store a snapshot of default settings at this point before loading any diff --git a/awx/sso/backends.py b/awx/sso/backends.py index 82f538771e..bd4cb6c672 100644 --- a/awx/sso/backends.py +++ b/awx/sso/backends.py @@ -68,7 +68,6 @@ class LDAPSettings(BaseLDAPSettings): class LDAPBackend(BaseLDAPBackend): - """ Custom LDAP backend for AWX. """ diff --git a/awx/sso/views.py b/awx/sso/views.py index c4ecdc7632..c23ee4428a 100644 --- a/awx/sso/views.py +++ b/awx/sso/views.py @@ -38,7 +38,7 @@ class CompleteView(BaseRedirectView): response = super(CompleteView, self).dispatch(request, *args, **kwargs) if self.request.user and self.request.user.is_authenticated: logger.info(smart_str(u"User {} logged in".format(self.request.user.username))) - response.set_cookie('userLoggedIn', 'true') + response.set_cookie('userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False)) response.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid')) return response diff --git a/awx/templates/rest_framework/api.html b/awx/templates/rest_framework/api.html index 56ef3c53af..09b6059756 100644 --- a/awx/templates/rest_framework/api.html +++ b/awx/templates/rest_framework/api.html @@ -39,7 +39,7 @@ {% else %}
  • Log in
  • {% endif %}
-   • {% trans 'API Guide' %}
+   • {% trans 'API Guide' %}
  • diff --git a/awx/ui/conf.py b/awx/ui/conf.py index 3e87620add..fa8a2ead52 100644 --- a/awx/ui/conf.py +++ b/awx/ui/conf.py @@ -59,6 +59,7 @@ register( help_text=_('Maximum number of job events for the UI to retrieve within a single request.'), category=_('UI'), category_slug='ui', + hidden=True, ) register( @@ -68,4 +69,5 @@ register( help_text=_('If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details.'), category=_('UI'), category_slug='ui', + hidden=True, ) diff --git a/awx/ui/package-lock.json b/awx/ui/package-lock.json index 86acef30b0..c785a18802 100644 --- a/awx/ui/package-lock.json +++ b/awx/ui/package-lock.json @@ -13,7 +13,7 @@ "@patternfly/react-table": "4.113.0", "ace-builds": "^1.10.1", "ansi-to-html": "0.7.2", - "axios": "0.27.2", + "axios": "^1.6.7", "d3": "7.6.1", "dagre": "^0.8.4", "dompurify": "2.4.0", @@ -5940,12 +5940,13 @@ } }, "node_modules/axios": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", - "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz", + "integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==", "dependencies": { - "follow-redirects": "^1.14.9", - "form-data": "^4.0.0" + "follow-redirects": "^1.15.4", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" } }, "node_modules/axios/node_modules/form-data": { @@ -10387,9 +10388,9 @@ } }, "node_modules/follow-redirects": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz", - "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==", + "version": "1.15.5", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz", + "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==", "funding": [ { "type": "individual", @@ -18349,6 +18350,11 @@ "node": ">= 0.10" } }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, "node_modules/pseudolocale": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz", @@ -26915,12 +26921,13 @@ "dev": true }, "axios": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", - "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz", + "integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==", "requires": { - "follow-redirects": "^1.14.9", - "form-data": "^4.0.0" + "follow-redirects": "^1.15.4", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" }, "dependencies": { "form-data": { @@ -30371,9 +30378,9 @@ } }, "follow-redirects": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz", - "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==" + "version": "1.15.5", + "resolved": 
"https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz", + "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==" }, "fork-ts-checker-webpack-plugin": { "version": "6.5.2", @@ -36325,6 +36332,11 @@ } } }, + "proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, "pseudolocale": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz", diff --git a/awx/ui/package.json b/awx/ui/package.json index dbcd1c3768..a52e987910 100644 --- a/awx/ui/package.json +++ b/awx/ui/package.json @@ -13,7 +13,7 @@ "@patternfly/react-table": "4.113.0", "ace-builds": "^1.10.1", "ansi-to-html": "0.7.2", - "axios": "0.27.2", + "axios": "^1.6.7", "d3": "7.6.1", "dagre": "^0.8.4", "dompurify": "2.4.0", diff --git a/awx/ui/src/api/index.js b/awx/ui/src/api/index.js index 93631c2137..9c78db6e36 100644 --- a/awx/ui/src/api/index.js +++ b/awx/ui/src/api/index.js @@ -29,6 +29,7 @@ import Notifications from './models/Notifications'; import Organizations from './models/Organizations'; import ProjectUpdates from './models/ProjectUpdates'; import Projects from './models/Projects'; +import ReceptorAddresses from './models/Receptor'; import Roles from './models/Roles'; import Root from './models/Root'; import Schedules from './models/Schedules'; @@ -79,6 +80,7 @@ const NotificationsAPI = new Notifications(); const OrganizationsAPI = new Organizations(); const ProjectUpdatesAPI = new ProjectUpdates(); const ProjectsAPI = new Projects(); +const ReceptorAPI = new ReceptorAddresses(); const RolesAPI = new Roles(); const RootAPI = new Root(); const SchedulesAPI = new Schedules(); @@ -130,6 +132,7 @@ export { OrganizationsAPI, ProjectUpdatesAPI, ProjectsAPI, + ReceptorAPI, RolesAPI, RootAPI, SchedulesAPI, diff --git a/awx/ui/src/api/models/Instances.js b/awx/ui/src/api/models/Instances.js index 388bb2eb4e..7730a31df8 100644 --- a/awx/ui/src/api/models/Instances.js +++ b/awx/ui/src/api/models/Instances.js @@ -8,6 +8,7 @@ class Instances extends Base { this.readHealthCheckDetail = this.readHealthCheckDetail.bind(this); this.healthCheck = this.healthCheck.bind(this); this.readInstanceGroup = this.readInstanceGroup.bind(this); + this.readReceptorAddresses = this.readReceptorAddresses.bind(this); this.deprovisionInstance = this.deprovisionInstance.bind(this); } @@ -27,6 +28,17 @@ class Instances extends Base { return this.http.get(`${this.baseUrl}${instanceId}/instance_groups/`); } + readReceptorAddresses(instanceId) { + return this.http.get(`${this.baseUrl}${instanceId}/receptor_addresses/`); + } + + updateReceptorAddresses(instanceId, data) { + return this.http.post( + `${this.baseUrl}${instanceId}/receptor_addresses/`, + data + ); + } + deprovisionInstance(instanceId) { return this.http.patch(`${this.baseUrl}${instanceId}/`, { node_state: 'deprovisioning', diff --git a/awx/ui/src/api/models/Receptor.js b/awx/ui/src/api/models/Receptor.js new file mode 100644 index 0000000000..fd63d4cf74 --- /dev/null +++ b/awx/ui/src/api/models/Receptor.js @@ -0,0 +1,14 @@ +import Base from '../Base'; + +class ReceptorAddresses extends Base { + constructor(http) { + super(http); + this.baseUrl = 'api/v2/receptor_addresses/'; + } + + updateReceptorAddresses(instanceId, data) { + return this.http.post(`${this.baseUrl}`, data); + } +} + +export 
default ReceptorAddresses; diff --git a/awx/ui/src/components/LaunchPrompt/steps/useSurveyStep.js b/awx/ui/src/components/LaunchPrompt/steps/useSurveyStep.js index a19bc46a57..ab0c9e5018 100644 --- a/awx/ui/src/components/LaunchPrompt/steps/useSurveyStep.js +++ b/awx/ui/src/components/LaunchPrompt/steps/useSurveyStep.js @@ -67,27 +67,18 @@ function getInitialValues(launchConfig, surveyConfig, resource) { const values = {}; if (surveyConfig?.spec) { surveyConfig.spec.forEach((question) => { - if (question.type === 'multiselect') { + if (resource?.extra_data && resource?.extra_data[question.variable]) { + values[`survey_${question.variable}`] = + resource.extra_data[question.variable]; + } else if (question.type === 'multiselect') { values[`survey_${question.variable}`] = question.default ? question.default.split('\n') : []; } else { values[`survey_${question.variable}`] = question.default ?? ''; } - if (resource?.extra_data) { - Object.entries(resource.extra_data).forEach(([key, value]) => { - if (key === question.variable) { - if (question.type === 'multiselect') { - values[`survey_${question.variable}`] = value; - } else { - values[`survey_${question.variable}`] = value; - } - } - }); - } }); } - return values; } diff --git a/awx/ui/src/components/PromptDetail/PromptDetail.js b/awx/ui/src/components/PromptDetail/PromptDetail.js index adb09b55f2..4f0faf3bd6 100644 --- a/awx/ui/src/components/PromptDetail/PromptDetail.js +++ b/awx/ui/src/components/PromptDetail/PromptDetail.js @@ -257,12 +257,17 @@ function PromptDetail({ numChips={5} ouiaId="prompt-job-tag-chips" totalChips={ - !overrides.job_tags || overrides.job_tags === '' + overrides.job_tags === undefined || + overrides.job_tags === null || + overrides.job_tags === '' ? 0 : overrides.job_tags.split(',').length } > - {overrides.job_tags.length > 0 && + {overrides.job_tags !== undefined && + overrides.job_tags !== null && + overrides.job_tags !== '' && + overrides.job_tags.length > 0 && overrides.job_tags.split(',').map((jobTag) => ( - {overrides.skip_tags.length > 0 && + {overrides.skip_tags !== undefined && + overrides.skip_tags !== null && + overrides.skip_tags !== '' && + overrides.skip_tags.length > 0 && overrides.skip_tags.split(',').map((skipTag) => ( { + if (!surveyValues[q.variable]) { + delete extraVars[q.variable]; + } + }); + return extraVars; +} + function ScheduleEdit({ hasDaysToKeepField, schedule, @@ -33,10 +45,12 @@ function ScheduleEdit({ surveyConfiguration, originalInstanceGroups, originalLabels, - scheduleCredentials = [] + scheduleCredentials = [], + isPromptTouched = false ) => { const { execution_environment, + extra_vars = null, instance_groups, inventory, credentials = [], @@ -48,45 +62,54 @@ function ScheduleEdit({ labels, ...submitValues } = values; - let extraVars; + const surveyValues = getSurveyValues(values); if ( - !Object.values(surveyValues).length && - surveyConfiguration?.spec?.length + isPromptTouched && + surveyConfiguration?.spec && + launchConfiguration?.ask_variables_on_launch ) { - surveyConfiguration.spec.forEach((q) => { - surveyValues[q.variable] = q.default; - }); + submitValues.extra_data = generateExtraData( + extra_vars, + surveyValues, + surveyConfiguration + ); + } else if ( + isPromptTouched && + surveyConfiguration?.spec && + !launchConfiguration?.ask_variables_on_launch + ) { + submitValues.extra_data = generateExtraData( + schedule.extra_data, + surveyValues, + surveyConfiguration + ); + } else if ( + isPromptTouched && + launchConfiguration?.ask_variables_on_launch + ) { + 
submitValues.extra_data = parseVariableField(extra_vars); } - const initialExtraVars = - launchConfiguration?.ask_variables_on_launch && - (values.extra_vars || '---'); - if (surveyConfiguration?.spec) { - extraVars = yaml.dump(mergeExtraVars(initialExtraVars, surveyValues)); - } else { - extraVars = yaml.dump(mergeExtraVars(initialExtraVars, {})); - } - submitValues.extra_data = extraVars && parseVariableField(extraVars); - if ( - Object.keys(submitValues.extra_data).length === 0 && - Object.keys(schedule.extra_data).length > 0 + isPromptTouched && + launchConfiguration?.ask_inventory_on_launch && + inventory ) { - submitValues.extra_data = schedule.extra_data; - } - delete values.extra_vars; - if (inventory) { submitValues.inventory = inventory.id; } - if (execution_environment) { + if ( + isPromptTouched && + launchConfiguration?.ask_execution_environment_on_launch && + execution_environment + ) { submitValues.execution_environment = execution_environment.id; } try { - if (launchConfiguration?.ask_labels_on_launch) { + if (isPromptTouched && launchConfiguration?.ask_labels_on_launch) { const { labelIds, error } = createNewLabels( values.labels, resource.organization @@ -120,9 +143,16 @@ function ScheduleEdit({ } } + const cleanedRequestData = Object.keys(requestData) + .filter((key) => !key.startsWith('survey_')) + .reduce((acc, key) => { + acc[key] = requestData[key]; + return acc; + }, {}); + const { data: { id: scheduleId }, - } = await SchedulesAPI.update(schedule.id, requestData); + } = await SchedulesAPI.update(schedule.id, cleanedRequestData); const { added: addedCredentials, removed: removedCredentials } = getAddedAndRemoved( diff --git a/awx/ui/src/components/Schedule/ScheduleEdit/ScheduleEdit.test.js b/awx/ui/src/components/Schedule/ScheduleEdit/ScheduleEdit.test.js index f5c6eb5aec..7d74ebd232 100644 --- a/awx/ui/src/components/Schedule/ScheduleEdit/ScheduleEdit.test.js +++ b/awx/ui/src/components/Schedule/ScheduleEdit/ScheduleEdit.test.js @@ -6,6 +6,7 @@ import { InventoriesAPI, CredentialsAPI, CredentialTypesAPI, + JobTemplatesAPI, } from 'api'; import { mountWithContexts } from '../../../../testUtils/enzymeHelpers'; import ScheduleEdit from './ScheduleEdit'; @@ -125,6 +126,7 @@ describe('', () => { id: 27, }, }); + await act(async () => { wrapper = mountWithContexts( ', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Run once schedule', - extra_data: {}, rrule: 'DTSTART;TZID=America/New_York:20200325T100000 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY', }); @@ -233,7 +234,6 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Run every 10 minutes 10 times', - extra_data: {}, rrule: 'DTSTART;TZID=America/New_York:20200325T103000 RRULE:INTERVAL=10;FREQ=MINUTELY;COUNT=10', }); @@ -262,7 +262,6 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Run every hour until date', - extra_data: {}, rrule: 'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=HOURLY;UNTIL=20200326T144500Z', }); @@ -288,7 +287,6 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Run daily', - extra_data: {}, rrule: 'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=DAILY', }); @@ -316,7 +314,6 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Run weekly on 
mon/wed/fri', - extra_data: {}, rrule: `DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=WEEKLY;BYDAY=${RRule.MO},${RRule.WE},${RRule.FR}`, }); }); @@ -344,7 +341,6 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Run on the first day of the month', - extra_data: {}, rrule: 'DTSTART;TZID=America/New_York:20200401T104500 RRULE:INTERVAL=1;FREQ=MONTHLY;BYMONTHDAY=1', }); @@ -376,7 +372,6 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Run monthly on the last Tuesday', - extra_data: {}, rrule: 'DTSTART;TZID=America/New_York:20200331T110000 RRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=-1;BYDAY=TU', }); @@ -406,7 +401,6 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Yearly on the first day of March', - extra_data: {}, rrule: 'DTSTART;TZID=America/New_York:20200301T000000 RRULE:INTERVAL=1;FREQ=YEARLY;BYMONTH=3;BYMONTHDAY=1', }); @@ -437,7 +431,6 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Yearly on the second Friday in April', - extra_data: {}, rrule: 'DTSTART;TZID=America/New_York:20200410T111500 RRULE:INTERVAL=1;FREQ=YEARLY;BYSETPOS=2;BYDAY=FR;BYMONTH=4', }); @@ -468,7 +461,6 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Yearly on the first weekday in October', - extra_data: {}, rrule: 'DTSTART;TZID=America/New_York:20200410T111500 RRULE:INTERVAL=1;FREQ=YEARLY;BYSETPOS=1;BYDAY=MO,TU,WE,TH,FR;BYMONTH=10', }); @@ -562,7 +554,6 @@ describe('', () => { wrapper.update(); expect(SchedulesAPI.update).toBeCalledWith(27, { - extra_data: {}, name: 'mock schedule', rrule: 'DTSTART;TZID=America/New_York:20210128T141500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY', @@ -633,15 +624,13 @@ describe('', () => { endDateTime: undefined, startDateTime: undefined, description: '', - extra_data: {}, name: 'foo', - inventory: 702, rrule: 'DTSTART;TZID=America/New_York:20200402T144500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY', }); }); - test('should submit survey with default values properly, without opening prompt wizard', async () => { + test('should submit update values properly when prompt is not opened', async () => { let scheduleSurveyWrapper; await act(async () => { scheduleSurveyWrapper = mountWithContexts( @@ -746,9 +735,195 @@ describe('', () => { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { description: 'test description', name: 'Run once schedule', - extra_data: { mc: 'first', text: 'text variable' }, rrule: 'DTSTART;TZID=America/New_York:20200325T100000 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY', }); }); + test('should submit update values properly when survey values change', async () => { + JobTemplatesAPI.readSurvey.mockResolvedValue({ + data: { + spec: [ + { + question_name: 'text', + question_description: '', + required: true, + type: 'text', + variable: 'text', + min: 0, + max: 1024, + default: 'text variable', + choices: '', + new_question: true, + }, + ], + }, + }); + + JobTemplatesAPI.readLaunch.mockResolvedValue({ + data: { + can_start_without_user_input: false, + passwords_needed_to_start: [], + ask_scm_branch_on_launch: false, + ask_variables_on_launch: false, + ask_tags_on_launch: false, + ask_diff_mode_on_launch: false, + ask_skip_tags_on_launch: false, + ask_job_type_on_launch: false, + ask_limit_on_launch: false, + 
ask_verbosity_on_launch: false, + ask_inventory_on_launch: true, + ask_credential_on_launch: true, + survey_enabled: true, + variables_needed_to_start: [], + credential_needed_to_start: true, + inventory_needed_to_start: true, + job_template_data: { + name: 'Demo Job Template', + id: 7, + description: '', + }, + defaults: { + extra_vars: '---', + diff_mode: false, + limit: '', + job_tags: '', + skip_tags: '', + job_type: 'run', + verbosity: 0, + inventory: { + name: null, + id: null, + }, + scm_branch: '', + credentials: [], + }, + }, + }); + + let scheduleSurveyWrapper; + await act(async () => { + scheduleSurveyWrapper = mountWithContexts( + + ); + }); + scheduleSurveyWrapper.update(); + + await act(async () => + scheduleSurveyWrapper + .find('Button[aria-label="Prompt"]') + .prop('onClick')() + ); + scheduleSurveyWrapper.update(); + expect(scheduleSurveyWrapper.find('WizardNavItem').length).toBe(4); + await act(async () => + scheduleSurveyWrapper.find('WizardFooterInternal').prop('onNext')() + ); + scheduleSurveyWrapper.update(); + await act(async () => + scheduleSurveyWrapper.find('WizardFooterInternal').prop('onNext')() + ); + scheduleSurveyWrapper.update(); + await act(async () => + scheduleSurveyWrapper + .find('input#survey-question-text') + .simulate('change', { + target: { value: 'foo', name: 'survey_text' }, + }) + ); + scheduleSurveyWrapper.update(); + await act(async () => + scheduleSurveyWrapper.find('WizardFooterInternal').prop('onNext')() + ); + scheduleSurveyWrapper.update(); + await act(async () => + scheduleSurveyWrapper.find('WizardFooterInternal').prop('onNext')() + ); + scheduleSurveyWrapper.update(); + + expect(scheduleSurveyWrapper.find('Wizard').length).toBe(0); + + await act(async () => + scheduleSurveyWrapper.find('Button[aria-label="Save"]').prop('onClick')() + ); + + expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { + description: '', + name: 'mock schedule', + inventory: 702, + extra_data: { + text: 'foo', + }, + rrule: + 'DTSTART;TZID=America/New_York:20200402T144500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY', + }); + }); }); diff --git a/awx/ui/src/components/Schedule/shared/ScheduleForm.js b/awx/ui/src/components/Schedule/shared/ScheduleForm.js index 43407d2737..15c972d5d4 100644 --- a/awx/ui/src/components/Schedule/shared/ScheduleForm.js +++ b/awx/ui/src/components/Schedule/shared/ScheduleForm.js @@ -20,6 +20,7 @@ import UnsupportedScheduleForm from './UnsupportedScheduleForm'; import parseRuleObj, { UnsupportedRRuleError } from './parseRuleObj'; import buildRuleObj from './buildRuleObj'; import buildRuleSet from './buildRuleSet'; +import mergeArraysByCredentialType from './mergeArraysByCredentialType'; const NUM_DAYS_PER_FREQUENCY = { week: 7, @@ -39,6 +40,7 @@ function ScheduleForm({ resourceDefaultCredentials, }) { const [isWizardOpen, setIsWizardOpen] = useState(false); + const [isPromptTouched, setIsPromptTouched] = useState(false); const [isSaveDisabled, setIsSaveDisabled] = useState(false); const originalLabels = useRef([]); const originalInstanceGroups = useRef([]); @@ -350,6 +352,12 @@ function ScheduleForm({ startDate: currentDate, startTime: time, timezone: schedule.timezone || now.zoneName, + credentials: mergeArraysByCredentialType( + resourceDefaultCredentials, + credentials + ), + labels: originalLabels.current, + instance_groups: originalInstanceGroups.current, }; if (hasDaysToKeepField) { @@ -485,7 +493,8 @@ function ScheduleForm({ surveyConfig, originalInstanceGroups.current, originalLabels.current, - credentials + credentials, + 
isPromptTouched ); }} validate={validate} @@ -511,6 +520,7 @@ function ScheduleForm({ onSave={() => { setIsWizardOpen(false); setIsSaveDisabled(false); + setIsPromptTouched(true); }} resourceDefaultCredentials={resourceDefaultCredentials} labels={originalLabels.current} diff --git a/awx/ui/src/components/Schedule/shared/mergeArraysByCredentialType.js b/awx/ui/src/components/Schedule/shared/mergeArraysByCredentialType.js new file mode 100644 index 0000000000..13935f930e --- /dev/null +++ b/awx/ui/src/components/Schedule/shared/mergeArraysByCredentialType.js @@ -0,0 +1,18 @@ +export default function mergeArraysByCredentialType( + defaultCredentials = [], + overrides = [] +) { + const mergedArray = [...defaultCredentials]; + + overrides.forEach((override) => { + const index = mergedArray.findIndex( + (defaultCred) => defaultCred.credential_type === override.credential_type + ); + if (index !== -1) { + mergedArray.splice(index, 1); + } + mergedArray.push(override); + }); + + return mergedArray; +} diff --git a/awx/ui/src/contexts/Session.js b/awx/ui/src/contexts/Session.js index d0a92d7523..26f915e1d5 100644 --- a/awx/ui/src/contexts/Session.js +++ b/awx/ui/src/contexts/Session.js @@ -115,8 +115,11 @@ function SessionProvider({ children }) { }, [setSessionTimeout, setSessionCountdown]); useEffect(() => { + const isRedirectCondition = (location, histLength) => + location.pathname === '/login' && histLength === 2; + const unlisten = history.listen((location, action) => { - if (action === 'POP') { + if (action === 'POP' || isRedirectCondition(location, history.length)) { setIsRedirectLinkReceived(true); } }); diff --git a/awx/ui/src/locales/fr/messages.po b/awx/ui/src/locales/fr/messages.po index be9a4721ac..2f5b8fc205 100644 --- a/awx/ui/src/locales/fr/messages.po +++ b/awx/ui/src/locales/fr/messages.po @@ -784,7 +784,7 @@ msgstr "Branche à utiliser dans l’exécution de la tâche. Projet par défaut #: screens/Inventory/shared/Inventory.helptext.js:155 msgid "Branch to use on inventory sync. Project default used if blank. Only allowed if project allow_override field is set to true." -msgstr "" +msgstr "Branche à utiliser pour la synchronisation de l'inventaire. La valeur par défaut du projet est utilisée si elle est vide. Cette option n'est autorisée que si le champ allow_override du projet est défini sur vrai." #: components/About/About.js:45 msgid "Brand Image" @@ -2832,7 +2832,7 @@ msgstr "Entrez les variables avec la syntaxe JSON ou YAML. Consultez la documen #: screens/Inventory/shared/SmartInventoryForm.js:94 msgid "Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Controller documentation for example syntax." -msgstr "" +msgstr "Entrez les variables d'inventaire en utilisant la syntaxe JSON ou YAML. Utilisez le bouton d'option pour basculer entre les deux. Référez-vous à la documentation du contrôleur Ansible pour les exemples de syntaxe." #: screens/CredentialType/CredentialTypeDetails/CredentialTypeDetails.js:87 msgid "Environment variables or extra variables that specify the values a credential type can inject." @@ -3015,7 +3015,7 @@ msgstr "Recherche exacte sur le champ d'identification." #: components/Search/RelatedLookupTypeInput.js:38 msgid "Exact search on name field." -msgstr "" +msgstr "Recherche exacte sur le champ nom." 
#: screens/Project/shared/Project.helptext.js:23 msgid "Example URLs for GIT Source Control include:" @@ -3242,7 +3242,7 @@ msgstr "Jobs ayant échoué" #: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:262 msgid "Failed to approve one or more workflow approval." -msgstr "" +msgstr "Échec de l'approbation d'une ou plusieurs validations de flux de travail." #: screens/WorkflowApproval/shared/WorkflowApprovalButton.js:56 msgid "Failed to approve {0}." @@ -3474,7 +3474,7 @@ msgstr "N'a pas réussi à supprimer {name}." #: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:263 msgid "Failed to deny one or more workflow approval." -msgstr "" +msgstr "Échec du refus d'une ou plusieurs validations de flux de travail." #: screens/WorkflowApproval/shared/WorkflowDenyButton.js:51 msgid "Failed to deny {0}." @@ -3520,7 +3520,7 @@ msgstr "Echec du lancement du Job." #: screens/Inventory/InventoryHosts/InventoryHostItem.js:121 msgid "Failed to load related groups." -msgstr "" +msgstr "Impossible de charger les groupes associés." #: screens/Instances/InstanceDetail/InstanceDetail.js:388 #: screens/Instances/InstanceList/InstanceList.js:266 @@ -3972,12 +3972,12 @@ msgstr "Demande(s) de bilan de santé soumise(s). Veuillez patienter et recharge #: screens/Instances/InstanceDetail/InstanceDetail.js:234 #: screens/Instances/InstanceList/InstanceListItem.js:242 msgid "Health checks are asynchronous tasks. See the" -msgstr "" +msgstr "Les bilans de santé sont des tâches asynchrones. Veuillez consulter la documentation pour plus d'informations." #: screens/InstanceGroup/Instances/InstanceList.js:286 #: screens/Instances/InstanceList/InstanceList.js:219 msgid "Health checks can only be run on execution nodes." -msgstr "" +msgstr "Les bilans de santé ne peuvent être exécutés que sur les nœuds d'exécution." #: components/StatusLabel/StatusLabel.js:42 msgid "Healthy" @@ -5048,7 +5048,7 @@ msgstr "Lancer" #: components/TemplateList/TemplateListItem.js:214 msgid "Launch Template" -msgstr "Lacer le modèle." +msgstr "Lancer le modèle." #: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:32 #: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:34 @@ -9637,7 +9637,7 @@ msgstr "Utilisateur" #: components/AppContainer/PageHeaderToolbar.js:160 msgid "User Details" -msgstr "Détails de l'erreur" +msgstr "Détails de l'utilisateur" #: screens/Setting/SettingList.js:121 #: screens/Setting/Settings.js:118 diff --git a/awx/ui/src/screens/Dashboard/Dashboard.js b/awx/ui/src/screens/Dashboard/Dashboard.js index 716aa322f4..87e06bdf79 100644 --- a/awx/ui/src/screens/Dashboard/Dashboard.js +++ b/awx/ui/src/screens/Dashboard/Dashboard.js @@ -80,7 +80,7 @@ function Dashboard() {

      A tech preview of the new {brandName} user
-     interface can be found here.
+     interface can be found here.

    diff --git a/awx/ui/src/screens/Instances/Instance.js b/awx/ui/src/screens/Instances/Instance.js index 6d0d1e8004..59350d9c51 100644 --- a/awx/ui/src/screens/Instances/Instance.js +++ b/awx/ui/src/screens/Instances/Instance.js @@ -12,6 +12,7 @@ import { SettingsAPI } from 'api'; import ContentLoading from 'components/ContentLoading'; import InstanceDetail from './InstanceDetail'; import InstancePeerList from './InstancePeers'; +import InstanceListenerAddressList from './InstanceListenerAddressList'; function Instance({ setBreadcrumb }) { const { me } = useConfig(); @@ -54,7 +55,12 @@ function Instance({ setBreadcrumb }) { }, [request]); if (isK8s) { - tabsArray.push({ name: t`Peers`, link: `${match.url}/peers`, id: 1 }); + tabsArray.push({ + name: t`Listener Addresses`, + link: `${match.url}/listener_addresses`, + id: 1, + }); + tabsArray.push({ name: t`Peers`, link: `${match.url}/peers`, id: 2 }); } if (isLoading) { return ; @@ -72,6 +78,14 @@ function Instance({ setBreadcrumb }) { + {isK8s && ( + + + + )} {isK8s && ( diff --git a/awx/ui/src/screens/Instances/InstanceAdd/InstanceAdd.js b/awx/ui/src/screens/Instances/InstanceAdd/InstanceAdd.js index 1c0e86400d..e9b33e1338 100644 --- a/awx/ui/src/screens/Instances/InstanceAdd/InstanceAdd.js +++ b/awx/ui/src/screens/Instances/InstanceAdd/InstanceAdd.js @@ -9,6 +9,10 @@ function InstanceAdd() { const [formError, setFormError] = useState(); const handleSubmit = async (values) => { try { + if (values.listener_port === undefined) { + values.listener_port = null; + } + const { data: { id }, } = await InstancesAPI.create(values); diff --git a/awx/ui/src/screens/Instances/InstanceAdd/InstanceAdd.test.js b/awx/ui/src/screens/Instances/InstanceAdd/InstanceAdd.test.js index 1c4d8d1d1c..ac667c37ad 100644 --- a/awx/ui/src/screens/Instances/InstanceAdd/InstanceAdd.test.js +++ b/awx/ui/src/screens/Instances/InstanceAdd/InstanceAdd.test.js @@ -36,6 +36,7 @@ describe('', () => { }); }); expect(InstancesAPI.create).toHaveBeenCalledWith({ + listener_port: null, // injected if listener_port is not set node_type: 'hop', }); expect(history.location.pathname).toBe('/instances/13/details'); diff --git a/awx/ui/src/screens/Instances/InstanceDetail/InstanceDetail.js b/awx/ui/src/screens/Instances/InstanceDetail/InstanceDetail.js index 8e60ff5d68..1c94bd158b 100644 --- a/awx/ui/src/screens/Instances/InstanceDetail/InstanceDetail.js +++ b/awx/ui/src/screens/Instances/InstanceDetail/InstanceDetail.js @@ -183,6 +183,7 @@ function InstanceDetail({ setBreadcrumb, isK8s }) { } const isHopNode = instance.node_type === 'hop'; const isExecutionNode = instance.node_type === 'execution'; + const isManaged = instance.managed; return ( <> @@ -208,33 +209,31 @@ function InstanceDetail({ setBreadcrumb, isK8s }) { + {!isManaged && instance.related?.install_bundle && ( + + + + } + /> + )} {(isExecutionNode || isHopNode) && ( - <> - {instance.related?.install_bundle && ( - - - - } - /> - )} - - + )} {!isHopNode && ( <> @@ -294,7 +293,9 @@ function InstanceDetail({ setBreadcrumb, isK8s }) { value={instance.capacity_adjustment} onChange={handleChangeValue} isDisabled={ - !config?.me?.is_superuser || !instance.enabled + !config?.me?.is_superuser || + !instance.enabled || + !isManaged } data-cy="slider" /> @@ -338,31 +339,31 @@ function InstanceDetail({ setBreadcrumb, isK8s }) { )} - {config?.me?.is_superuser && isK8s && (isExecutionNode || isHopNode) && ( - - )} - {config?.me?.is_superuser && - isK8s && - (isExecutionNode || isHopNode) && ( + {config?.me?.is_superuser && isK8s && 
!isManaged && ( + <> + - )} + + )} {isExecutionNode && (
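# A hedged sketch of exercising the receptor-address endpoints that the new
# UI models above wrap (Instances.readReceptorAddresses and the
# ReceptorAddresses model): GET /api/v2/instances/{id}/receptor_addresses/
# and POST /api/v2/receptor_addresses/. The host and token below are
# placeholders, not values from this PR.
import requests

AWX_URL = 'https://awx.example.org'  # assumption: your AWX/controller host
HEADERS = {'Authorization': 'Bearer <token>'}  # placeholder OAuth2 token

def read_receptor_addresses(instance_id):
    # Mirrors the UI's Instances.readReceptorAddresses(instanceId)
    url = f'{AWX_URL}/api/v2/instances/{instance_id}/receptor_addresses/'
    return requests.get(url, headers=HEADERS).json()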