mirror of
https://github.com/ansible/awx.git
synced 2026-02-05 11:34:43 -03:30
Compare commits
71 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
20f5b255c9 | ||
|
|
3bcf46555d | ||
|
|
94703ccf84 | ||
|
|
6cdea1909d | ||
|
|
f133580172 | ||
|
|
4b90a7fcd1 | ||
|
|
95bfedad5b | ||
|
|
1081f2d8e9 | ||
|
|
c4ab54d7f3 | ||
|
|
bcefcd8cf8 | ||
|
|
0bd057529d | ||
|
|
a82c03e2e2 | ||
|
|
447ac77535 | ||
|
|
72d0928f1b | ||
|
|
6d727d4bc4 | ||
|
|
6040e44d9d | ||
|
|
b99ce5cd62 | ||
|
|
ba8a90c55f | ||
|
|
7ee2172517 | ||
|
|
07f49f5925 | ||
|
|
376993077a | ||
|
|
48f586bac4 | ||
|
|
16dab57c63 | ||
|
|
75a71492fd | ||
|
|
e9bd99c1ff | ||
|
|
56878b4910 | ||
|
|
19ca480078 | ||
|
|
64eb963025 | ||
|
|
dc34d0887a | ||
|
|
160634fb6f | ||
|
|
9745058546 | ||
|
|
c97a48b165 | ||
|
|
259bca0113 | ||
|
|
92c2b4e983 | ||
|
|
127a0cff23 | ||
|
|
a0ef25006a | ||
|
|
50c98a52f7 | ||
|
|
4008d72af6 | ||
|
|
e72e9f94b9 | ||
|
|
9d60b0b9c6 | ||
|
|
05b58c4df6 | ||
|
|
b1b960fd17 | ||
|
|
3c8f71e559 | ||
|
|
f5922f76fa | ||
|
|
05582702c6 | ||
|
|
1d340c5b4e | ||
|
|
15925f1416 | ||
|
|
6e06a20cca | ||
|
|
bb3acbb8ad | ||
|
|
a88e47930c | ||
|
|
a0d4515ba4 | ||
|
|
770cc10a78 | ||
|
|
159dd62d84 | ||
|
|
640e5db9c6 | ||
|
|
9ed527eb26 | ||
|
|
29ad6e1eaa | ||
|
|
3e607f8964 | ||
|
|
c9d1a4d063 | ||
|
|
a290b082db | ||
|
|
6d3c22e801 | ||
|
|
1f91773a3c | ||
|
|
7b846e1e49 | ||
|
|
f7a2de8a07 | ||
|
|
194c214f03 | ||
|
|
77e30dd4b2 | ||
|
|
9d7421b9bc | ||
|
|
3b8e662916 | ||
|
|
aa3228eec9 | ||
|
|
7b0598c7d8 | ||
|
|
49832d6379 | ||
|
|
8feeb5f1fa |
28
.github/actions/awx_devel_image/action.yml
vendored
Normal file
28
.github/actions/awx_devel_image/action.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
name: Setup images for AWX
|
||||
description: Builds new awx_devel image
|
||||
inputs:
|
||||
github-token:
|
||||
description: GitHub Token for registry access
|
||||
required: true
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Get python version from Makefile
|
||||
shell: bash
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Log in to registry
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- name: Pre-pull latest devel image to warm cache
|
||||
shell: bash
|
||||
run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
|
||||
|
||||
- name: Build image for current source checkout
|
||||
shell: bash
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
|
||||
COMPOSE_TAG=${{ github.base_ref }} \
|
||||
make docker-compose-build
|
||||
28
.github/actions/run_awx_devel/action.yml
vendored
28
.github/actions/run_awx_devel/action.yml
vendored
@@ -1,12 +1,8 @@
|
||||
# This currently *always* uses the "warm build cache" image
|
||||
# We should do something to allow forcing a rebuild, probably by looking for
|
||||
# some string in the commit message or something.
|
||||
|
||||
name: Run AWX (devel environment)
|
||||
name: Run AWX docker-compose
|
||||
description: Runs AWX with `make docker-compose`
|
||||
inputs:
|
||||
github-token:
|
||||
description: GitHub Token for registry access
|
||||
description: GitHub Token to pass to awx_devel_image
|
||||
required: true
|
||||
build-ui:
|
||||
description: Should the UI be built?
|
||||
@@ -23,9 +19,10 @@ outputs:
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Get python version from Makefile
|
||||
shell: bash
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
- name: Build awx_devel image for running checks
|
||||
uses: ./.github/actions/awx_devel_image
|
||||
with:
|
||||
github-token: ${{ inputs.github-token }}
|
||||
|
||||
- name: Upgrade ansible-core
|
||||
shell: bash
|
||||
@@ -35,19 +32,6 @@ runs:
|
||||
shell: bash
|
||||
run: sudo apt-get install -y gettext
|
||||
|
||||
- name: Log in to registry
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- name: Pre-pull latest available devel image and build HEAD on top of it
|
||||
shell: bash
|
||||
run: |
|
||||
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
|
||||
COMPOSE_TAG=${{ github.base_ref }} \
|
||||
make docker-compose-build
|
||||
|
||||
- name: Start AWX
|
||||
shell: bash
|
||||
run: |
|
||||
|
||||
20
.github/workflows/ci.yml
vendored
20
.github/workflows/ci.yml
vendored
@@ -7,9 +7,6 @@ env:
|
||||
COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
|
||||
on:
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
- '.github/workflows/docs.yml'
|
||||
jobs:
|
||||
common-tests:
|
||||
name: ${{ matrix.tests.name }}
|
||||
@@ -40,16 +37,27 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Build awx_devel image for running checks
|
||||
uses: ./.github/actions/awx_devel_image
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Run check ${{ matrix.tests.name }}
|
||||
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner
|
||||
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make docker-runner
|
||||
|
||||
dev-env:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: ./.github/actions/run_awx_devel
|
||||
id: awx
|
||||
with:
|
||||
build-ui: false
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Run smoke test
|
||||
run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
|
||||
run: ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
|
||||
|
||||
awx-operator:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -159,11 +167,13 @@ jobs:
|
||||
|
||||
# Upload coverage report as artifact
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
with:
|
||||
name: coverage-${{ matrix.target-regex.name }}
|
||||
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
|
||||
|
||||
- uses: ./.github/actions/upload_awx_devel_logs
|
||||
if: always()
|
||||
with:
|
||||
log-filename: collection-integration-${{ matrix.target-regex.name }}.log
|
||||
|
||||
|
||||
3
.github/workflows/docs.yml
vendored
3
.github/workflows/docs.yml
vendored
@@ -2,9 +2,6 @@
|
||||
name: Docsite CI
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- 'docs/**'
|
||||
- '.github/workflows/docs.yml'
|
||||
jobs:
|
||||
docsite-build:
|
||||
name: docsite test build
|
||||
|
||||
2
.github/workflows/e2e_test.yml
vendored
2
.github/workflows/e2e_test.yml
vendored
@@ -26,7 +26,6 @@ jobs:
|
||||
with:
|
||||
build-ui: true
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
log-filename: e2e-${{ matrix.job }}.log
|
||||
|
||||
- name: Pull awx_cypress_base image
|
||||
run: |
|
||||
@@ -71,5 +70,6 @@ jobs:
|
||||
awx-pf-tests run --project .
|
||||
|
||||
- uses: ./.github/actions/upload_awx_devel_logs
|
||||
if: always()
|
||||
with:
|
||||
log-filename: e2e-${{ matrix.job }}.log
|
||||
|
||||
6
.github/workflows/promote.yml
vendored
6
.github/workflows/promote.yml
vendored
@@ -40,8 +40,12 @@ jobs:
|
||||
if: ${{ github.repository_owner != 'ansible' }}
|
||||
|
||||
- name: Build collection and publish to galaxy
|
||||
env:
|
||||
COLLECTION_NAMESPACE: ${{ env.collection_namespace }}
|
||||
COLLECTION_VERSION: ${{ github.event.release.tag_name }}
|
||||
COLLECTION_TEMPLATE_VERSION: true
|
||||
run: |
|
||||
COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
|
||||
make build_collection
|
||||
if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
|
||||
echo "Galaxy release already done"; \
|
||||
else \
|
||||
|
||||
5
.pip-tools.toml
Normal file
5
.pip-tools.toml
Normal file
@@ -0,0 +1,5 @@
|
||||
[tool.pip-tools]
|
||||
resolver = "backtracking"
|
||||
allow-unsafe = true
|
||||
strip-extras = true
|
||||
quiet = true
|
||||
17
Makefile
17
Makefile
@@ -6,6 +6,7 @@ DOCKER_COMPOSE ?= docker-compose
|
||||
OFFICIAL ?= no
|
||||
NODE ?= node
|
||||
NPM_BIN ?= npm
|
||||
KIND_BIN ?= $(shell which kind)
|
||||
CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
||||
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
MANAGEMENT_COMMAND ?= awx-manage
|
||||
@@ -78,7 +79,7 @@ I18N_FLAG_FILE = .i18n_built
|
||||
sdist \
|
||||
ui-release ui-devel \
|
||||
VERSION PYTHON_VERSION docker-compose-sources \
|
||||
.git/hooks/pre-commit github_ci_setup github_ci_runner
|
||||
.git/hooks/pre-commit
|
||||
|
||||
clean-tmp:
|
||||
rm -rf tmp/
|
||||
@@ -323,21 +324,10 @@ test:
|
||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
||||
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
||||
|
||||
## Login to Github container image registry, pull image, then build image.
|
||||
github_ci_setup:
|
||||
# GITHUB_ACTOR is automatic github actions env var
|
||||
# CI_GITHUB_TOKEN is defined in .github files
|
||||
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
|
||||
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
|
||||
$(MAKE) docker-compose-build
|
||||
|
||||
## Runs AWX_DOCKER_CMD inside a new docker container.
|
||||
docker-runner:
|
||||
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
|
||||
|
||||
## Builds image and runs AWX_DOCKER_CMD in it, mainly for .github checks.
|
||||
github_ci_runner: github_ci_setup docker-runner
|
||||
|
||||
test_collection:
|
||||
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -664,6 +654,9 @@ awx-kube-dev-build: Dockerfile.kube-dev
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
|
||||
|
||||
|
||||
kind-dev-load: awx-kube-dev-build
|
||||
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
|
||||
|
||||
# Translation TASKS
|
||||
# --------------------------------------
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
[](https://github.com/ansible/awx/actions/workflows/ci.yml) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
|
||||
[](https://libera.chat)
|
||||
[](https://chat.ansible.im/#/welcome) [](https://forum.ansible.com)
|
||||
|
||||
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
|
||||
|
||||
@@ -30,12 +30,12 @@ If you're experiencing a problem that you feel is a bug in AWX or have ideas for
|
||||
Code of Conduct
|
||||
---------------
|
||||
|
||||
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
|
||||
Get Involved
|
||||
------------
|
||||
|
||||
We welcome your feedback and ideas. Here's how to reach us with feedback and questions:
|
||||
|
||||
- Join the `#ansible-awx` channel on irc.libera.chat
|
||||
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
|
||||
- Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com)
|
||||
- Join the [Ansible Community Forum](https://forum.ansible.com)
|
||||
|
||||
@@ -52,39 +52,14 @@ try:
|
||||
except ImportError: # pragma: no cover
|
||||
MODE = 'production'
|
||||
|
||||
import hashlib
|
||||
|
||||
try:
|
||||
import django # noqa: F401
|
||||
|
||||
HAS_DJANGO = True
|
||||
except ImportError:
|
||||
HAS_DJANGO = False
|
||||
pass
|
||||
else:
|
||||
from django.db.backends.base import schema
|
||||
from django.db.models import indexes
|
||||
from django.db.backends.utils import names_digest
|
||||
from django.db import connection
|
||||
|
||||
if HAS_DJANGO is True:
|
||||
# See upgrade blocker note in requirements/README.md
|
||||
try:
|
||||
names_digest('foo', 'bar', 'baz', length=8)
|
||||
except ValueError:
|
||||
|
||||
def names_digest(*args, length):
|
||||
"""
|
||||
Generate a 32-bit digest of a set of arguments that can be used to shorten
|
||||
identifying names. Support for use in FIPS environments.
|
||||
"""
|
||||
h = hashlib.md5(usedforsecurity=False)
|
||||
for arg in args:
|
||||
h.update(arg.encode())
|
||||
return h.hexdigest()[:length]
|
||||
|
||||
schema.names_digest = names_digest
|
||||
indexes.names_digest = names_digest
|
||||
|
||||
|
||||
def find_commands(management_dir):
|
||||
# Modified version of function from django/core/management/__init__.py.
|
||||
|
||||
@@ -418,6 +418,10 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
"""Get value while accepting the in-memory cache if key is available"""
|
||||
with _ctit_db_wrapper(trans_safe=True):
|
||||
return self._get_local(name)
|
||||
# If the last line did not return, that means we hit a database error
|
||||
# in that case, we should not have a local cache value
|
||||
# thus, return empty as a signal to use the default
|
||||
return empty
|
||||
|
||||
def __getattr__(self, name):
|
||||
value = empty
|
||||
|
||||
@@ -13,6 +13,7 @@ from unittest import mock
|
||||
from django.conf import LazySettings
|
||||
from django.core.cache.backends.locmem import LocMemCache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db.utils import Error as DBError
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
import pytest
|
||||
|
||||
@@ -331,3 +332,18 @@ def test_in_memory_cache_works(settings):
|
||||
with mock.patch.object(settings, '_get_local') as mock_get:
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
mock_get.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_VAR=[])
|
||||
def test_getattr_with_database_error(settings):
|
||||
"""
|
||||
If a setting is defined via the registry and has a null-ish default which is not None
|
||||
then referencing that setting during a database outage should give that default
|
||||
this is regression testing for a bug where it would return None
|
||||
"""
|
||||
settings.registry.register('AWX_VAR', field_class=fields.StringListField, default=[], category=_('System'), category_slug='system')
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
|
||||
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection') as mock_ensure:
|
||||
mock_ensure.side_effect = DBError('for test')
|
||||
assert settings.AWX_VAR == []
|
||||
|
||||
@@ -2,7 +2,11 @@ from .plugin import CredentialPlugin
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from thycotic.secrets.vault import SecretsVault
|
||||
|
||||
try:
|
||||
from delinea.secrets.vault import SecretsVault
|
||||
except ImportError:
|
||||
from thycotic.secrets.vault import SecretsVault
|
||||
|
||||
|
||||
dsv_inputs = {
|
||||
|
||||
@@ -54,7 +54,9 @@ tss_inputs = {
|
||||
|
||||
def tss_backend(**kwargs):
|
||||
if kwargs.get("domain"):
|
||||
authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
|
||||
authorizer = DomainPasswordGrantAuthorizer(
|
||||
base_url=kwargs['server_url'], username=kwargs['username'], domain=kwargs['domain'], password=kwargs['password']
|
||||
)
|
||||
else:
|
||||
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
|
||||
secret_server = SecretServer(kwargs['server_url'], authorizer)
|
||||
|
||||
@@ -24,6 +24,9 @@ class Command(BaseCommand):
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
|
||||
parser.add_argument(
|
||||
'--batch-size', dest='batch_size', type=int, default=500, metavar='X', help='Remove activity stream events in batch of X events. Defaults to 500.'
|
||||
)
|
||||
|
||||
def init_logging(self):
|
||||
log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
|
||||
@@ -48,7 +51,7 @@ class Command(BaseCommand):
|
||||
else:
|
||||
pks_to_delete.add(asobj.pk)
|
||||
# Cleanup objects in batches instead of deleting each one individually.
|
||||
if len(pks_to_delete) >= 500:
|
||||
if len(pks_to_delete) >= self.batch_size:
|
||||
ActivityStream.objects.filter(pk__in=pks_to_delete).delete()
|
||||
n_deleted_items += len(pks_to_delete)
|
||||
pks_to_delete.clear()
|
||||
@@ -63,4 +66,5 @@ class Command(BaseCommand):
|
||||
self.days = int(options.get('days', 30))
|
||||
self.cutoff = now() - datetime.timedelta(days=self.days)
|
||||
self.dry_run = bool(options.get('dry_run', False))
|
||||
self.batch_size = int(options.get('batch_size', 500))
|
||||
self.cleanup_activitystream()
|
||||
|
||||
@@ -9,6 +9,7 @@ import re
|
||||
|
||||
|
||||
# Django
|
||||
from django.apps import apps
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction, connection
|
||||
from django.db.models import Min, Max
|
||||
@@ -150,6 +151,9 @@ class Command(BaseCommand):
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
|
||||
parser.add_argument(
|
||||
'--batch-size', dest='batch_size', type=int, default=100000, metavar='X', help='Remove jobs in batch of X jobs. Defaults to 100000.'
|
||||
)
|
||||
parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs')
|
||||
parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands')
|
||||
parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates')
|
||||
@@ -195,18 +199,58 @@ class Command(BaseCommand):
|
||||
delete_meta.delete_jobs()
|
||||
return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)
|
||||
|
||||
def _cascade_delete_job_events(self, model, pk_list):
|
||||
def has_unpartitioned_table(self, model):
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SELECT 1 FROM pg_tables WHERE tablename = '_unpartitioned_{tblname}';")
|
||||
row = cursor.fetchone()
|
||||
if row is None:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _delete_unpartitioned_table(self, model):
|
||||
"If the unpartitioned table is no longer necessary, it will drop the table"
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
if not self.has_unpartitioned_table(model):
|
||||
self.logger.debug(f'Table _unpartitioned_{tblname} does not exist, you are fully migrated.')
|
||||
return
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
# same as UnpartitionedJobEvent.objects.aggregate(Max('created'))
|
||||
cursor.execute(f'SELECT MAX("_unpartitioned_{tblname}"."created") FROM "_unpartitioned_{tblname}";')
|
||||
row = cursor.fetchone()
|
||||
last_created = row[0]
|
||||
|
||||
if last_created:
|
||||
self.logger.info(f'Last event created in _unpartitioned_{tblname} was {last_created.isoformat()}')
|
||||
else:
|
||||
self.logger.info(f'Table _unpartitioned_{tblname} has no events in it')
|
||||
|
||||
if (last_created is None) or (last_created < self.cutoff):
|
||||
self.logger.warning(
|
||||
f'Dropping table _unpartitioned_{tblname} since no records are newer than {self.cutoff}\n'
|
||||
'WARNING - this will happen in a separate transaction so a failure will not roll back prior cleanup'
|
||||
)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f'DROP TABLE _unpartitioned_{tblname};')
|
||||
|
||||
def _delete_unpartitioned_events(self, model, pk_list):
|
||||
"If unpartitioned job events remain, it will cascade those from jobs in pk_list"
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
rel_name = model().event_parent_key
|
||||
|
||||
# Bail if the unpartitioned table does not exist anymore
|
||||
if not self.has_unpartitioned_table(model):
|
||||
return
|
||||
|
||||
# Table still exists, delete individual unpartitioned events
|
||||
if pk_list:
|
||||
with connection.cursor() as cursor:
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
|
||||
self.logger.debug(f'Deleting {len(pk_list)} events from _unpartitioned_{tblname}, use a longer cleanup window to delete the table.')
|
||||
pk_list_csv = ','.join(map(str, pk_list))
|
||||
rel_name = model().event_parent_key
|
||||
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
|
||||
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv});")
|
||||
|
||||
def cleanup_jobs(self):
|
||||
batch_size = 100000
|
||||
|
||||
# Hack to avoid doing N+1 queries as each item in the Job query set does
|
||||
# an individual query to get the underlying UnifiedJob.
|
||||
Job.polymorphic_super_sub_accessors_replaced = True
|
||||
@@ -221,13 +265,14 @@ class Command(BaseCommand):
|
||||
deleted = 0
|
||||
info = qs.aggregate(min=Min('id'), max=Max('id'))
|
||||
if info['min'] is not None:
|
||||
for start in range(info['min'], info['max'] + 1, batch_size):
|
||||
qs_batch = qs.filter(id__gte=start, id__lte=start + batch_size)
|
||||
for start in range(info['min'], info['max'] + 1, self.batch_size):
|
||||
qs_batch = qs.filter(id__gte=start, id__lte=start + self.batch_size)
|
||||
pk_list = qs_batch.values_list('id', flat=True)
|
||||
|
||||
_, results = qs_batch.delete()
|
||||
deleted += results['main.Job']
|
||||
self._cascade_delete_job_events(Job, pk_list)
|
||||
# Avoid dropping the job event table in case we have interacted with it already
|
||||
self._delete_unpartitioned_events(Job, pk_list)
|
||||
|
||||
return skipped, deleted
|
||||
|
||||
@@ -250,7 +295,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(AdHocCommand, pk_list)
|
||||
self._delete_unpartitioned_events(AdHocCommand, pk_list)
|
||||
|
||||
skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -278,7 +323,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(ProjectUpdate, pk_list)
|
||||
self._delete_unpartitioned_events(ProjectUpdate, pk_list)
|
||||
|
||||
skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -306,7 +351,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(InventoryUpdate, pk_list)
|
||||
self._delete_unpartitioned_events(InventoryUpdate, pk_list)
|
||||
|
||||
skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -330,7 +375,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(SystemJob, pk_list)
|
||||
self._delete_unpartitioned_events(SystemJob, pk_list)
|
||||
|
||||
skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -375,12 +420,12 @@ class Command(BaseCommand):
|
||||
skipped += Notification.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, *args, **options):
|
||||
self.verbosity = int(options.get('verbosity', 1))
|
||||
self.init_logging()
|
||||
self.days = int(options.get('days', 90))
|
||||
self.dry_run = bool(options.get('dry_run', False))
|
||||
self.batch_size = int(options.get('batch_size', 100000))
|
||||
try:
|
||||
self.cutoff = now() - datetime.timedelta(days=self.days)
|
||||
except OverflowError:
|
||||
@@ -402,19 +447,29 @@ class Command(BaseCommand):
|
||||
del s.receivers[:]
|
||||
s.sender_receivers_cache.clear()
|
||||
|
||||
for m in model_names:
|
||||
if m not in models_to_cleanup:
|
||||
continue
|
||||
with transaction.atomic():
|
||||
for m in models_to_cleanup:
|
||||
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
|
||||
|
||||
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
|
||||
func = getattr(self, 'cleanup_%s_partition' % m, None)
|
||||
if func:
|
||||
skipped_partition, deleted_partition = func()
|
||||
skipped += skipped_partition
|
||||
deleted += deleted_partition
|
||||
|
||||
func = getattr(self, 'cleanup_%s_partition' % m, None)
|
||||
if func:
|
||||
skipped_partition, deleted_partition = func()
|
||||
skipped += skipped_partition
|
||||
deleted += deleted_partition
|
||||
if self.dry_run:
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
|
||||
if self.dry_run:
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
# Deleting unpartitioned tables cannot be done in same transaction as updates to related tables
|
||||
if not self.dry_run:
|
||||
with transaction.atomic():
|
||||
for m in models_to_cleanup:
|
||||
unified_job_class_name = m[:-1].title().replace('Management', 'System').replace('_', '')
|
||||
unified_job_class = apps.get_model('main', unified_job_class_name)
|
||||
try:
|
||||
unified_job_class().event_class
|
||||
except (NotImplementedError, AttributeError):
|
||||
continue # no need to run this for models without events
|
||||
self._delete_unpartitioned_table(unified_job_class)
|
||||
|
||||
@@ -125,14 +125,15 @@ class InstanceManager(models.Manager):
|
||||
with advisory_lock('instance_registration_%s' % hostname):
|
||||
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
# detect any instances with the same IP address.
|
||||
# if one exists, set it to None
|
||||
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
|
||||
if inst_conflicting_ip.exists():
|
||||
for other_inst in inst_conflicting_ip:
|
||||
other_hostname = other_inst.hostname
|
||||
other_inst.ip_address = None
|
||||
other_inst.save(update_fields=['ip_address'])
|
||||
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
|
||||
# if one exists, set it to ""
|
||||
if ip_address:
|
||||
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
|
||||
if inst_conflicting_ip.exists():
|
||||
for other_inst in inst_conflicting_ip:
|
||||
other_hostname = other_inst.hostname
|
||||
other_inst.ip_address = ""
|
||||
other_inst.save(update_fields=['ip_address'])
|
||||
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
|
||||
|
||||
# Return existing instance that matches hostname or UUID (default to UUID)
|
||||
if node_uuid is not None and node_uuid != UUID_DEFAULT and self.filter(uuid=node_uuid).exists():
|
||||
|
||||
@@ -124,6 +124,13 @@ class TaskBase:
|
||||
self.record_aggregate_metrics()
|
||||
sys.exit(1)
|
||||
|
||||
def get_local_metrics(self):
|
||||
data = {}
|
||||
for k, metric in self.subsystem_metrics.METRICS.items():
|
||||
if k.startswith(self.prefix) and metric.metric_has_changed:
|
||||
data[k[len(self.prefix) + 1 :]] = metric.current_value
|
||||
return data
|
||||
|
||||
def schedule(self):
|
||||
# Always be able to restore the original signal handler if we finish
|
||||
original_sigusr1 = signal.getsignal(signal.SIGUSR1)
|
||||
@@ -146,10 +153,14 @@ class TaskBase:
|
||||
signal.signal(signal.SIGUSR1, original_sigusr1)
|
||||
commit_start = time.time()
|
||||
|
||||
logger.debug(f"Commiting {self.prefix} Scheduler changes")
|
||||
|
||||
if self.prefix == "task_manager":
|
||||
self.subsystem_metrics.set(f"{self.prefix}_commit_seconds", time.time() - commit_start)
|
||||
local_metrics = self.get_local_metrics()
|
||||
self.record_aggregate_metrics()
|
||||
logger.debug(f"Finishing {self.prefix} Scheduler")
|
||||
|
||||
logger.debug(f"Finished {self.prefix} Scheduler, timing data:\n{local_metrics}")
|
||||
|
||||
|
||||
class WorkflowManager(TaskBase):
|
||||
|
||||
@@ -1873,6 +1873,8 @@ class RunSystemJob(BaseTask):
|
||||
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
|
||||
if 'days' in json_vars:
|
||||
args.extend(['--days', str(json_vars.get('days', 60))])
|
||||
if 'batch_size' in json_vars:
|
||||
args.extend(['--batch-size', str(json_vars['batch_size'])])
|
||||
if 'dry_run' in json_vars and json_vars['dry_run']:
|
||||
args.extend(['--dry-run'])
|
||||
if system_job.job_type == 'cleanup_jobs':
|
||||
|
||||
@@ -76,3 +76,24 @@ def test_hashivault_handle_auth_kubernetes():
|
||||
def test_hashivault_handle_auth_not_enough_args():
|
||||
with pytest.raises(Exception):
|
||||
hashivault.handle_auth()
|
||||
|
||||
|
||||
class TestDelineaImports:
|
||||
"""
|
||||
These module have a try-except for ImportError which will allow using the older library
|
||||
but we do not want the awx_devel image to have the older library,
|
||||
so these tests are designed to fail if these wind up using the fallback import
|
||||
"""
|
||||
|
||||
def test_dsv_import(self):
|
||||
from awx.main.credential_plugins.dsv import SecretsVault # noqa
|
||||
|
||||
# assert this module as opposed to older thycotic.secrets.vault
|
||||
assert SecretsVault.__module__ == 'delinea.secrets.vault'
|
||||
|
||||
def test_tss_import(self):
|
||||
from awx.main.credential_plugins.tss import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret # noqa
|
||||
|
||||
for cls in (DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret):
|
||||
# assert this module as opposed to older thycotic.secrets.server
|
||||
assert cls.__module__ == 'delinea.secrets.server'
|
||||
|
||||
@@ -23,7 +23,7 @@ from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.utils.functional import cached_property
|
||||
from django.db import connection, transaction, ProgrammingError
|
||||
from django.db import connection, transaction, ProgrammingError, IntegrityError
|
||||
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
|
||||
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
|
||||
from django.db.models.query import QuerySet
|
||||
@@ -1164,13 +1164,24 @@ def create_partition(tblname, start=None):
|
||||
try:
|
||||
with transaction.atomic():
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = '{tblname}_{partition_label}');")
|
||||
row = cursor.fetchone()
|
||||
if row is not None:
|
||||
for val in row: # should only have 1
|
||||
if val is True:
|
||||
logger.debug(f'Event partition table {tblname}_{partition_label} already exists')
|
||||
return
|
||||
|
||||
cursor.execute(
|
||||
f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} '
|
||||
f'PARTITION OF {tblname} '
|
||||
f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');'
|
||||
f'CREATE TABLE {tblname}_{partition_label} (LIKE {tblname} INCLUDING DEFAULTS INCLUDING CONSTRAINTS); '
|
||||
f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} '
|
||||
f'FOR VALUES FROM (\'{start_timestamp}\') TO (\'{end_timestamp}\');'
|
||||
)
|
||||
except ProgrammingError as e:
|
||||
logger.debug(f'Caught known error due to existing partition: {e}')
|
||||
except (ProgrammingError, IntegrityError) as e:
|
||||
if 'already exists' in str(e):
|
||||
logger.info(f'Caught known error due to partition creation race: {e}')
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
def cleanup_new_process(func):
|
||||
|
||||
@@ -33,6 +33,7 @@ import Roles from './models/Roles';
|
||||
import Root from './models/Root';
|
||||
import Schedules from './models/Schedules';
|
||||
import Settings from './models/Settings';
|
||||
import SubscriptionUsage from './models/SubscriptionUsage';
|
||||
import SystemJobs from './models/SystemJobs';
|
||||
import SystemJobTemplates from './models/SystemJobTemplates';
|
||||
import Teams from './models/Teams';
|
||||
@@ -82,6 +83,7 @@ const RolesAPI = new Roles();
|
||||
const RootAPI = new Root();
|
||||
const SchedulesAPI = new Schedules();
|
||||
const SettingsAPI = new Settings();
|
||||
const SubscriptionUsageAPI = new SubscriptionUsage();
|
||||
const SystemJobsAPI = new SystemJobs();
|
||||
const SystemJobTemplatesAPI = new SystemJobTemplates();
|
||||
const TeamsAPI = new Teams();
|
||||
@@ -132,6 +134,7 @@ export {
|
||||
RootAPI,
|
||||
SchedulesAPI,
|
||||
SettingsAPI,
|
||||
SubscriptionUsageAPI,
|
||||
SystemJobsAPI,
|
||||
SystemJobTemplatesAPI,
|
||||
TeamsAPI,
|
||||
|
||||
16
awx/ui/src/api/models/SubscriptionUsage.js
Normal file
16
awx/ui/src/api/models/SubscriptionUsage.js
Normal file
@@ -0,0 +1,16 @@
|
||||
import Base from '../Base';
|
||||
|
||||
class SubscriptionUsage extends Base {
|
||||
constructor(http) {
|
||||
super(http);
|
||||
this.baseUrl = 'api/v2/host_metric_summary_monthly/';
|
||||
}
|
||||
|
||||
readSubscriptionUsageChart(dateRange) {
|
||||
return this.http.get(
|
||||
`${this.baseUrl}?date__gte=${dateRange}&order_by=date&page_size=100`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default SubscriptionUsage;
|
||||
@@ -75,6 +75,7 @@ function SessionProvider({ children }) {
|
||||
const [sessionCountdown, setSessionCountdown] = useState(0);
|
||||
const [authRedirectTo, setAuthRedirectTo] = useState('/');
|
||||
const [isUserBeingLoggedOut, setIsUserBeingLoggedOut] = useState(false);
|
||||
const [isRedirectLinkReceived, setIsRedirectLinkReceived] = useState(false);
|
||||
|
||||
const {
|
||||
request: fetchLoginRedirectOverride,
|
||||
@@ -99,6 +100,7 @@ function SessionProvider({ children }) {
|
||||
|
||||
const logout = useCallback(async () => {
|
||||
setIsUserBeingLoggedOut(true);
|
||||
setIsRedirectLinkReceived(false);
|
||||
if (!isSessionExpired.current) {
|
||||
setAuthRedirectTo('/logout');
|
||||
window.localStorage.setItem(SESSION_USER_ID, null);
|
||||
@@ -112,6 +114,18 @@ function SessionProvider({ children }) {
|
||||
return <Redirect to="/login" />;
|
||||
}, [setSessionTimeout, setSessionCountdown]);
|
||||
|
||||
useEffect(() => {
|
||||
const unlisten = history.listen((location, action) => {
|
||||
if (action === 'POP') {
|
||||
setIsRedirectLinkReceived(true);
|
||||
}
|
||||
});
|
||||
|
||||
return () => {
|
||||
unlisten(); // ensure that the listener is removed when the component unmounts
|
||||
};
|
||||
}, [history]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!isAuthenticated(document.cookie)) {
|
||||
return () => {};
|
||||
@@ -176,6 +190,8 @@ function SessionProvider({ children }) {
|
||||
logout,
|
||||
sessionCountdown,
|
||||
setAuthRedirectTo,
|
||||
isRedirectLinkReceived,
|
||||
setIsRedirectLinkReceived,
|
||||
}),
|
||||
[
|
||||
authRedirectTo,
|
||||
@@ -186,6 +202,8 @@ function SessionProvider({ children }) {
|
||||
logout,
|
||||
sessionCountdown,
|
||||
setAuthRedirectTo,
|
||||
isRedirectLinkReceived,
|
||||
setIsRedirectLinkReceived,
|
||||
]
|
||||
);
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ import Organizations from 'screens/Organization';
|
||||
import Projects from 'screens/Project';
|
||||
import Schedules from 'screens/Schedule';
|
||||
import Settings from 'screens/Setting';
|
||||
import SubscriptionUsage from 'screens/SubscriptionUsage/SubscriptionUsage';
|
||||
import Teams from 'screens/Team';
|
||||
import Templates from 'screens/Template';
|
||||
import TopologyView from 'screens/TopologyView';
|
||||
@@ -61,6 +62,11 @@ function getRouteConfig(userProfile = {}) {
|
||||
path: '/host_metrics',
|
||||
screen: HostMetrics,
|
||||
},
|
||||
{
|
||||
title: <Trans>Subscription Usage</Trans>,
|
||||
path: '/subscription_usage',
|
||||
screen: SubscriptionUsage,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
@@ -189,6 +195,7 @@ function getRouteConfig(userProfile = {}) {
|
||||
'unique_managed_hosts'
|
||||
) {
|
||||
deleteRoute('host_metrics');
|
||||
deleteRoute('subscription_usage');
|
||||
}
|
||||
if (userProfile?.isSuperUser || userProfile?.isSystemAuditor)
|
||||
return routeConfig;
|
||||
@@ -197,6 +204,7 @@ function getRouteConfig(userProfile = {}) {
|
||||
deleteRoute('management_jobs');
|
||||
deleteRoute('topology_view');
|
||||
deleteRoute('instances');
|
||||
deleteRoute('subscription_usage');
|
||||
if (userProfile?.isOrgAdmin) return routeConfig;
|
||||
if (!userProfile?.isNotificationAdmin) deleteRoute('notification_templates');
|
||||
|
||||
|
||||
@@ -31,6 +31,7 @@ describe('getRouteConfig', () => {
|
||||
'/activity_stream',
|
||||
'/workflow_approvals',
|
||||
'/host_metrics',
|
||||
'/subscription_usage',
|
||||
'/templates',
|
||||
'/credentials',
|
||||
'/projects',
|
||||
@@ -61,6 +62,7 @@ describe('getRouteConfig', () => {
|
||||
'/activity_stream',
|
||||
'/workflow_approvals',
|
||||
'/host_metrics',
|
||||
'/subscription_usage',
|
||||
'/templates',
|
||||
'/credentials',
|
||||
'/projects',
|
||||
|
||||
@@ -302,9 +302,9 @@ function HostsByProcessorTypeExample() {
|
||||
|
||||
const hostsByProcessorLimit = `intel_hosts`;
|
||||
const hostsByProcessorSourceVars = `plugin: constructed
|
||||
strict: true
|
||||
groups:
|
||||
intel_hosts: "GenuineIntel" in ansible_processor`;
|
||||
strict: true
|
||||
groups:
|
||||
intel_hosts: "'GenuineIntel' in ansible_processor"`;
|
||||
|
||||
return (
|
||||
<FormFieldGroupExpandable
|
||||
|
||||
@@ -45,7 +45,7 @@ describe('<ConstructedInventoryHint />', () => {
|
||||
);
|
||||
expect(navigator.clipboard.writeText).toHaveBeenCalledWith(
|
||||
expect.stringContaining(
|
||||
'intel_hosts: "GenuineIntel" in ansible_processor'
|
||||
`intel_hosts: \"'GenuineIntel' in ansible_processor\"`
|
||||
)
|
||||
);
|
||||
});
|
||||
|
||||
@@ -45,7 +45,8 @@ const Login = styled(PFLogin)`
|
||||
|
||||
function AWXLogin({ alt, isAuthenticated }) {
|
||||
const [userId, setUserId] = useState(null);
|
||||
const { authRedirectTo, isSessionExpired } = useSession();
|
||||
const { authRedirectTo, isSessionExpired, isRedirectLinkReceived } =
|
||||
useSession();
|
||||
const isNewUser = useRef(true);
|
||||
const hasVerifiedUser = useRef(false);
|
||||
|
||||
@@ -179,7 +180,8 @@ function AWXLogin({ alt, isAuthenticated }) {
|
||||
return <LoadingSpinner />;
|
||||
}
|
||||
if (userId && hasVerifiedUser.current) {
|
||||
const redirect = isNewUser.current ? '/home' : authRedirectTo;
|
||||
const redirect =
|
||||
isNewUser.current && !isRedirectLinkReceived ? '/home' : authRedirectTo;
|
||||
|
||||
return <Redirect to={redirect} />;
|
||||
}
|
||||
|
||||
@@ -0,0 +1,319 @@
|
||||
import React, { useEffect, useCallback } from 'react';
|
||||
import { string, number, shape, arrayOf } from 'prop-types';
|
||||
import * as d3 from 'd3';
|
||||
import { t } from '@lingui/macro';
|
||||
import { PageContextConsumer } from '@patternfly/react-core';
|
||||
import UsageChartTooltip from './UsageChartTooltip';
|
||||
|
||||
function UsageChart({ id, data, height, pageContext }) {
|
||||
const { isNavOpen } = pageContext;
|
||||
|
||||
// Methods
|
||||
const draw = useCallback(() => {
|
||||
const margin = { top: 15, right: 25, bottom: 105, left: 70 };
|
||||
|
||||
const getWidth = () => {
|
||||
let width;
|
||||
// This is in an a try/catch due to an error from jest.
|
||||
// Even though the d3.select returns a valid selector with
|
||||
// style function, it says it is null in the test
|
||||
try {
|
||||
width =
|
||||
parseInt(d3.select(`#${id}`).style('width'), 10) -
|
||||
margin.left -
|
||||
margin.right || 700;
|
||||
} catch (error) {
|
||||
width = 700;
|
||||
}
|
||||
return width;
|
||||
};
|
||||
|
||||
// Clear our chart container element first
|
||||
d3.selectAll(`#${id} > *`).remove();
|
||||
const width = getWidth();
|
||||
|
||||
function transition(path) {
|
||||
path.transition().duration(1000).attrTween('stroke-dasharray', tweenDash);
|
||||
}
|
||||
|
||||
function tweenDash(...params) {
|
||||
const l = params[2][params[1]].getTotalLength();
|
||||
const i = d3.interpolateString(`0,${l}`, `${l},${l}`);
|
||||
return (val) => i(val);
|
||||
}
|
||||
|
||||
const x = d3.scaleTime().rangeRound([0, width]);
|
||||
const y = d3.scaleLinear().range([height, 0]);
|
||||
|
||||
// [consumed, capacity]
|
||||
const colors = d3.scaleOrdinal(['#06C', '#C9190B']);
|
||||
const svg = d3
|
||||
.select(`#${id}`)
|
||||
.append('svg')
|
||||
.attr('width', width + margin.left + margin.right)
|
||||
.attr('height', height + margin.top + margin.bottom)
|
||||
.attr('z', 100)
|
||||
.append('g')
|
||||
.attr('id', 'chart-container')
|
||||
.attr('transform', `translate(${margin.left}, ${margin.top})`);
|
||||
// Tooltip
|
||||
const tooltip = new UsageChartTooltip({
|
||||
svg: `#${id}`,
|
||||
colors,
|
||||
label: t`Hosts`,
|
||||
});
|
||||
|
||||
const parseTime = d3.timeParse('%Y-%m-%d');
|
||||
|
||||
const formattedData = data?.reduce(
|
||||
(formatted, { date, license_consumed, license_capacity }) => {
|
||||
const MONTH = parseTime(date);
|
||||
const CONSUMED = +license_consumed;
|
||||
const CAPACITY = +license_capacity;
|
||||
return formatted.concat({ MONTH, CONSUMED, CAPACITY });
|
||||
},
|
||||
[]
|
||||
);
|
||||
|
||||
// Scale the range of the data
|
||||
const largestY = formattedData?.reduce((a_max, b) => {
|
||||
const b_max = Math.max(b.CONSUMED > b.CAPACITY ? b.CONSUMED : b.CAPACITY);
|
||||
return a_max > b_max ? a_max : b_max;
|
||||
}, 0);
|
||||
x.domain(d3.extent(formattedData, (d) => d.MONTH));
|
||||
y.domain([
|
||||
0,
|
||||
largestY > 4 ? largestY + Math.max(largestY / 10, 1) : 5,
|
||||
]).nice();
|
||||
|
||||
const capacityLine = d3
|
||||
.line()
|
||||
.curve(d3.curveMonotoneX)
|
||||
.x((d) => x(d.MONTH))
|
||||
.y((d) => y(d.CAPACITY));
|
||||
|
||||
const consumedLine = d3
|
||||
.line()
|
||||
.curve(d3.curveMonotoneX)
|
||||
.x((d) => x(d.MONTH))
|
||||
.y((d) => y(d.CONSUMED));
|
||||
|
||||
// Add the Y Axis
|
||||
svg
|
||||
.append('g')
|
||||
.attr('class', 'y-axis')
|
||||
.call(
|
||||
d3
|
||||
.axisLeft(y)
|
||||
.ticks(
|
||||
largestY > 3
|
||||
? Math.min(largestY + Math.max(largestY / 10, 1), 10)
|
||||
: 5
|
||||
)
|
||||
.tickSize(-width)
|
||||
.tickFormat(d3.format('d'))
|
||||
)
|
||||
.selectAll('line')
|
||||
.attr('stroke', '#d7d7d7');
|
||||
svg.selectAll('.y-axis .tick text').attr('x', -5).attr('font-size', '14');
|
||||
|
||||
// text label for the y axis
|
||||
svg
|
||||
.append('text')
|
||||
.attr('transform', 'rotate(-90)')
|
||||
.attr('y', 0 - margin.left)
|
||||
.attr('x', 0 - height / 2)
|
||||
.attr('dy', '1em')
|
||||
.style('text-anchor', 'middle')
|
||||
.text(t`Unique Hosts`);
|
||||
|
||||
// Add the X Axis
|
||||
let ticks;
|
||||
const maxTicks = Math.round(
|
||||
formattedData.length / (formattedData.length / 2)
|
||||
);
|
||||
ticks = formattedData.map((d) => d.MONTH);
|
||||
if (formattedData.length === 13) {
|
||||
ticks = formattedData
|
||||
.map((d, i) => (i % maxTicks === 0 ? d.MONTH : undefined))
|
||||
.filter((item) => item);
|
||||
}
|
||||
|
||||
svg.select('.domain').attr('stroke', '#d7d7d7');
|
||||
|
||||
svg
|
||||
.append('g')
|
||||
.attr('class', 'x-axis')
|
||||
.attr('transform', `translate(0, ${height})`)
|
||||
.call(
|
||||
d3
|
||||
.axisBottom(x)
|
||||
.tickValues(ticks)
|
||||
.tickSize(-height)
|
||||
.tickFormat(d3.timeFormat('%m/%y'))
|
||||
)
|
||||
.selectAll('line')
|
||||
.attr('stroke', '#d7d7d7');
|
||||
|
||||
svg
|
||||
.selectAll('.x-axis .tick text')
|
||||
.attr('x', -25)
|
||||
.attr('font-size', '14')
|
||||
.attr('transform', 'rotate(-65)');
|
||||
|
||||
// text label for the x axis
|
||||
svg
|
||||
.append('text')
|
||||
.attr(
|
||||
'transform',
|
||||
`translate(${width / 2} , ${height + margin.top + 50})`
|
||||
)
|
||||
.style('text-anchor', 'middle')
|
||||
.text(t`Month`);
|
||||
const vertical = svg
|
||||
.append('path')
|
||||
.attr('class', 'mouse-line')
|
||||
.style('stroke', 'black')
|
||||
.style('stroke-width', '3px')
|
||||
.style('stroke-dasharray', '3, 3')
|
||||
.style('opacity', '0');
|
||||
|
||||
const handleMouseOver = (event, d) => {
|
||||
tooltip.handleMouseOver(event, d);
|
||||
// show vertical line
|
||||
vertical.transition().style('opacity', '1');
|
||||
};
|
||||
const handleMouseMove = function mouseMove(event) {
|
||||
const [pointerX] = d3.pointer(event);
|
||||
vertical.attr('d', () => `M${pointerX},${height} ${pointerX},${0}`);
|
||||
};
|
||||
|
||||
const handleMouseOut = () => {
|
||||
// hide tooltip
|
||||
tooltip.handleMouseOut();
|
||||
// hide vertical line
|
||||
vertical.transition().style('opacity', 0);
|
||||
};
|
||||
|
||||
const dateFormat = d3.timeFormat('%m/%y');
|
||||
|
||||
// Add the consumed line path
|
||||
svg
|
||||
.append('path')
|
||||
.data([formattedData])
|
||||
.attr('class', 'line')
|
||||
.style('fill', 'none')
|
||||
.style('stroke', () => colors(1))
|
||||
.attr('stroke-width', 2)
|
||||
.attr('d', consumedLine)
|
||||
.call(transition);
|
||||
|
||||
// create our consumed line circles
|
||||
|
||||
svg
|
||||
.selectAll('dot')
|
||||
.data(formattedData)
|
||||
.enter()
|
||||
.append('circle')
|
||||
.attr('r', 3)
|
||||
.style('stroke', () => colors(1))
|
||||
.style('fill', () => colors(1))
|
||||
.attr('cx', (d) => x(d.MONTH))
|
||||
.attr('cy', (d) => y(d.CONSUMED))
|
||||
.attr('id', (d) => `consumed-dot-${dateFormat(d.MONTH)}`)
|
||||
.on('mouseover', (event, d) => handleMouseOver(event, d))
|
||||
.on('mousemove', handleMouseMove)
|
||||
.on('mouseout', handleMouseOut);
|
||||
|
||||
// Add the capacity line path
|
||||
svg
|
||||
.append('path')
|
||||
.data([formattedData])
|
||||
.attr('class', 'line')
|
||||
.style('fill', 'none')
|
||||
.style('stroke', () => colors(0))
|
||||
.attr('stroke-width', 2)
|
||||
.attr('d', capacityLine)
|
||||
.call(transition);
|
||||
|
||||
// create our capacity line circles
|
||||
|
||||
svg
|
||||
.selectAll('dot')
|
||||
.data(formattedData)
|
||||
.enter()
|
||||
.append('circle')
|
||||
.attr('r', 3)
|
||||
.style('stroke', () => colors(0))
|
||||
.style('fill', () => colors(0))
|
||||
.attr('cx', (d) => x(d.MONTH))
|
||||
.attr('cy', (d) => y(d.CAPACITY))
|
||||
.attr('id', (d) => `capacity-dot-${dateFormat(d.MONTH)}`)
|
||||
.on('mouseover', handleMouseOver)
|
||||
.on('mousemove', handleMouseMove)
|
||||
.on('mouseout', handleMouseOut);
|
||||
|
||||
// Create legend
|
||||
const legend_keys = [t`Subscriptions consumed`, t`Subscription capacity`];
|
||||
let totalWidth = width / 2 - 175;
|
||||
|
||||
const lineLegend = svg
|
||||
.selectAll('.lineLegend')
|
||||
.data(legend_keys)
|
||||
.enter()
|
||||
.append('g')
|
||||
.attr('class', 'lineLegend')
|
||||
.each(function formatLegend() {
|
||||
const current = d3.select(this);
|
||||
current.attr('transform', `translate(${totalWidth}, ${height + 90})`);
|
||||
totalWidth += 200;
|
||||
});
|
||||
|
||||
lineLegend
|
||||
.append('text')
|
||||
.text((d) => d)
|
||||
.attr('font-size', '14')
|
||||
.attr('transform', 'translate(15,9)'); // align texts with boxes
|
||||
|
||||
lineLegend
|
||||
.append('rect')
|
||||
.attr('fill', (d) => colors(d))
|
||||
.attr('width', 10)
|
||||
.attr('height', 10);
|
||||
}, [data, height, id]);
|
||||
|
||||
useEffect(() => {
|
||||
draw();
|
||||
}, [draw, isNavOpen]);
|
||||
|
||||
useEffect(() => {
|
||||
function handleResize() {
|
||||
draw();
|
||||
}
|
||||
|
||||
window.addEventListener('resize', handleResize);
|
||||
|
||||
handleResize();
|
||||
|
||||
return () => window.removeEventListener('resize', handleResize);
|
||||
}, [draw]);
|
||||
|
||||
return <div id={id} />;
|
||||
}
|
||||
|
||||
UsageChart.propTypes = {
|
||||
id: string.isRequired,
|
||||
data: arrayOf(shape({})).isRequired,
|
||||
height: number.isRequired,
|
||||
};
|
||||
|
||||
const withPageContext = (Component) =>
|
||||
function contextComponent(props) {
|
||||
return (
|
||||
<PageContextConsumer>
|
||||
{(pageContext) => <Component {...props} pageContext={pageContext} />}
|
||||
</PageContextConsumer>
|
||||
);
|
||||
};
|
||||
|
||||
export default withPageContext(UsageChart);
|
||||
@@ -0,0 +1,177 @@
|
||||
import * as d3 from 'd3';
|
||||
import { t } from '@lingui/macro';
|
||||
|
||||
class UsageChartTooltip {
|
||||
constructor(opts) {
|
||||
this.label = opts.label;
|
||||
this.svg = opts.svg;
|
||||
this.colors = opts.colors;
|
||||
|
||||
this.draw();
|
||||
}
|
||||
|
||||
draw() {
|
||||
this.toolTipBase = d3.select(`${this.svg} > svg`).append('g');
|
||||
this.toolTipBase.attr('id', 'chart-tooltip');
|
||||
this.toolTipBase.attr('overflow', 'visible');
|
||||
this.toolTipBase.style('opacity', 0);
|
||||
this.toolTipBase.style('pointer-events', 'none');
|
||||
this.toolTipBase.attr('transform', 'translate(100, 100)');
|
||||
this.boxWidth = 200;
|
||||
this.textWidthThreshold = 20;
|
||||
|
||||
this.toolTipPoint = this.toolTipBase
|
||||
.append('rect')
|
||||
.attr('transform', 'translate(10, -10) rotate(45)')
|
||||
.attr('x', 0)
|
||||
.attr('y', 0)
|
||||
.attr('height', 20)
|
||||
.attr('width', 20)
|
||||
.attr('fill', '#393f44');
|
||||
this.boundingBox = this.toolTipBase
|
||||
.append('rect')
|
||||
.attr('x', 10)
|
||||
.attr('y', -41)
|
||||
.attr('rx', 2)
|
||||
.attr('height', 82)
|
||||
.attr('width', this.boxWidth)
|
||||
.attr('fill', '#393f44');
|
||||
this.circleBlue = this.toolTipBase
|
||||
.append('circle')
|
||||
.attr('cx', 26)
|
||||
.attr('cy', 0)
|
||||
.attr('r', 7)
|
||||
.attr('stroke', 'white')
|
||||
.attr('fill', this.colors(1));
|
||||
this.circleRed = this.toolTipBase
|
||||
.append('circle')
|
||||
.attr('cx', 26)
|
||||
.attr('cy', 26)
|
||||
.attr('r', 7)
|
||||
.attr('stroke', 'white')
|
||||
.attr('fill', this.colors(0));
|
||||
this.consumedText = this.toolTipBase
|
||||
.append('text')
|
||||
.attr('x', 43)
|
||||
.attr('y', 4)
|
||||
.attr('font-size', 12)
|
||||
.attr('fill', 'white')
|
||||
.text(t`Subscriptions consumed`);
|
||||
this.capacityText = this.toolTipBase
|
||||
.append('text')
|
||||
.attr('x', 43)
|
||||
.attr('y', 28)
|
||||
.attr('font-size', 12)
|
||||
.attr('fill', 'white')
|
||||
.text(t`Subscription capacity`);
|
||||
this.icon = this.toolTipBase
|
||||
.append('text')
|
||||
.attr('fill', 'white')
|
||||
.attr('stroke', 'white')
|
||||
.attr('x', 24)
|
||||
.attr('y', 30)
|
||||
.attr('font-size', 12);
|
||||
this.consumed = this.toolTipBase
|
||||
.append('text')
|
||||
.attr('fill', 'white')
|
||||
.attr('font-size', 12)
|
||||
.attr('x', 122)
|
||||
.attr('y', 4)
|
||||
.attr('id', 'consumed-count')
|
||||
.text('0');
|
||||
this.capacity = this.toolTipBase
|
||||
.append('text')
|
||||
.attr('fill', 'white')
|
||||
.attr('font-size', 12)
|
||||
.attr('x', 122)
|
||||
.attr('y', 28)
|
||||
.attr('id', 'capacity-count')
|
||||
.text('0');
|
||||
this.date = this.toolTipBase
|
||||
.append('text')
|
||||
.attr('fill', 'white')
|
||||
.attr('stroke', 'white')
|
||||
.attr('x', 20)
|
||||
.attr('y', -21)
|
||||
.attr('font-size', 12);
|
||||
}
|
||||
|
||||
handleMouseOver = (event, data) => {
|
||||
let consumed = 0;
|
||||
let capacity = 0;
|
||||
const [x, y] = d3.pointer(event);
|
||||
const tooltipPointerX = x + 75;
|
||||
|
||||
const formatTooltipDate = d3.timeFormat('%m/%y');
|
||||
if (!event) {
|
||||
return;
|
||||
}
|
||||
|
||||
const toolTipWidth = this.toolTipBase.node().getBoundingClientRect().width;
|
||||
const chartWidth = d3
|
||||
.select(`${this.svg}> svg`)
|
||||
.node()
|
||||
.getBoundingClientRect().width;
|
||||
const overflow = 100 - (toolTipWidth / chartWidth) * 100;
|
||||
const flipped = overflow < (tooltipPointerX / chartWidth) * 100;
|
||||
if (data) {
|
||||
consumed = data.CONSUMED || 0;
|
||||
capacity = data.CAPACITY || 0;
|
||||
this.date.text(formatTooltipDate(data.MONTH || null));
|
||||
}
|
||||
|
||||
this.capacity.text(`${capacity}`);
|
||||
this.consumed.text(`${consumed}`);
|
||||
this.consumedTextWidth = this.consumed.node().getComputedTextLength();
|
||||
this.capacityTextWidth = this.capacity.node().getComputedTextLength();
|
||||
|
||||
const maxTextPerc = (this.jobsWidth / this.boxWidth) * 100;
|
||||
const threshold = 40;
|
||||
const overage = maxTextPerc / threshold;
|
||||
let adjustedWidth;
|
||||
if (maxTextPerc > threshold) {
|
||||
adjustedWidth = this.boxWidth * overage;
|
||||
} else {
|
||||
adjustedWidth = this.boxWidth;
|
||||
}
|
||||
|
||||
this.boundingBox.attr('width', adjustedWidth);
|
||||
this.toolTipBase.attr('transform', `translate(${tooltipPointerX}, ${y})`);
|
||||
if (flipped) {
|
||||
this.toolTipPoint.attr('transform', 'translate(-20, -10) rotate(45)');
|
||||
this.boundingBox.attr('x', -adjustedWidth - 20);
|
||||
this.circleBlue.attr('cx', -adjustedWidth);
|
||||
this.circleRed.attr('cx', -adjustedWidth);
|
||||
this.icon.attr('x', -adjustedWidth - 2);
|
||||
this.consumedText.attr('x', -adjustedWidth + 17);
|
||||
this.capacityText.attr('x', -adjustedWidth + 17);
|
||||
this.consumed.attr('x', -this.consumedTextWidth - 20 - 12);
|
||||
this.capacity.attr('x', -this.capacityTextWidth - 20 - 12);
|
||||
this.date.attr('x', -adjustedWidth - 5);
|
||||
} else {
|
||||
this.toolTipPoint.attr('transform', 'translate(10, -10) rotate(45)');
|
||||
this.boundingBox.attr('x', 10);
|
||||
this.circleBlue.attr('cx', 26);
|
||||
this.circleRed.attr('cx', 26);
|
||||
this.icon.attr('x', 24);
|
||||
this.consumedText.attr('x', 43);
|
||||
this.capacityText.attr('x', 43);
|
||||
this.consumed.attr('x', adjustedWidth - this.consumedTextWidth);
|
||||
this.capacity.attr('x', adjustedWidth - this.capacityTextWidth);
|
||||
this.date.attr('x', 20);
|
||||
}
|
||||
|
||||
this.toolTipBase.style('opacity', 1);
|
||||
this.toolTipBase.interrupt();
|
||||
};
|
||||
|
||||
handleMouseOut = () => {
|
||||
this.toolTipBase
|
||||
.transition()
|
||||
.delay(15)
|
||||
.style('opacity', 0)
|
||||
.style('pointer-events', 'none');
|
||||
};
|
||||
}
|
||||
|
||||
export default UsageChartTooltip;
|
||||
53
awx/ui/src/screens/SubscriptionUsage/SubscriptionUsage.js
Normal file
53
awx/ui/src/screens/SubscriptionUsage/SubscriptionUsage.js
Normal file
@@ -0,0 +1,53 @@
|
||||
import React from 'react';
|
||||
import styled from 'styled-components';
|
||||
|
||||
import { t, Trans } from '@lingui/macro';
|
||||
import { Banner, Card, PageSection } from '@patternfly/react-core';
|
||||
import { InfoCircleIcon } from '@patternfly/react-icons';
|
||||
|
||||
import { useConfig } from 'contexts/Config';
|
||||
import useBrandName from 'hooks/useBrandName';
|
||||
import ScreenHeader from 'components/ScreenHeader';
|
||||
import SubscriptionUsageChart from './SubscriptionUsageChart';
|
||||
|
||||
const MainPageSection = styled(PageSection)`
|
||||
padding-top: 24px;
|
||||
padding-bottom: 0;
|
||||
|
||||
& .spacer {
|
||||
margin-bottom: var(--pf-global--spacer--lg);
|
||||
}
|
||||
`;
|
||||
|
||||
function SubscriptionUsage() {
|
||||
const config = useConfig();
|
||||
const brandName = useBrandName();
|
||||
|
||||
return (
|
||||
<>
|
||||
{config?.ui_next && (
|
||||
<Banner variant="info">
|
||||
<Trans>
|
||||
<p>
|
||||
<InfoCircleIcon /> A tech preview of the new {brandName} user
|
||||
interface can be found <a href="/ui_next/dashboard">here</a>.
|
||||
</p>
|
||||
</Trans>
|
||||
</Banner>
|
||||
)}
|
||||
<ScreenHeader
|
||||
streamType="all"
|
||||
breadcrumbConfig={{ '/subscription_usage': t`Subscription Usage` }}
|
||||
/>
|
||||
<MainPageSection>
|
||||
<div className="spacer">
|
||||
<Card id="dashboard-main-container">
|
||||
<SubscriptionUsageChart />
|
||||
</Card>
|
||||
</div>
|
||||
</MainPageSection>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
export default SubscriptionUsage;
|
||||
167
awx/ui/src/screens/SubscriptionUsage/SubscriptionUsageChart.js
Normal file
167
awx/ui/src/screens/SubscriptionUsage/SubscriptionUsageChart.js
Normal file
@@ -0,0 +1,167 @@
|
||||
import React, { useCallback, useEffect, useState } from 'react';
|
||||
import styled from 'styled-components';
|
||||
|
||||
import { t } from '@lingui/macro';
|
||||
import {
|
||||
Card,
|
||||
CardHeader,
|
||||
CardActions,
|
||||
CardBody,
|
||||
CardTitle,
|
||||
Flex,
|
||||
FlexItem,
|
||||
PageSection,
|
||||
Select,
|
||||
SelectVariant,
|
||||
SelectOption,
|
||||
Text,
|
||||
} from '@patternfly/react-core';
|
||||
|
||||
import useRequest from 'hooks/useRequest';
|
||||
import { SubscriptionUsageAPI } from 'api';
|
||||
import { useUserProfile } from 'contexts/Config';
|
||||
import ContentLoading from 'components/ContentLoading';
|
||||
import UsageChart from './ChartComponents/UsageChart';
|
||||
|
||||
const GraphCardHeader = styled(CardHeader)`
|
||||
margin-bottom: var(--pf-global--spacer--lg);
|
||||
`;
|
||||
|
||||
const ChartCardTitle = styled(CardTitle)`
|
||||
padding-right: 24px;
|
||||
font-size: 20px;
|
||||
font-weight: var(--pf-c-title--m-xl--FontWeight);
|
||||
`;
|
||||
|
||||
const CardText = styled(Text)`
|
||||
padding-right: 24px;
|
||||
`;
|
||||
|
||||
const GraphCardActions = styled(CardActions)`
|
||||
margin-left: initial;
|
||||
padding-left: 0;
|
||||
`;
|
||||
|
||||
function SubscriptionUsageChart() {
|
||||
const [isPeriodDropdownOpen, setIsPeriodDropdownOpen] = useState(false);
|
||||
const [periodSelection, setPeriodSelection] = useState('year');
|
||||
const userProfile = useUserProfile();
|
||||
|
||||
const calculateDateRange = () => {
|
||||
const today = new Date();
|
||||
let date = '';
|
||||
switch (periodSelection) {
|
||||
case 'year':
|
||||
date =
|
||||
today.getMonth() < 10
|
||||
? `${today.getFullYear() - 1}-0${today.getMonth() + 1}-01`
|
||||
: `${today.getFullYear() - 1}-${today.getMonth() + 1}-01`;
|
||||
break;
|
||||
case 'two_years':
|
||||
date =
|
||||
today.getMonth() < 10
|
||||
? `${today.getFullYear() - 2}-0${today.getMonth() + 1}-01`
|
||||
: `${today.getFullYear() - 2}-${today.getMonth() + 1}-01`;
|
||||
break;
|
||||
case 'three_years':
|
||||
date =
|
||||
today.getMonth() < 10
|
||||
? `${today.getFullYear() - 3}-0${today.getMonth() + 1}-01`
|
||||
: `${today.getFullYear() - 3}-${today.getMonth() + 1}-01`;
|
||||
break;
|
||||
default:
|
||||
date =
|
||||
today.getMonth() < 10
|
||||
? `${today.getFullYear() - 1}-0${today.getMonth() + 1}-01`
|
||||
: `${today.getFullYear() - 1}-${today.getMonth() + 1}-01`;
|
||||
break;
|
||||
}
|
||||
return date;
|
||||
};
|
||||
|
||||
const {
|
||||
isLoading,
|
||||
result: subscriptionUsageChartData,
|
||||
request: fetchSubscriptionUsageChart,
|
||||
} = useRequest(
|
||||
useCallback(async () => {
|
||||
const data = await SubscriptionUsageAPI.readSubscriptionUsageChart(
|
||||
calculateDateRange()
|
||||
);
|
||||
return data.data.results;
|
||||
}, [periodSelection]),
|
||||
[]
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
fetchSubscriptionUsageChart();
|
||||
}, [fetchSubscriptionUsageChart, periodSelection]);
|
||||
|
||||
if (isLoading) {
|
||||
return (
|
||||
<PageSection>
|
||||
<Card>
|
||||
<ContentLoading />
|
||||
</Card>
|
||||
</PageSection>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Card>
|
||||
<Flex style={{ justifyContent: 'space-between' }}>
|
||||
<FlexItem>
|
||||
<ChartCardTitle>{t`Subscription Compliance`}</ChartCardTitle>
|
||||
</FlexItem>
|
||||
<FlexItem>
|
||||
<CardText component="small">
|
||||
{t`Last recalculation date:`}{' '}
|
||||
{userProfile.systemConfig.HOST_METRIC_SUMMARY_TASK_LAST_TS.slice(
|
||||
0,
|
||||
10
|
||||
)}
|
||||
</CardText>
|
||||
</FlexItem>
|
||||
</Flex>
|
||||
<GraphCardHeader>
|
||||
<GraphCardActions>
|
||||
<Select
|
||||
variant={SelectVariant.single}
|
||||
placeholderText={t`Select period`}
|
||||
aria-label={t`Select period`}
|
||||
typeAheadAriaLabel={t`Select period`}
|
||||
className="periodSelect"
|
||||
onToggle={setIsPeriodDropdownOpen}
|
||||
onSelect={(event, selection) => {
|
||||
setIsPeriodDropdownOpen(false);
|
||||
setPeriodSelection(selection);
|
||||
}}
|
||||
selections={periodSelection}
|
||||
isOpen={isPeriodDropdownOpen}
|
||||
noResultsFoundText={t`No results found`}
|
||||
ouiaId="subscription-usage-period-select"
|
||||
>
|
||||
<SelectOption key="year" value="year">
|
||||
{t`Past year`}
|
||||
</SelectOption>
|
||||
<SelectOption key="two_years" value="two_years">
|
||||
{t`Past two years`}
|
||||
</SelectOption>
|
||||
<SelectOption key="three_years" value="three_years">
|
||||
{t`Past three years`}
|
||||
</SelectOption>
|
||||
</Select>
|
||||
</GraphCardActions>
|
||||
</GraphCardHeader>
|
||||
<CardBody>
|
||||
<UsageChart
|
||||
period={periodSelection}
|
||||
height={600}
|
||||
id="d3-usage-line-chart-root"
|
||||
data={subscriptionUsageChartData}
|
||||
/>
|
||||
</CardBody>
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
export default SubscriptionUsageChart;
|
||||
@@ -2,16 +2,9 @@ export default function getDocsBaseUrl(config) {
|
||||
let version = 'latest';
|
||||
const licenseType = config?.license_info?.license_type;
|
||||
|
||||
if (licenseType && licenseType !== 'open') {
|
||||
if (config?.version) {
|
||||
if (parseFloat(config?.version.split('-')[0]) >= 4.3) {
|
||||
version = parseFloat(config?.version.split('-')[0]);
|
||||
} else {
|
||||
version = config?.version.split('-')[0];
|
||||
}
|
||||
}
|
||||
} else {
|
||||
version = 'latest';
|
||||
if (licenseType && licenseType !== 'open' && config?.version) {
|
||||
version = parseFloat(config?.version.split('-')[0]).toFixed(1);
|
||||
}
|
||||
|
||||
return `https://docs.ansible.com/automation-controller/${version}`;
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ describe('getDocsBaseUrl', () => {
|
||||
license_info: {
|
||||
license_type: 'open',
|
||||
},
|
||||
version: '18.0.0',
|
||||
version: '18.4.4',
|
||||
});
|
||||
|
||||
expect(result).toEqual(
|
||||
@@ -19,11 +19,11 @@ describe('getDocsBaseUrl', () => {
|
||||
license_info: {
|
||||
license_type: 'enterprise',
|
||||
},
|
||||
version: '4.0.0',
|
||||
version: '18.4.4',
|
||||
});
|
||||
|
||||
expect(result).toEqual(
|
||||
'https://docs.ansible.com/automation-controller/4.0.0'
|
||||
'https://docs.ansible.com/automation-controller/18.4'
|
||||
);
|
||||
});
|
||||
|
||||
@@ -32,17 +32,17 @@ describe('getDocsBaseUrl', () => {
|
||||
license_info: {
|
||||
license_type: 'enterprise',
|
||||
},
|
||||
version: '4.0.0-beta',
|
||||
version: '7.0.0-beta',
|
||||
});
|
||||
|
||||
expect(result).toEqual(
|
||||
'https://docs.ansible.com/automation-controller/4.0.0'
|
||||
'https://docs.ansible.com/automation-controller/7.0'
|
||||
);
|
||||
});
|
||||
|
||||
it('should return latest version if license info missing', () => {
|
||||
const result = getDocsBaseUrl({
|
||||
version: '18.0.0',
|
||||
version: '18.4.4',
|
||||
});
|
||||
|
||||
expect(result).toEqual(
|
||||
|
||||
@@ -33,7 +33,6 @@ options:
|
||||
image:
|
||||
description:
|
||||
- The fully qualified url of the container image.
|
||||
required: True
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
@@ -79,7 +78,7 @@ def main():
|
||||
argument_spec = dict(
|
||||
name=dict(required=True),
|
||||
new_name=dict(),
|
||||
image=dict(required=True),
|
||||
image=dict(),
|
||||
description=dict(),
|
||||
organization=dict(),
|
||||
credential=dict(),
|
||||
|
||||
@@ -273,6 +273,26 @@ def main():
|
||||
# If the state was absent we can let the module delete it if needed, the module will handle exiting from this
|
||||
module.delete_if_needed(existing_item)
|
||||
|
||||
# We need to clear out the name from the search fields so we can use name_or_id in the following searches
|
||||
if 'name' in search_fields:
|
||||
del search_fields['name']
|
||||
|
||||
# Create the data that gets sent for create and update
|
||||
new_fields = {}
|
||||
if execution_environment is not None:
|
||||
if execution_environment == '':
|
||||
new_fields['execution_environment'] = ''
|
||||
else:
|
||||
ee = module.get_one('execution_environments', name_or_id=execution_environment, **{'data': search_fields})
|
||||
if ee is None:
|
||||
ee2 = module.get_one('execution_environments', name_or_id=execution_environment)
|
||||
if ee2 is None or ee2['organization'] is not None:
|
||||
module.fail_json(msg='could not find execution_environment entry with name {0}'.format(execution_environment))
|
||||
else:
|
||||
new_fields['execution_environment'] = ee2['id']
|
||||
else:
|
||||
new_fields['execution_environment'] = ee['id']
|
||||
|
||||
association_fields = {}
|
||||
|
||||
if credentials is not None:
|
||||
@@ -280,9 +300,9 @@ def main():
|
||||
for item in credentials:
|
||||
association_fields['credentials'].append(module.resolve_name_to_id('credentials', item))
|
||||
|
||||
# We need to clear out the name from the search fields so we can use name_or_id in the following searches
|
||||
if 'name' in search_fields:
|
||||
del search_fields['name']
|
||||
# We need to clear out the organization from the search fields the searches for labels and instance_groups doesnt support it and won't be needed anymore
|
||||
if 'organization' in search_fields:
|
||||
del search_fields['organization']
|
||||
|
||||
if labels is not None:
|
||||
association_fields['labels'] = []
|
||||
@@ -302,8 +322,6 @@ def main():
|
||||
else:
|
||||
association_fields['instance_groups'].append(instance_group_id['id'])
|
||||
|
||||
# Create the data that gets sent for create and update
|
||||
new_fields = {}
|
||||
if rrule is not None:
|
||||
new_fields['rrule'] = rrule
|
||||
new_fields['name'] = new_name if new_name else (module.get_item_name(existing_item) if existing_item else name)
|
||||
@@ -338,16 +356,6 @@ def main():
|
||||
if timeout is not None:
|
||||
new_fields['timeout'] = timeout
|
||||
|
||||
if execution_environment is not None:
|
||||
if execution_environment == '':
|
||||
new_fields['execution_environment'] = ''
|
||||
else:
|
||||
ee = module.get_one('execution_environments', name_or_id=execution_environment, **{'data': search_fields})
|
||||
if ee is None:
|
||||
module.fail_json(msg='could not find execution_environment entry with name {0}'.format(execution_environment))
|
||||
else:
|
||||
new_fields['execution_environment'] = ee['id']
|
||||
|
||||
# If the state was present and we can let the module build or update the existing item, this will return on its own
|
||||
module.create_or_update_if_needed(
|
||||
existing_item,
|
||||
|
||||
@@ -89,7 +89,7 @@ def coerce_type(module, value):
|
||||
if not HAS_YAML:
|
||||
module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
|
||||
return yaml.safe_load(value)
|
||||
elif value.lower in ('true', 'false', 't', 'f'):
|
||||
elif value.lower() in ('true', 'false', 't', 'f'):
|
||||
return {'t': True, 'f': False}[value[0].lower()]
|
||||
try:
|
||||
return int(value)
|
||||
|
||||
@@ -108,8 +108,9 @@
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- wait_results is successful
|
||||
- 'wait_results.status == "successful"'
|
||||
- 'wait_results.status in ["successful", "canceled"]'
|
||||
fail_msg: "Ad hoc command stdout: {{ lookup('awx.awx.controller_api', 'ad_hoc_commands/' + command.id | string + '/stdout/?format=json') }}"
|
||||
success_msg: "Ad hoc command finished with status {{ wait_results.status }}"
|
||||
|
||||
- name: Delete the Credential
|
||||
credential:
|
||||
|
||||
@@ -225,6 +225,7 @@
|
||||
schedule:
|
||||
name: "{{ sched2 }}"
|
||||
state: present
|
||||
organization: Default
|
||||
unified_job_template: "{{ jt1 }}"
|
||||
rrule: "DTSTART:20191219T130551Z RRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1"
|
||||
description: "This hopefully will work"
|
||||
|
||||
@@ -1,4 +1,42 @@
|
||||
---
|
||||
- name: Initialize starting project vvv setting to false
|
||||
awx.awx.settings:
|
||||
name: "PROJECT_UPDATE_VVV"
|
||||
value: false
|
||||
|
||||
- name: Change project vvv setting to true
|
||||
awx.awx.settings:
|
||||
name: "PROJECT_UPDATE_VVV"
|
||||
value: true
|
||||
register: result
|
||||
|
||||
- name: Changing setting to true should have changed the value
|
||||
assert:
|
||||
that:
|
||||
- "result is changed"
|
||||
|
||||
- name: Change project vvv setting to true
|
||||
awx.awx.settings:
|
||||
name: "PROJECT_UPDATE_VVV"
|
||||
value: true
|
||||
register: result
|
||||
|
||||
- name: Changing setting to true again should not change the value
|
||||
assert:
|
||||
that:
|
||||
- "result is not changed"
|
||||
|
||||
- name: Change project vvv setting back to false
|
||||
awx.awx.settings:
|
||||
name: "PROJECT_UPDATE_VVV"
|
||||
value: false
|
||||
register: result
|
||||
|
||||
- name: Changing setting back to false should have changed the value
|
||||
assert:
|
||||
that:
|
||||
- "result is changed"
|
||||
|
||||
- name: Set the value of AWX_ISOLATION_SHOW_PATHS to a baseline
|
||||
settings:
|
||||
name: AWX_ISOLATION_SHOW_PATHS
|
||||
|
||||
7
docs/docsite/requirements.in
Normal file
7
docs/docsite/requirements.in
Normal file
@@ -0,0 +1,7 @@
|
||||
# This requirements file is used for AWX latest doc builds.
|
||||
|
||||
sphinx # Tooling to build HTML from RST source.
|
||||
sphinx-ansible-theme # Ansible community theme for Sphinx doc builds.
|
||||
docutils # Tooling for RST processing and the swagger extension.
|
||||
Jinja2 # Requires investiation. Possibly inherited from previous repo with a custom theme.
|
||||
PyYaml # Requires investigation. Possibly used as tooling for swagger API reference content.
|
||||
@@ -1,5 +1,74 @@
|
||||
sphinx==5.1.1
|
||||
sphinx-ansible-theme==0.9.1
|
||||
#
|
||||
# This file is autogenerated by pip-compile with Python 3.11
|
||||
# by the following command:
|
||||
#
|
||||
# pip-compile --allow-unsafe --output-file=docs/docsite/requirements.txt --strip-extras docs/docsite/requirements.in
|
||||
#
|
||||
alabaster==0.7.13
|
||||
# via sphinx
|
||||
ansible-pygments==0.1.1
|
||||
# via sphinx-ansible-theme
|
||||
babel==2.12.1
|
||||
# via sphinx
|
||||
certifi==2023.7.22
|
||||
# via requests
|
||||
charset-normalizer==3.2.0
|
||||
# via requests
|
||||
docutils==0.16
|
||||
Jinja2<3.1
|
||||
PyYaml
|
||||
# via
|
||||
# -r docs/docsite/requirements.in
|
||||
# sphinx
|
||||
# sphinx-rtd-theme
|
||||
idna==3.4
|
||||
# via requests
|
||||
imagesize==1.4.1
|
||||
# via sphinx
|
||||
jinja2==3.0.3
|
||||
# via
|
||||
# -r docs/docsite/requirements.in
|
||||
# sphinx
|
||||
markupsafe==2.1.3
|
||||
# via jinja2
|
||||
packaging==23.1
|
||||
# via sphinx
|
||||
pygments==2.16.1
|
||||
# via
|
||||
# ansible-pygments
|
||||
# sphinx
|
||||
pyyaml==6.0.1
|
||||
# via -r docs/docsite/requirements.in
|
||||
requests==2.31.0
|
||||
# via sphinx
|
||||
snowballstemmer==2.2.0
|
||||
# via sphinx
|
||||
sphinx==5.1.1
|
||||
# via
|
||||
# -r docs/docsite/requirements.in
|
||||
# sphinx-ansible-theme
|
||||
# sphinx-rtd-theme
|
||||
# sphinxcontrib-applehelp
|
||||
# sphinxcontrib-devhelp
|
||||
# sphinxcontrib-htmlhelp
|
||||
# sphinxcontrib-jquery
|
||||
# sphinxcontrib-qthelp
|
||||
# sphinxcontrib-serializinghtml
|
||||
sphinx-ansible-theme==0.9.1
|
||||
# via -r docs/docsite/requirements.in
|
||||
sphinx-rtd-theme==1.3.0
|
||||
# via sphinx-ansible-theme
|
||||
sphinxcontrib-applehelp==1.0.7
|
||||
# via sphinx
|
||||
sphinxcontrib-devhelp==1.0.5
|
||||
# via sphinx
|
||||
sphinxcontrib-htmlhelp==2.0.4
|
||||
# via sphinx
|
||||
sphinxcontrib-jquery==4.1
|
||||
# via sphinx-rtd-theme
|
||||
sphinxcontrib-jsmath==1.0.1
|
||||
# via sphinx
|
||||
sphinxcontrib-qthelp==1.0.6
|
||||
# via sphinx
|
||||
sphinxcontrib-serializinghtml==1.1.9
|
||||
# via sphinx
|
||||
urllib3==2.0.4
|
||||
# via requests
|
||||
|
||||
@@ -1,10 +1,22 @@
|
||||
.. _ag_start:
|
||||
|
||||
==================
|
||||
AWX Administration
|
||||
==================
|
||||
=============================
|
||||
Administering AWX Deployments
|
||||
=============================
|
||||
|
||||
Learn how to administer AWX deployments through custom scripts, management jobs, and DevOps workflows.
|
||||
This guide assumes at least basic understanding of the systems that you manage and maintain with AWX.
|
||||
|
||||
This guide applies to the latest version of AWX only.
|
||||
The content in this guide is updated frequently and might contain functionality that is not available in previous versions.
|
||||
Likewise content in this guide can be removed or replaced if it applies to functionality that is no longer available in the latest version.
|
||||
|
||||
**Join us online**
|
||||
|
||||
We talk about AWX documentation on Matrix at `#docs:ansible.im <https://matrix.to/#/#docs:ansible.im>`_ and on libera IRC at ``#ansible-docs`` if you ever want to join us and chat about the docs!
|
||||
|
||||
You can also find lots of AWX discussion and get answers to questions at `forum.ansible.com <https://forum.ansible.com/>`_.
|
||||
|
||||
AWX Administration
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
@@ -26,7 +26,7 @@ Vertical scaling improvements
|
||||
.. index::
|
||||
pair: improvements; scaling
|
||||
|
||||
Control nodes are responsible for processing the output of jobs and writing them to the database. The process that does this is called the callback receiver. The callback receiver has a configurable number of workers, controlled by the setting ``JOB_EVENT_WORKERS``. In the past, the default for this setting was always 4, regardless of the CPU or memory capacity of the node. Now, in traditional virtual machines, the ``JOB_EVENT_WORKERS`` will be set to the same as the number of CPU if that is greater than 4. This means administrators that provision larger control nodes will see greater ability for those nodes to keep up with the job output created by jobs without having to manually adjust ``JOB_EVENT_WORKERS``.
|
||||
Control nodes are responsible for processing the output of jobs and writing them to the database. The process that does this is called the callback receiver. The callback receiver has a configurable number of workers, controlled by the setting ``JOB_EVENT_WORKERS``. In the past, the default for this setting was always 4, regardless of the CPU or memory capacity of the node. Now, in traditional virtual machines, the ``JOB_EVENT_WORKERS`` will be set to the same as the number of CPU if that is greater than 4. This means administrators that provision larger control nodes will see greater ability for those nodes to keep up with the job output created by jobs without having to manually adjust ``JOB_EVENT_WORKERS``.
|
||||
|
||||
|
||||
Job scheduling improvements
|
||||
@@ -34,9 +34,9 @@ Job scheduling improvements
|
||||
.. index::
|
||||
pair: improvements; scheduling
|
||||
|
||||
When jobs are created either via a schedule, a workflow, the UI or the API, they are first created in Pending state. To determine when and where to run this job, a background task called the Task Manager collects all pending and running jobs and determines where capacity is available to run the job. In previous versions of AWX, scheduling slowed as the number of pending and running jobs increased, and the Task Manager was vulnerable to timing out without having made any progress. The scenario exhibits symptoms of having thousands of pending jobs, available capacity, but no jobs starting.
|
||||
When jobs are created either via a schedule, a workflow, the UI or the API, they are first created in Pending state. To determine when and where to run this job, a background task called the Task Manager collects all pending and running jobs and determines where capacity is available to run the job. In previous versions of AWX, scheduling slowed as the number of pending and running jobs increased, and the Task Manager was vulnerable to timing out without having made any progress. The scenario exhibits symptoms of having thousands of pending jobs, available capacity, but no jobs starting.
|
||||
|
||||
Optimizations in the job scheduler have made scheduling faster, as well as safeguards to better ensure the scheduler commits its progress even if it is nearing time out. Additionally, work that previously occurred in the Task Manager that blocked its progress has been decoupled into separate, non-blocking work units executed by the Dispatcher.
|
||||
Optimizations in the job scheduler have made scheduling faster, as well as safeguards to better ensure the scheduler commits its progress even if it is nearing time out. Additionally, work that previously occurred in the Task Manager that blocked its progress has been decoupled into separate, non-blocking work units executed by the Dispatcher.
|
||||
|
||||
|
||||
Database resource usage improvements
|
||||
@@ -47,7 +47,7 @@ Database resource usage improvements
|
||||
|
||||
The use of database connections by running jobs has dramatically decreased, which removes a previous limit to concurrent running jobs, as well reduces pressure on memory consumption of PostgreSQL.
|
||||
|
||||
Each job in AWX has a worker process, called the dispatch worker, on the control node that started the process, which submits the work to the execution node via the Receptor, as well as consumes the output of the job and puts it in the Redis queue for the callback receiver to serialize the output and write it to the database as job events.
|
||||
Each job in AWX has a worker process, called the dispatch worker, on the control node that started the process, which submits the work to the execution node via the Receptor, as well as consumes the output of the job and puts it in the Redis queue for the callback receiver to serialize the output and write it to the database as job events.
|
||||
|
||||
The dispatch worker is also responsible for noticing if the job has been canceled by the user in order to then cancel the receptor work unit. In the past, the worker maintained multiple open database connections per job. This caused two main problems:
|
||||
|
||||
@@ -98,7 +98,7 @@ Capacity Planning
|
||||
Example capacity planning exercise
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. index::
|
||||
pair: exercise; capacity planning
|
||||
pair: exercise; capacity planning
|
||||
|
||||
Determining the number and size of instances to support the desired workload must take into account the following:
|
||||
|
||||
@@ -183,13 +183,13 @@ Control nodes
|
||||
^^^^^^^^^^^^^^
|
||||
Vertically scaling a control node increases the number of jobs it can perform control tasks for, which requires both more CPU and memory. In general, scaling CPU alongside memory in the same proportion is recommended (e.g. 1 CPU: 4GB RAM). Even in the case where memory consumption is observed to be high, increasing the CPU of an instance can often relieve pressure, as most memory consumption of control nodes is usually from unprocessed events.
|
||||
|
||||
As mentioned in the :ref:`ag_performance_improvements` section, increasing the number of CPU can also increase the job event processing rate of a control node. At this time, vertically scaling a control node does not increase the number of workers that handle web requests, so horizontally scaling is more effective, if the desire is to increase the API availability.
|
||||
As mentioned in the :ref:`ag_performance_improvements` section, increasing the number of CPU can also increase the job event processing rate of a control node. At this time, vertically scaling a control node does not increase the number of workers that handle web requests, so horizontally scaling is more effective, if the desire is to increase the API availability.
|
||||
|
||||
Execution Nodes
|
||||
^^^^^^^^^^^^^^^^
|
||||
Vertical scaling an execution node will provide more forks for job execution. As mentioned in the example, a host with 16 GB of memory will by default, be assigned the capacity to run 137 “forks”, which at the default setting of 5 forks/job, will be able to run around 22 jobs concurrently. In general, scaling CPU alongside memory in the same proportion is recommended. Like control and hybrid nodes, there is a “capacity adjustment” on each execution instance that can be used to align actual utilization with the estimation of capacity consumption AWX makes. By default, all nodes are set to the top range of the capacity AWX estimates the node to have. If actual monitoring data reveals the node to be over-utilized, decreasing the capacity adjustment can help bring this in line with actual usage.
|
||||
|
||||
Vertically scaling execution will do exactly what the user expects and increase the number of concurrent jobs an instance can run. One downside is that concurrently running jobs on the same execution node, while isolated from each other in the sense that they cannot access the other’s data, can impact the other's performance, if a particular job is very resource-consumptive and overwhelms the node to the extent that it degrades performance of the entire node. Horizontal scaling the execution plane (e.g deploying more execution nodes) can provide some additional isolation of workloads, as well as allowing administrators to assign different instances to different instance groups, which can then be assigned to Organizations, Inventories, or Job Templates. This can enable something like an instance group that can only be used for running jobs against a “production” Inventory, this way jobs for development do not end up eating up capacity and causing higher priority jobs to queue waiting for capacity.
|
||||
|
||||
Vertically scaling execution will do exactly what the user expects and increase the number of concurrent jobs an instance can run. One downside is that concurrently running jobs on the same execution node, while isolated from each other in the sense that they cannot access the other’s data, can impact the other's performance, if a particular job is very resource-consumptive and overwhelms the node to the extent that it degrades performance of the entire node. Horizontal scaling the execution plane (e.g deploying more execution nodes) can provide some additional isolation of workloads, as well as allowing administrators to assign different instances to different instance groups, which can then be assigned to Organizations, Inventories, or Job Templates. This can enable something like an instance group that can only be used for running jobs against a “production” Inventory, this way jobs for development do not end up eating up capacity and causing higher priority jobs to queue waiting for capacity.
|
||||
|
||||
|
||||
Hop Nodes
|
||||
@@ -198,7 +198,7 @@ Hop nodes have very low memory and CPU utilization and there is no significant m
|
||||
|
||||
Hybrid nodes
|
||||
^^^^^^^^^^^^^
|
||||
Hybrid nodes perform both execution and control tasks, so vertically scaling these nodes both increases the number of jobs they can run, and now in 4.3.0, how many events they can process.
|
||||
Hybrid nodes perform both execution and control tasks, so vertically scaling these nodes both increases the number of jobs they can run, and now in 4.3.0, how many events they can process.
|
||||
|
||||
|
||||
Capacity planning for Operator based Deployments
|
||||
@@ -240,23 +240,23 @@ The following are configurable settings in the database that may help improve pe
|
||||
- ``work_mem`` (integer)
|
||||
- ``maintenance_work_mem`` (integer)
|
||||
|
||||
All of these parameters reside under the ``postgresql.conf`` file (inside ``$PDATA`` directory), which manages the configurations of the database server.
|
||||
All of these parameters reside under the ``postgresql.conf`` file (inside ``$PDATA`` directory), which manages the configurations of the database server.
|
||||
|
||||
The **shared_buffers** parameter determines how much memory is dedicated to the server for caching data. Set in ``postgresql.conf``, the default value for this parameter is::
|
||||
|
||||
#sharedPostgres_buffers = 128MB
|
||||
|
||||
|
||||
The value should be set at 15%-25% of the machine’s total RAM. For example: if your machine’s RAM size is 32 GB, then the recommended value for ``shared_buffers`` is 8 GB. Please note that the database server needs to be restarted after this change.
|
||||
|
||||
The **work_mem** parameter basically provides the amount of memory to be used by internal sort operations and hash tables before writing to temporary disk files. Sort operations are used for order by, distinct, and merge join operations. Hash tables are used in hash joins and hash based aggregation. Set in ``postgresql.conf``, the default value for this parameter is::
|
||||
|
||||
#work_mem = 4MB
|
||||
|
||||
Setting the correct value of ``work_mem`` parameter can result in less disk-swapping, and therefore far quicker queries.
|
||||
Setting the correct value of ``work_mem`` parameter can result in less disk-swapping, and therefore far quicker queries.
|
||||
|
||||
We can use the formula below to calculate the optimal ``work_mem`` value for the database server::
|
||||
|
||||
Total RAM * 0.25 / max_connections
|
||||
Total RAM * 0.25 / max_connections
|
||||
|
||||
The ``max_connections`` parameter is one of the GUC parameters to specify the maximum number of concurrent connections to the database server. Please note setting a large ``work_mem`` can cause issues like PostgreSQL server going out of memory (OOM), if there are too many open connections to the database.
|
||||
|
||||
@@ -264,10 +264,40 @@ The **maintenance_work_mem** parameter basically provides the maximum amount of
|
||||
|
||||
#maintenance_work_mem = 64MB
|
||||
|
||||
It is recommended to set this value higher than ``work_mem``; this can improve performance for vacuuming. In general, it should calculated as::
|
||||
It is recommended to set this value higher than ``work_mem``; this can improve performance for vacuuming. In general, it should calculated as::
|
||||
|
||||
Total RAM * 0.05
|
||||
|
||||
Max Connections
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
For a realistic method of determining a value of ``max_connections``, a ballpark formula for AWX is outlined here.
|
||||
Database connections will scale with the number of control and hybrid nodes.
|
||||
Per-node connection needs are listed here.
|
||||
|
||||
* Callback Receiver workers: 4 connections per node or the number of CPUs per node, whichever is larger
|
||||
* Dispatcher Workers: instance (forks) capacity plus 7
|
||||
* uWSGI workers: 16 connections per node
|
||||
* Listeners and auxiliary services: 4 connections per node
|
||||
* Reserve for installer and other actions: 5 connections in total
|
||||
|
||||
Each of these points represent maximum expected connection use in high-load circumstances.
|
||||
To apply this, consider a cluster with 3 hybrid nodes, each with 8 CPUs and 16 GB of RAM.
|
||||
The capacity formula will determine a capacity of 132 forks per node based on the memory and capacity formula.
|
||||
|
||||
(3 nodes) x (
|
||||
(8 CPUs / node) x (1 connection / CPU) +
|
||||
(132 forks / node) x (1 connection / fork) + (7 connections / node) +
|
||||
(16 connections / node) +
|
||||
(4 connections / node)
|
||||
) + (5 connections)
|
||||
|
||||
Adding up all the components comes out to 506 for this example cluster.
|
||||
Practically, this means that the max_connections should be set to something higher than this.
|
||||
Additional connections should be added to account for other platform components.
|
||||
|
||||
This calculation is most sensitive to the number of forks per node. Database connections are briefly opened at the start of and end of jobs. Environments where bursts of many jobs start at once will be most likely to reach the theoretical max number of open database connections.
|
||||
The max number of jobs that would be started concurrently can be adjusted by modifying the effective capacity of the instances. This can be done with the SYSTEM_TASK_ABS_MEM setting, the capacity adjustment on instances, or with instance groups max jobs or max forks.
|
||||
|
||||
AWX Settings
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
@@ -332,7 +362,7 @@ Task Manager (Job Scheduling) Settings
|
||||
pair: settings; job scheduling
|
||||
|
||||
The task manager is a periodic task that collects tasks that need to be scheduled and determines what instances have capacity and are eligible for running them. Its job is to find and assign the control and execution instances, update the job’s status to waiting, and send the message to the control node via ``pg_notify`` for the dispatcher to pick up the task and start running it.
|
||||
|
||||
|
||||
As mentioned in the :ref:`ag_performance_improvements` section, a number of optimizations and refactors of this process were implemented in version 4.3. One such refactor was to fix a defect that when the task manager did reach its timeout, it was terminated in such a way that it did not make any progress. Multiple changes were implemented to fix this, so that as the task manager approaches its timeout, it makes an effort to exit and commit any progress made on that run. These issues generally arise when there are thousands of pending jobs, so may not be applicable to your use case.
|
||||
|
||||
The first “short-circuit” available to limit how much work the task manager attempts to do in one run is ``START_TASK_LIMIT``. The default is 100 jobs, which is a safe default. If there are remaining jobs to schedule, a new run of the task manager will be scheduled to run immediately after the current run. Users who are willing to risk potentially longer individual runs of the task manager in order to start more jobs in individual run may consider increasing the ``START_TASK_LIMIT``. One metric, the Prometheus metrics, available in ``/api/v2/metrics`` observes how long individual runs of the task manager take is “task_manager__schedule_seconds”.
|
||||
|
||||
22
docs/docsite/rst/contributor/index.rst
Normal file
22
docs/docsite/rst/contributor/index.rst
Normal file
@@ -0,0 +1,22 @@
|
||||
.. _contributor_guide:
|
||||
|
||||
=======================
|
||||
AWX Contributor's Guide
|
||||
=======================
|
||||
|
||||
Want to get involved with the AWX community?
|
||||
Great!
|
||||
There are so many ways you can contribute to AWX.
|
||||
|
||||
**Join us online**
|
||||
|
||||
You can chat with us and ask questions on Matrix at `#awx:ansible.com <https://matrix.to/#/#awx:ansible.com>`_ or visit the `Ansible Community Forum <https://forum.ansible.com/c/project/7/>`_ to find contributor resources.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:numbered:
|
||||
|
||||
intro
|
||||
setting_up
|
||||
work_items
|
||||
report_issues
|
||||
9
docs/docsite/rst/contributor/intro.rst
Normal file
9
docs/docsite/rst/contributor/intro.rst
Normal file
@@ -0,0 +1,9 @@
|
||||
|
||||
Introduction
|
||||
=============
|
||||
|
||||
Hi there! We're excited to have you as a contributor.
|
||||
|
||||
Have questions about this document or anything not covered here? Come chat with us and ask questions on Matrix at `#awx:ansible.com <https://matrix.to/#/#awx:ansible.com>`_.
|
||||
|
||||
Also visit the `Ansible Community Forum <https://forum.ansible.com/c/project/7/>`_ to find contributor resources where you can also submit your questions or concerns.
|
||||
22
docs/docsite/rst/contributor/report_issues.rst
Normal file
22
docs/docsite/rst/contributor/report_issues.rst
Normal file
@@ -0,0 +1,22 @@
|
||||
|
||||
.. _docs_report_issues:
|
||||
|
||||
Reporting Issues
|
||||
================
|
||||
|
||||
To report issues you find in the AWX documentation, use the GitHub `issue tracker <https://github.com/ansible/awx/issues>`_ for filing bugs. In order to save time, and help us respond to issues quickly, make sure to fill out as much of the issue template
|
||||
as possible. Version information, and an accurate reproducing scenario are critical to helping us identify the problem.
|
||||
|
||||
Be sure to attach the ``component:docs`` label to your issue. These labels are determined by the template data. Please use the template and fill it out as accurately as possible.
|
||||
|
||||
Please don't use the issue tracker as a way to ask how to do something. Instead, discuss it on on the `Ansible Community Forum <https://forum.ansible.com/c/project/7/>`_, or you can chat with us and ask questions on Matrix at `#awx:ansible.com <https://matrix.to/#/#awx:ansible.com>`_.
|
||||
|
||||
Before opening a new issue, please use the issue search feature to see if what you're experiencing has already been reported. If you have any extra detail to provide, please comment. Otherwise, rather than posting a "me too" comment, please consider giving it a `"thumbs up" <https://github.com/blog/2119-add-reactions-to-pull-requests-issues-and-comment>`_ to give us an indication of the severity of the problem.
|
||||
|
||||
See `How issues are resolved <https://github.com/ansible/awx/blob/devel/ISSUES.md#how-issues-are-resolved>`_ for more information about the triaging and resolution process.
|
||||
|
||||
|
||||
Getting help
|
||||
-------------
|
||||
|
||||
If you require additional assistance, join the discussions on the `Ansible Community Forum <https://forum.ansible.com/c/project/7/>`_. Specify with tags ``#documentation`` and ``#awx`` to narrow down the area(s) of interest. For more information on tags, see `Navigating the Ansible forum — Tags, Categories, and Concepts <https://forum.ansible.com/t/navigating-the-ansible-forum-tags-categories-and-concepts/39>`_. You may also reach out to us and ask questions on Matrix at `#awx:ansible.com <https://matrix.to/#/#awx:ansible.com>`_.
|
||||
76
docs/docsite/rst/contributor/setting_up.rst
Normal file
76
docs/docsite/rst/contributor/setting_up.rst
Normal file
@@ -0,0 +1,76 @@
|
||||
|
||||
Setting up your development environment
|
||||
========================================
|
||||
|
||||
The AWX docs are developed using the Python toolchain. The content itself is authored in ReStructuredText (rst).
|
||||
|
||||
Prerequisites
|
||||
---------------
|
||||
|
||||
.. contents::
|
||||
:local:
|
||||
|
||||
|
||||
Fork and clone the AWX repo
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
If you have not done so already, you'll need to fork the AWX repo on GitHub. For more on how to do this, see `Fork a Repo <https://help.github.com/articles/fork-a-repo/>`_.
|
||||
|
||||
|
||||
Install python and setuptools
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Install the setuptools package on Linux using pip:
|
||||
|
||||
|
||||
1. If not already installed, `download the latest version of Python3 <https://www.geeksforgeeks.org/how-to-download-and-install-python-latest-version-on-linux/>`_ on your machine.
|
||||
|
||||
2. Check if pip3 and python3 are correctly installed in your system using the following command:
|
||||
|
||||
::
|
||||
|
||||
python3 --version
|
||||
pip3 --version
|
||||
|
||||
3. Upgrade pip3 to the latest version to prevent installation issues:
|
||||
|
||||
::
|
||||
|
||||
pip3 install --upgrade pip
|
||||
|
||||
4. Install Setuptools:
|
||||
|
||||
::
|
||||
|
||||
pip3 install setuptools
|
||||
|
||||
5. Verify whether the Setuptools has been properly installed:
|
||||
|
||||
::
|
||||
|
||||
python3 -c 'import setuptools'
|
||||
|
||||
If no errors are returned, then the package was installed properly.
|
||||
|
||||
6. Install the tox package so you can build the docs locally:
|
||||
|
||||
::
|
||||
|
||||
pip3 install tox
|
||||
|
||||
|
||||
|
||||
Run local build of the docs
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To build the docs on your local machine, use the tox utility. In your forked branch of your AWX repo, run:
|
||||
|
||||
::
|
||||
|
||||
tox -e docs
|
||||
|
||||
|
||||
Access the AWX user interface
|
||||
------------------------------
|
||||
|
||||
To access an instance of the AWX interface, refer to `Build and run the development environment <https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md#setting-up-your-development-environment>`_ for detail. Once you have your environment setup, you can access the AWX UI by logging into it at `https://localhost:8043 <https://localhost:8043>`_, and access the API directly at `https://localhost:8043/api/ <https://localhost:8043/api/>`_.
|
||||
45
docs/docsite/rst/contributor/work_items.rst
Normal file
45
docs/docsite/rst/contributor/work_items.rst
Normal file
@@ -0,0 +1,45 @@
|
||||
|
||||
What should I work on?
|
||||
=======================
|
||||
|
||||
Good first issue
|
||||
-----------------
|
||||
|
||||
We have a `"good first issue" label <https://github.com/ansible/awx/issues?q=is%3Aopen+label%3A%22good+first+issue%22+label%3Acomponent%3Adocs>`_ that we put on some doc issues that might be a good starting point for new contributors, using the following filter:
|
||||
|
||||
::
|
||||
|
||||
is:open label:"good first issue" label:component:docs
|
||||
|
||||
|
||||
Fixing and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start.
|
||||
|
||||
|
||||
Things to know prior to submitting revisions
|
||||
----------------------------------------------
|
||||
|
||||
- All doc revisions or additions are done through pull requests against the ``devel`` branch.
|
||||
- You must use ``git commit --signoff`` for any commit to be merged, and agree that usage of ``--signoff`` constitutes agreement with the terms of `DCO 1.1 <https://github.com/ansible/awx/blob/devel/DCO_1_1.md>`_.
|
||||
- Take care to make sure no merge commits are in the submission, and use ``git rebase`` vs ``git merge`` for this reason.
|
||||
- If collaborating with someone else on the same branch, consider using ``--force-with-lease`` instead of ``--force``. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see `git push docs <https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt>`_.
|
||||
- If submitting a large doc change, it's a good idea to join the `Ansible Community Forum <https://forum.ansible.com/c/project/7/>`_, and talk about what you would like to do or add first. Use the ``#documentation`` and ``#awx`` tags to help notify relevant people of the topic. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed. For more information on tags, see `Navigating the Ansible forum — Tags, Categories, and Concepts <https://forum.ansible.com/t/navigating-the-ansible-forum-tags-categories-and-concepts/39>`_.
|
||||
- We ask all of our community members and contributors to adhere to the `Ansible code of conduct <http://docs.ansible.com/ansible/latest/community/code_of_conduct.html>`_. If you have questions, or need assistance, please reach out to our community team at `codeofconduct@ansible.com <mailto:codeofconduct@ansible.com>`_.
|
||||
|
||||
|
||||
.. Note::
|
||||
|
||||
- Issue assignment will only be done for maintainers of the project. If you decide to work on an issue, please feel free to add a comment in the issue to let others know that you are working on it; but know that we will accept the first pull request from whoever is able to fix an issue. Once your PR is accepted we can add you as an assignee to an issue upon request.
|
||||
|
||||
- If you work in a part of the docs that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us and ask questions on Matrix at `#awx:ansible.com <https://matrix.to/#/#awx:ansible.com>`_ or discuss your ideas on the `Ansible Community Forum <https://forum.ansible.com/c/project/7/>`_.
|
||||
|
||||
- If you find an issue with the functions of the UI or API, please see the `Reporting Issues <https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md#reporting-issues>`_ section to open an issue.
|
||||
|
||||
- If you find an issue with the docs themselves, refer to :ref:`docs_report_issues`.
|
||||
|
||||
|
||||
Translations
|
||||
-------------
|
||||
|
||||
At this time we do not accept PRs for adding additional language translations as we have an automated process for generating our translations. This is because translations require constant care as new strings are added and changed in the code base. Because of this, the .po files are overwritten during every translation release cycle. We also can't support a lot of translations on AWX as it's an open source project and each language adds time and cost to maintain. If you would like to see AWX translated into a new language please create an issue and ask others you know to upvote the issue. Our translation team will review the needs of the community and see what they can do around supporting additional languages.
|
||||
|
||||
If you find an issue with an existing translation, please see the `Reporting Issues <https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md#reporting-issues>`_ section to open an issue and our translation team will work with you on a resolution.
|
||||
@@ -5,36 +5,37 @@ Ansible AWX helps teams manage complex multi-tier deployments by adding control,
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: AWX Quickstart
|
||||
:caption: Get started
|
||||
|
||||
quickstart/index
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: User Guide
|
||||
:caption: Community
|
||||
|
||||
contributor/index
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Users
|
||||
|
||||
userguide/index
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: AWX Administration
|
||||
|
||||
administration/index
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: AWX REST API
|
||||
:caption: Developers
|
||||
|
||||
rest_api/index
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Upgrades and Migrations
|
||||
:caption: Administrators
|
||||
|
||||
administration/index
|
||||
upgrade_migration/index
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Release Notes
|
||||
:caption: Release notes
|
||||
|
||||
release_notes/index
|
||||
|
||||
@@ -4,7 +4,17 @@
|
||||
AWX Quickstart
|
||||
==============
|
||||
|
||||
AWX Quickstart
|
||||
Complete the basic steps for using AWX and running your first playbook.
|
||||
|
||||
This guide applies to the latest version of AWX only.
|
||||
The content in this guide is updated frequently and might contain functionality that is not available in previous versions.
|
||||
Likewise content in this guide can be removed or replaced if it applies to functionality that is no longer available in the latest version.
|
||||
|
||||
**Join us online**
|
||||
|
||||
We talk about AWX documentation on Matrix at `#docs:ansible.im <https://matrix.to/#/#docs:ansible.im>`_ and on libera IRC at ``#ansible-docs`` if you ever want to join us and chat about the docs!
|
||||
|
||||
You can also find lots of AWX discussion and get answers to questions at `forum.ansible.com <https://forum.ansible.com/>`_.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
@@ -1,10 +1,20 @@
|
||||
.. _releasenotes_start:
|
||||
|
||||
=================
|
||||
AWX Release Notes
|
||||
=================
|
||||
=============
|
||||
Release Notes
|
||||
=============
|
||||
|
||||
AWX Release Notes
|
||||
AWX release notes, known issues, and related reference materials.
|
||||
|
||||
This guide applies to the latest version of AWX only.
|
||||
The content in this guide is updated frequently and might contain functionality that is not available in previous versions.
|
||||
Likewise content in this guide can be removed or replaced if it applies to functionality that is no longer available in the latest version.
|
||||
|
||||
**Join us online**
|
||||
|
||||
We talk about AWX documentation on Matrix at `#docs:ansible.im <https://matrix.to/#/#docs:ansible.im>`_ if you ever want to join us and chat about the docs!
|
||||
|
||||
You can also find lots of AWX discussion and get answers to questions on the `Ansible Community Forum <https://forum.ansible.com/c/project/7/>`_.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
@@ -5,32 +5,16 @@ Release Notes
|
||||
**************
|
||||
|
||||
.. index::
|
||||
pair: release notes; v23.00
|
||||
pair: release notes; v23.0.0
|
||||
pair: release notes; v23.1.0
|
||||
pair: release notes; v23.2.0
|
||||
|
||||
|
||||
For versions older than 23.0.0, refer to `AWX Release Notes <https://github.com/ansible/awx/releases>`_.
|
||||
|
||||
.. Removed relnotes_current from common/.
|
||||
|
||||
23.0.0
|
||||
-------
|
||||
- See `What's Changed for 23.2.0 <https://github.com/ansible/awx/releases/tag/23.2.0>`_.
|
||||
|
||||
- Added hop nodes support for k8s (@fosterseth #13904)
|
||||
- Reverted "Improve performance for AWX CLI export (#13182)" (@jbradberry #14342)
|
||||
- Corrected spelling on database downtime and tolerance variable (@tuxpreacher #14347)
|
||||
- Fixed schedule rruleset (@KaraokeKev #13611)
|
||||
- Updates ``python-tss-sdk`` dependency (@delinea-sagar #14207)
|
||||
- Fixed UI_NEXT build process broken by ansible/ansible-ui#766 (@TheRealHaoLiu #14349)
|
||||
- Fixed task and web docs (@abwalczyk #14350)
|
||||
- Fixed UI_NEXT build step file path issue (@TheRealHaoLiu #14357)
|
||||
- Added required epoch time field for Splunk HEC event receiver (@digitalbadger-uk #14246)
|
||||
- Fixed edit constructed inventory hanging loading state (@marshmalien #14343)
|
||||
- Added location for locales in nginx config (@mabashian #14368)
|
||||
- Updated cryptography for CVE-2023-38325 (@relrod #14358)
|
||||
- Applied ``AWX_TASK_ENV`` when performing credential plugin lookups (@AlanCoding #14271)
|
||||
- Enforced mutually exclusive options in credential module of the collection (@djdanielsson #14363)
|
||||
- Added an example to clarify that the ``awx.subscriptions`` module should be used prior to ``awx.license`` (@phess #14351)
|
||||
- Fixed default Redis URL to pass check in redis-py>4.4 (@ChandlerSwift #14344)
|
||||
- Fixed typo in the description of ``scm_update_on_launch`` (@bxbrenden #14382)
|
||||
- Fixed CVE-2023-40267 (@TheRealHaoLiu #14388)
|
||||
- Updated PR body checks (@AlanCoding #14389)
|
||||
- See `What's Changed for 23.1.0 <https://github.com/ansible/awx/releases/tag/23.1.0>`_.
|
||||
|
||||
- See `What's Changed for 23.0.0 <https://github.com/ansible/awx/releases/tag/23.0.0>`_.
|
||||
@@ -1,10 +1,20 @@
|
||||
.. _api_start:
|
||||
|
||||
============
|
||||
AWX REST API
|
||||
============
|
||||
=================
|
||||
AWX API Reference
|
||||
=================
|
||||
|
||||
AWX REST API
|
||||
Developer reference for the AWX API.
|
||||
|
||||
This guide applies to the latest version of AWX only.
|
||||
The content in this guide is updated frequently and might contain functionality that is not available in previous versions.
|
||||
Likewise content in this guide can be removed or replaced if it applies to functionality that is no longer available in the latest version.
|
||||
|
||||
**Join us online**
|
||||
|
||||
We talk about AWX documentation on Matrix at `#docs:ansible.im <https://matrix.to/#/#docs:ansible.im>`_ and on libera IRC at ``#ansible-docs`` if you ever want to join us and chat about the docs!
|
||||
|
||||
You can also find lots of AWX discussion and get answers to questions at `forum.ansible.com <https://forum.ansible.com/>`_.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
@@ -1,10 +1,20 @@
|
||||
.. _upgrade_migration_start:
|
||||
|
||||
=======================================
|
||||
Upgrading and Migrating AWX Deployments
|
||||
=======================================
|
||||
=======================
|
||||
Upgrades and Migrations
|
||||
=======================
|
||||
|
||||
Upgrading and Migrating AWX Deployments
|
||||
Review important information before upgrading or migrating AWX deployments.
|
||||
|
||||
This guide applies to the latest version of AWX only.
|
||||
The content in this guide is updated frequently and might contain functionality that is not available in previous versions.
|
||||
Likewise content in this guide can be removed or replaced if it applies to functionality that is no longer available in the latest version.
|
||||
|
||||
**Join us online**
|
||||
|
||||
We talk about AWX documentation on Matrix at `#docs:ansible.im <https://matrix.to/#/#docs:ansible.im>`_ and on libera IRC at ``#ansible-docs`` if you ever want to join us and chat about the docs!
|
||||
|
||||
You can also find lots of AWX discussion and get answers to questions at `forum.ansible.com <https://forum.ansible.com/>`_.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
@@ -31,11 +31,12 @@ Access the Applications page by clicking **Applications** from the left navigati
|
||||
|Applications - home with example apps|
|
||||
|
||||
.. |Applications - home with example apps| image:: ../common/images/apps-list-view-examples.png
|
||||
:alt: Applications list view
|
||||
|
||||
If no other applications exist, only a gray box with a message to add applications displays.
|
||||
|
||||
.. image:: ../common/images/apps-list-view-empty.png
|
||||
|
||||
:alt: No applications found in the list view
|
||||
|
||||
.. _ug_applications_auth_create:
|
||||
|
||||
@@ -59,6 +60,7 @@ The New Application window opens.
|
||||
|Create application|
|
||||
|
||||
.. |Create application| image:: ../common/images/apps-create-new.png
|
||||
:alt: Create new application dialog
|
||||
|
||||
3. Enter the following details in **Create New Application** window:
|
||||
|
||||
@@ -72,7 +74,7 @@ The New Application window opens.
|
||||
4. When done, click **Save** or **Cancel** to abandon your changes. Upon saving, the client ID displays in a pop-up window.
|
||||
|
||||
.. image:: ../common/images/apps-client-id-popup.png
|
||||
|
||||
:alt: Client ID popup
|
||||
|
||||
Applications - Tokens
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@@ -86,6 +88,7 @@ Selecting the **Tokens** view displays a list of the users that have tokens to a
|
||||
|Applications - tokens list|
|
||||
|
||||
.. |Applications - tokens list| image:: ../common/images/apps-tokens-list-view-examples.png
|
||||
:alt: Application tokens list view
|
||||
|
||||
Tokens can only access resources that its associated user can access, and can be limited further by specifying the scope of the token.
|
||||
|
||||
@@ -108,3 +111,4 @@ Tokens are added through the Users screen and can be associated with an applicat
|
||||
To verify the application in the example above now shows the user with the appropriate token, go to the **Tokens** tab of the Applications window:
|
||||
|
||||
.. image:: ../common/images/apps-tokens-list-view-example2.png
|
||||
:alt: Verifying a specific user application token
|
||||
|
||||
@@ -45,17 +45,20 @@ Use the AWX User Interface to configure and use each of the supported 3rd-party se
|
||||
3. For any of the fields below the **Type Details** area that you want to link to the external credential, click the |key| button of the input field. You are prompted to set the input source to use to retrieve your secret information.
|
||||
|
||||
.. |key| image:: ../common/images/key-mgmt-button.png
|
||||
|
||||
:alt: Icon for managing external credentials
|
||||
.. image:: ../common/images/credentials-link-credential-prompt.png
|
||||
:alt: Credential section of the external secret management system dialog
|
||||
|
||||
4. Select the credential you want to link to, and click **Next**. This takes you to the **Metadata** tab of the input source. This example shows the Metadata prompt for HashiVault Secret Lookup. Metadata is specific to the input source you select. See the :ref:`ug_metadata_creds_inputs` table for details.
|
||||
|
||||
.. image:: ../common/images/credentials-link-metadata-prompt.png
|
||||
:alt: Metadata section of the external secret management system dialog
|
||||
|
||||
5. Click **Test** to verify connection to the secret management system. If the lookup is unsuccessful, an error message like this one displays:
|
||||
|
||||
.. image:: ../common/images/credentials-link-metadata-test-error.png
|
||||
|
||||
:alt: Example exception dialog for credentials lookup
|
||||
|
||||
6. When done, click **OK**. This closes the prompt window and returns you to the Details screen of your target credential. **Repeat these steps**, starting with :ref:`step 3 above <ag_credential_plugins_link_step>` to complete the remaining input fields for the target credential. By linking the information in this manner, AWX retrieves sensitive information, such as username, password, keys, certificates, and tokens from the 3rd-party management systems and populates that data into the remaining fields of the target credential form.
|
||||
|
||||
7. If necessary, supply any information manually for those fields that do not use linking as a way of retrieving sensitive information. Refer to the appropriate :ref:`ug_credentials_cred_types` for more detail about each of the fields.
|
||||
@@ -200,7 +203,7 @@ You need the Centrify Vault web service running to store secrets in order for th
|
||||
Below shows an example of a configured CyberArk AIM credential.
|
||||
|
||||
.. image:: ../common/images/credentials-create-centrify-vault-credential.png
|
||||
|
||||
:alt: Example new centrify vault credential lookup dialog
|
||||
|
||||
.. _ug_credentials_cyberarkccp:
|
||||
|
||||
@@ -222,7 +225,7 @@ You need the CyberArk Central Credential Provider web service running to store s
|
||||
Below shows an example of a configured CyberArk CCP credential.
|
||||
|
||||
.. image:: ../common/images/credentials-create-cyberark-ccp-credential.png
|
||||
|
||||
:alt: Example new CyberArk vault credential lookup dialog
|
||||
|
||||
.. _ug_credentials_cyberarkconjur:
|
||||
|
||||
@@ -245,7 +248,7 @@ When **CyberArk Conjur Secrets Manager Lookup** is selected for **Credential Typ
|
||||
Below shows an example of a configured CyberArk Conjur credential.
|
||||
|
||||
.. image:: ../common/images/credentials-create-cyberark-conjur-credential.png
|
||||
|
||||
:alt: Example new CyberArk Conjur Secret lookup dialog
|
||||
|
||||
.. _ug_credentials_hashivault:
|
||||
|
||||
@@ -268,7 +271,7 @@ When **HashiCorp Vault Secret Lookup** is selected for **Credential Type**, prov
|
||||
For more detail about Approle and its fields, refer to the `Vault documentation for Approle Auth Method <https://www.vaultproject.io/docs/auth/approle>`_. Below shows an example of a configured HashiCorp Vault Secret Lookup credential.
|
||||
|
||||
.. image:: ../common/images/credentials-create-hashicorp-kv-credential.png
|
||||
|
||||
:alt: Example new HashiCorp Vault Secret lookup dialog
|
||||
|
||||
.. _ug_credentials_hashivaultssh:
|
||||
|
||||
@@ -292,7 +295,7 @@ For more detail about Approle and its fields, refer to the `Vault documentation
|
||||
Below shows an example of a configured HashiCorp SSH Secrets Engine credential.
|
||||
|
||||
.. image:: ../common/images/credentials-create-hashicorp-ssh-credential.png
|
||||
|
||||
:alt: Example new HashiCorp Vault Signed SSH credential lookup dialog
|
||||
|
||||
.. _ug_credentials_azurekeyvault:
|
||||
|
||||
@@ -314,7 +317,7 @@ When **Microsoft Azure Key Vault** is selected for **Credential Type**, provide
|
||||
Below shows an example of a configured Microsoft Azure KMS credential.
|
||||
|
||||
.. image:: ../common/images/credentials-create-azure-kms-credential.png
|
||||
|
||||
:alt: Example new Microsoft Azure Key Vault credential lookup dialog
|
||||
|
||||
.. _ug_credentials_thycoticvault:
|
||||
|
||||
@@ -334,6 +337,7 @@ When **Thycotic DevOps Secrets Vault** is selected for **Credential Type**, prov
|
||||
Below shows an example of a configured Thycotic DevOps Secrets Vault credential.
|
||||
|
||||
.. image:: ../common/images/credentials-create-thycotic-devops-credential.png
|
||||
:alt: Example new Thycotic DevOps Secrets Vault credential lookup dialog
|
||||
|
||||
|
||||
|
||||
@@ -354,5 +358,6 @@ When **Thycotic Secrets Server** is selected for **Credential Type**, provide th
|
||||
Below shows an example of a configured Thycotic Secret Server credential.
|
||||
|
||||
.. image:: ../common/images/credentials-create-thycotic-server-credential.png
|
||||
:alt: Example new Thycotic Secret Server credential lookup dialog
|
||||
|
||||
|
||||
|
||||
@@ -37,6 +37,7 @@ Additionally, post-upgrade, these settings are not visible (or editable) from
|
||||
AWX should still continue to fetch roles directly from public Galaxy even if galaxy.ansible.com is not the first credential in the list for the Organization. The global "Galaxy" settings are no longer configured at the jobs level, but at the Organization level in the User Interface. The Organization's Add and Edit windows have an optional **Credential** lookup field for credentials of ``kind=galaxy``.
|
||||
|
||||
.. image:: ../common/images/organizations-galaxy-credentials.png
|
||||
:alt: Create a new Organization with Galaxy Credentials
|
||||
|
||||
It is very important to specify the order of these credentials as order sets precedence for the sync and lookup of the content.
|
||||
For more information, see :ref:`ug_organizations_create`.
|
||||
@@ -99,6 +100,7 @@ Access the Credentials from clicking **Credential Types** from the left navigati
|
||||
|Credential Types - home empty|
|
||||
|
||||
.. |Credential Types - home empty| image:: ../common/images/credential-types-home-empty.png
|
||||
:alt: Credential Types view without any credential types populated
|
||||
|
||||
|
||||
If credential types have been created, this page displays a list of all existing and available Credential Types.
|
||||
@@ -106,10 +108,12 @@ If credential types have been created, this page displays a list of all existing
|
||||
|Credential Types - home with example credential types|
|
||||
|
||||
.. |Credential Types - home with example credential types| image:: ../common/images/credential-types-home-with-example-types.png
|
||||
:alt: Credential Types list view with example credential types
|
||||
|
||||
To view more information about a credential type, click on its name or the Edit (|edit|) button from the **Actions** column.
|
||||
|
||||
.. |edit| image:: ../common/images/edit-button.png
|
||||
:alt: Edit button
|
||||
|
||||
Each credential type displays its own unique configurations in the **Input Configuration** field and the **Injector Configuration** field, if applicable. Both YAML and JSON formats are supported in the configuration fields.
|
||||
|
||||
@@ -127,6 +131,7 @@ To create a new credential type:
|
||||
|Create new credential type|
|
||||
|
||||
.. |Create new credential type| image:: ../common/images/credential-types-create-new.png
|
||||
:alt: Create new credential type form
|
||||
|
||||
2. Enter the appropriate details in the **Name** and **Description** field.
|
||||
|
||||
@@ -302,6 +307,7 @@ An example of referencing multiple files in a custom credential template is as f
|
||||
|New credential type|
|
||||
|
||||
.. |New credential type| image:: ../common/images/credential-types-new-listed.png
|
||||
:alt: Credential Types list view with newly created credential type shown
|
||||
|
||||
Click |edit| to modify the credential type options under the Actions column.
|
||||
|
||||
@@ -310,6 +316,7 @@ Click |edit| to modify the credential type options under the Actions column.
|
||||
In the Edit screen, you can modify the details or delete the credential. If the **Delete** button is grayed out, it is an indication that the credential type is being used by a credential, and you must delete the credential type from all the credentials that use it before you can delete it. Below is an example of such a message:
|
||||
|
||||
.. image:: ../common/images/credential-types-delete-confirmation.png
|
||||
:alt: Credential type delete confirmation
|
||||
|
||||
|
||||
7. Verify that the newly created credential type can be selected from the **Credential Type** selection window when creating a new credential:
|
||||
@@ -317,5 +324,6 @@ Click |edit| to modify the credential type options under the Actions column.
|
||||
|Verify new credential type|
|
||||
|
||||
.. |Verify new credential type| image:: ../common/images/credential-types-new-listed-verify.png
|
||||
:alt: Newly created credential type selected from the credentials drop-down menu
|
||||
|
||||
For details on how to create a new credential, see :ref:`ug_credentials`.
|
||||
|
||||
@@ -41,6 +41,7 @@ Click **Credentials** from the left navigation bar to access the Credentials pag
|
||||
|Credentials - home with example credentials|
|
||||
|
||||
.. |Credentials - home with example credentials| image:: ../common/images/credentials-demo-edit-details.png
|
||||
:alt: Credentials - home with example credentials
|
||||
|
||||
Credentials added to a Team are made available to all members of the Team, whereas credentials added to a User are only available to that specific User by default.
|
||||
|
||||
@@ -51,6 +52,7 @@ Clicking on the link for the **Demo Credential** takes you to the **Details** vi
|
||||
|Credentials - home with demo credential details|
|
||||
|
||||
.. |Credentials - home with demo credential details| image:: ../common/images/credentials-home-with-demo-credential-details.png
|
||||
:alt: Credentials - Demo credential details
|
||||
|
||||
|
||||
Clicking the **Access** tab shows you users and teams associated with this Credential and their granted roles (owner, admin, auditor, etc.)
|
||||
@@ -59,6 +61,7 @@ Clicking the **Access** tab shows you users and teams associated with this Crede
|
||||
|Credentials - home with permissions credential details|
|
||||
|
||||
.. |Credentials - home with permissions credential details| image:: ../common/images/credentials-home-with-permissions-detail.png
|
||||
:alt: Credentials - Access tab for Demo credential containing two users with their roles
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -71,6 +74,7 @@ Clicking the **Job Templates** tab shows you the job templates associated with t
|
||||
|
||||
|
||||
.. image:: ../common/images/credentials-home-with-jt-detail.png
|
||||
:alt: Credentials - Job Template tab for Demo credential with example job template
|
||||
|
||||
You can click the **Add** button to assign this **Demo Credential** to additional job templates. Refer to the :ref:`ug_JobTemplates` section for further detail on creating a new job template.
|
||||
|
||||
@@ -89,6 +93,7 @@ To create a new credential:
|
||||
|Create credential|
|
||||
|
||||
.. |Create credential| image:: ../common/images/credentials-create-credential.png
|
||||
:alt: Create credential form
|
||||
|
||||
2. Enter the name for your new credential in the **Name** field.
|
||||
|
||||
@@ -102,6 +107,7 @@ To create a new credential:
|
||||
4. Enter or select the credential type you want to create.
|
||||
|
||||
.. image:: ../common/images/credential-types-drop-down-menu.png
|
||||
:alt: Credential types drop down menu
|
||||
|
||||
5. Enter the appropriate details depending on the type of credential selected, as described in the next section, :ref:`ug_credentials_cred_types`.
|
||||
|
||||
@@ -146,6 +152,7 @@ AWX uses the following environment variables for AWS credentials and are fields
|
||||
|Credentials - create AWS credential|
|
||||
|
||||
.. |Credentials - create AWS credential| image:: ../common/images/credentials-create-aws-credential.png
|
||||
:alt: Credentials - create AWS credential form
|
||||
|
||||
Traditional Amazon Web Services credentials consist of the AWS **Access Key** and **Secret Key**.
|
||||
|
||||
@@ -171,10 +178,12 @@ Selecting this credential allows AWX to access Galaxy or use a collection publis
|
||||
|Credentials - create galaxy credential|
|
||||
|
||||
.. |Credentials - create galaxy credential| image:: ../common/images/credentials-create-galaxy-credential.png
|
||||
:alt: Credentials - create galaxy credential form
|
||||
|
||||
To populate the **Galaxy Server URL** and the **Auth Server URL** fields, look for the corresponding fields of the |ah| section of the `Red Hat Hybrid Cloud Console <https://console.redhat.com/ansible/automation-hub/token>`_ labeled **Server URL** and **SSO URL**, respectively.
|
||||
|
||||
.. image:: ../common/images/hub-console-tokens-page.png
|
||||
:alt: Hub console tokens page
|
||||
|
||||
|
||||
Centrify Vault Credential Provider Lookup
|
||||
@@ -194,6 +203,7 @@ Aside from specifying a name, the **Authentication URL** is the only required fi
|
||||
|Credentials - create container credential|
|
||||
|
||||
.. |Credentials - create container credential| image:: ../common/images/credentials-create-container-credential.png
|
||||
:alt: Credentials - create container credential form
|
||||
|
||||
|
||||
CyberArk Central Credential Provider Lookup
|
||||
@@ -218,6 +228,7 @@ Selecting this credential allows you to access GitHub using a Personal Access To
|
||||
|Credentials - create GitHub credential|
|
||||
|
||||
.. |Credentials - create GitHub credential| image:: ../common/images/credentials-create-webhook-github-credential.png
|
||||
:alt: Credentials - create GitHub credential form
|
||||
|
||||
GitHub PAT credentials require a value in the **Token** field, which is provided in your GitHub profile settings.
|
||||
|
||||
@@ -236,6 +247,7 @@ Selecting this credential allows you to access GitLab using a Personal Access To
|
||||
|Credentials - create GitLab credential|
|
||||
|
||||
.. |Credentials - create GitLab credential| image:: ../common/images/credentials-create-webhook-gitlab-credential.png
|
||||
:alt: Credentials - create GitLab credential form
|
||||
|
||||
GitLab PAT credentials require a value in the **Token** field, which is provided in your GitLab profile settings.
|
||||
|
||||
@@ -261,6 +273,7 @@ AWX uses the following environment variables for GCE credentials and are fields
|
||||
|Credentials - create GCE credential|
|
||||
|
||||
.. |Credentials - create GCE credential| image:: ../common/images/credentials-create-gce-credential.png
|
||||
:alt: Credentials - create GCE credential form
|
||||
|
||||
GCE credentials have the following inputs that are required:
|
||||
|
||||
@@ -270,6 +283,7 @@ GCE credentials have the following inputs that are required:
|
||||
- **RSA Private Key**: The PEM file associated with the service account email.
|
||||
|
||||
.. |file-browser| image:: ../common/images/file-browser-button.png
|
||||
:alt: File browser button
|
||||
|
||||
|
||||
GPG Public Key
|
||||
@@ -283,6 +297,7 @@ Selecting this credential type allows you to create a credential that gives AWX
|
||||
|Credentials - create GPG credential|
|
||||
|
||||
.. |Credentials - create GPG credential| image:: ../common/images/credentials-create-gpg-credential.png
|
||||
:alt: Credentials - create GPG credential form
|
||||
|
||||
See :ref:`ug_content_signing` for detailed information on how to generate a valid keypair, use the CLI tool to sign content, and how to add the public key to AWX.
|
||||
|
||||
@@ -308,6 +323,7 @@ Selecting this credential type enables synchronization of cloud inventory with R
|
||||
|Credentials - create Insights credential|
|
||||
|
||||
.. |Credentials - create Insights credential| image:: ../common/images/credentials-create-insights-credential.png
|
||||
:alt: Credentials - create Insights credential form
|
||||
|
||||
Insights credentials consist of the Insights **Username** and **Password**, which is the user’s Red Hat Customer Portal Account username and password.
|
||||
|
||||
@@ -326,6 +342,7 @@ Machine/SSH credentials do not use environment variables. Instead, they pass the
|
||||
|Credentials - create machine credential|
|
||||
|
||||
.. |Credentials - create machine credential| image:: ../common/images/credentials-create-machine-credential.png
|
||||
:alt: Credentials - create machine credential form
|
||||
|
||||
Machine credentials have several attributes that may be configured:
|
||||
|
||||
@@ -336,6 +353,7 @@ Machine credentials have several attributes that may be configured:
|
||||
- **Privilege Escalation Method**: Specifies the type of escalation privilege to assign to specific users. This is equivalent to specifying the ``--become-method=BECOME_METHOD`` parameter, where ``BECOME_METHOD`` could be any of the typical methods described below, or a custom method you've written. Begin entering the name of the method, and the appropriate name auto-populates.
|
||||
|
||||
.. image:: ../common/images/credentials-create-machine-credential-priv-escalation.png
|
||||
:alt: Credentials - create machine credential privilege escalation drop-down menu
|
||||
|
||||
|
||||
- empty selection: If a task/play has ``become`` set to ``yes`` and is used with an empty selection, then it will default to ``sudo``
|
||||
@@ -381,6 +399,7 @@ Selecting this credential type enables synchronization of cloud inventory with M
|
||||
|Credentials - create Azure credential|
|
||||
|
||||
.. |Credentials - create Azure credential| image:: ../common/images/credentials-create-azure-credential.png
|
||||
:alt: Credentials - create Azure credential form
|
||||
|
||||
Microsoft Azure Resource Manager credentials have several attributes that may be configured:
|
||||
|
||||
@@ -449,6 +468,7 @@ AWX uses the following environment variables for Network credentials and are fie
|
||||
|Credentials - create network credential|
|
||||
|
||||
.. |Credentials - create network credential| image:: ../common/images/credentials-create-network-credential.png
|
||||
:alt: Credentials - create network credential form
|
||||
|
||||
|
||||
Network credentials have several attributes that may be configured:
|
||||
@@ -480,6 +500,7 @@ Selecting this credential type allows you to create instance groups that point t
|
||||
|Credentials - create Containers credential|
|
||||
|
||||
.. |Credentials - create Containers credential| image:: ../common/images/credentials-create-containers-credential.png
|
||||
:alt: Credentials - create Containers credential form
|
||||
|
||||
Container credentials have the following inputs:
|
||||
|
||||
@@ -503,6 +524,7 @@ Selecting this credential type enables synchronization of cloud inventory with O
|
||||
|Credentials - create OpenStack credential|
|
||||
|
||||
.. |Credentials - create OpenStack credential| image:: ../common/images/credentials-create-openstack-credential.png
|
||||
:alt: Credentials - create OpenStack credential form
|
||||
|
||||
OpenStack credentials have the following inputs that are required:
|
||||
|
||||
@@ -525,6 +547,7 @@ Red Hat Ansible Automation Platform
|
||||
Selecting this credential allows you to access a Red Hat Ansible Automation Platform instance.
|
||||
|
||||
.. image:: ../common/images/credentials-create-at-credential.png
|
||||
:alt: Credentials - create Red Hat Ansible Automation Platform credential form
|
||||
|
||||
The Red Hat Ansible Automation Platform credentials have the following inputs that are required:
|
||||
|
||||
@@ -551,6 +574,7 @@ AWX writes a Satellite configuration file based on fields prompted in the user i
|
||||
|Credentials - create Red Hat Satellite 6 credential|
|
||||
|
||||
.. |Credentials - create Red Hat Satellite 6 credential| image:: ../common/images/credentials-create-rh-sat-credential.png
|
||||
:alt: Credentials - create Red Hat Satellite 6 credential form
|
||||
|
||||
|
||||
Satellite credentials have the following inputs that are required:
|
||||
@@ -581,6 +605,7 @@ AWX uses the following environment variables for Red Hat Virtualization credenti
|
||||
|Credentials - create rhv credential|
|
||||
|
||||
.. |Credentials - create rhv credential| image:: ../common/images/credentials-create-rhv-credential.png
|
||||
:alt: Credentials - create Red Hat Virtualization credential form
|
||||
|
||||
RHV credentials have the following inputs that are required:
|
||||
|
||||
@@ -601,6 +626,7 @@ SCM (source control) credentials are used with Projects to clone and update loca
|
||||
|Credentials - create SCM credential|
|
||||
|
||||
.. |Credentials - create SCM credential| image:: ../common/images/credentials-create-scm-credential.png
|
||||
:alt: Credentials - create SCM credential form
|
||||
|
||||
|
||||
Source Control credentials have several attributes that may be configured:
|
||||
@@ -613,7 +639,7 @@ Source Control credentials have several attributes that may be configured:
|
||||
.. note::
|
||||
|
||||
Source Control credentials cannot be configured as "**Prompt on launch**".
|
||||
If you are using a GitHub account for a Source Control credential and you have 2FA (Two Factor Authentication) enabled on your account, you will need to use your Personal Access Token in the password field rather than your account password.
|
||||
If you are using a GitHub account for a Source Control credential and you have 2FA (Two Factor Authentication) enabled on your account, you will need to use your Personal Access Token in the password field rather than your account password.
|
||||
|
||||
|
||||
Thycotic DevOps Secrets Vault
|
||||
@@ -637,6 +663,7 @@ Selecting this credential type enables synchronization of inventory with Ansible
|
||||
|Credentials - create Vault credential|
|
||||
|
||||
.. |Credentials - create Vault credential| image:: ../common/images/credentials-create-vault-credential.png
|
||||
:alt: Credentials - create Vault credential form
|
||||
|
||||
|
||||
Vault credentials require the **Vault Password** and an optional **Vault Identifier** if applying multi-Vault credentialing. For more information on AWX Multi-Vault support, refer to the :ref:`ag_multi_vault` section of the |ata|.
|
||||
@@ -671,6 +698,7 @@ AWX uses the following environment variables for VMware vCenter credentials and
|
||||
|Credentials - create VMware credential|
|
||||
|
||||
.. |Credentials - create VMware credential| image:: ../common/images/credentials-create-vmware-credential.png
|
||||
:alt: Credentials - create VMware credential form
|
||||
|
||||
VMware credentials have the following inputs that are required:
|
||||
|
||||
|
||||
@@ -2,304 +2,8 @@
|
||||
Execution Environment Setup Reference
|
||||
=======================================
|
||||
|
||||
This section contains reference information associated with the definition of an |ee|.
|
||||
|
||||
You define the content of your execution environment in a YAML file. By default, this file is called ``execution-environment.yml``. This file tells Ansible Builder how to create the build instruction file (Containerfile for Podman, Dockerfile for Docker) and build context for your container image.
|
||||
|
||||
.. note::
|
||||
|
||||
This page documents the definition schema for Ansible Builder 3.x. If you are running an older version of Ansible Builder, you need an older schema version. Please consult older versions of the docs for more information. We recommend using version 3, which offers substantially more configurable options and functionality than previous versions.
|
||||
|
||||
.. _ref_ee_definition:
|
||||
|
||||
Execution environment definition
|
||||
---------------------------------
|
||||
|
||||
A definition file is a ``.yml`` file that is required to build an image for an |ee|. Below is a sample version 3 |ee| definition schema file. To use Ansible Builder 3.x, you must specify the schema version. If your |ee| file does not specify ``version: 3``, Ansible Builder will assume you want version 1.
|
||||
|
||||
::
|
||||
|
||||
---
|
||||
version: 3
|
||||
|
||||
build_arg_defaults:
|
||||
ANSIBLE_GALAXY_CLI_COLLECTION_OPTS: '--pre'
|
||||
|
||||
dependencies:
|
||||
galaxy: requirements.yml
|
||||
python:
|
||||
- six
|
||||
- psutil
|
||||
system: bindep.txt
|
||||
|
||||
images:
|
||||
base_image:
|
||||
name: registry.redhat.io/ansible-automation-platform-24/ee-minimal-rhel8:latest
|
||||
|
||||
additional_build_files:
|
||||
- src: files/ansible.cfg
|
||||
dest: configs
|
||||
|
||||
additional_build_steps:
|
||||
prepend_galaxy:
|
||||
- ADD _build/configs/ansible.cfg ~/.ansible.cfg
|
||||
|
||||
prepend_final: |
|
||||
RUN whoami
|
||||
RUN cat /etc/os-release
|
||||
append_final:
|
||||
- RUN echo This is a post-install command!
|
||||
- RUN ls -la /etc
|
||||
|
||||
|
||||
Configuration options
|
||||
----------------------
|
||||
|
||||
You may use the configuration YAML keys listed here in your v3 |ee| definition file. The Ansible Builder 3.x execution environment definition file accepts seven top-level sections:
|
||||
|
||||
.. contents::
|
||||
:local:
|
||||
|
||||
additional_build_files
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Specifies files to be added to the build context directory. These can then be referenced or copied by ``additional_build_steps`` during any build stage. The format is a list of dictionary values, each with a ``src`` and ``dest`` key and value.
|
||||
|
||||
Each list item must be a dictionary containing the following (non-optional) keys:
|
||||
|
||||
**src**
|
||||
Specifies the source file(s) to copy into the build context directory. This may either be an absolute path (e.g., ``/home/user/.ansible.cfg``), or a path that is relative to the |ee| file. Relative paths may be a glob expression matching one or more files (e.g. ``files/*.cfg``). Note that an absolute path may **not** include a regular expression. If ``src`` is a directory, the entire contents of that directory are copied to ``dest``.
|
||||
|
||||
**dest**
|
||||
Specifies a subdirectory path underneath the ``_build`` subdirectory of the build context directory that should contain the source file(s) (e.g., ``files/configs``). This may not be an absolute path or contain ``..`` within the path. This directory will be created for you if it does not exist.
|
||||
|
||||
.. note::
|
||||
When using an ``ansible.cfg`` file to pass a token and other settings for a private account to an |ah| server, listing the config file path here (as a string) will enable it to be included as a build argument in the initial phase of the build.
|
||||
|
||||
|
||||
additional_build_steps
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Specifies custom build commands for any build phase. These commands will be inserted directly into the build instruction file for the container runtime (e.g., Containerfile or Dockerfile). The commands must conform to any rules required by the containerization tool.
|
||||
|
||||
You can add build steps before or after any stage of the image creation process. For example, if you need ``git`` to be installed before you install your dependencies, you can add a build step at the end of the base build stage.
|
||||
|
||||
Below are the valid keys for this section. Each supports either a multi-line string, or a list of strings.
|
||||
|
||||
**prepend_base**
|
||||
Commands to insert before building of the base image.
|
||||
|
||||
**append_base**
|
||||
Commands to insert after building of the base image.
|
||||
|
||||
**prepend_galaxy**
|
||||
Commands to insert before building of the galaxy image.
|
||||
|
||||
**append_galaxy**
|
||||
Commands to insert after building of the galaxy image.
|
||||
|
||||
**prepend_builder**
|
||||
Commands to insert before building of the builder image.
|
||||
|
||||
**append_builder**
|
||||
Commands to insert after building of the builder image.
|
||||
|
||||
**prepend_final**
|
||||
Commands to insert before building of the final image.
|
||||
|
||||
**append_final**
|
||||
Commands to insert after building of the final image.
|
||||
|
||||
|
||||
build_arg_defaults
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
Specifies default values for build args as a dictionary. This is an alternative to using the ``--build-arg`` CLI flag.
|
||||
|
||||
Build arguments used by ``ansible-builder`` are the following:
|
||||
|
||||
**ANSIBLE_GALAXY_CLI_COLLECTION_OPTS**
|
||||
Allows the user to pass the ``--pre`` flag (or others) to enable the installation of pre-release collections.
|
||||
|
||||
**ANSIBLE_GALAXY_CLI_ROLE_OPTS**
|
||||
This allows the user to pass any flags, such as ``--no-deps``, to the role installation.
|
||||
|
||||
**PKGMGR_PRESERVE_CACHE**
|
||||
This controls how often the package manager cache is cleared during the image build process. If this value is not set, which is the default, the cache is cleared frequently. If it is set to the string ``always``, the cache is never cleared. Any other value forces the cache to be cleared only after the system dependencies are installed in the final build stage.
|
||||
|
||||
Ansible Builder hard-codes values given inside of ``build_arg_defaults`` into the build instruction file, so they will persist if you run your container build manually.
|
||||
|
||||
If you specify the same variable in the |ee| definition and at the command line with the CLI ``build-arg`` flag, the CLI value will take higher precedence (the CLI value will override the value in the |ee| definition).
|
||||
|
||||
.. _ref_collections_metadata:
|
||||
|
||||
dependencies
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Specifies dependencies to install into the final image, including ``ansible-core``, ``ansible-runner``, Python packages, system packages, and Ansible Collections. Ansible Builder automatically installs dependencies for any Ansible Collections you install.
|
||||
|
||||
In general, you can use standard syntax to constrain package versions. Use the same syntax you would pass to ``dnf``, ``pip``, ``ansible-galaxy``, or any other package management utility. You can also define your packages or collections in separate files and reference those files in the ``dependencies`` section of your |ee| definition file.
|
||||
|
||||
The following keys are valid for this section:
|
||||
|
||||
**ansible_core**
|
||||
The version of the ``ansible-core`` Python package to be installed. This value is a dictionary with a single key, ``package_pip``. The ``package_pip`` value is passed directly to pip for installation and can be in any format that pip supports. Below are some example values:
|
||||
::
|
||||
|
||||
ansible_core:
|
||||
package_pip: ansible-core
|
||||
ansible_core:
|
||||
package_pip: ansible-core==2.14.3
|
||||
ansible_core:
|
||||
package_pip: https://github.com/example_user/ansible/archive/refs/heads/ansible.tar.gz
|
||||
|
||||
**ansible_runner**
|
||||
The version of the Ansible Runner Python package to be installed. This value is a dictionary with a single key, ``package_pip``. The ``package_pip`` value is passed directly to pip for installation and can be in any format that pip supports. Below are some example values:
|
||||
::
|
||||
|
||||
ansible_runner:
|
||||
package_pip: ansible-runner
|
||||
ansible_runner:
|
||||
package_pip: ansible-runner==2.3.2
|
||||
ansible_runner:
|
||||
package_pip: https://github.com/example_user/ansible-runner/archive/refs/heads/ansible-runner.tar.gz
|
||||
|
||||
|
||||
**galaxy**
|
||||
Ansible Collections to be installed from Galaxy. This may be a filename, a dictionary, or a multi-line string representation of an Ansible Galaxy ``requirements.yml`` file (see below for examples). Read more about the requirements file format in the `Galaxy user guide <https://docs.ansible.com/ansible/latest/galaxy/user_guide.html#install-multiple-collections-with-a-requirements-file>`_.
|
||||
|
||||
**python**
|
||||
The Python installation requirements. This may either be a filename, or a list of requirements (see below for an example). Ansible Builder combines all the Python requirements files from all collections into a single file using the ``requirements-parser`` library. This library supports complex syntax, including references to other files. If multiple collections require the same *package name*, Ansible Builder combines them into a single entry and combines the constraints. Certain package names are specifically *ignored* by ``ansible-builder``, meaning that Ansible Builder does not include them in the combined file of Python dependencies, even if a collection lists them as dependencies. These include test packages and packages that provide Ansible itself. The full list can be found in ``EXCLUDE_REQUIREMENTS`` in ``src/ansible_builder/_target_scripts/introspect.py``. If you need to include one of these ignored package names, use the ``--user-pip`` option of the ``introspect`` command to list it in the user requirements file. Packages supplied this way are not processed against the list of excluded Python packages.
|
||||
|
||||
**python_interpreter**
|
||||
A dictionary that defines the Python system package name to be installed by dnf (``package_system``) and/or a path to the Python interpreter to be used (``python_path``).
|
||||
|
||||
**system**
|
||||
The system packages to be installed, in bindep format. This may either be a filename, or a list of requirements (see below for an example). For more information about bindep, refer to the `OpenDev documentation <https://docs.opendev.org/opendev/bindep/latest/readme.html>`_.
|
||||
For system packages, use the ``bindep`` format to specify cross-platform requirements, so they can be installed by whichever package management system the execution environment uses. Collections should specify necessary requirements for ``[platform:rpm]``. Ansible Builder combines system package entries from multiple collections into a single file. Only requirements with *no* profiles (runtime requirements) are installed to the image. Entries from multiple collections which are outright duplicates of each other may be consolidated in the combined file.
|
||||
|
||||
The following example uses filenames that contain the various dependencies:
|
||||
|
||||
::
|
||||
|
||||
dependencies:
|
||||
python: requirements.txt
|
||||
system: bindep.txt
|
||||
galaxy: requirements.yml
|
||||
ansible_core:
|
||||
package_pip: ansible-core==2.14.2
|
||||
ansible_runner:
|
||||
package_pip: ansible-runner==2.3.1
|
||||
python_interpreter:
|
||||
package_system: "python310"
|
||||
python_path: "/usr/bin/python3.10"
|
||||
|
||||
And this example uses inline values:
|
||||
|
||||
::
|
||||
|
||||
dependencies:
|
||||
python:
|
||||
- pywinrm
|
||||
system:
|
||||
- iputils [platform:rpm]
|
||||
galaxy:
|
||||
collections:
|
||||
- name: community.windows
|
||||
- name: ansible.utils
|
||||
version: 2.10.1
|
||||
ansible_core:
|
||||
package_pip: ansible-core==2.14.2
|
||||
ansible_runner:
|
||||
package_pip: ansible-runner==2.3.1
|
||||
python_interpreter:
|
||||
package_system: "python310"
|
||||
python_path: "/usr/bin/python3.10"
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
If any of these dependency files (``requirements.txt``, ``bindep.txt``, and ``requirements.yml``) are in the ``build_ignore`` of the collection, it will not work correctly.
|
||||
|
||||
Collection maintainers can verify that ansible-builder recognizes the requirements they expect by using the ``introspect`` command, for example:
|
||||
|
||||
::
|
||||
|
||||
ansible-builder introspect --sanitize ~/.ansible/collections/
|
||||
|
||||
The ``--sanitize`` option reviews all of the collection requirements and removes duplicates. It also removes any Python requirements that should normally be excluded. Use the ``-v3`` option to ``introspect`` to see logging messages about requirements that are being excluded.
|
||||
|
||||
|
||||
images
|
||||
~~~~~~~
|
||||
|
||||
Specifies the base image to be used. At a minimum you **MUST** specify a source, image, and tag for the base image. The base image provides the operating system and may also provide some packages. We recommend using the standard ``host/namespace/container:tag`` syntax to specify images. You may use Podman or Docker shortcut syntax instead, but the full definition is more reliable and portable.
|
||||
|
||||
Valid keys for this section are:
|
||||
|
||||
**base_image**
|
||||
A dictionary defining the parent image for the execution environment. A ``name`` key must be supplied with the container image to use. Use the ``signature_original_name`` key if the image is mirrored within your repository, but signed with the original image's signature key.
|
||||
|
||||
image verification
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
You can verify signed container images if you are using the ``podman`` container runtime. Set the ``container-policy`` CLI option to control how this data is used in relation to a Podman ``policy.json`` file for container image signature validation.
|
||||
|
||||
- ``ignore_all`` policy: Generate a ``policy.json`` file in the build :ref:`context directory <context>` where no signature validation is performed.
|
||||
- ``system`` policy: Signature validation is performed using pre-existing ``policy.json`` files in standard system locations. ``ansible-builder`` assumes no responsibility for the content within these files, and the user has complete control over the content.
|
||||
- ``signature_required`` policy: ``ansible-builder`` will use the container image definitions here to generate a ``policy.json`` file in the build :ref:`context directory <context>` that will be used during the build to validate the images.
|
||||
|
||||
|
||||
options
|
||||
~~~~~~~~
|
||||
|
||||
A dictionary of keywords/options that can affect builder runtime functionality. Valid keys for this section are:
|
||||
|
||||
**container_init**
|
||||
A dictionary with keys that allow for customization of the container ``ENTRYPOINT`` and ``CMD`` directives (and related behaviors). Customizing these behaviors is an advanced task, and may result in subtle, difficult-to-debug failures. As the provided defaults for this section control a number of intertwined behaviors, overriding any value will skip all remaining defaults in this dictionary. Valid keys are:
|
||||
|
||||
**cmd**
|
||||
Literal value for the ``CMD`` Containerfile directive. The default value is ``["bash"]``.
|
||||
|
||||
**entrypoint**
|
||||
Literal value for the ``ENTRYPOINT`` Containerfile directive. The default entrypoint behavior handles signal propagation to subprocesses, as well as attempting to ensure at runtime that the container user has a proper environment with a valid writeable home directory, represented in ``/etc/passwd``, with the ``HOME`` environment variable set to match. The default entrypoint script may emit warnings to ``stderr`` in cases where it is unable to suitably adjust the user runtime environment. This behavior can be ignored or elevated to a fatal error; consult the source for the ``entrypoint`` target script for more details. The default value is ``["/opt/builder/bin/entrypoint", "dumb-init"]``.
|
||||
|
||||
**package_pip**
|
||||
Package to install via pip for entrypoint support. This package will be installed in the final build image. The default value is ``dumb-init==1.2.5``.
|
||||
|
||||
**package_manager_path**
|
||||
A string with the path to the package manager (dnf or microdnf) to use. The default is ``/usr/bin/dnf``. This value will be used to install a Python interpreter, if specified in ``dependencies``, and during the build phase by the ``assemble`` script.
|
||||
|
||||
**skip_ansible_check**
|
||||
This boolean value controls whether or not the check for an installation of Ansible and Ansible Runner is performed on the final image. Set this value to ``True`` to not perform this check. The default is ``False``.
|
||||
|
||||
**relax_passwd_permissions**
|
||||
This boolean value controls whether the ``root`` group (GID 0) is explicitly granted write permission to ``/etc/passwd`` in the final container image. The default entrypoint script may attempt to update ``/etc/passwd`` under some container runtimes with dynamically created users to ensure a fully-functional POSIX user environment and home directory. Disabling this capability can cause failures of software features that require users to be listed in ``/etc/passwd`` with a valid and writeable home directory (eg, ``async`` in ansible-core, and the ``~username`` shell expansion). The default is ``True``.
|
||||
|
||||
**workdir**
|
||||
Default current working directory for new processes started under the final container image. Some container runtimes also use this value as ``HOME`` for dynamically-created users in the ``root`` (GID 0) group. When this value is specified, the directory will be created (if it doesn't already exist), set to ``root`` group ownership, and ``rwx`` group permissions recursively applied to it. The default value is ``/runner``.
|
||||
|
||||
**user**
|
||||
This sets the username or UID to use as the default user for the final container image. The default value is ``1000``.
|
||||
|
||||
Example options section:
|
||||
::
|
||||
|
||||
options:
|
||||
container_init:
|
||||
package_pip: dumb-init>=1.2.5
|
||||
entrypoint: '["dumb-init"]'
|
||||
cmd: '["csh"]'
|
||||
package_manager_path: /usr/bin/microdnf
|
||||
relax_passwd_permissions: false
|
||||
skip_ansible_check: true
|
||||
workdir: /myworkdir
|
||||
user: bob
|
||||
|
||||
|
||||
version
|
||||
~~~~~~~~
|
||||
|
||||
An integer value that sets the schema version of the execution environment definition file. Defaults to ``1``. Must be ``3`` if you are using Ansible Builder 3.x.
|
||||
|
||||
For detailed information about the |ee| definition,
|
||||
refer to the `Ansible Builder documentation <https://ansible.readthedocs.io/projects/builder/en/latest/definition/#execution-environment-definition>`_.
|
||||
|
||||
Default execution environment for AWX
|
||||
--------------------------------------
|
||||
|
||||
@@ -16,88 +16,14 @@ Execution Environments
|
||||
Building an Execution Environment
|
||||
---------------------------------
|
||||
|
||||
.. index::
|
||||
single: execution environment
|
||||
pair: build; execution environment
|
||||
|
||||
|
||||
Using Ansible content that depends on non-default dependencies (custom virtual environments) can be tricky. Packages must be installed on each node, play nicely with other software installed on the host system, and be kept in sync. Previously, jobs ran inside of a virtual environment at ``/var/lib/awx/venv/ansible`` by default, which was pre-loaded with dependencies for ansible-runner and certain types of Ansible content used by the Ansible control machine.
|
||||
|
||||
To help simplify this process, container images can be built that serve as Ansible `control nodes <https://docs.ansible.com/ansible/latest/network/getting_started/basic_concepts.html#control-node>`_. These container images are referred to as automation |ees|, which you can create with ansible-builder and then ansible-runner can make use of those images.
|
||||
|
||||
Install ansible-builder
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
In order to build images, an installation of either Podman or Docker is required, along with the ansible-builder Python package. The ``--container-runtime`` option needs to correspond to the Podman/Docker executable you intend to use.
|
||||
|
||||
Refer to the latest `Quickstart for Ansible Builder <https://ansible.readthedocs.io/projects/builder/en/latest/#quickstart-for-ansible-builder>`_ for detail.
|
||||
|
||||
.. _build_ee:
|
||||
|
||||
Build an execution environment
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Ansible-builder is used to create an |ee|.
|
||||
|
||||
An |ee| is expected to contain:
|
||||
|
||||
- Ansible
|
||||
- Ansible Runner
|
||||
- Ansible Collections
|
||||
- Python and/or system dependencies of:
|
||||
|
||||
- modules/plugins in collections
|
||||
- content in ansible-base
|
||||
- custom user needs
|
||||
|
||||
Building a new |ee| involves a definition (a ``.yml`` file) that specifies which content you would like to include in your |ee|, such as collections, Python requirements, and system-level packages. The content from the output generated from migrating to |ees| has some of the required data that can be piped to a file or pasted into this definition file.
|
||||
|
||||
|
||||
Run the builder
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Once you have created a definition, use this procedure to build your |ee|.
|
||||
|
||||
The ``ansible-builder build`` command takes an |ee| definition as an input. It outputs the build context necessary for building an |ee| image, and proceeds with building that image. The image can be re-built with the build context elsewhere, and produces the same result. By default, it looks for a file named ``execution-environment.yml`` in the current directory.
|
||||
|
||||
For illustration purposes, the following example ``execution-environment.yml`` file is used as a starting point:
|
||||
|
||||
::
|
||||
|
||||
---
|
||||
version: 3
|
||||
dependencies:
|
||||
galaxy: requirements.yml
|
||||
|
||||
The content of ``requirements.yml``:
|
||||
|
||||
::
|
||||
|
||||
---
|
||||
collections:
|
||||
- name: awx.awx
|
||||
|
||||
To build an |ee| using the files above, run:
|
||||
|
||||
::
|
||||
|
||||
$ ansible-builder build
|
||||
...
|
||||
STEP 7: COMMIT my-awx-ee
|
||||
--> 09c930f5f6a
|
||||
09c930f5f6ac329b7ddb321b144a029dbbfcc83bdfc77103968b7f6cdfc7bea2
|
||||
Complete! The build context can be found at: context
|
||||
|
||||
In addition to producing a ready-to-use container image, the build context is preserved, which can be rebuilt at a different time and/or location with the tooling of your choice, such as ``docker build`` or ``podman build``.
|
||||
|
||||
For additional information about the ``ansible-builder build`` command, refer to Ansible's `CLI Usage <https://ansible.readthedocs.io/projects/builder/en/latest/usage/#cli-usage>`_ documentation.
|
||||
The `Getting started with Execution Environments guide <https://ansible.readthedocs.io/en/latest/getting_started_ee/index.html>`_ will give you a brief technology overview and show you how to build and test your first |ee| in a few easy steps.
|
||||
|
||||
Use an execution environment in jobs
|
||||
------------------------------------
|
||||
|
||||
In order to use an |ee| in a job, a few components are required:
|
||||
|
||||
- An |ee| must have been created using |ab|. See :ref:`build_ee` for detail. Once an |ee| is created, you can use it to run jobs. Use the AWX user interface to specify the |ee| to use in your job templates.
|
||||
- Use the AWX user interface to specify the |ee| you :ref:`build<ug_build_ees>` to use in your job templates.
|
||||
|
||||
- Depending on whether an |ee| is made available for global use or tied to an organization, you must have the appropriate level of administrator privileges in order to use an |ee| in a job. |Ees| tied to an organization require Organization administrators to be able to run jobs with those |ees|.
|
||||
|
||||
@@ -122,16 +48,19 @@ In order to use an |ee| in a job, a few components are required:
|
||||
- **Registry credential**: If the image has a protected container registry, provide the credential to access it.
|
||||
|
||||
.. image:: ../common/images/ee-new-ee-form-filled.png
|
||||
:alt: Create new Execution Environment form
|
||||
|
||||
4. Click **Save**.
|
||||
|
||||
Now your newly added |ee| is ready to be used in a job template. To add an |ee| to a job template, specify it in the **Execution Environment** field of the job template, as shown in the example below. For more information on setting up a job template, see :ref:`ug_JobTemplates` in the |atu|.
|
||||
|
||||
.. image:: ../common/images/job-template-with-example-ee-selected.png
|
||||
:alt: Job template using newly created Execution Environment
|
||||
|
||||
Once you added an |ee| to a job template, you can see those templates listed in the **Templates** tab of the |ee|:
|
||||
|
||||
.. image:: ../common/images/ee-details-templates-list.png
|
||||
:alt: Templates tab of the Execution Environment showing one job associated with it
|
||||
|
||||
|
||||
Execution environment mount options
|
||||
@@ -156,7 +85,8 @@ If you encounter this error, or have upgraded from an older version of AWX, perf
|
||||
2. In the **Paths to expose to isolated jobs** field of the Job Settings page, using the current example, expose the path as such:
|
||||
|
||||
.. image:: ../common/images/settings-paths2expose-iso-jobs.png
|
||||
|
||||
:alt: Jobs Settings page showing Paths to expose to isolated jobs field with defaults
|
||||
|
||||
.. note::
|
||||
|
||||
The ``:O`` option is only supported for directories. It is highly recommended that you be as specific as possible, especially when specifying system paths. Mounting ``/etc`` or ``/usr`` directly has an impact that makes it difficult to troubleshoot.
|
||||
@@ -173,10 +103,11 @@ This informs podman to run a command similar to the example below, where the con
|
||||
To expose isolated paths in OpenShift or Kubernetes containers as HostPath, assume the following configuration:
|
||||
|
||||
.. image:: ../common/images/settings-paths2expose-iso-jobs-mount-containers.png
|
||||
:alt: Jobs Settings page showing Paths to expose to isolated jobs field with assumed configuration and Expose host paths for Container Group toggle enabled
|
||||
|
||||
Use the **Expose host paths for Container Groups** toggle to enable it.
|
||||
|
||||
Once the playbook runs, the resulting Pod spec will display similar to the example below. Note the details of the ``volumeMounts`` and ``volumes`` sections.
|
||||
|
||||
.. image:: ../common/images/mount-containers-playbook-run-podspec.png
|
||||
|
||||
:alt: Pod spec for the playbook run showing volumeMounts and volumes details
|
||||
|
||||
@@ -37,7 +37,7 @@ Glossary
|
||||
Facts are simply things that are discovered about remote nodes. While they can be used in playbooks and templates just like variables, facts are things that are inferred, rather than set. Facts are automatically discovered when running plays by executing the internal setup module on the remote nodes. You never have to call the setup module explicitly, it just runs, but it can be disabled to save time if it is not needed. For the convenience of users who are switching from other configuration management systems, the fact module also pulls in facts from the ‘ohai’ and ‘facter’ tools if they are installed, which are fact libraries from Chef and Puppet, respectively.
|
||||
|
||||
Forks
|
||||
Ansible and AWX talk to remote nodes in parallel and the level of parallelism can be set several ways--during the creation or editing of a Job Template, by passing ``--forks``, or by editing the default in a configuration file. The default is a very conservative 5 forks, though if you have a lot of RAM, you can easily set this to a value like 50 for increased parallelism.
|
||||
Ansible and AWX talk to remote nodes in parallel and the level of parallelism can be set several ways--during the creation or editing of a Job Template, by passing ``--forks``, or by editing the default in a configuration file. The default is a very conservative 5 forks, though if you have a lot of RAM, you can easily set this to a value like 50 for increased parallelism.
|
||||
|
||||
Group
|
||||
A set of hosts in Ansible that can be addressed as a set, of which many may exist within a single Inventory.
|
||||
|
||||
@@ -1,10 +1,21 @@
|
||||
.. _ug_start:
|
||||
|
||||
==========
|
||||
User Guide
|
||||
==========
|
||||
===================
|
||||
Automating with AWX
|
||||
===================
|
||||
|
||||
User Guide
|
||||
Learn how to use AWX functionality to scale and manage your automation.
|
||||
This guide assumes moderate familiarity with Ansible, including concepts such as **Playbooks**, **Variables**, and **Tags**.
|
||||
|
||||
This guide applies to the latest version of AWX only.
|
||||
The content in this guide is updated frequently and might contain functionality that is not available in previous versions.
|
||||
Likewise content in this guide can be removed or replaced if it applies to functionality that is no longer available in the latest version.
|
||||
|
||||
**Join us online**
|
||||
|
||||
We talk about AWX documentation on Matrix at `#docs:ansible.im <https://matrix.to/#/#docs:ansible.im>`_ and on libera IRC at ``#ansible-docs`` if you ever want to join us and chat about the docs!
|
||||
|
||||
You can also find lots of AWX discussion and get answers to questions at `forum.ansible.com <https://forum.ansible.com/>`_.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
@@ -11,6 +11,7 @@ An :term:`Instance Group` provides the ability to group instances in a clustered
|
||||
|Instance Group policy example|
|
||||
|
||||
.. |Instance Group policy example| image:: ../common/images/instance-groups_list_view.png
|
||||
:alt: Instance groups list view showing example instance groups and one with capacity levels
|
||||
|
||||
For more information about the policy or rules associated with instance groups, see the :ref:`ag_instance_groups` section of the |ata|.
|
||||
|
||||
@@ -34,6 +35,7 @@ To create a new instance group:
|
||||
|IG - create new IG|
|
||||
|
||||
.. |IG - create new IG| image:: ../common/images/instance-group-create-new-ig.png
|
||||
:alt: Create instance group form
|
||||
|
||||
3. Enter the appropriate details into the following fields:
|
||||
|
||||
@@ -57,11 +59,12 @@ To create a new instance group:
|
||||
Once the instance group is successfully created, the **Details** tab of the newly created instance group remains, allowing you to review and edit your instance group information. This is the same screen that opens when the **Edit** (|edit-button|) button is clicked from the **Instance Groups** list view. You can also edit **Instances** and review **Jobs** associated with this instance group.
|
||||
|
||||
.. |edit-button| image:: ../common/images/edit-button.png
|
||||
:alt: Edit button
|
||||
|
||||
|IG - example IG successfully created|
|
||||
|
||||
.. |IG - example IG successfully created| image:: ../common/images/instance-group-example-ig-successfully-created.png
|
||||
|
||||
:alt: Instance group details showing how to view instances and jobs associated with an instance group
|
||||
|
||||
Associate instances to an instance group
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@@ -75,6 +78,7 @@ To associate instances to an instance group:
|
||||
|IG - select instances|
|
||||
|
||||
.. |IG - select instances| image:: ../common/images/instance-group-assoc-instances.png
|
||||
:alt: Associating an instance with an instance group
|
||||
|
||||
3. In the following example, the instances added to the instance group displays along with information about their capacity.
|
||||
|
||||
@@ -83,6 +87,7 @@ This view also allows you to edit some key attributes associated with the instan
|
||||
|IG - instances in IG callouts|
|
||||
|
||||
.. |IG - instances in IG callouts| image:: ../common/images/instance-group-instances-example-callouts.png
|
||||
:alt: Edit attributes associated with instances in an instance group
|
||||
|
||||
|
||||
View jobs associated with an instance group
|
||||
@@ -93,6 +98,7 @@ To view the jobs associated with the instance group, click the **Jobs** tab of t
|
||||
|IG - instances jobs|
|
||||
|
||||
.. |IG - instances jobs| image:: ../common/images/instance-group-jobs-list.png
|
||||
:alt: Viewing jobs associated with an instance group
|
||||
|
||||
Each job displays the job status, ID, and name; type of job, time started and completed, who started the job; and applicable resources associated with it, such as template, inventory, project, |ee|, etc.
|
||||
|
||||
|
||||
@@ -896,7 +896,7 @@ Amazon Web Services EC2
|
||||
|
||||
3. You can optionally specify the verbosity, host filter, enabled variable/value, and update options as described in the main procedure for :ref:`adding a source <ug_add_inv_common_fields>`.
|
||||
|
||||
4. Use the **Source Variables** field to override variables used by the ``aws_ec2`` inventory plugin. Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two. For a detailed description of these variables, view the `aws_ec2 inventory plugin documentation <https://cloud.redhat.com/ansible/automation-hub/repo/published/amazon/aws/content/inventory/aws_ec2>`__.
|
||||
4. Use the **Source Variables** field to override variables used by the ``aws_ec2`` inventory plugin. Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two. For a detailed description of these variables, view the `aws_ec2 inventory plugin documentation <https://cloud.redhat.com/ansible/automation-hub/repo/published/amazon/aws/content/inventory/aws_ec2>`__.
|
||||
|
||||
|Inventories - create source - AWS EC2 example|
|
||||
|
||||
@@ -924,7 +924,7 @@ Google Compute Engine
|
||||
|
||||
3. You can optionally specify the verbosity, host filter, enabled variable/value, and update options as described in the main procedure for :ref:`adding a source <ug_add_inv_common_fields>`.
|
||||
|
||||
4. Use the **Source Variables** field to override variables used by the ``gcp_compute`` inventory plugin. Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two. For a detailed description of these variables, view the `gcp_compute inventory plugin documentation <https://cloud.redhat.com/ansible/automation-hub/repo/published/google/cloud/content/inventory/gcp_compute>`__.
|
||||
4. Use the **Source Variables** field to override variables used by the ``gcp_compute`` inventory plugin. Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two. For a detailed description of these variables, view the `gcp_compute inventory plugin documentation <https://cloud.redhat.com/ansible/automation-hub/repo/published/google/cloud/content/inventory/gcp_compute>`__.
|
||||
|
||||
|
||||
.. _ug_source_azure:
|
||||
|
||||
@@ -3,10 +3,12 @@
|
||||
Projects specify the branch, tag, or reference to use from source control in the ``scm_branch`` field. These are represented by the values specified in the Project Details fields as shown.
|
||||
|
||||
.. image:: ../common/images/projects-create-scm-project-branching-emphasized.png
|
||||
:alt: Create New Project page with SCM branching options emphasized
|
||||
|
||||
Projects have the option to "Allow Branch Override". When checked, project admins can delegate branch selection to the job templates that use that project (requiring only project ``use_role``).
|
||||
|
||||
.. image:: ../common/images/projects-create-scm-project-branch-override-checked.png
|
||||
:alt: Allow Branch Override checkbox option in Project selected
|
||||
|
||||
|
||||
|
||||
@@ -22,6 +24,7 @@ If **Clean** is checked, AWX discards modified files in its local copy of the re
|
||||
.. _`Subversion`: https://docs.ansible.com/ansible/latest/collections/ansible/builtin/subversion_module.html#parameters
|
||||
|
||||
.. image:: ../common/images/projects-create-scm-project-clean-checked.png
|
||||
:alt: Clean checkbox option in Project selected
|
||||
|
||||
|
||||
Project revision behavior
|
||||
@@ -32,6 +35,7 @@ is stored when updated, and jobs using that project will employ this revision. P
|
||||
This revision is shown in the **Source Control Revision** field of the job and its respective project update.
|
||||
|
||||
.. image:: ../common/images/jobs-output-branch-override-example.png
|
||||
:alt: Project's Source Control Revision value
|
||||
|
||||
Consequently, offline job runs are impossible for non-default branches. To be sure that a job is running a static version from source control, use tags or commit hashes. Project updates do not save the revision of all branches, only the project default branch.
|
||||
|
||||
|
||||
@@ -106,4 +106,5 @@ The instance field ``capacity_adjustment`` allows you to select how much of one
|
||||
|
||||
To view or edit the capacity in the user interface, select the **Instances** tab of the Instance Group.
|
||||
|
||||
.. image:: ../common/images/instance-group-instances-capacity-callouts.png
|
||||
.. image:: ../common/images/instance-group-instances-capacity-callouts.png
|
||||
:alt: Instances tab of Instance Group showing sliders for capacity adjustment.
|
||||
|
||||
@@ -28,6 +28,7 @@ Consider the following when setting up job slices:
|
||||
- When executed, a sliced job splits each inventory into a number of "slice size" chunks. It then queues jobs of ansible-playbook runs on each chunk of the appropriate inventory. The inventory fed into ansible-playbook is a pared-down version of the original inventory that only contains the hosts in that particular slice. The completed sliced job that displays on the Jobs list are labeled accordingly, with the number of sliced jobs that have run:
|
||||
|
||||
.. image:: ../common/images/sliced-job-shown-jobs-list-view.png
|
||||
:alt: Sliced job shown in Jobs list view
|
||||
|
||||
|
||||
- These sliced jobs follow normal scheduling behavior (number of forks, queuing due to capacity, assignation to instance groups based on inventory mapping).
|
||||
@@ -54,6 +55,7 @@ Job slice execution behavior
|
||||
When jobs are sliced, they can run on any node and some may not run at the same time (insufficient capacity in the system, for example). When slice jobs are running, job details display the workflow and job slice(s) currently running, as well as a link to view their details individually.
|
||||
|
||||
.. image:: ../common/images/sliced-job-shown-jobs-output-view.png
|
||||
:alt: Sliced job shown in Jobs output view
|
||||
|
||||
By default, job templates are not normally configured to execute simultaneously (``allow_simultaneous`` must be checked in the API or **Enable Concurrent Jobs** in the UI). Slicing overrides this behavior and implies ``allow_simultaneous`` even if that setting is unchecked. See :ref:`ug_JobTemplates` for information on how to specify this, as well as the number of job slices on your job template configuration.
|
||||
|
||||
|
||||
@@ -17,13 +17,16 @@ The **Templates** menu opens a list of the job templates that are currently avai
|
||||
|Job templates - home with example job template|
|
||||
|
||||
.. |Job templates - home with example job template| image:: ../common/images/job-templates-home-with-example-job-template.png
|
||||
:alt: Job templates - home with example job template
|
||||
|
||||
|
||||
From this screen, you can launch (|launch|), edit (|edit|), and copy (|copy|) a job template. To delete a job template, you must select one or more templates and click the **Delete** button. Before deleting a job template, be sure it is not used in a workflow job template.
|
||||
|
||||
.. |edit| image:: ../common/images/edit-button.png
|
||||
:alt: Edit button
|
||||
|
||||
.. |delete| image:: ../common/images/delete-button.png
|
||||
.. |delete| image:: ../common/images/delete-button.png
|
||||
:alt: Delete button
|
||||
|
||||
|
||||
.. include:: ../common/work_items_deletion_warning.rst
|
||||
@@ -33,6 +36,7 @@ From this screen, you can launch (|launch|), edit (|edit|), and copy (|copy|) a
|
||||
Job templates can be used to build a workflow template. Templates that show the Workflow Visualizer (|wf-viz-icon|) icon next to them are workflow templates. Clicking it allows you to graphically build a workflow. Many parameters in a job template allow you to enable **Prompt on Launch** that can be modified at the workflow level, and do not affect the values assigned at the job template level. For instructions, see the :ref:`ug_wf_editor` section.
|
||||
|
||||
.. |wf-viz-icon| image:: ../common/images/wf-viz-icon.png
|
||||
:alt: Workflow Visualizer icon
|
||||
|
||||
Create a Job Template
|
||||
-----------------------
|
||||
@@ -153,10 +157,13 @@ To create a new job template:
|
||||
- Yes
|
||||
|
||||
.. |search| image:: ../common/images/search-button.png
|
||||
:alt: Search button
|
||||
|
||||
.. |x-circle| image:: ../common/images/x-delete-button.png
|
||||
:alt: Delete button
|
||||
|
||||
.. |x| image:: ../common/images/x-button.png
|
||||
:alt: X button
|
||||
|
||||
|
||||
3. **Options**: Specify options for launching this template, if necessary.
|
||||
@@ -170,6 +177,7 @@ To create a new job template:
|
||||
If you enable webhooks, other fields display, prompting for additional information:
|
||||
|
||||
.. image:: ../common/images/job-templates-options-webhooks.png
|
||||
:alt: Job templates - options - webhooks
|
||||
|
||||
- **Webhook Service**: Select which service to listen for webhooks from
|
||||
- **Webhook URL**: Automatically populated with the URL for the webhook service to POST requests to.
|
||||
@@ -184,16 +192,19 @@ To create a new job template:
|
||||
- **Prevent Instance Group Fallback**: Check this option to allow only the instance groups listed in the **Instance Groups** field above to execute the job. If unchecked, all available instances in the execution pool will be used based on the hierarchy described in :ref:`ag_instance_groups_control_where_job_runs`. Click the |help| icon for more information.
|
||||
|
||||
.. |help| image:: ../common/images/tooltips-icon.png
|
||||
:alt: Tooltip
|
||||
|
||||
|Job templates - create new job template|
|
||||
|
||||
.. |Job templates - create new job template| image:: ../common/images/job-templates-create-new-job-template.png
|
||||
:alt: Job templates - create new job template
|
||||
|
||||
4. When you have completed configuring the details of the job template, click **Save**.
|
||||
|
||||
Saving the template does not exit the job template page but advances to the Job Template Details tab for viewing. After saving the template, you can click **Launch** to launch the job, or click **Edit** to add or change the attributes of the template, such as permissions, notifications, view completed jobs, and add a survey (if the job type is not a scan). You must first save the template prior to launching, otherwise, the **Launch** button remains grayed-out.
|
||||
|
||||
.. image:: ../common/images/job-templates-job-template-details.png
|
||||
:alt: Job templates - job template details
|
||||
|
||||
You can verify the template is saved when the newly created template appears on the Templates list view.
|
||||
|
||||
@@ -211,6 +222,7 @@ Work with Notifications
|
||||
Clicking the **Notifications** tab allows you to review any notification integrations you have set up and their statuses, if they have run.
|
||||
|
||||
.. image:: ../common/images/job-template-completed-notifications-view.png
|
||||
:alt: Job templates - completed notifications view
|
||||
|
||||
Use the toggles to enable or disable the notifications to use with your particular template. For more detail, see :ref:`ug_notifications_on_off`.
|
||||
|
||||
@@ -223,11 +235,13 @@ View Completed Jobs
|
||||
The **Completed Jobs** tab provides the list of job templates that have run. Click **Expanded** to view details of each job, including its status, ID, and name; type of job, time started and completed, who started the job; and which template, inventory, project, and credential were used. You can filter the list of completed jobs using any of these criteria.
|
||||
|
||||
.. image:: ../common/images/job-template-completed-jobs-view.png
|
||||
:alt: Job templates - completed jobs view
|
||||
|
||||
|
||||
Sliced jobs that display on this list are labeled accordingly, with the number of sliced jobs that have run:
|
||||
|
||||
.. image:: ../common/images/sliced-job-shown-jobs-list-view.png
|
||||
:alt: Sliced job shown in jobs list view
|
||||
|
||||
|
||||
Scheduling
|
||||
@@ -242,6 +256,7 @@ Access the schedules for a particular job template from the **Schedules** tab.
|
||||
|Job Templates - schedule launch|
|
||||
|
||||
.. |Job Templates - schedule launch| image:: ../common/images/job-templates-schedules.png
|
||||
:alt: Job Templates - schedule launch
|
||||
|
||||
|
||||
Schedule a Job Template
|
||||
@@ -316,6 +331,7 @@ To create a survey:
|
||||
- **Default answer**: The default answer to the question. This value is pre-filled in the interface and is used if the answer is not provided by the user.
|
||||
|
||||
.. image:: ../common/images/job-template-create-survey.png
|
||||
:alt: Job templates - create survey
|
||||
|
||||
|
||||
3. Once you have entered the question information, click **Save** to add the question.
|
||||
@@ -325,11 +341,13 @@ The survey question displays in the Survey list. For any question, you can click
|
||||
|job-template-completed-survey|
|
||||
|
||||
.. |job-template-completed-survey| image:: ../common/images/job-template-completed-survey.png
|
||||
:alt: Job templates - completed survey
|
||||
|
||||
|
||||
If you have more than one survey question, use the **Edit Order** button to rearrange the order of the questions by clicking and dragging on the grid icon.
|
||||
|
||||
.. image:: ../common/images/job-template-rearrange-survey.png
|
||||
:alt: Job templates - rearrange survey
|
||||
|
||||
|
||||
4. To add more questions, click the **Add** button to add additional questions.
|
||||
@@ -369,10 +387,12 @@ Launch a job template by any of the following ways:
|
||||
- Access the job template list from the **Templates** menu on the left navigation bar or while in the Job Template Details view, scroll to the bottom to access the |launch| button from the list of templates.
|
||||
|
||||
.. image:: ../common/images/job-templates-home-with-example-job-template-launch.png
|
||||
:alt: Job templates - home with example job template - launch
|
||||
|
||||
- While in the Job Template Details view of the job template you want to launch, click **Launch**.
|
||||
|
||||
.. |launch| image:: ../common/images/launch-button.png
|
||||
:alt: Launch button
|
||||
|
||||
A job may require additional information to run. The following data may be requested at launch:
|
||||
|
||||
@@ -392,10 +412,12 @@ Below is an example job launch that prompts for Job Tags, and runs the example s
|
||||
|job-launch-with-prompt-job-tags|
|
||||
|
||||
.. |job-launch-with-prompt-job-tags| image:: ../common/images/job-launch-with-prompt-at-launch-jobtags.png
|
||||
:alt: Job launch with prompt job tags
|
||||
|
||||
|job-launch-with-prompt-survey|
|
||||
|
||||
.. |job-launch-with-prompt-survey| image:: ../common/images/job-launch-with-prompt-at-launch-survey.png
|
||||
:alt: Job launch with prompt survey
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -445,6 +467,7 @@ Upon launch, AWX automatically redirects the web browser to the Job Status page
|
||||
When slice jobs are running, job lists display the workflow and job slices, as well as a link to view their details individually.
|
||||
|
||||
.. image:: ../common/images/sliced-job-shown-jobs-list-view.png
|
||||
:alt: Sliced job shown in jobs list view
|
||||
|
||||
.. _ug_JobTemplates_bulk_api:
|
||||
|
||||
@@ -465,6 +488,7 @@ If you choose to copy Job Template, it **does not** copy any associated schedule
|
||||
2. Click the |copy| button associated with the template you want to copy.
|
||||
|
||||
.. |copy| image:: ../common/images/copy-button.png
|
||||
:alt: Copy button
|
||||
|
||||
The new template with the name of the template from which you copied and a timestamp displays in the list of templates.
|
||||
|
||||
@@ -517,6 +541,7 @@ The ``scan_files`` fact module is the only module that accepts parameters, passe
|
||||
Scan job templates should enable ``become`` and use credentials for which ``become`` is a possibility. You can enable become by checking the **Enable Privilege Escalation** from the Options menu:
|
||||
|
||||
.. image:: ../common/images/job-templates-create-new-job-template-become.png
|
||||
:alt: Job template with Privilege Escalation checked from the Options field.
|
||||
|
||||
|
||||
Supported OSes for ``scan_facts.yml``
|
||||
@@ -631,6 +656,7 @@ Fact Caching
|
||||
AWX can store and retrieve facts on a per-host basis through an Ansible Fact Cache plugin. This behavior is configurable on a per-job template basis. Fact caching is turned off by default but can be enabled to serve fact requests for all hosts in an inventory related to the job running. This allows you to use job templates with ``--limit`` while still having access to the entire inventory of host facts. A global timeout setting that the plugin enforces per-host, can be specified (in seconds) through the Jobs settings menu:
|
||||
|
||||
.. image:: ../common/images/configure-awx-jobs-fact-cache-timeout.png
|
||||
:alt: Jobs Settings window showing the location of the Per-Host Ansible Fact Cache Timeout parameter from the Edit Details screen.
|
||||
|
||||
Upon launching a job that uses fact cache (``use_fact_cache=True``), AWX will store all ``ansible_facts`` associated with each host in the inventory associated with the job. The Ansible Fact Cache plugin that ships with AWX will only be enabled on jobs with fact cache enabled (``use_fact_cache=True``).
|
||||
|
||||
@@ -659,7 +685,8 @@ Fact caching saves a significant amount of time over running fact gathering. If
|
||||
|
||||
You can choose to use cached facts in your job by enabling it in the **Options** field of the Job Templates window.
|
||||
|
||||
.. image:: ../common/images/job-templates-options-use-factcache.png
|
||||
.. image:: ../common/images/job-templates-options-use-factcache.png
|
||||
:alt: Job templates - options - use factcache
|
||||
|
||||
To clear facts, you need to run the Ansible ``clear_facts`` `meta task`_. Below is an example playbook that uses the Ansible ``clear_facts`` meta task.
|
||||
|
||||
@@ -836,6 +863,7 @@ To enable callbacks, check the *Provisioning Callbacks* checkbox in the Job Temp
|
||||
If you intend to use AWX's provisioning callback feature with a dynamic inventory, Update on Launch should be set for the inventory group used in the Job Template.
|
||||
|
||||
.. image:: ../common/images/provisioning-callbacks-config.png
|
||||
:alt: Provisioning callbacks config
|
||||
|
||||
Callbacks also require a Host Config Key, to ensure that foreign hosts with the URL cannot request configuration. Please provide a custom value for Host Config Key. The host key may be reused across multiple hosts to apply this job template against multiple hosts. Should you wish to control what hosts are able to request configuration, the key may be changed at any time.
|
||||
|
||||
@@ -978,6 +1006,7 @@ The following table notes the behavior (hierarchy) of variable precedence in AWX
|
||||
**AWX Variable Precedence Hierarchy (last listed wins)**
|
||||
|
||||
.. image:: ../common/images/Architecture-AWX_Variable_Precedence_Hierarchy.png
|
||||
:alt: AWX Variable Precedence Hierarchy
|
||||
|
||||
|
||||
Relaunching Job Templates
|
||||
|
||||
@@ -14,10 +14,11 @@ The default view is collapsed (**Compact**) with the job name, status, job type,
|
||||
|Jobs - home with example job|
|
||||
|
||||
.. |Jobs - home with example job| image:: ../common/images/jobs-home-with-example-job.png
|
||||
|
||||
:alt: Jobs List with Example Jobs
|
||||
.. image:: ../common/images/jobs-list-all-expanded.png
|
||||
:alt: Expanded Jobs List
|
||||
|
||||
Actions you can take from this screen include viewing the details and standard output of a particular job, relaunching (|launch|) jobs, or removing selected jobs. The relaunch operation only applies to relaunches of playbook runs and does not apply to project/inventory updates, system jobs, workflow jobs, etc.
|
||||
Actions you can take from this screen include viewing the details and standard output of a particular job, relaunching (|launch|) jobs, or removing selected jobs. The relaunch operation only applies to relaunches of playbook runs and does not apply to project/inventory updates, system jobs, workflow jobs, etc.
|
||||
|
||||
.. _ug_job_results:
|
||||
|
||||
@@ -29,12 +30,13 @@ When a job relaunches, you are directed to the Jobs Output screen as the job runs.
|
||||
|
||||
|
||||
.. image:: ../common/images/job-details-view-filters.png
|
||||
|
||||
:alt: Filter options in the Jobs Output window
|
||||
|
||||
- The **Stdout** option is the default display that shows the job processes and output
|
||||
- The **Event** option allows you to filter by the event(s) of interest, such as errors, host failures, host retries, items skipped, etc. You can include as many events in the filter as necessary.
|
||||
|
||||
.. image:: ../common/images/job-details-view-filters-examples.png
|
||||
:alt: Selected filter examples from the Jobs Output window
|
||||
|
||||
- The **Advanced** option is a refined search that allows you a combination of including or excluding criteria, searching by key, or by lookup type. For details about using Search, refer to the :ref:`ug_search` chapter.
|
||||
|
||||
@@ -55,14 +57,19 @@ When an inventory sync is executed, the full results automatically display in th
|
||||
The icons at the top right corner of the Output tab allow you to relaunch (|launch|), download (|download|) the job output, or delete (|delete|) the job.
|
||||
|
||||
.. |launch| image:: ../common/images/launch-button.png
|
||||
:alt: Launch Action Button
|
||||
.. |delete| image:: ../common/images/delete-button.png
|
||||
:alt: Delete Action Button
|
||||
.. |cancel| image:: ../common/images/job-cancel-button.png
|
||||
:alt: Cancel Action Button
|
||||
.. |download| image:: ../common/images/download.png
|
||||
:alt: Download Action Button
|
||||
|
||||
|
||||
|job details example of inventory sync|
|
||||
|
||||
.. |job details example of inventory sync| image:: ../common/images/jobs-show-job-results-for-inv-sync.png
|
||||
:alt: Example output for a successful Inventory Sync job
|
||||
|
||||
|
||||
.. note:: An inventory update can be performed while a related job is running. In cases where you have a big project (around 10 GB), disk space on ``/tmp`` may be an issue.
|
||||
@@ -71,11 +78,12 @@ The icons at the top right corner of the Output tab allow you to relaunch (|laun
|
||||
Inventory sync details
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Access the **Details** tab to provide details about the job execution.
|
||||
Access the **Details** tab to provide details about the job execution.
|
||||
|
||||
.. image:: ../common/images/jobs-show-job-details-for-inv-sync.png
|
||||
:alt: Example details for an Inventory Sync job
|
||||
|
||||
Notable details of the job executed are:
|
||||
Notable details of the job executed are:
|
||||
|
||||
- **Status**: Can be any of the following:
|
||||
|
||||
@@ -109,15 +117,17 @@ SCM Inventory Jobs
|
||||
When an inventory sourced from an SCM is executed, the full results automatically display in the Output tab. This shows the same information you would see if you ran it through the Ansible command line, and can be useful for debugging. The icons at the top right corner of the Output tab allow you to relaunch (|launch|), download (|download|) the job output, or delete (|delete|) the job.
|
||||
|
||||
.. image:: ../common/images/jobs-show-job-results-for-scm-job.png
|
||||
:alt: Example output for a successful SCM job
|
||||
|
||||
SCM inventory details
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Access the **Details** tab to provide details about the job execution and its associated project.
|
||||
Access the **Details** tab to provide details about the job execution and its associated project.
|
||||
|
||||
.. image:: ../common/images/jobs-show-job-details-for-scm-job.png
|
||||
:alt: Example details for an SCM job
|
||||
|
||||
Notable details of the job executed are:
|
||||
Notable details of the job executed are:
|
||||
|
||||
- **Status**: Can be any of the following:
|
||||
|
||||
@@ -157,6 +167,7 @@ Playbook Run Jobs
|
||||
When a playbook is executed, the full results automatically display in the Output tab. This shows the same information you would see if you ran it through the Ansible command line, and can be useful for debugging.
|
||||
|
||||
.. image:: ../common/images/jobs-show-job-results-for-example-job.png
|
||||
:alt: Example output for a successful playbook run
|
||||
|
||||
The events summary captures a tally of events that were run as part of this playbook:
|
||||
|
||||
@@ -169,7 +180,7 @@ The events summary captures a tally of events that were run as part of this play
|
||||
- the amount of time it took to complete the playbook run in the **Elapsed** field
|
||||
|
||||
.. image:: ../common/images/jobs-events-summary.png
|
||||
|
||||
:alt: Example summary details for a playbook
|
||||
|
||||
The icons next to the events summary allow you to relaunch (|launch|), download (|download|) the job output, or delete (|delete|) the job.
|
||||
|
||||
@@ -178,7 +189,7 @@ The host status bar runs across the top of the Output view. Hover over a section
|
||||
|Job - All Host Events|
|
||||
|
||||
.. |Job - All Host Events| image:: ../common/images/job-all-host-events.png
|
||||
|
||||
:alt: Show All Host Events
|
||||
|
||||
The output for a Playbook job is also accessible after launching a job from the **Jobs** tab of its Job Templates page.
|
||||
|
||||
@@ -200,24 +211,27 @@ Use Search to look up specific events, hostnames, and their statuses. To filter
|
||||
These statuses also display at bottom of each Stdout pane, in a group of "stats" called the Host Summary fields.
|
||||
|
||||
.. image:: ../common/images/job-std-out-host-summary-rescued-ignored.png
|
||||
|
||||
:alt: Example summary details in standard output
|
||||
|
||||
The example below shows a search with only unreachable hosts.
|
||||
|
||||
.. image:: ../common/images/job-std-out-filter-failed.png
|
||||
:alt: Example of errored jobs filtered by unreachable hosts
|
||||
|
||||
For more details about using the Search, refer to the :ref:`ug_search` chapter.
|
||||
|
||||
The standard output view displays all the events that occur on a particular job. By default, all rows are expanded so that all the details are displayed. Use the collapse-all button (|collapse-all|) to switch to a view that only contains the headers for plays and tasks. Click the (|expand-all|) button to view all lines of the standard output.
|
||||
|
||||
.. |collapse-all| image:: ../common/images/job-details-view-std-out-collapse-all-icon.png
|
||||
:alt: Collapse All Icon
|
||||
.. |expand-all| image:: ../common/images/job-details-view-std-out-expand-all-icon.png
|
||||
:alt: Expand All Icon
|
||||
|
||||
Alternatively, you can display all the details of a specific play or task by clicking on the arrow icons next to them. Click an arrow from sideways to downward to expand the lines associated with that play or task. Click the arrow back to the sideways position to collapse and hide the lines.
|
||||
|
||||
|
||||
.. image:: ../common/images/job-details-view-std-out-expand-collapse-icons.png
|
||||
|
||||
:alt: Expand and Collapse Icons
|
||||
|
||||
Things to note when viewing details in the expand/collapse mode:
|
||||
|
||||
@@ -250,7 +264,7 @@ The **Host Details** dialog shows information about the host affected by the sel
|
||||
- if applicable, the Ansible **Module** for the task, and any *arguments* for that module
|
||||
|
||||
.. image:: ../common/images/job-details-host-hostevent.png
|
||||
|
||||
:alt: Host Events Details
|
||||
|
||||
To view the results in JSON format, click on the **JSON** tab. To view the output of the task, click the **Standard Out**. To view errors from the output, click **Standard Error**.
|
||||
|
||||
@@ -262,7 +276,7 @@ Playbook run details
|
||||
Access the **Details** tab to provide details about the job execution.
|
||||
|
||||
.. image:: ../common/images/jobs-show-job-details-for-example-job.png
|
||||
|
||||
:alt: Example Job details for a playbook run
|
||||
|
||||
Notable details of the job executed are:
|
||||
|
||||
|
||||
@@ -68,6 +68,7 @@ To create a Notification Template:
|
||||
2. Click the **Add** button.
|
||||
|
||||
.. image:: ../common/images/notifications-template-add-new.png
|
||||
:alt: Create new notification template
|
||||
|
||||
3. Enter the name of the notification and a description in their respective fields, and specify the organization (required) it belongs to.
|
||||
|
||||
@@ -115,6 +116,7 @@ You must provide the following details to setup an email notification:
|
||||
- Timeout (in seconds): allows you to specify up to 120 seconds, the length of time AWX may attempt connecting to the email server before giving up.
|
||||
|
||||
.. image:: ../common/images/notification-template-email.png
|
||||
:alt: Email notification template
|
||||
|
||||
Grafana
|
||||
------------
|
||||
@@ -136,7 +138,7 @@ The other options of note are:
|
||||
- Disable SSL Verification: SSL verification is on by default, but you can choose to turn off verification the authenticity of the target's certificate. Environments that use internal or private CA's should select this option to disable verification.
|
||||
|
||||
.. image:: ../common/images/notification-template-grafana.png
|
||||
|
||||
:alt: Grafana notification template
|
||||
|
||||
IRC
|
||||
-----
|
||||
@@ -154,6 +156,7 @@ Connectivity information is straightforward:
|
||||
|
||||
|
||||
.. image:: ../common/images/notification-template-irc.png
|
||||
:alt: IRC notification template
|
||||
|
||||
Mattermost
|
||||
------------
|
||||
@@ -167,6 +170,7 @@ The Mattermost notification type provides a simple interface to Mattermost's mes
|
||||
- Disable SSL Verification: Turns off verification of the authenticity of the target's certificate. Environments that use internal or private CA's should select this option to disable verification.
|
||||
|
||||
.. image:: ../common/images/notification-template-mattermost.png
|
||||
:alt: Mattermost notification template
|
||||
|
||||
|
||||
PagerDuty
|
||||
@@ -182,6 +186,8 @@ PagerDuty is a fairly straightforward integration. First, create an API Key in t
|
||||
- Client Identifier: This will be sent along with the alert content to the pagerduty service to help identify the service that is using the api key/service. This is helpful if multiple integrations are using the same API key and service.
|
||||
|
||||
.. image:: ../common/images/notification-template-pagerduty.png
|
||||
:alt: PagerDuty notification template
|
||||
|
||||
|
||||
Rocket.Chat
|
||||
-------------
|
||||
@@ -194,6 +200,7 @@ The Rocket.Chat notification type provides an interface to Rocket.Chat's collabo
|
||||
- Disable SSL Verification: Turns off verification of the authenticity of the target's certificate. Environments that use internal or private CA's should select this option to disable verification.
|
||||
|
||||
.. image:: ../common/images/notification-template-rocketchat.png
|
||||
:alt: Rocket.Chat notification template
|
||||
|
||||
|
||||
Slack
|
||||
@@ -212,6 +219,7 @@ Once you have a bot/app set up, you must navigate to "Your Apps", click on the n
|
||||
You must also invite the notification bot to join the channel(s) in question in Slack. Note that private messages are not supported.
|
||||
|
||||
.. image:: ../common/images/notification-template-slack.png
|
||||
:alt: Slack notification template
|
||||
|
||||
|
||||
Twilio
|
||||
@@ -231,6 +239,8 @@ To setup Twilio, provide the following details:
|
||||
- Account SID
|
||||
|
||||
.. image:: ../common/images/notification-template-twilio.png
|
||||
:alt: Twilio notification template
|
||||
|
||||
|
||||
|
||||
Webhook
|
||||
@@ -257,6 +267,8 @@ The parameters for configuring webhooks are:
|
||||
|
||||
|
||||
.. image:: ../common/images/notification-template-webhook.png
|
||||
:alt: Webhook notification template
|
||||
|
||||
|
||||
|
||||
Webhook payloads
|
||||
@@ -333,6 +345,8 @@ Create custom notifications
|
||||
You can :ref:`customize the text content <ir_notifications_reference>` of each of the :ref:`ug_notifications_types` by enabling the **Customize Messages** portion at the bottom of the notifications form using the toggle button.
|
||||
|
||||
.. image:: ../common/images/notification-template-customize.png
|
||||
:alt: Custom notification template
|
||||
|
||||
|
||||
You can provide a custom message for various job events:
|
||||
|
||||
@@ -347,10 +361,12 @@ You can provide a custom message for various job events:
|
||||
The message forms vary depending on the type of notification you are configuring. For example, messages for email and PagerDuty notifications have the appearance of a typical email form with a subject and body, in which case, AWX displays the fields as **Message** and **Message Body**. Other notification types only expect a **Message** for each type of event:
|
||||
|
||||
.. image:: ../common/images/notification-template-customize-simple.png
|
||||
:alt: Custom notification template example
|
||||
|
||||
The **Message** fields are pre-populated with a template containing a top-level variable, ``job`` coupled with an attribute, such as ``id`` or ``name``, for example. Templates are enclosed in curly braces and may draw from a fixed set of fields provided by AWX, as shown in the pre-populated **Messages** fields.
|
||||
|
||||
.. image:: ../common/images/notification-template-customize-simple-syntax.png
|
||||
:alt: Custom notification template example syntax
|
||||
|
||||
This pre-populated field suggests commonly displayed messages to a recipient who is notified of an event. You can, however, customize these messages with different criteria by adding your own attribute(s) for the job as needed. Custom notification messages are rendered using Jinja - the same templating engine used by Ansible playbooks.
|
||||
|
||||
@@ -474,8 +490,8 @@ If you create a notification template that uses invalid syntax or references unu
|
||||
|
||||
If you save the notifications template without editing the custom message (or edit and revert back to the default values), the **Details** screen assumes the defaults and will not display the custom message tables. If you edit and save any of the values, the entire table displays in the **Details** screen.
|
||||
|
||||
.. image:: ../common/images/notifications-with-without-messages.png
|
||||
|
||||
.. image:: ../common/images/notifications-with-without-messages.png
|
||||
:alt: Notification template with and without a custom message
|
||||
|
||||
.. _ug_notifications_on_off:
|
||||
|
||||
@@ -498,11 +514,12 @@ You can enable notifications on job start, job success, and job failure, or any
|
||||
- Organizations
|
||||
|
||||
.. image:: ../common/images/projects-notifications-example-list.png
|
||||
|
||||
:alt: List of project notifications
|
||||
|
||||
For workflow templates that have approval nodes, in addition to *Start*, *Success*, and *Failure*, you can enable or disable certain approval-related events:
|
||||
|
||||
.. image:: ../common/images/wf-template-completed-notifications-view.png
|
||||
:alt: List of project notifications with approval nodes option
|
||||
|
||||
Refer to :ref:`ug_wf_approval_nodes` for additional detail on working with these types of nodes.
|
||||
|
||||
@@ -516,6 +533,7 @@ Configure the ``host`` hostname for notifications
|
||||
In the :ref:`System Settings <configure_awx_system>`, you can replace the default value in the **Base URL of the service** field with your preferred hostname to change the notification hostname.
|
||||
|
||||
.. image:: ../common/images/configure-awx-system-misc-baseurl.png
|
||||
:alt: Configuring base URL with preferred hostname
|
||||
|
||||
Refreshing your license also changes the notification hostname. New installations of AWX should not have to set the hostname for notifications.
|
||||
|
||||
@@ -529,7 +547,7 @@ Reset the ``AWX_URL_BASE``
|
||||
|
||||
The primary way that AWX determines how the base URL (``AWX_URL_BASE``) is defined is by looking at an incoming request and setting the server address based on that incoming request.
|
||||
|
||||
AWX takes settings values from the database first. If no settings values are found, it falls back to using the values from the settings files. If a user posts a license by navigating to the AWX host's IP adddress, the posted license is written to the settings entry in the database.
|
||||
AWX takes settings values from the database first. If no settings values are found, it falls back to using the values from the settings files. If a user posts a license by navigating to the AWX host's IP address, the posted license is written to the settings entry in the database.
|
||||
|
||||
To change the ``AWX_URL_BASE`` if the wrong address has been picked up, navigate to **Miscellaneous System settings** from the Settings menu using the DNS entry you wish to appear in notifications, and re-add your license.
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ An :term:`Organization` is a logical collection of **Users**, **Teams**, **Proje
|
||||
|awx hierarchy|
|
||||
|
||||
.. |awx hierarchy| image:: ../common/images/AWXHierarchy.png
|
||||
:alt: AWX Hierarchy
|
||||
|
||||
Access the Organizations page by clicking **Organizations** from the left navigation bar. The Organizations page displays all of the existing organizations for your installation. Organizations can be searched by **Name** or **Description**. Modify and remove organizations using the **Edit** and **Delete** buttons.
|
||||
|
||||
@@ -20,10 +21,12 @@ Access the Organizations page by clicking **Organizations** from the left naviga
|
||||
|Organizations - home showing example organization|
|
||||
|
||||
.. |Organizations - home showing example organization| image:: ../common/images/organizations-home-showing-example-organization.png
|
||||
:alt: Example of organizations home page
|
||||
|
||||
From this list view, you can edit the details of an organization (|edit button|) from the **Actions** menu.
|
||||
|
||||
.. |edit button| image:: ../common/images/edit-button.png
|
||||
:alt: Edit button
|
||||
|
||||
.. _ug_organizations_create:
|
||||
|
||||
@@ -35,6 +38,7 @@ Creating a New Organization
|
||||
|Organizations - new organization form|
|
||||
|
||||
.. |Organizations - new organization form| image:: ../common/images/organizations-new-organization-form.png
|
||||
:alt: Create new organization form
|
||||
|
||||
2. An organization has several attributes that may be configured:
|
||||
|
||||
@@ -51,7 +55,7 @@ Once created, AWX displays the Organization details, and allows for the managing
|
||||
|Organizations - show record for example organization|
|
||||
|
||||
.. |Organizations - show record for example organization| image:: ../common/images/organizations-show-record-for-example-organization.png
|
||||
|
||||
:alt: Organization details tab with edit, delete options
|
||||
|
||||
From the **Details** tab, you can edit or delete the organization.
|
||||
|
||||
@@ -73,6 +77,7 @@ Clicking on **Access** (beside **Details** when viewing your organization), disp
|
||||
|Organizations - show users for example organization|
|
||||
|
||||
.. |Organizations - show users for example organization| image:: ../common/images/organizations-show-users-permissions-organization.png
|
||||
:alt: Organization Access tab with user permissions
|
||||
|
||||
As you can manage the user membership for this Organization here, you can manage user membership on a per-user basis from the Users page by clicking **Users** from the left navigation bar. Organizations have a unique set of roles not described here. You can assign specific users certain levels of permissions within your organization, or allow them to act as an admin for a particular resource. Refer to :ref:`rbac-ug` for more information.
|
||||
|
||||
@@ -102,12 +107,14 @@ Work with Notifications
|
||||
Clicking the **Notifications** tab allows you to review any notification integrations you have setup.
|
||||
|
||||
.. image:: ../common/images/organizations-notifications-samples-list.png
|
||||
:alt: List of sample organization notifications
|
||||
|
||||
Use the toggles to enable or disable the notifications to use with your particular organization. For more detail, see :ref:`ug_notifications_on_off`.
|
||||
|
||||
If no notifications have been set up, you must create them from the **Notifications** option on the left navigation bar.
|
||||
|
||||
.. image:: ../common/images/organization-notifications-empty.png
|
||||
:alt: Empty organization notifications list
|
||||
|
||||
Refer to :ref:`ug_notifications_types` for additional details on configuring various notification types.
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ Assuming that the repository has already been configured for signing and verific
|
||||
4. When the user syncs the project, AWX (already configured, in this scenario) pulls in the new changes, checks that the public key associated with the project in AWX matches the private key that the checksum manifest was signed with (this prevents tampering with the checksum manifest itself), then re-calculates checksums of each file in the manifest to ensure that the checksum matches (and thus that no file has changed). It also looks to ensure that all files are accounted for: They must have been either included in, or excluded from, the ``MANIFEST.in`` file discussed below; if files have been added or removed unexpectedly, verification will fail.
|
||||
|
||||
.. image:: ../common/images/content-sign-diagram.png
|
||||
:alt: Content signing process diagram
|
||||
|
||||
|
||||
Prerequisites
|
||||
@@ -33,7 +34,7 @@ Prerequisites
|
||||
|
||||
.. _`How to create GPG keypairs`: https://www.redhat.com/sysadmin/creating-gpg-keypairs
|
||||
|
||||
Vist the `GnuPG documentation <https://www.gnupg.org/documentation/index.html>`_ for more information regarding GPG keys.
|
||||
Visit the `GnuPG documentation <https://www.gnupg.org/documentation/index.html>`_ for more information regarding GPG keys.
|
||||
|
||||
You can verify that you have a valid GPG keypair and in your default GnuPG keyring, with the following command:
|
||||
|
||||
@@ -68,16 +69,19 @@ In order to use the GPG key for content singing and validation in AWX, you must
|
||||
5. Click **Save** when done.
|
||||
|
||||
.. image:: ../common/images/credentials-gpg-details.png
|
||||
:alt: Example GPG credential details
|
||||
|
||||
This credential can now be selected in :ref:`projects <ug_projects_add>`, and content verification will automatically take place on future project syncs.
|
||||
|
||||
.. image:: ../common/images/project-create-with-gpg-creds.png
|
||||
:alt: Create project with example GPG credentials
|
||||
|
||||
.. note::
|
||||
|
||||
Use the project cache SCM timeout to control how often you want AWX to re-validate the signed content. When a project is configured to update on launch (of any job template configured to use that project), you can enable the cache timeout setting, which tells it to update after N seconds have passed since the last update. If validation is running too frequently, you can slow down how often project updates occur by specifying the time in the **Cache Timeout** field of the Option Details pane of the project.
|
||||
|
||||
.. image:: ../common/images/project-update-launch-cache-timeout.png
|
||||
:alt: Checked Update Revision on Launch option with Cache Timeout value specified from the Create new project page
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -19,18 +19,24 @@ The Projects page displays the list of the projects that are currently available
|
||||
|Projects - home with example project|
|
||||
|
||||
.. |Projects - home with example project| image:: ../common/images/projects-list-all.png
|
||||
:alt: Compact Projects list view with two projects shown.
|
||||
|
||||
.. image:: ../common/images/projects-list-all-expanded.png
|
||||
:alt: Projects list view showing arrows used to expand and collapse projects in the view.
|
||||
|
||||
For each project listed, you can get the latest SCM revision (|refresh|), edit the project (|edit|), or copy the project attributes (|copy|), using the respective icons next to each project. Projects are allowed to be updated while a related job is running. In cases where you have a big project (around 10 GB), disk space on ``/tmp`` may be an issue.
|
||||
|
||||
.. |edit-icon| image:: ../common/images/edit-button.png
|
||||
:alt: edit button
|
||||
|
||||
.. |copy| image:: ../common/images/copy-button.png
|
||||
:alt: copy button
|
||||
|
||||
.. |refresh| image:: ../common/images/refresh-gray.png
|
||||
:alt: Refresh button
|
||||
|
||||
.. |edit| image:: ../common/images/edit-button.png
|
||||
:alt: edit button
|
||||
|
||||
|
||||
**Status** indicates the state of the project and may be one of the following (note that you can also filter your view by specific status types):
|
||||
@@ -72,6 +78,7 @@ To create a new project:
|
||||
|Projects - create new project|
|
||||
|
||||
.. |Projects - create new project| image:: ../common/images/projects-create-new-project.png
|
||||
:alt: Create New Project form
|
||||
|
||||
2. Enter the appropriate details into the following required fields:
|
||||
|
||||
@@ -119,6 +126,7 @@ If you have trouble adding a project path, check the permissions and SELinux con
|
||||
Correct this issue by creating the appropriate playbook directories and checking out playbooks from your SCM or otherwise copying playbooks into the appropriate playbook directories.
|
||||
|
||||
.. |Projects - create new warning| image:: ../common/images/projects-create-manual-warning.png
|
||||
:alt: Create New Project form showing warning associated with selecting Source Control Credential Type of Manual
|
||||
|
||||
|
||||
.. _ug_projects_scm_types:
|
||||
@@ -147,12 +155,14 @@ To configure playbooks to use source control, in the Project **Details** tab:
|
||||
|Projects - create SCM project|
|
||||
|
||||
.. |Projects - create SCM project| image:: ../common/images/projects-create-scm-project.png
|
||||
:alt: Create New Project form for Git Source Control Credential Type.
|
||||
|
||||
2. Enter the appropriate details into the following fields:
|
||||
|
||||
- **SCM URL** - See an example in the tooltip |tooltip|.
|
||||
|
||||
.. |tooltip| image:: ../common/images/tooltips-icon.png
|
||||
:alt: tooltips icon
|
||||
|
||||
- **SCM Branch/Tag/Commit** - Optionally enter the SCM branch, tags, commit hashes, arbitrary refs, or revision number (if applicable) from the source control (Git or Subversion) to checkout. Some commit hashes and refs may not be available unless you also provide a custom refspec in the next field. If left blank, the default is HEAD which is the last checked out Branch/Tag/Commit for this project.
|
||||
- **SCM Refspec** - This field is an option specific to git source control and only advanced users familiar and comfortable with git should specify which references to download from the remote repository. For more detail, see :ref:`job branch overriding <ug_job_branching>`.
|
||||
@@ -168,6 +178,7 @@ To configure playbooks to use source control, in the Project **Details** tab:
|
||||
- **Allow Branch Override** - Allows a job template or an inventory source that uses this project to launch with a specified SCM branch or revision other than that of the project's. For more detail, see :ref:`job branch overriding <ug_job_branching>`.
|
||||
|
||||
.. image:: ../common/images/projects-create-scm-project-branch-override-checked.png
|
||||
:alt: create scm project branch override checked
|
||||
|
||||
4. Click **Save** to save your project.
|
||||
|
||||
@@ -198,6 +209,7 @@ To configure playbooks to use Red Hat Insights, in the Project **Details** tab:
|
||||
- **Update Revision on Launch** - Updates the revision of the project to the current revision in the remote source control, as well as cache the roles directory from :ref:`Galaxy <ug_galaxy>` or :ref:`Collections <ug_collections>`. AWX ensures that the local revision matches and that the roles and collections are up-to-date with the last update. Also, to avoid job overflows if jobs are spawned faster than the project can sync, selecting this allows you to configure a Cache Timeout to cache prior project syncs for a certain number of seconds.
|
||||
|
||||
.. image:: ../common/images/projects-create-scm-insights.png
|
||||
:alt: Create New Project form for Red Hat Insights Source Control Credential Type.
|
||||
|
||||
|
||||
3. Click **Save** to save your project.
|
||||
@@ -230,6 +242,7 @@ To configure playbooks to use a remote archive, in the Project **Details** tab:
|
||||
- **Allow Branch Override** - Not recommended, as this option allows a job template that uses this project to launch with a specified SCM branch or revision other than that of the project's.
|
||||
|
||||
.. image:: ../common/images/projects-create-scm-rm-archive.png
|
||||
:alt: Create New Project form for Remote Archive Source Control Credential Type.
|
||||
|
||||
.. note::
|
||||
Since this SCM type is intended to support the concept of unchanging artifacts, it is advisable to disable Galaxy integration (for roles, at minimum).
|
||||
@@ -253,15 +266,18 @@ Updating projects from source control
|
||||
|projects - list all|
|
||||
|
||||
.. |projects - list all| image:: ../common/images/projects-list-all.png
|
||||
:alt: Projects list view with the latest revision information of the projects that synched.
|
||||
|
||||
2. Click on project's status under the **Status** column to get further details about the update process.
|
||||
|
||||
.. image:: ../common/images/projects-list-status-more.png
|
||||
:alt: Projects list view with example project with a successful status.
|
||||
|
||||
|
||||
|Project - update status|
|
||||
|
||||
.. |Project - update status| image:: ../common/images/projects-update-status.png
|
||||
:alt: Example project with real-time standard output details.
|
||||
|
||||
|
||||
Work with Permissions
|
||||
@@ -276,6 +292,7 @@ You can access the project permissions via the **Access** tab next to the **Deta
|
||||
|Projects - permissions list for example project|
|
||||
|
||||
.. |Projects - permissions list for example project| image:: ../common/images/projects-permissions-example.png
|
||||
:alt: Access tab of a sample project that shows list of users who have permissions to this project.
|
||||
|
||||
|
||||
Add Permissions
|
||||
@@ -290,12 +307,14 @@ Work with Notifications
|
||||
Clicking the **Notifications** tab allows you to review any notification integrations you have setup.
|
||||
|
||||
.. image:: ../common/images/projects-notifications-example-list.png
|
||||
:alt: List of notifications configured for this project.
|
||||
|
||||
Use the toggles to enable or disable the notifications to use with your particular project. For more detail, see :ref:`ug_notifications_on_off`.
|
||||
|
||||
If no notifications have been set up, you can configure them from the **Notifications** link from the left navigation bar to create a new notification.
|
||||
|
||||
.. image:: ../common/images/project-notifications-empty.png
|
||||
:alt: Notifications Templates page with no notification templates found.
|
||||
|
||||
Refer to :ref:`ug_notifications_types` for additional details on configuring various notification types.
|
||||
|
||||
@@ -306,14 +325,17 @@ Work with Job Templates
|
||||
Clicking on **Job Templates** allows you to add and review any job templates or workflow templates associated with this project.
|
||||
|
||||
.. image:: ../common/images/projects-templates-example-list.png
|
||||
:alt: List of job templates associated with this project.
|
||||
|
||||
Click on the recent jobs that ran using that template to see its details and other useful information. You can sort this list by various criteria, and perform a search to filter the templates of interest.
|
||||
|
||||
.. image:: ../common/images/projects-templates-search-dropdown.png
|
||||
:alt: Job Templates tab of the project showing an example drop-down menu that can be used to filter your search.
|
||||
|
||||
From this view, you can also launch (|launch|), edit (|edit|), or copy (|copy|) the template configuration.
|
||||
|
||||
.. |launch| image:: ../common/images/launch-button.png
|
||||
:alt: launch button
|
||||
|
||||
|
||||
Work with Schedules
|
||||
@@ -326,6 +348,7 @@ Work with Schedules
|
||||
Clicking on **Schedules** allows you to review any schedules set up for this project.
|
||||
|
||||
.. image:: ../common/images/generic-schedules-list-configured.png
|
||||
:alt: List of configured schedules that may be used with this project.
|
||||
|
||||
|
||||
Schedule a Project
|
||||
@@ -358,12 +381,14 @@ At the end of a Project update, AWX searches for a file called ``requirements.ym
|
||||
This file allows you to reference Galaxy roles or roles within other repositories which can be checked out in conjunction with your own project. The addition of this Ansible Galaxy support eliminates the need to create git submodules for achieving this result. Given that SCM projects (along with roles/collections) are pulled into and executed from a private job environment, a <private job directory> specific to the project within ``/tmp`` is created by default. However, you can specify another **Job Execution Path** based on your environment in the Jobs Settings tab of the Settings window:
|
||||
|
||||
.. image:: ../common/images/configure-awx-jobs-execution-path.png
|
||||
:alt: Job Settings page showing where to configure the Job execution path.
|
||||
|
||||
The cache directory is a subdirectory inside the global projects folder. The content may be copied from the cache location to ``<job private directory>/requirements_roles`` location.
|
||||
|
||||
By default, AWX has a system-wide setting that allows roles to be dynamically downloaded from the ``roles/requirements.yml`` file for SCM projects. You may turn off this setting in the **Jobs settings** screen of the Settings menu by switching the **Enable Role Download** toggle button to **OFF**.
|
||||
|
||||
.. image:: ../common/images/configure-awx-jobs-download-roles.png
|
||||
:alt: Job Settings page showing the option to Enable Role Download.
|
||||
|
||||
|
||||
Whenever a project sync runs, AWX determines if the project source and any roles from Galaxy and/or Collections are out of date with the project. Project updates will download the roles inside the update.
|
||||
@@ -377,6 +402,7 @@ In short, jobs would download the most recent roles before every job run. Roles
|
||||
|update-on-launch|
|
||||
|
||||
.. |update-on-launch| image:: ../common/images/projects-scm-update-options-update-on-launch-checked.png
|
||||
:alt: SCM update options Update Revision on Launch checked.
|
||||
|
||||
.. end reused section
|
||||
|
||||
@@ -405,6 +431,7 @@ In the User Interface, you can configure these settings in the Jobs settings win
|
||||
|
||||
|
||||
.. image:: ../common/images/configure-awx-jobs-path-to-expose.png
|
||||
:alt: Job Settings page showing example paths to expose to isolated jobs.
|
||||
|
||||
|
||||
.. _ug_collections:
|
||||
@@ -421,6 +448,7 @@ AWX supports project-specific `Ansible collections <https://docs.ansible.com/ans
|
||||
By default, AWX has a system-wide setting that allows collections to be dynamically downloaded from the ``collections/requirements.yml`` file for SCM projects. You may turn off this setting in the **Jobs settings** tab of the Settings menu by switching the **Enable Collections Download** toggle button to **OFF**.
|
||||
|
||||
.. image:: ../common/images/configure-awx-jobs-download-collections.png
|
||||
:alt: Job Settings page showing where to enable collection(s) download.
|
||||
|
||||
Roles and collections are locally cached for performance reasons, and you will need to select **Update Revision on Launch** in the project SCM Update Options to ensure this:
|
||||
|
||||
@@ -439,6 +467,7 @@ Before AWX can use |ah| as the default source for collections content, you need
|
||||
2. Click the copy icon to copy the API token to the clipboard.
|
||||
|
||||
.. image:: ../common/images/projects-ah-loaded-token-shown.png
|
||||
:alt: Connect to Hub page showing where to copy the offline token.
|
||||
|
||||
3. To use the public |ah|, create an |ah| credential using the copied token and pointing to the URLs shown in the **Server URL** and **SSO URL** fields of the token page:
|
||||
|
||||
@@ -449,15 +478,19 @@ Before AWX can use |ah| as the default source for collections content, you need
|
||||
4. To use a private |ah|, create an |ah| credential using a token retrieved from the Repo Management dashboard of your local |ah| and pointing to the published repo URL as shown:
|
||||
|
||||
.. image:: ../common/images/projects-ah-repo-mgmt-get-token.png
|
||||
:alt: The Repo Management dashboard of your local Automation Hub.
|
||||
.. image:: ../common/images/projects-ah-repo-mgmt-repos-published.png
|
||||
:alt: The Get token button next to the published repo URL in the Repo Management dashboard of your local Automation Hub.
|
||||
|
||||
You can create different repos with different namespaces/collections in them. But for each repo in |ah| you need to create a different |ah| credential. Copy the **Ansible CLI URL** from the |ah| UI in the format of ``https://$<hub_url>/api/galaxy/content/<repo you want to pull from>`` into the **Galaxy Server URL** field of the *Create Credential* form:
|
||||
|
||||
.. image:: ../common/images/projects-create-ah-credential.png
|
||||
:alt: Create New Credential form for Ansible Galaxy/Automation Hub API Token Credential Type.
|
||||
|
||||
5. Navigate to the organization for which you want to be able to sync content from |ah| and add the new |ah| credential to the organization. This step allows you to associate each organization with the |ah| credential (i.e. repo) that you want to be able to use content from.
|
||||
|
||||
.. image:: ../common/images/projects-organizations-add-ah-credential.png
|
||||
:alt: Edit example default organizations form with Ansible Galaxy and Automation Hub credentials.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -472,14 +505,17 @@ You can create different repos with different namespaces/collections in them. Bu
|
||||
6. If the |ah| has self-signed certificates, click the toggle to enable the setting **Ignore Ansible Galaxy SSL Certificate Verification**. For **public Automation Hub**, which uses a signed certificate, click the toggle to disable it instead. Note this is a global setting:
|
||||
|
||||
.. image:: ../common/images/settings-jobs-ignore-galaxy-certs.png
|
||||
:alt: Job Settings page showing where to enable the option to ignore Ansible Galaxy SSL Certificate Verification.
|
||||
|
||||
7. Create a project, where the source repository specifies the necessary collections in a requirements file located in the ``collections/requirements.yml`` file. Refer to the syntax described in the corresponding `Ansible documentation <https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#install-multiple-collections-with-a-requirements-file>`_.
|
||||
|
||||
.. image:: ../common/images/projects-add-ah-source-repo.png
|
||||
:alt: The URL for the Source Control URL in the Type Details section of the Create New Project form.
|
||||
|
||||
8. In the Projects list view, click |update| to run an update against this project. AWX fetches the Galaxy collections from the ``collections/requirements.yml`` file and reports it as changed; the collections will now be installed for any job template using this project.
|
||||
|
||||
.. |update| image:: ../common/images/refresh-gray.png
|
||||
:alt: Refresh button.
|
||||
|
||||
.. note::
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
|
||||
.. _ug_security:
|
||||
|
||||
Security
|
||||
@@ -197,7 +198,7 @@ The following table lists the RBAC system roles and a brief description of the h
|
||||
+-----------------------------------------------------------------------+------------------------------------------------------------------------------------------+
|
||||
| Admin Role - Organizations, Teams, Inventory, Projects, Job Templates | Manages all aspects of a defined Organization, Team, Inventory, Project, or Job Template |
|
||||
+-----------------------------------------------------------------------+------------------------------------------------------------------------------------------+
|
||||
| Auditor Role - All | Views all aspects of a defined Organization, Project, Inventory, or Job Template |
|
||||
| Auditor Role - All | Views all aspects of a defined Organization, Team, Inventory, Project, or Job Template |
|
||||
+-----------------------------------------------------------------------+------------------------------------------------------------------------------------------+
|
||||
| Execute Role - Job Templates | Runs assigned Job Template |
|
||||
+-----------------------------------------------------------------------+------------------------------------------------------------------------------------------+
|
||||
|
||||
@@ -14,10 +14,12 @@ Access the Teams page by clicking **Teams** from the left navigation bar. The te
|
||||
|
||||
|
||||
.. image:: ../common/images/organizations-teams-list.png
|
||||
:alt: Teams page containing a list of teams and the organizations they belong to.
|
||||
|
||||
Clicking the Edit (|edit-button|) button next to the list of **Teams** allows you to edit details about the team. You can also review **Users** and **Permissions** associated with this Team.
|
||||
|
||||
.. |edit-button| image:: ../common/images/edit-button.png
|
||||
:alt: Edit Button
|
||||
|
||||
|
||||
.. _ug_team_create:
|
||||
@@ -33,6 +35,7 @@ To create a new Team:
|
||||
|Teams - create new team|
|
||||
|
||||
.. |Teams - create new team| image:: ../common/images/teams-create-new-team.png
|
||||
:alt: Create New Team Form
|
||||
|
||||
2. Enter the appropriate details into the following fields:
|
||||
|
||||
@@ -47,6 +50,7 @@ Once the Team is successfully created, AWX opens the **Details** dialog, which a
|
||||
|Teams - example team successfully created|
|
||||
|
||||
.. |Teams - example team successfully created| image:: ../common/images/teams-example-team-successfully-created.png
|
||||
:alt: Example Team Successfully Created
|
||||
|
||||
|
||||
Team Access
|
||||
@@ -60,6 +64,7 @@ This tab displays the list of Users that are members of this Team. This list may
|
||||
|Teams - users list|
|
||||
|
||||
.. |Teams - users list| image:: ../common/images/teams-users-list.png
|
||||
:alt: Teams list showing the Access tab displaying a list of users and their roles.
|
||||
|
||||
|
||||
.. _ug_teams_permissions:
|
||||
@@ -78,10 +83,12 @@ In order to add a user to a team, the user must already be created. Refer to :re
|
||||
To remove roles for a particular user, click the disassociate (x) button next to its resource.
|
||||
|
||||
.. image:: ../common/images/permissions-disassociate.png
|
||||
:alt: Access tab with list of users and an arrow pointing to the disassociate button next to a user's role.
|
||||
|
||||
This launches a confirmation dialog, asking you to confirm the disassociation.
|
||||
|
||||
.. image:: ../common/images/permissions-disassociate-confirm.png
|
||||
:alt: Disassociation Confirmation
|
||||
|
||||
|
||||
Team Roles
|
||||
@@ -97,6 +104,7 @@ Selecting the **Roles** view displays a list of the permissions that are current
|
||||
|Teams - permissions list|
|
||||
|
||||
.. |Teams - permissions list| image:: ../common/images/teams-permissions-sample-roles.png
|
||||
:alt: Permissions list with resource names, type and their associated roles.
|
||||
|
||||
The set of privileges assigned to Teams that provide the ability to read, modify, and administer projects, inventories, and other AWX elements are permissions. By default, the Team is given the "read" permission (also called a role).
|
||||
|
||||
@@ -111,33 +119,28 @@ To add permissions to a Team:
|
||||
1. Click the **Add** button, which opens the Add Permissions Wizard.
|
||||
|
||||
.. image:: ../common/images/teams-users-add-permissions-form.png
|
||||
:alt: Add Permissions Form
|
||||
:alt: Add Teams Permissions Wizard step 1, choose the resource type.
|
||||
|
||||
2. Click to select the object for which the team will have access and click **Next**.
|
||||
|
||||
3. Click to select the resource to assign team roles and click **Next**.
|
||||
|
||||
.. image:: ../common/images/teams-permissions-templates-select.png
|
||||
:alt: Add Teams Permissions Wizard step 2, choose the resources from the list, Demo Job template selected.
|
||||
|
||||
4. Click the checkbox beside the role to assign that role to your chosen type of resource. Different resources have different options available.
|
||||
|
||||
.. image:: ../common/images/teams-permissions-template-roles.png
|
||||
:alt: Add Teams Permissions Wizard step 3, choose the roles to apply to the previously selected resource.
|
||||
|
||||
|
||||
5. Click **Save** when done, and the Add Permissions Wizard closes to display the updated profile for the user with the roles assigned for each selected resource.
|
||||
5. Click **Save** when done, and the Add Permissions Wizard closes to display the updated profile for the team with the roles assigned for each selected resource.
|
||||
|
||||
.. image:: ../common/images/teams-permissions-sample-roles.png
|
||||
|
||||
To remove Permissions for a particular resource, click the disassociate (x) button next to its resource. This launches a confirmation dialog, asking you to confirm the disassociation.
|
||||
:alt: Updated profile for each team's resources and their roles.
|
||||
|
||||
To remove Permissions for a particular resource, click the disassociate (x) button next to its resource. This launches a confirmation dialog, asking you to confirm the disassociation.
|
||||
|
||||
.. note::
|
||||
|
||||
You can also add teams, individual users, or multiple users and assign them permissions at the object level (projects, inventories, job templates, and workflow templates) as well. This feature reduces the time it takes for an organization to onboard many users at one time.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
.. _ug_users:
|
||||
.. _ug_users:
|
||||
|
||||
Users
|
||||
-----
|
||||
|
||||
.. index::
|
||||
single: users
|
||||
|
||||
|
||||
A :term:`User` is someone who has access to AWX with associated permissions and credentials. Access the Users page by clicking **Users** from the left navigation bar. The User list may be sorted and searched by **Username**, **First Name**, or **Last Name**; click the column headers to toggle your sorting preference.
|
||||
|
||||
@@ -14,7 +13,6 @@ A :term:`User` is someone who has access to AWX with associated permissions and
|
||||
|
||||
You can easily view permissions and user type information by looking beside their user name in the User overview screen.
|
||||
|
||||
|
||||
.. _ug_users_create:
|
||||
|
||||
Create a User
|
||||
@@ -50,6 +48,7 @@ Three types of Users can be assigned:
|
||||
Once the user is successfully created, the **User** dialog opens for that newly created User.
|
||||
|
||||
.. |edit-button| image:: ../common/images/edit-button.png
|
||||
:alt: Edit button
|
||||
|
||||
.. image:: ../common/images/users-edit-user-form.png
|
||||
:alt: Edit User Form
|
||||
@@ -63,10 +62,12 @@ The same window opens whether you click on the user's name, or the Edit (|edit-b
|
||||
If the user is not a newly-created user, the user's details screen displays the last login activity of that user.
|
||||
|
||||
.. image:: ../common/images/users-last-login-info.png
|
||||
:alt: User details with last login information
|
||||
|
||||
When you log in as yourself, and view the details of your own user profile, you can manage tokens from your user profile. See :ref:`ug_users_tokens` for more detail.
|
||||
|
||||
.. image:: ../common/images/user-with-token-button.png
|
||||
:alt: User details with Tokens tab highlighted
|
||||
|
||||
.. _ug_users_delete:
|
||||
|
||||
@@ -80,10 +81,10 @@ Before you can delete a user, you must have user permissions. When you delete a
|
||||
2. Select the check box(es) for the user(s) that you want to remove and click **Delete**.
|
||||
|
||||
.. image:: ../common/images/users-home-users-checked-delete.png
|
||||
:alt: Users list view with two users checked
|
||||
|
||||
3. Click **Delete** in the confirmation warning message to permanently delete the user.
|
||||
|
||||
|
||||
Users - Organizations
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
@@ -96,6 +97,7 @@ Organization membership cannot be modified from this display panel.
|
||||
|Users - Organizations list for example user|
|
||||
|
||||
.. |Users - Organizations list for example user| image:: ../common/images/users-organizations-list-for-example-user.png
|
||||
:alt: Users - Organizations list for example user
|
||||
|
||||
Users - Teams
|
||||
~~~~~~~~~~~~~
|
||||
@@ -110,7 +112,7 @@ Until a Team has been created and the user has been assigned to that team, the a
|
||||
|Users - teams list for example user|
|
||||
|
||||
.. |Users - teams list for example user| image:: ../common/images/users-teams-list-for-example-user.png
|
||||
|
||||
:alt: Users - teams list for example user - empty
|
||||
|
||||
.. _ug_users_roles:
|
||||
|
||||
@@ -121,7 +123,6 @@ Users - Roles
|
||||
pair: users; permissions
|
||||
pair: users; roles
|
||||
|
||||
|
||||
The set of permissions assigned to this user (role-based access controls) that provide the ability to read, modify, and administer projects, inventories, job templates, and other AWX elements are Roles.
|
||||
|
||||
.. note::
|
||||
@@ -133,6 +134,7 @@ This screen displays a list of the roles that are currently assigned to the sele
|
||||
|Users - permissions list for example user|
|
||||
|
||||
.. |Users - permissions list for example user| image:: ../common/images/users-permissions-list-for-example-user.png
|
||||
:alt: Users - permissions list for example user
|
||||
|
||||
.. _ug_users_permissions:
|
||||
|
||||
@@ -144,31 +146,31 @@ To add permissions to a particular user:
|
||||
1. Click the **Add** button, which opens the Add Permissions Wizard.
|
||||
|
||||
.. image:: ../common/images/users-add-permissions-form.png
|
||||
:alt: Add Permissions Form
|
||||
:alt: Add User Permissions Form, first step, Add resource type
|
||||
|
||||
2. Click to select the object for which the user will have access and click **Next**.
|
||||
|
||||
3. Click to select the resource to assign user roles and click **Next**.
|
||||
|
||||
.. image:: ../common/images/users-permissions-IG-select.png
|
||||
:alt: Add User Permissions Form, second step, Select items from list - instance group checked
|
||||
|
||||
4. Click the checkbox beside the role to assign that role to your chosen type of resource. Different resources have different options available.
|
||||
|
||||
.. image:: ../common/images/users-permissions-IG-roles.png
|
||||
|
||||
:alt: Add User Permissions Form, final step, Select roles to apply - "Use" role checked
|
||||
|
||||
5. Click **Save** when done, and the Add Permissions Wizard closes to display the updated profile for the user with the roles assigned for each selected resource.
|
||||
|
||||
.. image:: ../common/images/users-permissions-sample-roles.png
|
||||
:alt: Users - Permissions Sample Roles
|
||||
|
||||
To remove Permissions for a particular resource, click the disassociate (x) button next to its resource. This launches a confirmation dialog, asking you to confirm the disassociation.
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
You can also add teams, individual users, or multiple users and assign them permissions at the object level (templates, credentials, inventories, projects, organizations, or instance groups) as well. This feature reduces the time it takes for an organization to onboard many users at one time.
|
||||
|
||||
|
||||
.. _ug_users_tokens:
|
||||
|
||||
Users - Tokens
|
||||
@@ -179,4 +181,3 @@ The **Tokens** tab will only be present for your user (yourself). Before you add
|
||||
1. If not already selected, click on your user from the Users list view to configure your OAuth 2 tokens.
|
||||
|
||||
.. include:: ../common/add-token.rst
|
||||
|
||||
|
||||
@@ -39,6 +39,7 @@ AWX has the ability to run jobs based on a triggered webhook event coming in. Jo
|
||||
g. In the Scope fields, the automation webhook only needs repo scope access, with the exception of invites. For information about other scopes, click the link right above the table to access the docs.
|
||||
|
||||
.. image:: ../common/images/webhooks-create-webhook-github-scope.png
|
||||
:alt: Link to more information on scopes
|
||||
|
||||
h. Click the **Generate Token** button.
|
||||
|
||||
@@ -50,26 +51,31 @@ AWX has the ability to run jobs based on a triggered webhook event coming in. Jo
|
||||
b. Make note of the name of this credential, as it will be used in the job template that posts back to GitHub.
|
||||
|
||||
.. image:: ../common/images/webhooks-create-credential-github-PAT-token.png
|
||||
:alt: Enter your generated PAT into the Token field
|
||||
|
||||
c. Go to the job template with which you want to enable webhooks, and select the webhook service and credential you created in the previous step.
|
||||
|
||||
.. image:: ../common/images/webhooks-job-template-gh-webhook-credential.png
|
||||
:alt: Select the webhook service and credential you created
|
||||
|
||||
|
|
||||
|
||||
d. Click **Save**. Now your job template is set up to be able to post back to GitHub. An example of one may look like this:
|
||||
|
||||
.. image:: ../common/images/webhooks-awx-to-github-status.png
|
||||
:alt: An example GitHub status that shows all checks have passed
|
||||
|
||||
.. _ug_webhooks_setup_github:
|
||||
|
||||
3. Go to a specific GitHub repo you want to configure webhooks and click **Settings**.
|
||||
|
||||
.. image:: ../common/images/webhooks-github-repo-settings.png
|
||||
:alt: Settings link in your GitHub repo
|
||||
|
||||
4. Under Options, click **Webhooks**.
|
||||
|
||||
.. image:: ../common/images/webhooks-github-repo-settings-options.png
|
||||
:alt: Webhooks link under Options
|
||||
|
||||
5. On the Webhooks page, click **Add webhook**.
|
||||
|
||||
@@ -80,22 +86,24 @@ AWX has the ability to run jobs based on a triggered webhook event coming in. Jo
|
||||
c. Copy the contents of the **Webhook Key** from the job template above and paste it in the **Secret** field.
|
||||
d. Leave **Enable SSL verification** selected.
|
||||
|
||||
.. image:: ../common/images/webhooks-github-repo-add-webhook.png
|
||||
|
||||
|
|
||||
.. image:: ../common/images/webhooks-github-repo-add-webhook.png
|
||||
:alt: Add Webhook page
|
||||
|
||||
e. Next, you must select the types of events you want to trigger a webhook. Any such event will trigger the Job or Workflow. In order to have job status (pending, error, success) sent back to GitHub, you must select **Pull requests** in the individual events section.
|
||||
|
||||
.. image:: ../common/images/webhooks-github-repo-choose-events.png
|
||||
:alt: List of trigger events for the webhook
|
||||
|
||||
f. Leave **Active** checked and click **Add Webhook**.
|
||||
|
||||
.. image:: ../common/images/webhooks-github-repo-add-webhook-actve.png
|
||||
:alt: Active option and Add Webhook button
|
||||
|
||||
|
||||
7. After your webhook is configured, it displays in the list of webhooks active for your repo, along with the ability to edit or delete it. Click on a webhook, and it brings you to the Manage webhook screen. Scroll to the very bottom of the screen to view all the delivery attempts made to your webhook and whether they succeeded or failed.
|
||||
|
||||
.. image:: ../common/images/webhooks-github-repo-webhooks-deliveries.png
|
||||
:alt: An example listing of recent deliveries
|
||||
|
||||
For more information, refer to the `GitHub Webhooks developer documentation <https://developer.github.com/webhooks/>`_.
|
||||
|
||||
@@ -113,12 +121,14 @@ AWX has the ability to run jobs based on a triggered webhook event coming in. Jo
|
||||
b. On the sidebar, under User Settings, click **Access Tokens**.
|
||||
|
||||
.. image:: ../common/images/webhooks-create-webhook-gitlab-settings.png
|
||||
:alt: Access Tokens link under User Settings
|
||||
|
||||
c. In the **Name** field, enter a brief description about what this PAT will be used for.
|
||||
d. Skip the **Expires at** field unless you want to set an expiration date for your webhook.
|
||||
e. In the Scopes fields, select the ones applicable to your integration. For AWX, API is the only selection necessary.
|
||||
|
||||
.. image:: ../common/images/webhooks-create-webhook-gitlab-scope.png
|
||||
:alt: Personal Access Token page
|
||||
|
||||
f. Click the **Create personal access token** button.
|
||||
|
||||
@@ -130,16 +140,19 @@ AWX has the ability to run jobs based on a triggered webhook event coming in. Jo
|
||||
b. Make note of the name of this credential, as it will be used in the job template that posts back to GitHub.
|
||||
|
||||
.. image:: ../common/images/webhooks-create-credential-gitlab-PAT-token.png
|
||||
:alt: Create New Credential page
|
||||
|
||||
c. Go to the job template with which you want to enable webhooks, and select the webhook service and credential you created in the previous step.
|
||||
|
||||
.. image:: ../common/images/webhooks-job-template-gl-webhook-credential.png
|
||||
:alt: Select the webhook credential you created
|
||||
|
||||
|
|
||||
|
||||
d. Click **Save**. Now your job template is set up to be able to post back to GitLab. An example of one may look like this:
|
||||
|
||||
.. image:: ../common/images/webhooks-awx-to-gitlab-status.png
|
||||
:alt: An example GitLab status message
|
||||
|
||||
|
||||
.. _ug_webhooks_setup_gitlab:
|
||||
@@ -147,6 +160,7 @@ AWX has the ability to run jobs based on a triggered webhook event coming in. Jo
|
||||
3. Go to a specific GitLab repo you want to configure webhooks and click **Settings > Integrations**.
|
||||
|
||||
.. image:: ../common/images/webhooks-gitlab-repo-settings.png
|
||||
:alt: Integrations link under Settings
|
||||
|
||||
4. To complete the Integrations page, you need to :ref:`enable webhooks in a job template <ug_jt_enable_webhooks>` (or in a :ref:`workflow job template <ug_wfjt_enable_webhooks>`), which will provide you with the following information:
|
||||
|
||||
@@ -157,6 +171,7 @@ AWX has the ability to run jobs based on a triggered webhook event coming in. Jo
|
||||
e. Click **Add webhook**.
|
||||
|
||||
.. image:: ../common/images/webhooks-gitlab-repo-add-webhook.png
|
||||
:alt: Integrations page
|
||||
|
||||
|
||||
5. After your webhook is configured, it displays in the list of Project Webhooks for your repo, along with the ability to test events, edit or delete the webhook. Testing a webhook event displays the results at the top of the page whether it succeeded or failed.
|
||||
@@ -170,5 +185,7 @@ Payload output
|
||||
The entire payload is exposed as an extra variable. To view the payload information, go to the Jobs Detail view of the job template that ran with the webhook enabled. In the **Extra Variables** field of the Details pane, view the payload output from the ``awx_webhook_payload`` variable, as shown in the example below.
|
||||
|
||||
.. image:: ../common/images/webhooks-jobs-extra-vars-payload.png
|
||||
:alt: Details page with payload output
|
||||
|
||||
.. image:: ../common/images/webhooks-jobs-extra-vars-payload-expanded.png
|
||||
:alt: Variables field expanded view
|
||||
|
||||
@@ -1,25 +1,22 @@
|
||||
.. _ug_workflows:
|
||||
|
||||
|
||||
Workflows
|
||||
============
|
||||
|
||||
.. index::
|
||||
single: workflows
|
||||
|
||||
Workflows allow you to configure a sequence of disparate job templates (or workflow templates) that may or may not share inventory, playbooks, or permissions. However, workflows have ‘admin’ and ‘execute’ permissions, similar to job templates. A workflow accomplishes the task of tracking the full set of jobs that were part of the release process as a single unit.
|
||||
|
||||
Workflows allow you to configure a sequence of disparate job templates (or workflow templates) that may or may not share inventory, playbooks, or permissions. However, workflows have ‘admin’ and ‘execute’ permissions, similar to job templates. A workflow accomplishes the task of tracking the full set of jobs that were part of the release process as a single unit.
|
||||
|
||||
Job or workflow templates are linked together using a graph-like structure called nodes. These nodes can be jobs, project syncs, or inventory syncs. A template can be part of different workflows or used multiple times in the same workflow. A copy of the graph structure is saved to a workflow job when you launch the workflow.
|
||||
|
||||
The example below shows a workflow that contains all three, as well as a workflow job template:
|
||||
|
||||
.. image:: ../common/images/wf-node-all-scenarios-wf-in-wf.png
|
||||
|
||||
:alt: Workflow Node All Scenarios
|
||||
|
||||
As the workflow runs, jobs are spawned from the node's linked template. Nodes linking to a job template which has prompt-driven fields (``job_type``, ``job_tags``, ``skip_tags``, ``limit``) can contain those fields, and will not be prompted on launch. Job templates with promptable credential and/or inventory, WITHOUT defaults, will not be available for inclusion in a workflow.
|
||||
|
||||
|
||||
Workflow scenarios and considerations
|
||||
----------------------------------------
|
||||
|
||||
@@ -28,34 +25,38 @@ Consider the following scenarios for building workflows:
|
||||
- A root node is set to ALWAYS by default and is not editable.
|
||||
|
||||
.. image:: ../common/images/wf-root-node-always.png
|
||||
:alt: Root Node Always
|
||||
|
||||
- A node can have multiple parents, and children may be linked to any of the states of success, failure, or always. If always, then the state is neither success nor failure. States apply at the node level, not at the workflow job template level. A workflow job will be marked as successful unless it is canceled or encounters an error.
|
||||
- A node can have multiple parents, and children may be linked to any of the states of success, failure, or always. If always, then the state is neither success nor failure. States apply at the node level, not at the workflow job template level. A workflow job will be marked as successful unless it is canceled or encounters an error.
|
||||
|
||||
.. image:: ../common/images/wf-sibling-nodes-all-edge-types.png
|
||||
:alt: Sibling Nodes All Edge Types
|
||||
|
||||
- If you remove a job or workflow template within the workflow, the node(s) previously connected to those deleted automatically get connected upstream and retain their edge type, as in the example below:
|
||||
|
||||
.. image:: ../common/images/wf-node-delete-scenario.png
|
||||
:alt: Node Delete Scenario
|
||||
|
||||
- You could have a convergent workflow, where multiple jobs converge into one. In this scenario, any of the jobs or all of them must complete before the next one runs, as shown in the example below:
|
||||
- You could have a convergent workflow, where multiple jobs converge into one. In this scenario, any of the jobs or all of them must complete before the next one runs, as shown in the example below:
|
||||
|
||||
.. image:: ../common/images/wf-node-convergence.png
|
||||
.. image:: ../common/images/wf-node-convergence.png
|
||||
:alt: Node Convergence
|
||||
|
||||
In the example provided, AWX runs the first two job templates in parallel. When they both finish and succeed as specified, the 3rd downstream (:ref:`convergence node <convergence_node>`), will trigger.
|
||||
|
||||
- Prompts for inventory and surveys will apply to workflow nodes in workflow job templates.
|
||||
|
||||
- If you launch from the API, running a ``get`` command displays a list of warnings and highlights missing components. The basic workflow for a workflow job template is illustrated below.
|
||||
- If you launch from the API, running a ``get`` command displays a list of warnings and highlights missing components. The basic workflow for a workflow job template is illustrated below.
|
||||
|
||||
.. image:: ../common/images/workflow-diagram.png
|
||||
:alt: Workflow Diagram
|
||||
|
||||
- It is possible to launch several workflows simultaneously, and set a schedule for when to launch them. You can set notifications on workflows, such as when a job completes, similar to that of job templates.
|
||||
- It is possible to launch several workflows simultaneously, and set a schedule for when to launch them. You can set notifications on workflows, such as when a job completes, similar to that of job templates.
|
||||
|
||||
.. note::
|
||||
|
||||
.. include:: ../common/job-slicing-rule.rst
|
||||
|
||||
|
||||
- You can build a recursive workflow, but if AWX detects an error, it will stop at the time the nested workflow attempts to run.
|
||||
|
||||
- Artifacts gathered in jobs in the sub-workflow will be passed to downstream nodes.
|
||||
@@ -70,7 +71,6 @@ In the example provided, AWX runs the first two job templates in parallel. When
|
||||
|
||||
- In a workflow convergence scenario, ``set_stats`` data will be merged in an undefined way, so it is recommended that you set unique keys.
|
||||
|
||||
|
||||
Extra Variables
|
||||
----------------
|
||||
|
||||
@@ -83,6 +83,7 @@ Also similar to job templates, workflows use surveys to specify variables to be
|
||||
Workflows utilize the same behavior (hierarchy) of variable precedence as Job Templates with the exception of three additional variables. Refer to the Variable Precedence Hierarchy in the :ref:`ug_jobtemplates_extravars` section of the Job Templates chapter of this guide. The three additional variables include:
|
||||
|
||||
.. image:: ../common/images/Architecture-AWX_Variable_Precedence_Hierarchy-Workflows.png
|
||||
:alt: Variable Precedence Hierarchy
|
||||
|
||||
Workflows included in a workflow will follow the same variable precedence - they will only inherit variables if they are specifically prompted for, or defined as part of a survey.
|
||||
|
||||
@@ -108,7 +109,6 @@ If you use the ``set_stats`` module in your playbook, you can produce results th
|
||||
data:
|
||||
integration_results_url: "{{ (result.stdout|from_json).link }}"
|
||||
|
||||
|
||||
- **use_set_stats.yml**: second playbook in the workflow
|
||||
|
||||
::
|
||||
@@ -121,47 +121,44 @@ If you use the ``set_stats`` module in your playbook, you can produce results th
|
||||
url: "{{ integration_results_url }}"
|
||||
return_content: true
|
||||
register: results
|
||||
|
||||
|
||||
- name: "Output test results"
|
||||
debug:
|
||||
msg: "{{ results.content }}"
|
||||
|
||||
|
||||
The ``set_stats`` module processes this workflow as follows:
|
||||
|
||||
1. The contents of an integration results (example: integration_results.txt below) is first uploaded to the web.
|
||||
1. The contents of an integration results (example: integration_results.txt below) is first uploaded to the web.
|
||||
|
||||
::
|
||||
|
||||
the tests are passing!
|
||||
the tests are passing!
|
||||
|
||||
2. Through the **invoke_set_stats** playbook, ``set_stats`` is then invoked to artifact the URL of the uploaded integration_results.txt into the Ansible variable "integration_results_url".
|
||||
3. The second playbook in the workflow consumes the Ansible extra variable "integration_results_url". It calls out to the web using the ``uri`` module to get the contents of the file uploaded by the previous Job Template Job. Then, it simply prints out the contents of the gotten file.
|
||||
|
||||
.. note::
|
||||
|
||||
For artifacts to work, keep the default setting, ``per_host = False`` in the ``set_stats`` module.
|
||||
.. note::
|
||||
|
||||
For artifacts to work, keep the default setting, ``per_host = False`` in the ``set_stats`` module.
|
||||
|
||||
Workflow States
|
||||
----------------
|
||||
|
||||
The workflow job can have the following states (no Failed state):
|
||||
|
||||
- Waiting
|
||||
- Waiting
|
||||
|
||||
- Running
|
||||
|
||||
- Success (finished)
|
||||
|
||||
- Cancel
|
||||
- Cancel
|
||||
|
||||
- Error
|
||||
|
||||
- Failed
|
||||
|
||||
In the workflow scheme, canceling a job cancels the branch, while canceling the workflow job cancels the entire workflow.
|
||||
|
||||
In the workflow scheme, canceling a job cancels the branch, while canceling the workflow job cancels the entire workflow.
|
||||
|
||||
Role-Based Access Controls
|
||||
-----------------------------
|
||||
@@ -174,6 +171,4 @@ Other tasks such as the ability to make a duplicate copy and re-launch a workflo
|
||||
|
||||
.. ^^
|
||||
|
||||
For more information on performing the tasks described in this section, refer to the :ref:`Administration Guide <ag_start>`.
|
||||
|
||||
|
||||
For more information on performing the tasks described in this section, refer to the :ref:`Administration Guide <ag_start>`.
|
||||
|
||||
@@ -15,7 +15,7 @@ There are two methods you can use to get the next release version. The manual wa
|
||||
Log into your github account, under your user icon go to Settings => Developer Settings => Personal Access Tokens => Tokens (classic).
|
||||
Select the Generate new token => Generate new token (classic)
|
||||
Fill in the note, select no scopes select "Generate token".
|
||||
Copy the token and create a file in your awx repo called `.github_creds`. Enter the token in this file.
|
||||
Copy the token and create a file at `~/.github_creds` or in your awx repo as `.github_creds`. Enter the token in this file.
|
||||
Run `./tools/scripts/get_next_release.py`
|
||||
This will use your token to go query for the PRs in the release and scan their bodies to select X/Y/Z and suggest new versions and spit out notifications.
|
||||
|
||||
@@ -149,7 +149,7 @@ Send notifications to the following groups:
|
||||
* AWX Mailing List
|
||||
* #social:ansible.com IRC (@newsbot for inclusion in bullhorn)
|
||||
* #awx:ansible.com (no @newsbot in this room)
|
||||
* #ansible-controller slack channel
|
||||
* #ansible-controller slack channel
|
||||
|
||||
These messages are templated out for you in the output of `get_next_release.yml`.
|
||||
|
||||
@@ -169,7 +169,7 @@ Operator hub PRs are generated via an Ansible Playbook. See someone on the AWX t
|
||||
* [kustomize](https://kustomize.io/)
|
||||
* [opm](https://docs.openshift.com/container-platform/4.9/cli_reference/opm/cli-opm-install.html)
|
||||
|
||||
3. Download the script from https://gist.github.com/rooftopcellist/0e232f26666dee45be1d8a69270d63c2 into your awx-operator repo as release_operator_hub.sh
|
||||
3. Download the script from https://github.com/ansible/awx-operator/blob/devel/hack/publish-to-operator-hub.sh into your awx-operator repo as release_operator_hub.sh
|
||||
|
||||
4. Make sure you are logged into quay.io with `docker login quay.io`
|
||||
|
||||
|
||||
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
if [ -z $AWX_IGNORE_BLACK ] ; then
|
||||
python_files_changed=$(git diff --cached --name-only --diff-filter=AM | grep -E '\.py$')
|
||||
python_files_changed=$(git diff --cached --name-only --diff-filter=AM awx/ awxkit/ tools/ | grep -E '\.py$')
|
||||
if [ "x$python_files_changed" != "x" ] ; then
|
||||
black --check $python_files_changed || \
|
||||
if [ $? != 0 ] ; then
|
||||
|
||||
@@ -49,24 +49,6 @@ Make sure to delete the old tarball if it is an upgrade.
|
||||
Anything pinned in `*.in` files involves additional manual work in
|
||||
order to upgrade. Some information related to that work is outlined here.
|
||||
|
||||
### Django
|
||||
|
||||
For any upgrade of Django, it must be confirmed that
|
||||
we don't regress on FIPS support before merging.
|
||||
|
||||
See internal integration test knowledge base article `how_to_test_FIPS`
|
||||
for instructions.
|
||||
|
||||
If operating in a FIPS environment, `hashlib.md5()` will raise a `ValueError`,
|
||||
but will support the `usedforsecurity` keyword on RHEL and Centos systems.
|
||||
|
||||
Keep an eye on https://code.djangoproject.com/ticket/28401
|
||||
|
||||
The override of `names_digest` could easily be broken in a future version.
|
||||
Check that the import remains the same in the desired version.
|
||||
|
||||
https://github.com/django/django/blob/af5ec222ccd24e81f9fec6c34836a4e503e7ccf7/django/db/backends/base/schema.py#L7
|
||||
|
||||
### django-split-settings
|
||||
|
||||
When we attemed to upgrade past 1.0.0 the build process in GitHub failed on the docker build step with the following error:
|
||||
@@ -172,4 +154,3 @@ available on PyPi with source distribution.
|
||||
|
||||
Version 4.8 makes us a little bit nervous with changes to `searchwindowsize` https://github.com/pexpect/pexpect/pull/579/files
|
||||
Pin to `pexpect==4.7.x` until we have more time to move to `4.8` and test.
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ cryptography>=41.0.2 # CVE-2023-38325
|
||||
Cython<3 # Since the bump to PyYAML 5.4.1 this is now a mandatory dep
|
||||
daphne
|
||||
distro
|
||||
django==4.2.3 # see UPGRADE BLOCKERs CVEs were identified in 4.2, pinning to .3
|
||||
django==4.2.6 # CVE-2023-43665
|
||||
django-auth-ldap
|
||||
django-cors-headers
|
||||
django-crum
|
||||
@@ -42,11 +42,11 @@ pygerduty
|
||||
pyopenssl>=23.2.0 # resolve dep conflict from cryptography pin above
|
||||
pyparsing==2.4.6 # Upgrading to v3 of pyparsing introduce errors on smart host filtering: Expected 'or' term, found 'or' (at char 15), (line:1, col:16)
|
||||
python-daemon>3.0.0
|
||||
python-dsv-sdk
|
||||
python-tss-sdk==1.2.1
|
||||
python-dsv-sdk>=1.0.4
|
||||
python-tss-sdk>=1.2.1
|
||||
python-ldap
|
||||
pyyaml>=6.0.1
|
||||
receptorctl==1.3.0
|
||||
receptorctl
|
||||
social-auth-core[openidconnect]==4.3.0 # see UPGRADE BLOCKERs
|
||||
social-auth-app-django==5.0.0 # see UPGRADE BLOCKERs
|
||||
sqlparse >= 0.4.4 # Required by django https://github.com/ansible/awx/security/dependabot/96
|
||||
|
||||
@@ -93,8 +93,6 @@ daphne==3.0.2
|
||||
# via
|
||||
# -r /awx_devel/requirements/requirements.in
|
||||
# channels
|
||||
dataclasses==0.6
|
||||
# via python-dsv-sdk
|
||||
defusedxml==0.7.1
|
||||
# via
|
||||
# python3-openid
|
||||
@@ -103,7 +101,7 @@ deprecated==1.2.13
|
||||
# via jwcrypto
|
||||
distro==1.8.0
|
||||
# via -r /awx_devel/requirements/requirements.in
|
||||
django==4.2.3
|
||||
django==4.2.6
|
||||
# via
|
||||
# -r /awx_devel/requirements/requirements.in
|
||||
# channels
|
||||
@@ -323,7 +321,7 @@ python-dateutil==2.8.2
|
||||
# botocore
|
||||
# kubernetes
|
||||
# receptorctl
|
||||
python-dsv-sdk==1.0.1
|
||||
python-dsv-sdk==1.0.4
|
||||
# via -r /awx_devel/requirements/requirements.in
|
||||
python-jose==3.3.0
|
||||
# via social-auth-core
|
||||
@@ -351,7 +349,7 @@ pyyaml==6.0.1
|
||||
# djangorestframework-yaml
|
||||
# kubernetes
|
||||
# receptorctl
|
||||
receptorctl==1.3.0
|
||||
receptorctl==1.4.2
|
||||
# via -r /awx_devel/requirements/requirements.in
|
||||
redis==4.3.5
|
||||
# via -r /awx_devel/requirements/requirements.in
|
||||
|
||||
@@ -15,6 +15,4 @@ fi
|
||||
|
||||
set -e
|
||||
|
||||
wait-for-migrations
|
||||
|
||||
exec supervisord -c /etc/supervisord_web.conf
|
||||
|
||||
@@ -7,7 +7,6 @@ readonly CMDNAME=$(basename "$0")
|
||||
|
||||
readonly MIN_SLEEP=0.5
|
||||
readonly MAX_SLEEP=30
|
||||
readonly ATTEMPTS=30
|
||||
readonly TIMEOUT=60
|
||||
|
||||
log_message() { echo "[${CMDNAME}]" "$@" >&2; }
|
||||
@@ -25,7 +24,7 @@ wait_for() {
|
||||
local check=1
|
||||
|
||||
while true; do
|
||||
log_message "Attempt ${attempt} of ${ATTEMPTS}"
|
||||
log_message "Attempt ${attempt}"
|
||||
|
||||
timeout "${TIMEOUT}" \
|
||||
/bin/bash -c "awx-manage check" &>/dev/null
|
||||
@@ -37,8 +36,7 @@ wait_for() {
|
||||
&& return || rc=$?
|
||||
fi
|
||||
|
||||
(( ++attempt > ATTEMPTS )) && break
|
||||
|
||||
attempt=$((attempt + 1))
|
||||
log_message "Waiting ${next_sleep} seconds before next attempt"
|
||||
sleep "${next_sleep}"
|
||||
next_sleep=$(next_sleep ${next_sleep})
|
||||
|
||||
@@ -442,13 +442,11 @@ Now we are ready to configure and plumb OpenLDAP with AWX. To do this we have pr
|
||||
|
||||
Note: The default configuration will utilize the non-tls connection. If you want to use the tls configuration you will need to work through TLS negotiation issues because the LDAP server is using a self signed certificate.
|
||||
|
||||
Before we can run the playbook we need to understand that LDAP will be communicated to from within the AWX container. Because of this, we have to tell AWX how to route traffic to the LDAP container through the `LDAP Server URI` settings. The playbook requires a variable called container_reference to be set. The container_reference variable needs to be how your AWX container will be able to talk to the LDAP container. See the SAML section for some examples for how to select a `container_reference`.
|
||||
|
||||
Once you have your container reference you can run the playbook like:
|
||||
You can run the playbook like:
|
||||
```bash
|
||||
export CONTROLLER_USERNAME=<your username>
|
||||
export CONTROLLER_PASSWORD=<your password>
|
||||
ansible-playbook tools/docker-compose/ansible/plumb_ldap.yml -e container_reference=<your container_reference here>
|
||||
ansible-playbook tools/docker-compose/ansible/plumb_ldap.yml
|
||||
```
|
||||
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
vars:
|
||||
new_error_page:
|
||||
error_code: "{{ item | basename() | regex_replace('custom_(\\d+).html', '\\1') }}"
|
||||
web_path: "{{ item | regex_replace('^.*\/static', '/static') }}"
|
||||
web_path: "{{ item | regex_replace('^.*/static', '/static') }}"
|
||||
loop: "{{ lookup('ansible.builtin.fileglob', playbook_dir + '/../../../awx/static/custom_*.html', wantlist=True) }}"
|
||||
when: (item | basename()) is regex("custom_\d+\.html")
|
||||
|
||||
|
||||
@@ -11,23 +11,6 @@
|
||||
- name: Test that the development environment is able to launch a job
|
||||
hosts: localhost
|
||||
tasks:
|
||||
- name: Boot the development environment
|
||||
command: |
|
||||
make docker-compose
|
||||
environment:
|
||||
COMPOSE_UP_OPTS: -d
|
||||
args:
|
||||
chdir: "{{ playbook_dir }}/../../../"
|
||||
|
||||
# Takes a while for migrations to finish
|
||||
- name: Wait for the dev environment to be ready
|
||||
uri:
|
||||
url: "http://localhost:8013/api/v2/ping/"
|
||||
register: _result
|
||||
until: _result.status == 200
|
||||
retries: 120
|
||||
delay: 5
|
||||
|
||||
- name: Reset admin password
|
||||
shell: |
|
||||
docker exec -i tools_awx_1 bash <<EOSH
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"AUTH_LDAP_1_SERVER_URI": "ldap://{{ container_reference }}:389",
|
||||
"AUTH_LDAP_1_SERVER_URI": "ldap://ldap:1389",
|
||||
"AUTH_LDAP_1_BIND_DN": "cn=admin,dc=example,dc=org",
|
||||
"AUTH_LDAP_1_BIND_PASSWORD": "admin",
|
||||
"AUTH_LDAP_1_START_TLS": false,
|
||||
|
||||
@@ -3,17 +3,15 @@
|
||||
missing_modules = []
|
||||
try:
|
||||
import requests
|
||||
except:
|
||||
except ImportError:
|
||||
missing_modules.append('requests')
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
try:
|
||||
import semantic_version
|
||||
except:
|
||||
except ImportError:
|
||||
missing_modules.append('semantic_version')
|
||||
|
||||
if len(missing_modules) > 0:
|
||||
@@ -55,7 +53,7 @@ def getNextReleases():
|
||||
try:
|
||||
if a_pr['html_url'] in pr_votes:
|
||||
continue
|
||||
except:
|
||||
except KeyError:
|
||||
print("Unable to check on PR")
|
||||
print(json.dumps(a_pr, indent=4))
|
||||
sys.exit(255)
|
||||
@@ -133,14 +131,17 @@ def getNextReleases():
|
||||
# Load the users session information
|
||||
#
|
||||
session = requests.Session()
|
||||
try:
|
||||
print("Loading credentials")
|
||||
with open(".github_creds", "r") as f:
|
||||
password = f.read().strip()
|
||||
session.headers.update({'Authorization': 'bearer {}'.format(password), 'Accept': 'application/vnd.github.v3+json'})
|
||||
except Exception:
|
||||
print("Failed to load credentials from ./.github_creds")
|
||||
sys.exit(255)
|
||||
|
||||
print("Loading credentials")
|
||||
CREDS_LOCATIONS = ('.github_creds', '~/.github_creds')
|
||||
for creds_loc in CREDS_LOCATIONS:
|
||||
if os.path.exists(os.path.expanduser(creds_loc)):
|
||||
with open(os.path.expanduser(creds_loc), "r") as f:
|
||||
password = f.read().strip()
|
||||
session.headers.update({'Authorization': 'bearer {}'.format(password), 'Accept': 'application/vnd.github.v3+json'})
|
||||
break
|
||||
else:
|
||||
raise Exception(f'Could not location github token in locations {CREDS_LOCATIONS}')
|
||||
|
||||
versions = {
|
||||
'current': {},
|
||||
|
||||
@@ -74,3 +74,24 @@ class Controller(Plugin, RedHatPlugin):
|
||||
self.add_forbidden_path(path)
|
||||
|
||||
self.add_cmd_output(SOSREPORT_CONTROLLER_COMMANDS)
|
||||
|
||||
def postproc(self):
|
||||
# remove database password
|
||||
jreg = r"(\s*\'PASSWORD\'\s*:(\s))(?:\"){1,}(.+)(?:\"){1,}"
|
||||
repl = r"\1********"
|
||||
self.do_path_regex_sub("/etc/tower/conf.d/postgres.py", jreg, repl)
|
||||
|
||||
# remove email password
|
||||
jreg = r"(EMAIL_HOST_PASSWORD\s*=)\'(.+)\'"
|
||||
repl = r"\1********"
|
||||
self.do_path_regex_sub("/etc/tower/settings.py", jreg, repl)
|
||||
|
||||
# remove email password (if customized)
|
||||
jreg = r"(EMAIL_HOST_PASSWORD\s*=)\'(.+)\'"
|
||||
repl = r"\1********"
|
||||
self.do_path_regex_sub("/etc/tower/conf.d/custom.py", jreg, repl)
|
||||
|
||||
# remove websocket secret
|
||||
jreg = r"(BROADCAST_WEBSOCKET_SECRET\s*=\s*)\"(.+)\""
|
||||
repl = r"\1********"
|
||||
self.do_path_regex_sub("/etc/tower/conf.d/channels.py", jreg, repl)
|
||||
|
||||
14
tox.ini
14
tox.ini
@@ -19,8 +19,20 @@ commands =
|
||||
select = F401,F402,F821,F823,F841,F811,E265,E266,F541,W605,E722,F822,F523,W291,F405
|
||||
exclude = awx/ui/node_modules,awx/ui/node_modules,env,awx_collection_build
|
||||
|
||||
[testenv:pip-compile-docs]
|
||||
description = Compile docs build lockfiles
|
||||
deps =
|
||||
# pip-tools config file support was introduced in v7
|
||||
pip-tools >= 7
|
||||
commands =
|
||||
{envpython} -m piptools compile \
|
||||
--output-file=docs/docsite/requirements.txt \
|
||||
docs/docsite/requirements.in
|
||||
|
||||
[testenv:docs]
|
||||
description = Build documentation
|
||||
deps = -r{toxinidir}/docs/docsite/requirements.txt
|
||||
deps =
|
||||
-r{toxinidir}/docs/docsite/requirements.in
|
||||
-c{toxinidir}/docs/docsite/requirements.txt
|
||||
commands =
|
||||
sphinx-build -T -E -W -n --keep-going {tty:--color} -j auto -c docs/docsite -d docs/docsite/build/doctrees -b html docs/docsite/rst docs/docsite/build/html
|
||||
|
||||
Reference in New Issue
Block a user