Compare commits


1 Commit

Author SHA1 Message Date
David O Neill   e0acd9b111   Change failing PR to draft   2024-02-12 16:15:47 +00:00
39 changed files with 238 additions and 361 deletions

View File

@@ -2,10 +2,12 @@
name: Feature branch deletion cleanup
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: delete
on:
delete:
branches:
- feature_**
jobs:
branch_delete:
if: ${{ github.event.ref_type == 'branch' && startsWith(github.event.ref, 'feature_') }}
push:
runs-on: ubuntu-latest
timeout-minutes: 20
permissions:
@@ -20,4 +22,6 @@ jobs:
run: |
ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
ansible localhost -c local -m aws_s3 \
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delobj permission=public-read"
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"

View File

@@ -24,6 +24,38 @@ jobs:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/pr_labeler.yml
convert-to-draft:
runs-on: ubuntu-latest
name: Change failing PRs to draft
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Set up Node.js
uses: actions/setup-node@v3
with:
node-version: 14
- name: Install dependencies
run: npm install -g github
- name: Check CI status
id: check-ci
run: |
status=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
-H "Accept: application/vnd.github.v3+json" \
https://api.github.com/repos/${{ github.repository }}/commits/${{ github.sha }}/check-suites | \
jq -r '.check_suites[0].conclusion')
echo "CI Status: $status"
echo "::set-output name=ci_status::$status"
- name: Convert to Draft on CI Failure
if: steps.check-ci.outputs.ci_status == 'failure'
run: gh pr edit ${{ github.event.number }} --draft
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
community:
runs-on: ubuntu-latest
timeout-minutes: 20
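The new check-ci step reads the first check-suite conclusion for the commit through the GitHub REST API (note that ::set-output is deprecated on newer runners in favor of writing to $GITHUB_OUTPUT). A rough Python equivalent of the same query, useful for trying the call outside of Actions, assuming the requests package and GITHUB_TOKEN, GITHUB_REPOSITORY, and GITHUB_SHA in the environment:

    import os
    import requests

    # Fetch the check-suites for a commit and print the first suite's conclusion,
    # mirroring the curl + jq pipeline in the check-ci step above.
    repo = os.environ["GITHUB_REPOSITORY"]   # e.g. "ansible/awx"
    sha = os.environ["GITHUB_SHA"]
    resp = requests.get(
        f"https://api.github.com/repos/{repo}/commits/{sha}/check-suites",
        headers={
            "Authorization": f"token {os.environ['GITHUB_TOKEN']}",
            "Accept": "application/vnd.github.v3+json",
        },
        timeout=30,
    )
    resp.raise_for_status()
    suites = resp.json().get("check_suites", [])
    # conclusion is null (None) while a suite is still running
    print("CI Status:", suites[0]["conclusion"] if suites else None)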

View File

@@ -86,19 +86,13 @@ jobs:
-e push=yes \
-e awx_official=yes
- name: Log into registry ghcr.io
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log in to GHCR
run: |
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Log into registry quay.io
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USER }}
password: ${{ secrets.QUAY_TOKEN }}
- name: Log in to Quay
run: |
echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
- name: tag awx-ee:latest with version input
run: |
@@ -106,13 +100,13 @@ jobs:
docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
- name: Stage awx-operator image
- name: Build and stage awx-operator
working-directory: awx-operator
run: |
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version}} \
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
IMG=ghcr.io/${{ github.repository_owner }}/awx-operator:${{ github.event.inputs.operator_version }} \
make docker-buildx
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }} \
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
IMAGE_TAG_BASE=ghcr.io/${{ github.repository_owner }}/awx-operator \
VERSION=${{ github.event.inputs.operator_version }} make docker-build docker-push
- name: Run test deployment with awx-operator
working-directory: awx-operator

.gitignore (vendored, 3 lines changed)
View File

@@ -169,6 +169,3 @@ awx/ui_next/build
# Docs build stuff
docs/docsite/build/
_readthedocs/
# Pyenv
.python-version

View File

@@ -10,7 +10,7 @@ KIND_BIN ?= $(shell which kind)
CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py 2> /dev/null)
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py)
# ansible-test requires a semver compatible version, so we allow overrides to hack it
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
@@ -586,35 +586,12 @@ docker-compose-build: Dockerfile.dev
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
# ## Build awx_devel image for docker compose development environment for multiple architectures
# docker-compose-buildx: Dockerfile.dev
# DOCKER_BUILDKIT=1 docker build \
# -f Dockerfile.dev \
# -t $(DEVEL_IMAGE_NAME) \
# --build-arg BUILDKIT_INLINE_CACHE=1 \
# --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
## Build awx_devel image for docker compose development environment for multiple architectures
# PLATFORMS defines the target platforms the image is built for, providing support for multiple
# architectures (i.e. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to:
# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
# - have BuildKit enabled. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
# - be able to push the image to your registry (i.e. if you do not provide a valid value via IMG=<myregistry/image:<tag>> the export will fail)
# To properly provide solutions that support more than one platform, you should use this option.
PLATFORMS ?= linux/amd64,linux/arm64 # linux/ppc64le,linux/s390x
.PHONY: docker-compose-buildx
docker-compose-buildx: Dockerfile.dev ## Build and push docker image for the manager for cross-platform support
- docker buildx create --name project-v3-builder
docker buildx use project-v3-builder
- docker buildx build --push $(BUILD_ARGS) --platform=$(PLATFORMS) --tag $(DEVEL_IMAGE_NAME) -f Dockerfile.dev .
- docker buildx rm project-v3-builder
docker-clean:
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker volume rm -f tools_awx_db tools_vault_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker-refresh: docker-clean docker-compose

View File

@@ -105,11 +105,7 @@ def create_listener_connection():
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
conf['OPTIONS'][k] = v
# Allow password-less authentication
if 'PASSWORD' in conf:
conf['OPTIONS']['password'] = conf.pop('PASSWORD')
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} port={conf['PORT']}"
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} password={conf['PASSWORD']} port={conf['PORT']}"
return psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])
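For reference, a self-contained sketch of the same connection pattern with psycopg 3: the libpq DSN is assembled from a Django-style settings dict, and any PASSWORD (plus the remaining OPTIONS) is passed as keyword connection parameters rather than embedded in the DSN (all values below are placeholders):

    import psycopg

    # Placeholder settings in the shape of a Django DATABASES entry.
    conf = {
        "NAME": "awx",
        "HOST": "localhost",
        "USER": "awx",
        "PORT": 5432,
        "PASSWORD": "example",            # optional; omit for password-less auth
        "OPTIONS": {"sslmode": "prefer"},
    }

    # Keep the DSN itself password-free; pass the password as a parameter instead.
    if "PASSWORD" in conf:
        conf["OPTIONS"]["password"] = conf.pop("PASSWORD")

    dsn = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} port={conf['PORT']}"
    connection = psycopg.connect(dsn, autocommit=True, **conf["OPTIONS"])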

View File

@@ -162,7 +162,7 @@ class AWXConsumerRedis(AWXConsumerBase):
class AWXConsumerPG(AWXConsumerBase):
def __init__(self, *args, schedule=None, **kwargs):
super().__init__(*args, **kwargs)
self.pg_max_wait = getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE)
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE
# if no successful loops have run since startup, then we should fail right away
self.pg_is_down = True # set so that we fail if we get database errors on startup
init_time = time.time()

View File

@@ -5,7 +5,6 @@ from copy import deepcopy
import datetime
import logging
import json
import traceback
from django.db import models
from django.conf import settings
@@ -485,29 +484,14 @@ class JobNotificationMixin(object):
if msg_template:
try:
msg = env.from_string(msg_template).render(**context)
except (TemplateSyntaxError, UndefinedError, SecurityError) as e:
msg = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))])
except (TemplateSyntaxError, UndefinedError, SecurityError):
msg = ''
if body_template:
try:
body = env.from_string(body_template).render(**context)
except (TemplateSyntaxError, UndefinedError, SecurityError) as e:
body = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))])
# https://datatracker.ietf.org/doc/html/rfc2822#section-2.2
# Body should have at least 2 CRLFs; some clients will interpret
# an email with a blank body incorrectly, so we check for that
if len(body.strip().splitlines()) <= 2:
# blank body
body = '\r\n'.join(
[
"The template rendering return a blank body.",
"Please check the template.",
"Refer to https://github.com/ansible/awx/issues/13983",
"for further information.",
]
)
except (TemplateSyntaxError, UndefinedError, SecurityError):
body = ''
return (msg, body)
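Both templates are rendered in a sandboxed Jinja2 environment, with rendering failures and effectively blank bodies replaced by safe fallbacks. A simplified, self-contained sketch of that pattern (the helper name and example context are illustrative, not the exact AWX code):

    from jinja2.exceptions import SecurityError, TemplateSyntaxError, UndefinedError
    from jinja2.sandbox import SandboxedEnvironment

    # Notice text mirroring the fallback used in the hunk above (abridged).
    BLANK_BODY_NOTICE = "\r\n".join(
        [
            "The template rendering return a blank body.",
            "Please check the template.",
        ]
    )

    def render_notification(msg_template, body_template, context):
        """Render subject and body, falling back safely when a template is bad."""
        env = SandboxedEnvironment()
        try:
            msg = env.from_string(msg_template).render(**context)
        except (TemplateSyntaxError, UndefinedError, SecurityError):
            msg = ''
        try:
            body = env.from_string(body_template).render(**context)
            # RFC 2822: a body should contain at least two CRLFs; treat anything
            # shorter than a couple of lines as effectively blank.
            if len(body.strip().splitlines()) <= 2:
                body = BLANK_BODY_NOTICE
        except (TemplateSyntaxError, UndefinedError, SecurityError):
            body = ''
        return msg, body

    # A body template that renders to nothing falls back to the blank-body notice.
    print(render_notification("{{ job.name }} finished", "{{ missing }}", {"job": {"name": "demo"}}))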

View File

@@ -4,10 +4,9 @@ import logging
from django.conf import settings
from django.urls import re_path
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from ansible_base.lib.channels.middleware import DrfAuthMiddlewareStack
from . import consumers
@@ -27,50 +26,13 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
super().__init__(*args, **kwargs)
class MultipleURLRouterAdapter:
"""
Django channels doesn't nicely support Auth_1(urls_1), Auth_2(urls_2), ..., Auth_n(urls_n)
This class allows associating a websocket URL with an auth
Ordering matters. The first matching url will be used.
"""
def __init__(self, *auths):
self._auths = [a for a in auths]
async def __call__(self, scope, receive, send):
"""
Loop through the list of passed in URLRouter's (they may or may not be wrapped by auth).
We know we have exhausted the list of URLRouter patterns when we get a
ValueError('No route found for path %s'). When that happens, move onto the next
URLRouter.
If the final URLRouter raises an error, re-raise it in the end.
We know that we found a match when no error is raised, end the loop.
"""
last_index = len(self._auths) - 1
for i, auth in enumerate(self._auths):
try:
return await auth.__call__(scope, receive, send)
except ValueError as e:
if str(e).startswith('No route found for path'):
# Only surface the error if on the last URLRouter
if i == last_index:
raise
websocket_urlpatterns = [
re_path(r'api/websocket/$', consumers.EventConsumer.as_asgi()),
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
]
websocket_relay_urlpatterns = [
re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
]
application = AWXProtocolTypeRouter(
{
'websocket': MultipleURLRouterAdapter(
URLRouter(websocket_relay_urlpatterns),
DrfAuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
)
'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
}
)

View File

@@ -29,7 +29,7 @@ class RunnerCallback:
self.safe_env = {}
self.event_ct = 0
self.model = model
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
self.wrapup_event_dispatched = False
self.artifacts_processed = False
self.extra_update_fields = {}

View File

@@ -114,7 +114,7 @@ class BaseTask(object):
def __init__(self):
self.cleanup_paths = []
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
self.runner_callback = self.callback_class(model=self.model)
def update_model(self, pk, _attempt=0, **updates):

View File

@@ -339,7 +339,7 @@ class WebSocketRelayManager(object):
if deleted_remote_hosts:
logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")
await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts])
await asyncio.gather(self.cleanup_offline_host(h) for h in deleted_remote_hosts)
if new_remote_hosts:
logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")

View File

@@ -155,4 +155,4 @@ def test_build_notification_message_undefined(run_module, admin_user, organizati
nt = NotificationTemplate.objects.get(id=result['id'])
body = job.build_notification_message(nt, 'running')
assert 'The template rendering return a blank body' in body[1]
assert '{"started_by": "My Placeholder"}' in body[1]

View File

@@ -90,7 +90,6 @@ setup(
install_requires=[
'PyYAML',
'requests',
'setuptools',
],
python_requires=">=3.8",
extras_require={'formatting': ['jq'], 'websockets': ['websocket-client==0.57.0'], 'crypto': ['cryptography']},

View File

@@ -51,213 +51,6 @@ Prerequisites
- To manage instances from the AWX user interface, you must have System Administrator or System Auditor permissions.
Common topologies
------------------
Instances make up the network of devices that communicate with one another. They are the building blocks of an automation mesh. These building blocks serve as nodes in a mesh topology. There are several kinds of instances:
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Node Type | Description |
+===========+=================================================================================================================+
| Control | Nodes that run persistent Ansible Automation Platform services, and delegate jobs to hybrid and execution nodes |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Hybrid | Nodes that run persistent Ansible Automation Platform services and execute jobs |
| | (not applicable to operator-based installations) |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Hop | Used for relaying across the mesh only |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Execution | Nodes that run jobs delivered from control nodes (jobs submitted from the users Ansible automation) |
+-----------+-----------------------------------------------------------------------------------------------------------------+
Simple topology
~~~~~~~~~~~~~~~~
One of the ways to expand job capacity is to create a standalone execution node that can be added to run alongside the Kubernetes deployment of AWX. These machines will not be a part of the AWX Kubernetes cluster. The control nodes running in the cluster will connect and submit work to these machines via Receptor. The machines are registered in AWX as type "execution" instances, meaning they will only be used to run AWX jobs, not dispatch work or handle web requests as control nodes do.
Hop nodes can be added to sit between the control plane of AWX and standalone execution nodes. These machines will not be a part of the AWX Kubernetes cluster and they will be registered in AWX as node type "hop", meaning they will only handle inbound and outbound traffic for otherwise unreachable nodes in a different or more strict network.
Below is an example of an AWX task pod with two execution nodes. Traffic to execution node 2 flows through a hop node that is set up between it and the control plane.
.. image:: ../common/images/instances_awx_task_pods_hopnode.png
:alt: AWX task pod with a hop node between the control plane of AWX and standalone execution nodes.
An example of a simple topology may look like the following:
.. list-table::
:widths: 20 30 10 20 15
:header-rows: 1
* - Instance type
- Hostname
- Listener port
- Peers from control nodes
- Peers
* - Control plane
- awx-task-65d6d96987-mgn9j
- 27199
- True
- []
* - Hop node
- awx-hop-node
- 27199
- True
- []
* - Execution node
- awx-example.com
- n/a
- False
- ["hop node"]
Mesh topology
~~~~~~~~~~~~~~
Mesh ingress is a feature that allows remote nodes to connect inbound to the control plane. This is especially useful when creating remote nodes in restricted networking environments that disallow inbound traffic.
.. image:: ../common/images/instances_mesh_ingress_topology.png
:alt: Mesh ingress architecture showing the peering relationship between nodes.
An example of a topology that uses mesh ingress may look like the following:
.. list-table::
:widths: 20 30 10 20 15
:header-rows: 1
* - Instance type
- Hostname
- Listener port
- Peers from control nodes
- Peers
* - Control plane
- awx-task-xyz
- 27199
- True
- []
* - Hop node
- awx-hop-node
- 27199
- True
- []
* - Execution node
- awx-example.com
- n/a
- False
- ["hop node"]
To create a mesh ingress for AWX, see the `Mesh Ingress <https://ansible.readthedocs.io/projects/awx-operator/en/latest/user-guide/advanced-configuration/mesh-ingress.html>`_ chapter of the AWX Operator Documentation for information on setting up this type of topology. The last step is to create a remote execution node and add it to an instance group so it can be used for job execution. Whatever execution environment image is used to run a playbook needs to be accessible from your remote execution node, and everything you use in your playbook also needs to be accessible from that node.
.. image:: ../common/images/instances-job-template-using-remote-execution-ig.png
:alt: Job template using the instance group with the execution node to run jobs.
.. _ag_instances_add:
Add an instance
----------------
To create an instance in AWX:
1. Click **Instances** from the left side navigation menu of the AWX UI.
2. In the Instances list view, click the **Add** button and the Create new Instance window opens.
.. image:: ../common/images/instances_create_new.png
:alt: Create a new instance form.
An instance has several attributes that may be configured:
- Enter a fully qualified domain name (ping-able DNS) or IP address for your instance in the **Host Name** field (required). This field is equivalent to ``hostname`` in the API.
- Optionally enter a **Description** for the instance
- The **Instance State** field is auto-populated, indicating that it is being installed, and cannot be modified
- Optionally specify the **Listener Port** for the receptor to listen on for incoming connections. This is an open port on the remote machine used to establish inbound TCP connections. This field is equivalent to ``listener_port`` in the API.
- Select from the options in **Instance Type** field to specify the type you want to create. Only execution and hop nodes can be created as operator-based installations do not support hybrid nodes. This field is equivalent to ``node_type`` in the API.
- In the **Peers** field, select the instance hostnames you want your new instance to connect outbound to.
- In the **Options** fields:
- Check the **Enable Instance** box to make it available for jobs to run on an execution node.
- Check the **Managed by Policy** box to allow policy to dictate how the instance is assigned.
- Check the **Peers from control nodes** box to allow control nodes to peer to this instance automatically. Listener port needs to be set if this is enabled or the instance is a peer.
3. Once the attributes are configured, click **Save** to proceed.
Upon successful creation, the Details page of the newly created instance opens.
.. image:: ../common/images/instances_create_details.png
:alt: Details of the newly created instance.
.. note::
The following steps 4-8 are intended to be run from any computer that has SSH access to the newly created instance.
4. Click the download button next to the **Install Bundle** field to download the tarball that contains the files AWX needs to make proper TCP connections to the remote machine.
.. image:: ../common/images/instances_install_bundle.png
:alt: Instance details showing the Download button in the Install Bundle field of the Details tab.
5. Extract the downloaded ``tar.gz`` file in the location where you downloaded it. The install bundle contains TLS certificates and keys, a certificate authority, and a proper Receptor configuration file. To place these files in the correct locations on the remote machine, the install bundle includes an ``install_receptor.yml`` playbook. The playbook requires the Receptor collection, which can be obtained via:
::
ansible-galaxy collection install -r requirements.yml
6. Before running the ``ansible-playbook`` command, edit the following fields in the ``inventory.yml`` file:
- ``ansible_user`` with the username running the installation
- ``ansible_ssh_private_key_file`` to contain the filename of the private key used to connect to the instance
::
---
all:
hosts:
remote-execution:
ansible_host: <hostname>
ansible_user: <username> # user provided
ansible_ssh_private_key_file: ~/.ssh/id_rsa
The content of the ``inventory.yml`` file serves as a template and contains variables for roles that are applied during the installation and configuration of a receptor node in a mesh topology. You may modify some of the other fields, or replace the file in its entirety for advanced scenarios. Refer to `Role Variables <https://github.com/ansible/receptor-collection/blob/main/README.md>`_ for more information on each variable.
7. Save the file to continue.
8. Run the following command on the machine you want to update your mesh:
::
ansible-playbook -i inventory.yml install_receptor.yml
Wait a few minutes for the periodic AWX task to do a health check against the new instance. You may run a health check by selecting the node and clicking the **Run health check** button from its Details page at any time. Once the instances endpoint or page reports a "Ready" status for the instance, jobs are now ready to run on this machine!
9. To view other instances within the same topology or associate peers, click the **Peers** tab.
.. image:: ../common/images/instances_peers_tab.png
:alt: "Peers" tab showing two peers.
To associate peers with your node, click the **Associate** button to open a dialog box of instances eligible for peering.
.. image:: ../common/images/instances_associate_peer.png
:alt: Instances available to peer with the example hop node.
Execution nodes can peer with either hop nodes or other execution nodes. Hop nodes can only peer with execution nodes unless you check the **Peers from control nodes** check box from the **Options** field.
.. note::
If you associate or disassociate a peer, a notification will inform you to re-run the install bundle from the Peer Detail view (the :ref:`ag_topology_viewer` has the download link).
.. image:: ../common/images/instances_associate_peer_reinstallmsg.png
:alt: Notification to re-run the installation bundle due to change in the peering.
You can remove an instance by clicking **Remove** in the Instances page, or by setting the instance ``node_state = deprovisioning`` via the API. Upon deleting, a pop-up message will appear to notify that you may need to re-run the install bundle to make sure things that were removed are no longer connected.
10. To view a graphical representation of your updated topology, refer to the :ref:`ag_topology_viewer` section of this guide.
Manage instances
-----------------
@@ -320,10 +113,153 @@ The example health check shows the status updates with an error on node 'one':
:alt: Health check showing an error in one of the instances.
Add an instance
----------------
One of the ways to expand capacity is to create an instance. Standalone execution nodes can be added to run alongside the Kubernetes deployment of AWX. These machines will not be a part of the AWX Kubernetes cluster. The control nodes running in the cluster will connect and submit work to these machines via Receptor. The machines are registered in AWX as type "execution" instances, meaning they will only be used to run AWX jobs, not dispatch work or handle web requests as control nodes do.
Hop nodes can be added to sit between the control plane of AWX and standalone execution nodes. These machines will not be a part of the AWX Kubernetes cluster and they will be registered in AWX as node type "hop", meaning they will only handle inbound and outbound traffic for otherwise unreachable nodes in a different or more strict network.
Below is an example of an AWX task pod with two execution nodes. Traffic to execution node 2 flows through a hop node that is set up between it and the control plane.
.. image:: ../common/images/instances_awx_task_pods_hopnode.png
:alt: AWX task pod with a hop node between the control plane of AWX and standalone execution nodes.
To create an instance in AWX:
1. Click **Instances** from the left side navigation menu of the AWX UI.
2. In the Instances list view, click the **Add** button and the Create new Instance window opens.
.. image:: ../common/images/instances_create_new.png
:alt: Create a new instance form.
An instance has several attributes that may be configured:
- Enter a fully qualified domain name (ping-able DNS) or IP address for your instance in the **Host Name** field (required). This field is equivalent to ``hostname`` in the API.
- Optionally enter a **Description** for the instance
- The **Instance State** field is auto-populated, indicating that it is being installed, and cannot be modified
- Optionally specify the **Listener Port** for the receptor to listen on for incoming connections. This is an open port on the remote machine used to establish inbound TCP connections. This field is equivalent to ``listener_port`` in the API.
- Select from the options in **Instance Type** field to specify the type you want to create. Only execution and hop nodes can be created as operator-based installations do not support hybrid nodes. This field is equivalent to ``node_type`` in the API.
- In the **Peers** field, select the instance hostnames you want your new instance to connect outbound to.
- In the **Options** fields:
- Check the **Enable Instance** box to make it available for jobs to run on an execution node.
- Check the **Managed by Policy** box to allow policy to dictate how the instance is assigned.
- Check the **Peers from control nodes** box to allow control nodes to peer to this instance automatically. Listener port needs to be set if this is enabled or the instance is a peer.
In the example diagram above, the configurations are as follows:
+------------------+---------------+--------------------------+--------------+
| instance name | listener_port | peers_from_control_nodes | peers |
+==================+===============+==========================+==============+
| execution node 1 | 27199 | true | [] |
+------------------+---------------+--------------------------+--------------+
| hop node | 27199 | true | [] |
+------------------+---------------+--------------------------+--------------+
| execution node 2 | null | false | ["hop node"] |
+------------------+---------------+--------------------------+--------------+
3. Once the attributes are configured, click **Save** to proceed.
Upon successful creation, the Details page of the newly created instance opens.
.. image:: ../common/images/instances_create_details.png
:alt: Details of the newly created instance.
.. note::
The following steps 4-8 are intended to be run from any computer that has SSH access to the newly created instance.
4. Click the download button next to the **Install Bundle** field to download the tarball that contains the files AWX needs to make proper TCP connections to the remote machine.
.. image:: ../common/images/instances_install_bundle.png
:alt: Instance details showing the Download button in the Install Bundle field of the Details tab.
5. Extract the downloaded ``tar.gz`` file in the location where you downloaded it. The install bundle contains TLS certificates and keys, a certificate authority, and a proper Receptor configuration file. To place these files in the correct locations on the remote machine, the install bundle includes an ``install_receptor.yml`` playbook. The playbook requires the Receptor collection, which can be obtained via:
::
ansible-galaxy collection install -r requirements.yml
6. Before running the ``ansible-playbook`` command, edit the following fields in the ``inventory.yml`` file:
- ``ansible_user`` with the username running the installation
- ``ansible_ssh_private_key_file`` to contain the filename of the private key used to connect to the instance
::
---
all:
hosts:
remote-execution:
ansible_host: 18.206.206.34
ansible_user: <username> # user provided
ansible_ssh_private_key_file: ~/.ssh/id_rsa
The content of the ``inventory.yml`` file serves as a template and contains variables for roles that are applied during the installation and configuration of a receptor node in a mesh topology. You may modify some of the other fields, or replace the file in its entirety for advanced scenarios. Refer to `Role Variables <https://github.com/ansible/receptor-collection/blob/main/README.md>`_ for more information on each variable.
7. Save the file to continue.
8. Run the following command on the machine you want to update your mesh:
::
ansible-playbook -i inventory.yml install_receptor.yml
Wait a few minutes for the periodic AWX task to do a health check against the new instance. You may run a health check by selecting the node and clicking the **Run health check** button from its Details page at any time. Once the instances endpoint or page reports a "Ready" status for the instance, jobs are now ready to run on this machine!
9. To view other instances within the same topology or associate peers, click the **Peers** tab.
.. image:: ../common/images/instances_peers_tab.png
:alt: "Peers" tab showing two peers.
To associate peers with your node, click the **Associate** button to open a dialog box of instances eligible for peering.
.. image:: ../common/images/instances_associate_peer.png
:alt: Instances available to peer with the example hop node.
Execution nodes can peer with either hop nodes or other execution nodes. Hop nodes can only peer with execution nodes unless you check the **Peers from control nodes** check box from the **Options** field.
.. note::
If you associate or disassociate a peer, a notification will inform you to re-run the install bundle from the Peer Detail view (the :ref:`ag_topology_viewer` has the download link).
.. image:: ../common/images/instances_associate_peer_reinstallmsg.png
:alt: Notification to re-run the installation bundle due to change in the peering.
You can remove an instance by clicking **Remove** in the Instances page, or by setting the instance ``node_state = deprovisioning`` via the API. Upon deleting, a pop-up message will appear to notify that you may need to re-run the install bundle to make sure things that were removed are no longer connected.
10. To view a graphical representation of your updated topology, refer to the :ref:`ag_topology_viewer` section of this guide.
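Instance registration can also be scripted against the ``/api/v2/instances/`` endpoint instead of using the UI. A minimal sketch with the ``requests`` library, using the field names described above (the URL, token, and values are placeholders for your environment)::

    import requests

    AWX_URL = "https://awx.example.org"        # placeholder AWX URL
    TOKEN = "REPLACE_WITH_AN_OAUTH2_TOKEN"     # placeholder token

    # Register a remote execution node, mirroring the UI fields described above.
    resp = requests.post(
        f"{AWX_URL}/api/v2/instances/",
        headers={"Authorization": f"Bearer {TOKEN}"},
        json={
            "hostname": "awx-example.com",
            "node_type": "execution",
            "listener_port": None,             # set a port such as 27199 when control nodes peer inbound
            "peers_from_control_nodes": False,
            # A "peers" list can also be supplied; check the API browser on your
            # version for the expected format.
        },
    )
    resp.raise_for_status()
    print("created instance id:", resp.json()["id"])

After the POST succeeds, the install bundle and health check flow in steps 4-8 above applies unchanged.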
Using a custom Receptor CA
---------------------------
Refer to the AWX Operator Documentation, `Custom Receptor CA <https://ansible.readthedocs.io/projects/awx-operator/en/latest/user-guide/advanced-configuration/custom-receptor-certs.html>`_ for detail.
The control nodes on the K8S cluster will communicate with execution nodes via mutual TLS TCP connections, running via Receptor. Execution nodes will verify incoming connections by ensuring the x509 certificate was issued by a trusted Certificate Authority (CA).
You may choose to provide your own CA for this validation. If no CA is provided, AWX operator will automatically generate one using OpenSSL.
Given custom ``ca.crt`` and ``ca.key`` stored locally, run the following:
::
kubectl create secret tls awx-demo-receptor-ca \
--cert=/path/to/ca.crt --key=/path/to/ca.key
The secret should be named ``{AWX Custom Resource name}-receptor-ca``. In the above, the AWX Custom Resource name is "awx-demo". Replace "awx-demo" with your AWX Custom Resource name.
If this secret is created after AWX is deployed, run the following to restart the deployment:
::
kubectl rollout restart deployment awx-demo
.. note::
Changing the receptor CA will sever connections to any existing execution nodes. These nodes will enter an *Unavailable* state, and jobs will not be able to run on them. You will need to download and re-run the install bundle for each execution node. This will replace the TLS certificate files with those signed by the new CA. The execution nodes will then appear in a *Ready* state after a few minutes.
Using a private image for the default EE

14 binary image files changed (documentation images); content not shown.

View File

@@ -90,6 +90,19 @@ Glossary
Node
A node corresponds to entries in the instance database model, or the ``/api/v2/instances/`` endpoint, and is a machine participating in the cluster / mesh. The unified jobs API reports ``awx_node`` and ``execution_node`` fields. The execution node is where the job runs, and AWX node interfaces between the job and server functions.
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Node Type | Description |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Control | Nodes that run persistent Ansible Automation Platform services, and delegate jobs to hybrid and execution nodes |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Hybrid | Nodes that run persistent Ansible Automation Platform services and execute jobs |
| | (not applicable to operator-based installations) |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Hop | Used for relaying across the mesh only |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Execution | Nodes that run jobs delivered from control nodes (jobs submitted from the users Ansible automation) |
+-----------+-----------------------------------------------------------------------------------------------------------------+
Notification Template
An instance of a notification type (Email, Slack, Webhook, etc.) with a name, description, and a defined configuration.

View File

@@ -9,7 +9,7 @@ botocore
channels
channels-redis==3.4.1 # see UPGRADE BLOCKERs
cryptography>=41.0.2 # CVE-2023-38325
Cython<3 # this is needed as a build dependency, one day we may have separated build deps
Cython<3 # Since the bump to PyYAML 5.4.1 this is now a mandatory dep
daphne
distro
django==4.2.6 # CVE-2023-43665

View File

@@ -10,6 +10,7 @@ black
pytest!=7.0.0
pytest-cov
pytest-django
pytest-pythonpath
pytest-mock==1.11.1
pytest-timeout
pytest-xdist==1.34.0 # 2.0.0 broke zuul for some reason

View File

@@ -4,9 +4,7 @@ set -ue
requirements_in="$(readlink -f ./requirements.in)"
requirements="$(readlink -f ./requirements.txt)"
requirements_git="$(readlink -f ./requirements_git.txt)"
requirements_dev="$(readlink -f ./requirements_dev.txt)"
pip_compile="pip-compile --no-header --quiet -r --allow-unsafe"
sanitize_git="1"
_cleanup() {
cd /
@@ -23,22 +21,18 @@ generate_requirements() {
# FIXME: https://github.com/jazzband/pip-tools/issues/1558
${venv}/bin/python3 -m pip install -U 'pip<22.0' pip-tools
${pip_compile} $1 --output-file requirements.txt
${pip_compile} "${requirements_in}" "${requirements_git}" --output-file requirements.txt
# consider the git requirements for purposes of resolving deps
# Then remove any git+ lines from requirements.txt
if [[ "$sanitize_git" == "1" ]] ; then
while IFS= read -r line; do
if [[ $line != \#* ]]; then # ignore comments
sed -i "\!${line%#*}!d" requirements.txt
fi
done < "${requirements_git}"
fi;
while IFS= read -r line; do
if [[ $line != \#* ]]; then # ignore comments
sed -i "\!${line%#*}!d" requirements.txt
fi
done < "${requirements_git}"
}
main() {
base_dir=$(pwd)
dest_requirements="${requirements}"
input_requirements="${requirements_in} ${requirements_git}"
_tmp=$(python -c "import tempfile; print(tempfile.mkdtemp(suffix='.awx-requirements', dir='/tmp'))")
@@ -48,12 +42,6 @@ main() {
"run")
NEEDS_HELP=0
;;
"dev")
dest_requirements="${requirements_dev}"
input_requirements="${requirements_dev}"
sanitize_git=0
NEEDS_HELP=0
;;
"upgrade")
NEEDS_HELP=0
pip_compile="${pip_compile} --upgrade"
@@ -73,13 +61,12 @@ main() {
echo "This script generates requirements.txt from requirements.in and requirements_git.in"
echo "It should be run from within the awx container"
echo ""
echo "Usage: $0 [run|upgrade|dev]"
echo "Usage: $0 [run|upgrade]"
echo ""
echo "Commands:"
echo "help Print this message"
echo "run Run the process only upgrading pinned libraries from requirements.in"
echo "upgrade Upgrade all libraries to latest while respecting pinnings"
echo "dev Pin the development requirements file"
echo ""
exit
fi
@@ -98,10 +85,10 @@ main() {
cp -vf requirements.txt "${_tmp}"
cd "${_tmp}"
generate_requirements "${input_requirements}"
generate_requirements
echo "Changing $base_dir to /awx_devel/requirements"
cat requirements.txt | sed "s:$base_dir:/awx_devel/requirements:" > "${dest_requirements}"
cat requirements.txt | sed "s:$base_dir:/awx_devel/requirements:" > "${requirements}"
_cleanup
}

View File

@@ -22,7 +22,6 @@ RUN rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
RUN dnf -y update && dnf install -y 'dnf-command(config-manager)' && \
dnf config-manager --set-enabled crb && \
dnf -y install \
iputils \
gcc \
gcc-c++ \
git-core \

View File

@@ -67,7 +67,7 @@
- name: Get OS info for sdb
shell: |
docker info 2> /dev/null | awk '/Os:/ { gsub(/Os:/, "Operating System:"); }/Operating System/ { print; }'
docker info | grep 'Operating System'
register: os_info
changed_when: false

View File

@@ -10,7 +10,7 @@ location {{ (ingress_path + '/favicon.ico').replace('//', '/') }} {
alias /awx_devel/awx/public/static/favicon.ico;
}
location ~ ^({{ (ingress_path + '/websocket/').replace('//', '/') }}|{{ (ingress_path + '/api/websocket/').replace('//', '/') }}) {
location {{ (ingress_path + '/websocket').replace('//', '/') }} {
# Pass request to the upstream alias
proxy_pass http://daphne;
# Require http version 1.1 to allow for upgrade requests

View File

@@ -4,7 +4,7 @@
- block:
- name: Start the vault
community.docker.docker_compose_v2:
community.docker.docker_compose:
state: present
services: vault
project_src: "{{ sources_dest }}"
@@ -216,7 +216,7 @@
always:
- name: Stop the vault
community.docker.docker_compose_v2:
community.docker.docker_compose:
state: absent
project_src: "{{ sources_dest }}"
when: vault_start is defined and vault_start.changed

View File

@@ -16,7 +16,3 @@
- "{{ Unseal_Key_1 }}"
- "{{ Unseal_Key_2 }}"
- "{{ Unseal_Key_3 }}"
register: unseal_result
until: unseal_result is succeeded or unseal_result is failed and 'Connection refused' not in unseal_result.msg
retries: 5
delay: 1

View File

@@ -12,7 +12,7 @@ SOSREPORT_CONTROLLER_COMMANDS = [
"awx-manage run_dispatcher --status", # controller dispatch worker status
"awx-manage run_callback_receiver --status", # controller callback worker status
"awx-manage check_license --data", # controller license status
"awx-manage run_wsrelay --status", # controller websocket relay status
"awx-manage run_wsbroadcast --status", # controller broadcast websocket status
"supervisorctl status", # controller process status
"/var/lib/awx/venv/awx/bin/pip freeze", # pip package list
"/var/lib/awx/venv/awx/bin/pip freeze -l", # pip package list without globally-installed packages