Chop out the dev environment isolated node

This commit is contained in:
Jeff Bradberry
2021-04-13 13:58:35 -04:00
parent c1a009d128
commit efabc05270
5 changed files with 3 additions and 172 deletions

View File

@@ -174,12 +174,7 @@ init:
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(MANAGEMENT_COMMAND) provision_instance --hostname=$(COMPOSE_HOST); \
$(MANAGEMENT_COMMAND) register_queue --queuename=tower --instance_percent=100;\
if [ "$(AWX_GROUP_QUEUES)" == "tower,thepentagon" ]; then \
$(MANAGEMENT_COMMAND) provision_instance --hostname=isolated; \
$(MANAGEMENT_COMMAND) register_queue --queuename='thepentagon' --hostnames=isolated --controller=tower; \
$(MANAGEMENT_COMMAND) generate_isolated_key > /awx_devel/awx/main/isolated/authorized_keys; \
fi;
$(MANAGEMENT_COMMAND) register_queue --queuename=tower --instance_percent=100;
# Refresh development environment after pulling new code.
refresh: clean requirements_dev version_file develop migrate

View File

@@ -2,10 +2,8 @@ import pytest
from unittest import mock
import os
from django.utils.timezone import now, timedelta
from awx.main.tasks import RunProjectUpdate, RunInventoryUpdate, awx_isolated_heartbeat, isolated_manager
from awx.main.models import ProjectUpdate, InventoryUpdate, InventorySource, Instance, InstanceGroup
from awx.main.tasks import RunProjectUpdate, RunInventoryUpdate
from awx.main.models import ProjectUpdate, InventoryUpdate, InventorySource
@pytest.fixture
@@ -70,61 +68,3 @@ class TestDependentInventoryUpdate:
# Verify that it bails after 1st update, detecting a cancel
assert is2.inventory_updates.count() == 0
iu_run_mock.assert_called_once()
class MockSettings:
AWX_ISOLATED_PERIODIC_CHECK = 60
CLUSTER_HOST_ID = 'tower_1'
@pytest.mark.django_db
class TestIsolatedManagementTask:
@pytest.fixture
def control_group(self):
return InstanceGroup.objects.create(name='alpha')
@pytest.fixture
def control_instance(self, control_group):
return control_group.instances.create(hostname='tower_1')
@pytest.fixture
def needs_updating(self, control_group):
ig = InstanceGroup.objects.create(name='thepentagon', controller=control_group)
inst = ig.instances.create(hostname='isolated', capacity=103)
inst.last_isolated_check = now() - timedelta(seconds=MockSettings.AWX_ISOLATED_PERIODIC_CHECK)
inst.save()
return ig
@pytest.fixture
def just_updated(self, control_group):
ig = InstanceGroup.objects.create(name='thepentagon', controller=control_group)
inst = ig.instances.create(hostname='isolated', capacity=103)
inst.last_isolated_check = now()
inst.save()
return inst
@pytest.fixture
def old_version(self, control_group):
ig = InstanceGroup.objects.create(name='thepentagon', controller=control_group)
inst = ig.instances.create(hostname='isolated-old', capacity=103)
inst.save()
return inst
def test_takes_action(self, control_instance, needs_updating):
original_isolated_instance = needs_updating.instances.all().first()
with mock.patch('awx.main.tasks.settings', MockSettings()):
with mock.patch.object(isolated_manager.IsolatedManager, 'health_check') as check_mock:
awx_isolated_heartbeat()
iso_instance = Instance.objects.get(hostname='isolated')
call_args, _ = check_mock.call_args
assert call_args[0][0] == iso_instance
assert iso_instance.last_isolated_check > original_isolated_instance.last_isolated_check
assert iso_instance.modified == original_isolated_instance.modified
def test_does_not_take_action(self, control_instance, just_updated):
with mock.patch('awx.main.tasks.settings', MockSettings()):
with mock.patch.object(isolated_manager.IsolatedManager, 'health_check') as check_mock:
awx_isolated_heartbeat()
iso_instance = Instance.objects.get(hostname='isolated')
check_mock.assert_not_called()
assert iso_instance.capacity == 103

View File

@@ -1,17 +0,0 @@
---
version: '2'
services:
# Primary Tower Development Container link
awx:
environment:
AWX_GROUP_QUEUES: tower,thepentagon
links:
- isolated
# Isolated Rampart Container
isolated:
image: ${DEV_DOCKER_TAG_BASE}/awx_isolated:${TAG}
container_name: tools_isolated_1
hostname: isolated
volumes:
- "../awx/main/isolated:/awx_devel"
privileged: true

View File

@@ -1,20 +0,0 @@
ARG TAG=latest
FROM ansible/awx_devel:${TAG}
RUN dnf install -y gcc python36-devel openssh-server
RUN python3 -m ensurepip && pip3 install "virtualenv < 20" ansible-runner
RUN dnf remove -y gcc python36-devel && rm -rf /var/cache/dnf
RUN rm -f /etc/ssh/ssh_host_ecdsa_key /etc/ssh/ssh_host_rsa_key
RUN ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_ecdsa_key
RUN ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key
RUN sed -i "s/#UsePrivilegeSeparation.*/UsePrivilegeSeparation no/g" /etc/ssh/sshd_config
RUN sed -i "s/UsePAM.*/UsePAM yes/g" /etc/ssh/sshd_config
RUN sed -i "s/#StrictModes.*/StrictModes no/g" /etc/ssh/sshd_config
RUN mkdir -p /root/.ssh
RUN ln -s /awx_devel/authorized_keys /root/.ssh/authorized_keys
ENTRYPOINT ["tini", "--"]
CMD ["/usr/sbin/sshd", "-D"]
EXPOSE 22

View File

@@ -1,67 +0,0 @@
## Instructions on using an isolated node
The building of the isolated node is done in the `make docker-compose-build`
target. Its image uses a different tag from the tools_awx container.
Given that the images are built, you can run the combined docker compose target. This uses
the base `docker-compose.yml` with modifications found in `docker-isolated-override.yml`.
You will still need to set COMPOSE_TAG to whatever your intended
base branch is. For example:
```bash
make docker-isolated COMPOSE_TAG=devel
```
This will automatically exchange the keys in order for the `tools_awx_1`
container to access the `tools_isolated_1` container over ssh.
After that, it will bring up all the containers like the normal docker-compose
workflow.
### Running a job on the Isolated Node
Create a job template that runs normally. Add the id of the instance
group named `thepentagon` to the JT's instance groups. To do this, POST
the id (probably id=2) to `/api/v2/job_templates/N/instance_groups/`.
After that, run the job template.
The models are automatically created when running the Makefile target,
and they are structured as follows:
+-------+ +-------------+
| tower |<----+ thepentagon |
+-------+ +-------------+
^ ^
| |
| |
+---+---+ +-----+----+
| tower | | isolated |
+-------+ +----------+
The `controller` for the group "thepentagon" and all hosts therein is
determined by a ForeignKey within the instance group.
### Run a playbook
In order to run an isolated job, associate the instance group `thepentagon` with
a job template, inventory, or organization, then run a job that derives from
that resource. You should be able to confirm success by inspecting the
`instance_group` of the job.
#### Advanced Manual Testing
If you want to run a job manually inside the isolated container with this
tooling, you need a private data directory. Normal isolated job runs will
clean up their private data directory, but you can temporarily disable this
by disabling some parts of the cleanup_isolated.yml playbook.
Example location of a private data directory:
`/tmp/awx_29_OM6Mnx/`
The following command would run the playbook corresponding to that job.
```bash
ansible-runner start /tmp/awx_29_OM6Mnx/ -p some_playbook.yml
```
Other ansible-runner commands include `start`, `is-alive`, and `stop`.