Add support for directly managing instance groups

* Associating/Disassociating an instance with a group
* Triggering a topology rebuild on that change
* Force rabbitmq cleanup of offline nodes
* Automatically check for dependent service startup
* Fetch and set hostname for celery so it doesn't clobber other
  celeries
* Rely on celery init signals to dynamically set listen queues
* Removing old total_capacity instance manager property
This commit is contained in:
Matthew Jones 2017-10-06 10:47:02 -04:00
parent 6ede1dfbea
commit 624289bed7
No known key found for this signature in database
GPG Key ID: 76A4C17A97590C1C
7 changed files with 70 additions and 19 deletions

View File

@ -57,7 +57,7 @@ import pytz
from wsgiref.util import FileWrapper
# AWX
from awx.main.tasks import send_notifications
from awx.main.tasks import send_notifications, handle_ha_toplogy_changes
from awx.main.access import get_user_queryset
from awx.main.ha import is_ha_environment
from awx.api.authentication import TokenGetAuthentication
@ -148,6 +148,29 @@ class UnifiedJobDeletionMixin(object):
return Response(status=status.HTTP_204_NO_CONTENT)
class InstanceGroupMembershipMixin(object):
    '''
    Manages signaling celery to reload its queue configuration on Instance
    Group membership changes.

    Each mutating action (attach/unattach/destroy) delegates to the parent
    view and, only when the response indicates success, schedules the async
    topology-change task.
    '''

    def _notify_topology_change(self, response):
        # Fire the rebuild task only for successful membership mutations.
        if status.is_success(response.status_code):
            handle_ha_toplogy_changes.apply_async()
        return response

    def attach(self, request, *args, **kwargs):
        parent = super(InstanceGroupMembershipMixin, self)
        return self._notify_topology_change(parent.attach(request, *args, **kwargs))

    def unattach(self, request, *args, **kwargs):
        parent = super(InstanceGroupMembershipMixin, self)
        return self._notify_topology_change(parent.unattach(request, *args, **kwargs))

    def destroy(self, request, *args, **kwargs):
        parent = super(InstanceGroupMembershipMixin, self)
        return self._notify_topology_change(parent.destroy(request, *args, **kwargs))
class ApiRootView(APIView):
authentication_classes = []
@ -548,7 +571,7 @@ class InstanceUnifiedJobsList(SubListAPIView):
return qs
class InstanceInstanceGroupsList(SubListAPIView):
class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):
view_name = _("Instance's Instance Groups")
model = InstanceGroup
@ -558,7 +581,7 @@ class InstanceInstanceGroupsList(SubListAPIView):
relationship = 'rampart_groups'
class InstanceGroupList(ListAPIView):
class InstanceGroupList(ListCreateAPIView):
view_name = _("Instance Groups")
model = InstanceGroup
@ -566,7 +589,7 @@ class InstanceGroupList(ListAPIView):
new_in_320 = True
class InstanceGroupDetail(RetrieveAPIView):
class InstanceGroupDetail(InstanceGroupMembershipMixin, RetrieveDestroyAPIView):
view_name = _("Instance Group Detail")
model = InstanceGroup
@ -584,7 +607,7 @@ class InstanceGroupUnifiedJobsList(SubListAPIView):
new_in_320 = True
class InstanceGroupInstanceList(SubListAPIView):
class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetachAPIView):
view_name = _("Instance Group's Instances")
model = Instance

View File

@ -424,6 +424,18 @@ class InstanceAccess(BaseAccess):
return Instance.objects.filter(
rampart_groups__in=self.user.get_queryset(InstanceGroup)).distinct()
def can_attach(self, obj, sub_obj, relationship, data,
               skip_sub_obj_read_check=False):
    """
    Return whether self.user may associate sub_obj with obj via
    `relationship`.

    Attaching an Instance to an InstanceGroup ('rampart_groups') is
    restricted to superusers; every other relationship defers to the
    base access class.
    """
    if relationship == 'rampart_groups' and isinstance(sub_obj, InstanceGroup):
        return self.user.is_superuser
    # BUG FIX: the original forwarded *args/**kwargs, which are not bound in
    # this method's scope (NameError at runtime). Forward the declared
    # parameters instead.
    return super(InstanceAccess, self).can_attach(
        obj, sub_obj, relationship, data,
        skip_sub_obj_read_check=skip_sub_obj_read_check)
def can_unattach(self, obj, sub_obj, relationship, data=None):
    """
    Return whether self.user may remove the sub_obj association from obj.

    Detaching an Instance from an InstanceGroup ('rampart_groups') is
    restricted to superusers; every other relationship defers to the
    base access class.
    """
    if relationship == 'rampart_groups' and isinstance(sub_obj, InstanceGroup):
        return self.user.is_superuser
    # BUG FIX: the original forwarded *args/**kwargs, which are not bound in
    # this method's scope (NameError at runtime). Forward `data` instead.
    # NOTE(review): assumes BaseAccess.can_unattach accepts a `data` kwarg --
    # confirm against the base class signature.
    return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, data=data)
def can_add(self, data):
    # Instance records can never be created through the API, regardless of
    # the requesting user's privileges.
    return False
@ -444,13 +456,13 @@ class InstanceGroupAccess(BaseAccess):
organization__in=Organization.accessible_pk_qs(self.user, 'admin_role'))
def can_add(self, data):
    # Only superusers may create instance groups. (An unreachable
    # `return False` that shadowed this return has been removed.)
    return self.user.is_superuser
def can_change(self, obj, data):
    # Only superusers may modify an instance group. (An unreachable
    # `return False` that shadowed this return has been removed.)
    return self.user.is_superuser
def can_delete(self, obj):
    # Only superusers may delete an instance group. (An unreachable
    # `return False` that shadowed this return has been removed.)
    return self.user.is_superuser
class UserAccess(BaseAccess):

View File

@ -93,11 +93,6 @@ class InstanceManager(models.Manager):
"""Return count of active Tower nodes for licensing."""
return self.all().count()
def total_capacity(self):
    # Sum `capacity` across instances whose `modified` timestamp falls within
    # the last AWX_ACTIVE_NODE_TIME seconds, flooring the result at 50.
    # NOTE(review): if no instance matches, aggregate() yields None; under
    # Python 2 max(50, None) returns 50, but under Python 3 it would raise
    # TypeError -- confirm which runtime applies. The commit message marks
    # this property for removal.
    sumval = self.filter(modified__gte=now() - timedelta(seconds=settings.AWX_ACTIVE_NODE_TIME)) \
        .aggregate(total_capacity=Sum('capacity'))['total_capacity']
    return max(50, sumval)
def my_role(self):
    """
    Return the name of the role this node plays in the cluster.

    NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
    """
    return "tower"

View File

@ -1,7 +1,4 @@
from awx.main.models import (
Job,
Instance
)
from awx.main.models import Job, Instance
from django.test.utils import override_settings
import pytest

View File

@ -4,7 +4,12 @@ if [ `id -u` -ge 500 ]; then
cat /tmp/passwd > /etc/passwd
rm /tmp/passwd
fi
# Block until each dependent service accepts TCP connections before touching
# the database: the configured database host/port, then localhost:11211 and
# localhost:5672 (presumably memcached and rabbitmq on their default ports --
# confirm against the deployment).
ANSIBLE_REMOTE_TEMP=/tmp ANSIBLE_LOCAL_TEMP=/tmp ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=$DATABASE_HOST port=$DATABASE_PORT" all
ANSIBLE_REMOTE_TEMP=/tmp ANSIBLE_LOCAL_TEMP=/tmp ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=localhost port=11211" all
ANSIBLE_REMOTE_TEMP=/tmp ANSIBLE_LOCAL_TEMP=/tmp ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=localhost port=5672" all
# Create the application database owned by $DATABASE_USER (no-op if it
# already exists).
ANSIBLE_REMOTE_TEMP=/tmp ANSIBLE_LOCAL_TEMP=/tmp ansible -i "127.0.0.1," -c local -v -m postgresql_db -U $DATABASE_USER -a "name=$DATABASE_NAME owner=$DATABASE_USER login_user=$DATABASE_USER login_host=$DATABASE_HOST login_password=$DATABASE_PASSWORD port=$DATABASE_PORT" all
# Apply Django migrations; --fake-initial skips initial migrations whose
# tables already exist.
awx-manage migrate --noinput --fake-initial
if [ ! -z "$AWX_ADMIN_USER" ]&&[ ! -z "$AWX_ADMIN_PASSWORD" ]; then
echo "from django.contrib.auth.models import User; User.objects.create_superuser('$AWX_ADMIN_USER', 'root@localhost', '$AWX_ADMIN_PASSWORD')" | awx-manage shell

View File

@ -3,7 +3,7 @@ nodaemon = True
umask = 022
[program:celery]
command = /var/lib/awx/venv/awx/bin/celery worker -A awx -l debug --autoscale=4 -Ofair -Q tower_broadcast_all -n celery@%(ENV_HOSTNAME)s
directory = /var/lib/awx
environment = LANGUAGE="en_US.UTF-8",LANG="en_US.UTF-8",LC_ALL="en_US.UTF-8",LC_CTYPE="en_US.UTF-8"
#user = {{ aw_user }}

View File

@ -66,7 +66,9 @@ spec:
- name: AUTOCLUSTER_CLEANUP
value: "true"
- name: CLEANUP_WARN_ONLY
value: "false"
- name: CLEANUP_INTERVAL
value: "30"
- name: RABBITMQ_DEFAULT_USER
value: awx
- name: RABBITMQ_DEFAULT_PASS
@ -102,6 +104,23 @@ spec:
selector:
name: awx-web-deploy
---
apiVersion: v1
kind: Service
metadata:
name: awx-rmq-mgmt
namespace: {{ awx_openshift_project }}
labels:
name: awx-rmq-mgmt
spec:
type: ClusterIP
ports:
- name: rmqmgmt
port: 15672
targetPort: 15672
selector:
name: awx-web-deploy
---
apiVersion: v1
kind: Route
metadata: