mirror of
https://github.com/ansible/awx.git
synced 2026-01-10 15:32:07 -03:30
make InstanceGroup.policy_instance_list non-exclusive by default
see: https://github.com/ansible/tower/issues/2583
This commit is contained in:
parent fec1e8d398
commit 15aaca8f03
@@ -4617,7 +4617,7 @@ class InstanceSerializer(BaseSerializer):
         read_only_fields = ('uuid', 'hostname', 'version')
         fields = ("id", "type", "url", "related", "uuid", "hostname", "created", "modified", 'capacity_adjustment',
                   "version", "capacity", "consumed_capacity", "percent_capacity_remaining", "jobs_running", "jobs_total",
-                  "cpu", "memory", "cpu_capacity", "mem_capacity", "enabled")
+                  "cpu", "memory", "cpu_capacity", "mem_capacity", "enabled", "managed_by_policy")
 
     def get_related(self, obj):
         res = super(InstanceSerializer, self).get_related(obj)
awx/main/migrations/0045_v330_instance_managed_by_policy.py (new file, 20 lines)
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.11.11 on 2018-07-25 17:42
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0044_v330_add_inventory_update_inventory'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='instance',
+            name='managed_by_policy',
+            field=models.BooleanField(default=True),
+        ),
+    ]
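Because the column is added with `default=True`, every pre-existing `Instance` row comes out of this migration as policy-managed; nothing is pinned until an operator opts a node out. A minimal sketch of checking that (assuming the usual `awx.main.models` import path):

```
# Sketch: after this migration, existing instances default to being
# managed by cluster policies; none have opted out yet.
from awx.main.models import Instance

assert not Instance.objects.filter(managed_by_policy=False).exists()
```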
@@ -64,6 +64,9 @@ class Instance(BaseModel):
     enabled = models.BooleanField(
         default=True
     )
+    managed_by_policy = models.BooleanField(
+        default=True
+    )
     cpu = models.IntegerField(
         default=0,
         editable=False,
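This flag is what the policy task consults when handing out automatic memberships; opting a node out of percentage/minimum placement is a one-flag change. A minimal sketch, mirroring the tests below (the hostname is a placeholder):

```
# Sketch: pin one node so it only belongs to groups that list it explicitly.
from awx.main.models import Instance

inst = Instance.objects.get(hostname="special-instance")
inst.managed_by_policy = False
inst.save()
```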
@@ -136,18 +136,16 @@ def inform_cluster_of_shutdown(*args, **kwargs):
 @shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
 def apply_cluster_membership_policies(self):
     with advisory_lock('cluster_policy_lock', wait=True):
-        considered_instances = Instance.objects.all().order_by('id')
+        considered_instances = Instance.objects.all_non_isolated().order_by('id')
         total_instances = considered_instances.count()
-        filtered_instances = []
         actual_groups = []
         actual_instances = []
         Group = namedtuple('Group', ['obj', 'instances'])
         Node = namedtuple('Instance', ['obj', 'groups'])
 
-        # Process policy instance list first, these will represent manually managed instances
-        # that will not go through automatic policy determination
+        # Process policy instance list first, these will represent manually managed memberships
         for ig in InstanceGroup.objects.all():
-            logger.info(six.text_type("Applying cluster membership policies to Group {}").format(ig.name))
+            logger.info(six.text_type("Applying cluster policy instance list to Group {}").format(ig.name))
             ig.instances.clear()
             group_actual = Group(obj=ig, instances=[])
             for i in ig.policy_instance_list:
@@ -158,11 +156,11 @@ def apply_cluster_membership_policies(self):
                 logger.info(six.text_type("Policy List, adding Instance {} to Group {}").format(inst.hostname, ig.name))
                 group_actual.instances.append(inst.id)
                 ig.instances.add(inst)
-                filtered_instances.append(inst)
             actual_groups.append(group_actual)
+
         # Process Instance minimum policies next, since it represents a concrete lower bound to the
         # number of instances to make available to instance groups
-        actual_instances = [Node(obj=i, groups=[]) for i in filter(lambda x: x not in filtered_instances, considered_instances)]
+        actual_instances = [Node(obj=i, groups=[]) for i in considered_instances if i.managed_by_policy]
         logger.info("Total instances not directly associated: {}".format(total_instances))
         for g in sorted(actual_groups, cmp=lambda x,y: len(x.instances) - len(y.instances)):
             for i in sorted(actual_instances, cmp=lambda x,y: len(x.groups) - len(y.groups)):
@@ -172,7 +170,8 @@ def apply_cluster_membership_policies(self):
                 g.obj.instances.add(i.obj)
                 g.instances.append(i.obj.id)
                 i.groups.append(g.obj.id)
-        # Finally process instance policy percentages
+
+        # Finally, process instance policy percentages
         for g in sorted(actual_groups, cmp=lambda x,y: len(x.instances) - len(y.instances)):
             for i in sorted(actual_instances, cmp=lambda x,y: len(x.groups) - len(y.groups)):
                 if i.obj.id in g.instances:
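Taken together, the task now applies policies in three passes without excluding manually listed nodes up front: explicit `policy_instance_list` entries first, then minimum policies, then percentages, and only instances with `managed_by_policy=True` take part in the automatic passes. A simplified, self-contained sketch of that flow (plain data structures only; the group attributes and the percentage fill rule are illustrative, not the exact AWX arithmetic):

```
# Simplified model of the three policy passes; not the real AWX task.
from collections import namedtuple
from math import ceil

Instance = namedtuple('Instance', ['hostname', 'managed_by_policy'])
Group = namedtuple('Group', ['name', 'policy_instance_list',
                             'policy_instance_minimum', 'policy_instance_percentage'])

def apply_policies(instances, groups):
    membership = {g.name: [] for g in groups}

    # Pass 1: fixed policy_instance_list entries are always honored.
    for g in groups:
        for hostname in g.policy_instance_list:
            if any(i.hostname == hostname for i in instances):
                membership[g.name].append(hostname)

    # Only policy-managed instances take part in the automatic passes.
    candidates = [i for i in instances if i.managed_by_policy]

    # Pass 2: top groups up to their minimum instance count.
    for g in groups:
        for i in candidates:
            if len(membership[g.name]) >= g.policy_instance_minimum:
                break
            if i.hostname not in membership[g.name]:
                membership[g.name].append(i.hostname)

    # Pass 3: top groups up to their percentage share of the candidates.
    for g in groups:
        target = ceil(g.policy_instance_percentage / 100.0 * len(candidates))
        for i in candidates:
            if len(membership[g.name]) >= target:
                break
            if i.hostname not in membership[g.name]:
                membership[g.name].append(i.hostname)

    return membership

# Mirrors the new tests: i2 is listed in ig2 and opted out of policy,
# so ig1 (100% / minimum 2) only ever picks up i1.
instances = [Instance('i1', True), Instance('i2', False)]
groups = [Group('ig1', [], 2, 100), Group('ig2', ['i2'], 0, 0)]
print(apply_policies(instances, groups))
# {'ig1': ['i1'], 'ig2': ['i2']}
```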
@@ -175,6 +175,8 @@ def test_failover_group_run(instance_factory, default_instance_group, mocker,
 @pytest.mark.django_db
 def test_instance_group_basic_policies(instance_factory, instance_group_factory):
     i0 = instance_factory("i0")
+    i0.managed_by_policy = False
+    i0.save()
     i1 = instance_factory("i1")
     i2 = instance_factory("i2")
     i3 = instance_factory("i3")
@@ -155,7 +155,7 @@ def test_policy_instance_distribution_simultaneous(mock, instance_factory, insta
 
 @pytest.mark.django_db
 @mock.patch('awx.main.tasks.handle_ha_toplogy_changes', return_value=None)
-def test_policy_instance_list_manually_managed(mock, instance_factory, instance_group_factory):
+def test_policy_instance_list_manually_assigned(mock, instance_factory, instance_group_factory):
     i1 = instance_factory("i1")
     i2 = instance_factory("i2")
     ig_1 = instance_group_factory("ig1", percentage=100, minimum=2)
@@ -163,6 +163,25 @@ def test_policy_instance_list_manually_managed(mock, instance_factory, instance_
     ig_2.policy_instance_list = [i2.hostname]
     ig_2.save()
     apply_cluster_membership_policies()
+    assert len(ig_1.instances.all()) == 2
+    assert i1 in ig_1.instances.all()
+    assert i2 in ig_1.instances.all()
+    assert len(ig_2.instances.all()) == 1
+    assert i2 in ig_2.instances.all()
+
+
+@pytest.mark.django_db
+@mock.patch('awx.main.tasks.handle_ha_toplogy_changes', return_value=None)
+def test_policy_instance_list_explicitly_pinned(mock, instance_factory, instance_group_factory):
+    i1 = instance_factory("i1")
+    i2 = instance_factory("i2")
+    i2.managed_by_policy = False
+    i2.save()
+    ig_1 = instance_group_factory("ig1", percentage=100, minimum=2)
+    ig_2 = instance_group_factory("ig2")
+    ig_2.policy_instance_list = [i2.hostname]
+    ig_2.save()
+    apply_cluster_membership_policies()
     assert len(ig_1.instances.all()) == 1
     assert i1 in ig_1.instances.all()
     assert i2 not in ig_1.instances.all()
@@ -253,9 +253,7 @@ Instance Group Policies are controlled by 3 optional fields on an `Instance Grou
   is less than the given percentage then new ones will be added until the percentage condition is satisfied.
 * `policy_instance_minimum`: This policy attempts to keep at least this many `Instances` in the `Instance Group`. If the number of
   available instances is lower than this minimum then all `Instances` will be placed in this `Instance Group`.
-* `policy_instance_list`: This is a fixed list of `Instance` names. These `Instances` will *always* be added to this `Instance Group`.
-  Further, by adding Instances to this list you are declaring that you will manually manage those Instances and they will not be eligible under any other
-  policy. This means they will not be automatically added to any other `Instance Group` even if the policy would cause them to be matched.
+* `policy_instance_list`: This is a fixed list of `Instance` names to always include in this `Instance Group`.
 
 > NOTES
 
@@ -269,6 +267,26 @@ Instance Group Policies are controlled by 3 optional fields on an `Instance Grou
 * Policies don't actively prevent `Instances` from being associated with multiple `Instance Groups` but this can effectively be achieved by making the percentages
   sum to 100. If you have 4 `Instance Groups` assign each a percentage value of 25 and the `Instances` will be distributed among them with no overlap.
 
+### Manually Pinning Instances to Specific Groups
+If you have a special `Instance` which needs to be _exclusively_ assigned to a specific `Instance Group` but don't want it to automatically join _other_ groups via "percentage" or "minimum" policies:
+
+1. Add the `Instance` to one or more `Instance Group`s' `policy_instance_list`
+2. Update the `Instance`'s `managed_by_policy` property to be `False`.
+
+This will prevent the `Instance` from being automatically added to other groups based on percentage and minimum policy; it will **only** belong to the groups you've manually assigned it to:
+
+```
+HTTP PATCH /api/v2/instance_groups/N/
+{
+    "policy_instance_list": ["special-instance"]
+}
+
+HTTP PATCH /api/v2/instances/X/
+{
+    "managed_by_policy": false
+}
+```
+
 ### Status and Monitoring
 
 Tower itself reports as much status as it can via the api at `/api/v2/ping` in order to provide validation of the health
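For the non-overlapping distribution mentioned in the NOTES above, a sketch in the same request style as the pinning example (the group ID is a placeholder; the percentage field is assumed to be `policy_instance_percentage`, the third policy field alongside the minimum and the list):

```
HTTP PATCH /api/v2/instance_groups/1/
{
    "policy_instance_percentage": 25
}
```

Repeating the same PATCH for the other three groups makes the shares sum to 100, so the automatic policy pass places each policy-managed instance in exactly one group.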