properly handle host summary bulk updates if hosts go missing

This commit is contained in:
Ryan Petrello 2020-05-12 09:38:59 -04:00
parent e7347d15c1
commit 3cb2475307
No known key found for this signature in database
GPG Key ID: F2AA5F2122351777
2 changed files with 51 additions and 11 deletions

View File

@ -489,25 +489,23 @@ class JobEvent(BasePlaybookEvent):
return
job = self.job
from awx.main.models.jobs import JobHostSummary # circular import
from awx.main.models import Host, JobHostSummary # circular import
existing = Host.objects.filter(id__in=self.host_map.values()).values_list('id', flat=True)
summaries = dict()
for host in hostnames:
host_id = self.host_map.get(host, None)
if host_id not in existing:
host_id = None
host_stats = {}
for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
try:
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
except AttributeError: # in case event_data[stat] isn't a dict.
pass
host_id = self.host_map.get(host, None)
summaries.setdefault(
(host_id, host),
JobHostSummary(created=now(), modified=now(), job_id=job.id, host_id=host_id, host_name=host)
summaries[(host_id, host)] = JobHostSummary(
created=now(), modified=now(), job_id=job.id, host_id=host_id, host_name=host, **host_stats
)
host_summary = summaries[(host_id, host)]
for stat, value in host_stats.items():
if getattr(host_summary, stat) != value:
setattr(host_summary, stat, value)
JobHostSummary.objects.bulk_create(summaries.values())

View File

@ -67,7 +67,7 @@ def test_parent_failed(emit, event):
@pytest.mark.django_db
def test_host_summary_generation():
hostnames = [f'Host {i}' for i in range(5000)]
hostnames = [f'Host {i}' for i in range(500)]
inv = Inventory()
inv.save()
Host.objects.bulk_create([
@ -107,3 +107,45 @@ def test_host_summary_generation():
assert s.processed == 0
assert s.rescued == 0
assert s.skipped == 0
@pytest.mark.django_db
def test_host_summary_generation_with_deleted_hosts():
    """Summaries are still created for hosts deleted during a playbook run.

    Hosts removed after the job's host map was captured keep their
    ``host_name`` on the ``JobHostSummary``, but the ``host_id`` FK is left
    NULL (normalized to -1 below so the list is sortable).
    """
    hostnames = [f'Host {i}' for i in range(10)]
    inv = Inventory()
    inv.save()
    Host.objects.bulk_create([
        Host(created=now(), modified=now(), name=h, inventory_id=inv.id)
        for h in hostnames
    ])
    j = Job(inventory=inv)
    j.save()
    host_map = dict((host.name, host.id) for host in inv.hosts.all())
    # delete half of the hosts during the playbook run
    for h in inv.hosts.all()[:5]:
        h.delete()
    # Capture the ids of the hosts that survived the deletion; hard-coding
    # PK values (e.g. [6..10]) is fragile because DB sequences are not reset
    # between tests, so earlier tests would shift the assigned ids.
    surviving_ids = sorted(host.id for host in inv.hosts.all())
    JobEvent.create_from_data(
        job_id=j.pk,
        parent_uuid='abc123',
        event='playbook_on_stats',
        event_data={
            'ok': dict((hostname, len(hostname)) for hostname in hostnames),
            'changed': {},
            'dark': {},
            'failures': {},
            'ignored': {},
            'processed': {},
            'rescued': {},
            'skipped': {},
        },
        host_map=host_map
    ).save()
    ids = sorted([s.host_id or -1 for s in j.job_host_summaries.order_by('id').all()])
    names = sorted([s.host_name for s in j.job_host_summaries.all()])
    # Five deleted hosts -> NULL host_id (-1); five survivors keep their FK.
    assert ids == [-1] * 5 + surviving_ids
    # Every hostname gets a summary row regardless of deletion.
    assert names == sorted(hostnames)