Fix traceback on timeout with slicing + facts (#13139)

Slicing a QuerySet with a step parameter forces evaluation of the QuerySet and returns a list.
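For context, a minimal sketch of the Django behavior behind the traceback (illustrative only; it assumes a configured AWX/Django environment and uses made-up slice and timeout values):

```python
import datetime
from django.utils.timezone import now
from awx.main.models import Host

qs = Host.objects.all()

qs[1:]    # still a lazy QuerySet
qs[1::4]  # a step forces evaluation and returns a plain list of Host objects

# Pre-patch, start_job_fact_cache re-filtered the result of the (possibly sliced)
# host lookup, which blows up once job slicing has turned it into a list:
cutoff = now() - datetime.timedelta(seconds=3600)
hosts = qs[1::4]
hosts.filter(ansible_facts_modified__gte=cutoff)  # AttributeError: 'list' object has no attribute 'filter'
```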

Fixes #13131

Signed-off-by: Rick Elrod <rick@elrod.me>
Rick Elrod 2022-11-01 09:11:20 -05:00 committed by GitHub
parent c5cf39abb7
commit 1bedf32baf
2 changed files with 26 additions and 4 deletions

awx/main/models/inventory.py

@@ -247,6 +247,19 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         return (number, step)

     def get_sliced_hosts(self, host_queryset, slice_number, slice_count):
+        """
+        Returns a slice of Hosts given a slice number and total slice count, or
+        the original queryset if slicing is not requested.
+
+        NOTE: If slicing is performed, this will return a List[Host] with the
+        resulting slice. If slicing is not performed it will return the
+        original queryset (not evaluating it or forcing it to a list). This
+        puts the burden on the caller to check the resulting type. This is
+        non-ideal because it's easy to get wrong, but I think the only way
+        around it is to force the queryset which has memory implications for
+        large inventories.
+        """
         if slice_count > 1 and slice_number > 0:
             offset = slice_number - 1
             host_queryset = host_queryset[offset::slice_count]
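Because the return type differs depending on whether slicing happened, the caller has to branch on what it gets back. A small usage sketch (hypothetical caller code, assuming an existing `inventory` Inventory instance; it mirrors the isinstance check added in the Job model change below):

```python
from django.db.models.query import QuerySet

host_queryset = inventory.hosts.only('name', 'ansible_facts_modified')
result = inventory.get_sliced_hosts(host_queryset, slice_number=2, slice_count=4)

if isinstance(result, QuerySet):
    hosts = result.iterator()  # still lazy; stream rows without caching the whole set
else:
    hosts = result             # slicing already evaluated it into a list of Host objects

for host in hosts:
    print(host.name)
```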

awx/main/models/jobs.py

@@ -15,6 +15,7 @@ from urllib.parse import urljoin
 from django.conf import settings
 from django.core.exceptions import ValidationError
 from django.db import models
+from django.db.models.query import QuerySet

 # from django.core.cache import cache
 from django.utils.encoding import smart_str
@@ -844,22 +845,30 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
     def get_notification_friendly_name(self):
         return "Job"

-    def _get_inventory_hosts(self, only=['name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id']):
+    def _get_inventory_hosts(self, only=('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id'), **filters):
+        """Return value is an iterable for the relevant hosts for this job"""
         if not self.inventory:
             return []
         host_queryset = self.inventory.hosts.only(*only)
-        return self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
+        if filters:
+            host_queryset = host_queryset.filter(**filters)
+        host_queryset = self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
+        if isinstance(host_queryset, QuerySet):
+            return host_queryset.iterator()
+        return host_queryset

     def start_job_fact_cache(self, destination, modification_times, timeout=None):
         self.log_lifecycle("start_job_fact_cache")
         os.makedirs(destination, mode=0o700)
-        hosts = self._get_inventory_hosts()
         if timeout is None:
             timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
         if timeout > 0:
             # exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT seconds`
             timeout = now() - datetime.timedelta(seconds=timeout)
-            hosts = hosts.filter(ansible_facts_modified__gte=timeout)
+            hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
+        else:
+            hosts = self._get_inventory_hosts()
         for host in hosts:
             filepath = os.sep.join(map(str, [destination, host.name]))
             if not os.path.realpath(filepath).startswith(destination):