set max dispatch workers to same as max forks

Right now, without this change, we end up with a different number for max_workers than for max_forks. For example, on a control node with 16 Gi of RAM:
  max_mem_capacity w/ 100 MB/fork = (16*1024)/100 --> ~164
  max_workers = 5 * 16 --> 80

This means we would allow that control node to control up to ~164 jobs, but every job after the 80th would sit in the `waiting` state until a dispatch worker freed up to run it.
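To make the mismatch concrete, here is a minimal standalone sketch (not AWX code) of the two competing formulas; the 100 MB-per-fork figure is the assumed default:

# Minimal standalone sketch of the two formulas above; not AWX code.
# Assumes the default fork memory cost of 100 MB per fork.
MB_PER_FORK = 100

total_memory_gb = 16  # control node with 16 Gi of RAM

# Memory-based capacity: how many forks/jobs the node may control
max_mem_capacity = (total_memory_gb * 1024) // MB_PER_FORK  # 163 (~164 rounded)

# Old dispatcher sizing: 5 workers per GB of total memory
max_workers = total_memory_gb * 5  # 80

# Every job admitted beyond max_workers would be stuck in `waiting`
print(max_mem_capacity - max_workers, "jobs could be stuck waiting")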

Author: Elijah DeLee, 2022-02-23 11:06:10 -05:00
Parent: 48fa947692
Commit: 4bd6c2a804

@@ -22,7 +22,7 @@ import psutil
 from awx.main.models import UnifiedJob
 from awx.main.dispatch import reaper
-from awx.main.utils.common import convert_mem_str_to_bytes
+from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity

 if 'run_callback_receiver' in sys.argv:
     logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -324,8 +324,9 @@ class AutoscalePool(WorkerPool):
             total_memory_gb = convert_mem_str_to_bytes(settings_absmem) // 2**30
         else:
             total_memory_gb = (psutil.virtual_memory().total >> 30) + 1  # noqa: round up
-        # 5 workers per GB of total memory
-        self.max_workers = total_memory_gb * 5
+        # Get the same number as max forks based on memory; this function takes memory in bytes
+        self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)
         # max workers can't be less than min_workers
         self.max_workers = max(self.min_workers, self.max_workers)
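
For reference, a rough sketch of the behavior the new sizing relies on. The real get_mem_effective_capacity lives in awx.main.utils.common and reads its per-fork memory cost from settings, so the 100 MB default below is an assumption for illustration:

# Rough sketch only; not the real implementation. The actual
# get_mem_effective_capacity (in awx.main.utils.common) reads its
# per-fork memory cost from Django settings; 100 MB is assumed here.
def mem_effective_capacity_sketch(mem_bytes, mb_per_fork=100):
    # Number of forks that fit in mem_bytes at mb_per_fork each, at least 1
    return max(1, mem_bytes // (mb_per_fork * 2**20))

# After this change, a 16 GiB control node sizes its dispatch pool to the
# same figure as its memory-based fork capacity:
print(mem_effective_capacity_sketch(16 * 2**30))  # 163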