Refactor of accessible_objects for the polymorphic model.

Query simplification, with a break-out into an intermediate
state that can be used in access methods.
This commit is contained in:
AlanCoding
2016-12-20 09:55:08 -05:00
parent 8650b69c9e
commit b2d0871a5e
5 changed files with 110 additions and 68 deletions

View File

@@ -1,15 +1,15 @@
resource medium
organizations 500
users 5000
teams 500
projects 1000
job-templates 2000
credentials 2000
inventories 2000
inventory-groups 500
inventory-hosts 2500
wfjts 100
nodes 1000
labels 1000
jobs 1000
job-events 1000
resource medium jan2017
organizations 500 1
users 5000 3
teams 500 2
projects 1000 30
job-templates 2000 127
credentials 2000 50
inventories 2000 6
inventory-groups 500 15
inventory-hosts 2500 15
wfjts 100 0
nodes 1000 0
labels 1000 0
jobs 1000 157208
job-events 1000 3370942
1 resource medium jan2017
2 organizations 500 1
3 users 5000 3
4 teams 500 2
5 projects 1000 30
6 job-templates 2000 127
7 credentials 2000 50
8 inventories 2000 6
9 inventory-groups 500 15
10 inventory-hosts 2500 15
11 wfjts 100 0
12 nodes 1000 0
13 labels 1000 0
14 jobs 1000 157208
15 job-events 1000 3370942

View File

@@ -7,6 +7,7 @@ import sys
# Python
from collections import defaultdict
from optparse import make_option, OptionParser
import logging
# Django
@@ -84,6 +85,7 @@ options = vars(options)
if options['preset']:
print ' Using preset data numbers set ' + str(options['preset'])
# Read the numbers of resources from presets file, if provided
presets_filename = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'presets.tsv'))
@@ -603,22 +605,28 @@ try:
wfjt.labels.add(next(label_gen))
wfjt_idx += 1
# Disable logging here, because it will mess up output format
logger = logging.getLogger('awx.main')
logger.propagate = False
print('# Creating %d jobs' % n_jobs)
group_idx = 0
job_template_idx = 0
job_i = 0
for n in spread(n_jobs, n_job_templates):
job_template = job_templates[job_template_idx]
for i in range(n):
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, job_template.name, i+ 1))
sys.stdout.flush()
job_stat = 'successful'
if len(jobs) % 4 == 0:
job_stat = 'failed'
elif len(jobs) % 11 == 0:
job_stat = 'canceled'
else:
job_stat = 'successful'
job, _ = Job.objects.get_or_create(
job_template=job_template,
status=job_stat, name=job_template.name,
status=job_stat, name="%s-%d" % (job_template.name, job_i),
project=job_template.project, inventory=job_template.inventory,
credential=job_template.credential,
cloud_credential=job_template.cloud_credential,
@@ -626,25 +634,29 @@ try:
)
job._is_new = _
jobs.append(job)
job_i += 1
if not job._is_new:
job_template_idx += 1
group_idx += 1
continue
if i == n:
if i+1 == n:
job_template.last_job = job
if job_template.pk % 5 == 0:
job_template.current_job = job
job_template.save()
with transaction.atomic():
if job_template.inventory:
inv_groups = [g for g in job_template.inventory.groups.all()]
if len(inv_groups):
JobHostSummary.objects.bulk_create([
JobHostSummary(
job=job, host=h, host_name=h.name, processed=1,
created=now(), modified=now()
)
for h in inv_groups[group_idx % len(inv_groups)].hosts.all()[:100]
])
if job._is_new:
with transaction.atomic():
if job_template.inventory:
inv_groups = [g for g in job_template.inventory.groups.all()]
if len(inv_groups):
JobHostSummary.objects.bulk_create([
JobHostSummary(
job=job, host=h, host_name=h.name, processed=1,
created=now(), modified=now()
)
for h in inv_groups[group_idx % len(inv_groups)].hosts.all()[:100]
])
group_idx += 1
job_template_idx += 1
if n: