Mirror of https://github.com/ansible/awx.git

Commit 46d6dce738 (parent 475a701f78): Mass rename of shard -> split
@@ -3008,7 +3008,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
         fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
                   'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
                   'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
-                  'allow_simultaneous', 'custom_virtualenv', 'job_shard_count')
+                  'allow_simultaneous', 'custom_virtualenv', 'job_split_count')

     def get_related(self, obj):
         res = super(JobTemplateSerializer, self).get_related(obj)
@@ -3025,7 +3025,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
             labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
             object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
             instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
-            sharded_jobs = self.reverse('api:job_template_sharded_jobs_list', kwargs={'pk': obj.pk}),
+            split_jobs = self.reverse('api:job_template_split_jobs_list', kwargs={'pk': obj.pk}),
         ))
         if self.version > 1:
             res['copy'] = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk})
@@ -3201,9 +3201,9 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
         summary_fields = super(JobSerializer, self).get_summary_fields(obj)
         if obj.internal_limit:
             summary_fields['internal_limit'] = {}
-            if obj.internal_limit.startswith('shard'):
-                offset, step = Inventory.parse_shard_params(obj.internal_limit)
-                summary_fields['internal_limit']['shard'] = {'offset': offset, 'step': step}
+            if obj.internal_limit.startswith('split'):
+                offset, step = Inventory.parse_split_params(obj.internal_limit)
+                summary_fields['internal_limit']['split'] = {'offset': offset, 'step': step}
             else:
                 summary_fields['internal_limit']['unknown'] = self.internal_limit
         all_creds = []
@@ -26,7 +26,7 @@ string of `?all=1` to return all hosts, including disabled ones.
 Specify a query string of `?towervars=1` to add variables
 to the hostvars of each host that specifies its enabled state and database ID.

-Specify a query string of `?subset=shard2of5` to produce an inventory that
+Specify a query string of `?subset=split2of5` to produce an inventory that
 has a restricted number of hosts according to the rules of job splitting.

 To apply multiple query strings, join them with the `&` character, like `?hostvars=1&all=1`.
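For illustration only (not part of the commit), the documented query strings can be combined in a single request to the inventory script endpoint; the `/api/v2/inventories/<id>/script/` path, host, and credentials below are assumptions, not taken from this diff:

```python
# Hypothetical request combining the query strings described above.
import requests

url = "https://awx.example.com/api/v2/inventories/42/script/"   # assumed endpoint
params = {"hostvars": 1, "towervars": 1, "subset": "split2of5"}
resp = requests.get(url, params=params, auth=("admin", "password"))
resp.raise_for_status()
print(resp.json()["all"]["hosts"])  # only the hosts that fall into split 2 of 5
```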
@@ -8,7 +8,7 @@ from awx.api.views import (
     JobTemplateDetail,
     JobTemplateLaunch,
     JobTemplateJobsList,
-    JobTemplateShardedJobsList,
+    JobTemplateSplitJobsList,
     JobTemplateCallback,
     JobTemplateSchedulesList,
     JobTemplateSurveySpec,
@@ -29,7 +29,7 @@ urls = [
     url(r'^(?P<pk>[0-9]+)/$', JobTemplateDetail.as_view(), name='job_template_detail'),
     url(r'^(?P<pk>[0-9]+)/launch/$', JobTemplateLaunch.as_view(), name='job_template_launch'),
     url(r'^(?P<pk>[0-9]+)/jobs/$', JobTemplateJobsList.as_view(), name='job_template_jobs_list'),
-    url(r'^(?P<pk>[0-9]+)/sharded_jobs/$', JobTemplateShardedJobsList.as_view(), name='job_template_sharded_jobs_list'),
+    url(r'^(?P<pk>[0-9]+)/split_jobs/$', JobTemplateSplitJobsList.as_view(), name='job_template_split_jobs_list'),
     url(r'^(?P<pk>[0-9]+)/callback/$', JobTemplateCallback.as_view(), name='job_template_callback'),
     url(r'^(?P<pk>[0-9]+)/schedules/$', JobTemplateSchedulesList.as_view(), name='job_template_schedules_list'),
     url(r'^(?P<pk>[0-9]+)/survey_spec/$', JobTemplateSurveySpec.as_view(), name='job_template_survey_spec'),
@@ -3369,7 +3369,7 @@ class JobTemplateCallback(GenericAPIView):
         if extra_vars is not None and job_template.ask_variables_on_launch:
             extra_vars_redacted, removed = extract_ansible_vars(extra_vars)
             kv['extra_vars'] = extra_vars_redacted
-        kv['_prevent_sharding'] = True  # will only run against 1 host, so no point
+        kv['_prevent_splitting'] = True  # will only run against 1 host, so no point
         with transaction.atomic():
             job = job_template.create_job(**kv)

@@ -3401,12 +3401,12 @@ class JobTemplateJobsList(SubListCreateAPIView):
         return methods


-class JobTemplateShardedJobsList(SubListCreateAPIView):
+class JobTemplateSplitJobsList(SubListCreateAPIView):

     model = WorkflowJob
     serializer_class = WorkflowJobListSerializer
     parent_model = JobTemplate
-    relationship = 'sharded_jobs'
+    relationship = 'split_jobs'
     parent_key = 'job_template'

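Together with the URL entry above, this view exposes the workflow jobs spawned from a split Job Template as a related list. A minimal sketch of reading it, assuming the standard `/api/v2/job_templates/<id>/split_jobs/` path with a placeholder host, template id, and credentials:

```python
# Hypothetical read of the new split_jobs related list (all values assumed).
import requests

resp = requests.get(
    "https://awx.example.com/api/v2/job_templates/7/split_jobs/",
    auth=("admin", "password"),
)
resp.raise_for_status()
for wf_job in resp.json()["results"]:  # standard paginated list response
    print(wf_job["id"], wf_job["status"])
```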
@@ -16,13 +16,13 @@ class Migration(migrations.Migration):
     operations = [
         migrations.AddField(
             model_name='jobtemplate',
-            name='job_shard_count',
+            name='job_split_count',
             field=models.IntegerField(blank=True, default=0, help_text='The number of jobs to split into at runtime. Will cause the Job Template to launch a workflow if value is non-zero.'),
         ),
         migrations.AddField(
             model_name='workflowjob',
             name='job_template',
-            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sharded_jobs', to='main.JobTemplate'),
+            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='split_jobs', to='main.JobTemplate'),
         ),
         migrations.AlterField(
             model_name='unifiedjob',
@@ -221,14 +221,14 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         return group_children_map

     @staticmethod
-    def parse_shard_params(shard_str):
-        m = re.match(r"shard(?P<offset>\d+)of(?P<step>\d+)", shard_str)
+    def parse_split_params(split_str):
+        m = re.match(r"split(?P<offset>\d+)of(?P<step>\d+)", split_str)
         if not m:
-            raise ParseError(_('Could not parse subset as shard specification.'))
+            raise ParseError(_('Could not parse subset as split specification.'))
         offset = int(m.group('offset'))
         step = int(m.group('step'))
         if offset > step:
-            raise ParseError(_('Shard offset must be greater than total number of shards.'))
+            raise ParseError(_('Split offset must be greater than total number of splits.'))
         return (offset, step)

     def get_script_data(self, hostvars=False, towervars=False, show_all=False, subset=None):
@@ -242,8 +242,8 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         if subset:
             if not isinstance(subset, six.string_types):
                 raise ParseError(_('Inventory subset argument must be a string.'))
-            if subset.startswith('shard'):
-                offset, step = Inventory.parse_shard_params(subset)
+            if subset.startswith('split'):
+                offset, step = Inventory.parse_split_params(subset)
                 hosts = hosts[offset::step]
             else:
                 raise ParseError(_('Subset does not use any supported syntax.'))
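As a point of reference, `split<offset>of<step>` resolves to a simple stride over the ordered host list. A standalone sketch mirroring `parse_split_params` and the slicing in `get_script_data` (host names are made up; this is not the AWX code itself):

```python
# Standalone illustration of the subset parsing and slicing shown above.
import re

def parse_split_params(split_str):
    m = re.match(r"split(?P<offset>\d+)of(?P<step>\d+)", split_str)
    if not m:
        raise ValueError('Could not parse subset as split specification.')
    return int(m.group('offset')), int(m.group('step'))

hosts = ['host0', 'host1', 'host2', 'host3', 'host4']
offset, step = parse_split_params('split1of3')
print(hosts[offset::step])  # ['host1', 'host4']: every 3rd host, starting at index 1
```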
@@ -277,7 +277,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
         default=False,
         allows_field='credentials'
     )
-    job_shard_count = models.IntegerField(
+    job_split_count = models.IntegerField(
         blank=True,
         default=0,
         help_text=_("The number of jobs to split into at runtime. "
@@ -328,10 +328,10 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
         return self.create_unified_job(**kwargs)

     def create_unified_job(self, **kwargs):
-        prevent_sharding = kwargs.pop('_prevent_sharding', False)
-        split_event = bool(self.job_shard_count > 1 and (not prevent_sharding))
+        prevent_splitting = kwargs.pop('_prevent_splitting', False)
+        split_event = bool(self.job_split_count > 1 and (not prevent_splitting))
         if split_event:
-            # A sharded Job Template will generate a WorkflowJob rather than a Job
+            # A Split Job Template will generate a WorkflowJob rather than a Job
             from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobNode
             kwargs['_unified_job_class'] = WorkflowJobTemplate._get_unified_job_class()
             kwargs['_parent_field_name'] = "job_template"
@@ -342,11 +342,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
             except JobLaunchConfig.DoesNotExist:
                 wj_config = JobLaunchConfig()
             actual_inventory = wj_config.inventory if wj_config.inventory else self.inventory
-            for idx in xrange(min(self.job_shard_count,
+            for idx in xrange(min(self.job_split_count,
                                   actual_inventory.hosts.count())):
                 create_kwargs = dict(workflow_job=job,
                                      unified_job_template=self,
-                                     ancestor_artifacts=dict(job_shard=idx))
+                                     ancestor_artifacts=dict(job_split=idx))
                 WorkflowJobNode.objects.create(**create_kwargs)
         return job

@@ -580,7 +580,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
         return JobEvent

     def copy_unified_job(self, **new_prompts):
-        new_prompts['_prevent_sharding'] = True
+        new_prompts['_prevent_splitting'] = True
         if self.internal_limit:
             new_prompts.setdefault('_eager_fields', {})
             new_prompts['_eager_fields']['internal_limit'] = self.internal_limit  # oddball, not from JT or prompts
@@ -334,7 +334,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
         parent_field_name = None
         if "_unified_job_class" in kwargs:
             # Special case where spawned job is different type than usual
-            # Only used for sharded jobs
+            # Only used for split jobs
             unified_job_class = kwargs.pop("_unified_job_class")
             fields = unified_job_class._get_unified_job_field_names() & fields
             parent_field_name = kwargs.pop('_parent_field_name')
@@ -354,7 +354,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
         for fd, val in eager_fields.items():
             setattr(unified_job, fd, val)

-        # NOTE: sharded workflow jobs _get_parent_field_name method
+        # NOTE: split workflow jobs _get_parent_field_name method
         # is not correct until this is set
         if not parent_field_name:
             parent_field_name = unified_job._get_parent_field_name()
@@ -251,19 +251,19 @@ class WorkflowJobNode(WorkflowNodeBase):
             data['extra_vars'] = extra_vars
         # ensure that unified jobs created by WorkflowJobs are marked
         data['_eager_fields'] = {'launch_type': 'workflow'}
-        # Extra processing in the case that this is a sharded job
-        if 'job_shard' in self.ancestor_artifacts:
-            shard_str = six.text_type(self.ancestor_artifacts['job_shard'] + 1)
+        # Extra processing in the case that this is a split job
+        if 'job_split' in self.ancestor_artifacts:
+            split_str = six.text_type(self.ancestor_artifacts['job_split'] + 1)
             data['_eager_fields']['name'] = six.text_type("{} - {}").format(
-                self.unified_job_template.name[:512 - len(shard_str) - len(' - ')],
-                shard_str
+                self.unified_job_template.name[:512 - len(split_str) - len(' - ')],
+                split_str
             )
             data['_eager_fields']['allow_simultaneous'] = True
-            data['_eager_fields']['internal_limit'] = 'shard{0}of{1}'.format(
-                self.ancestor_artifacts['job_shard'],
+            data['_eager_fields']['internal_limit'] = 'split{0}of{1}'.format(
+                self.ancestor_artifacts['job_split'],
                 self.workflow_job.workflow_job_nodes.count()
             )
-            data['_prevent_sharding'] = True
+            data['_prevent_splitting'] = True
         return data

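To make the renamed fields concrete, here is a standalone sketch of the name and `internal_limit` the block above would produce for the second node (idx 1) of a template split three ways; the template name is an assumption for the example:

```python
# Illustration of the node name and internal_limit built above (values assumed).
idx, node_count, template_name = 1, 3, 'Deploy app'
split_str = str(idx + 1)                                  # 1-based suffix used in the job name
name = '{} - {}'.format(template_name[:512 - len(split_str) - len(' - ')], split_str)
internal_limit = 'split{0}of{1}'.format(idx, node_count)  # 0-based offset consumed by the subset syntax
print(name, internal_limit)                               # Deploy app - 2 split1of3
```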
@@ -459,7 +459,7 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
     )
     job_template = models.ForeignKey(
         'JobTemplate',
-        related_name='sharded_jobs',
+        related_name='split_jobs',
         blank=True,
         null=True,
         default=None,
@@ -472,7 +472,7 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio

     def _get_parent_field_name(self):
         if self.job_template_id:
-            # This is a workflow job which is a container for sharded jobs
+            # This is a workflow job which is a container for split jobs
             return 'job_template'
         return 'workflow_job_template'

@@ -123,11 +123,11 @@ def test_job_relaunch_on_failed_hosts(post, inventory, project, machine_credenti


 @pytest.mark.django_db
-def test_shard_jt_recent_jobs(shard_job_factory, admin_user, get):
-    workflow_job = shard_job_factory(3, spawn=True)
-    shard_jt = workflow_job.job_template
+def test_split_jt_recent_jobs(split_job_factory, admin_user, get):
+    workflow_job = split_job_factory(3, spawn=True)
+    split_jt = workflow_job.job_template
     r = get(
-        url=shard_jt.get_absolute_url(),
+        url=split_jt.get_absolute_url(),
         user=admin_user,
         expect=200
     )
@@ -779,15 +779,15 @@ def disable_database_settings(mocker):


 @pytest.fixture
-def shard_jt_factory(inventory):
+def split_jt_factory(inventory):
     def r(N, jt_kwargs=None):
         for i in range(N):
             inventory.hosts.create(name='foo{}'.format(i))
         if not jt_kwargs:
             jt_kwargs = {}
         return JobTemplate.objects.create(
-            name='shard-jt-from-factory',
-            job_shard_count=N,
+            name='split-jt-from-factory',
+            job_split_count=N,
             inventory=inventory,
             **jt_kwargs
         )
@@ -795,18 +795,18 @@ def shard_jt_factory(inventory):


 @pytest.fixture
-def shard_job_factory(shard_jt_factory):
+def split_job_factory(split_jt_factory):
     def r(N, jt_kwargs=None, prompts=None, spawn=False):
-        shard_jt = shard_jt_factory(N, jt_kwargs=jt_kwargs)
+        split_jt = split_jt_factory(N, jt_kwargs=jt_kwargs)
         if not prompts:
             prompts = {}
-        shard_job = shard_jt.create_unified_job(**prompts)
+        split_job = split_jt.create_unified_job(**prompts)
         if spawn:
-            for node in shard_job.workflow_nodes.all():
+            for node in split_job.workflow_nodes.all():
                 # does what the task manager does for spawning workflow jobs
                 kv = node.get_job_kwargs()
                 job = node.unified_job_template.create_unified_job(**kv)
                 node.job = job
                 node.save()
-        return shard_job
+        return split_job
     return r
@@ -38,11 +38,11 @@ class TestInventoryScript:
                 'remote_tower_id': host.id
             }

-    def test_shard_subset(self, inventory):
+    def test_split_subset(self, inventory):
         for i in range(3):
             inventory.hosts.create(name='host{}'.format(i))
         for i in range(3):
-            assert inventory.get_script_data(subset='shard{}of3'.format(i)) == {
+            assert inventory.get_script_data(subset='split{}of3'.format(i)) == {
                 'all': {'hosts': ['host{}'.format(i)]}
             }

@@ -84,18 +84,18 @@ def test_job_host_summary_representation(host):


 @pytest.mark.django_db
-class TestShardingModels:
+class TestSplittingModels:

-    def test_shard_workflow_spawn(self, shard_jt_factory):
-        shard_jt = shard_jt_factory(3)
-        job = shard_jt.create_unified_job()
+    def test_split_workflow_spawn(self, split_jt_factory):
+        split_jt = split_jt_factory(3)
+        job = split_jt.create_unified_job()
         assert isinstance(job, WorkflowJob)
-        assert job.job_template == shard_jt
-        assert job.unified_job_template == shard_jt
+        assert job.job_template == split_jt
+        assert job.unified_job_template == split_jt
         assert job.workflow_nodes.count() == 3

-    def test_shards_with_JT_and_prompts(self, shard_job_factory):
-        job = shard_job_factory(3, jt_kwargs={'ask_limit_on_launch': True}, prompts={'limit': 'foobar'}, spawn=True)
+    def test_splits_with_JT_and_prompts(self, split_job_factory):
+        job = split_job_factory(3, jt_kwargs={'ask_limit_on_launch': True}, prompts={'limit': 'foobar'}, spawn=True)
         assert job.launch_config.prompts_dict() == {'limit': 'foobar'}
         for node in job.workflow_nodes.all():
             assert node.limit is None  # data not saved in node prompts
@@ -49,18 +49,18 @@ def test_inventory_use_access(inventory, user):


 @pytest.mark.django_db
-def test_sharded_job(shard_job_factory, rando):
-    workflow_job = shard_job_factory(2, jt_kwargs={'created_by': rando}, spawn=True)
+def test_split_job(split_job_factory, rando):
+    workflow_job = split_job_factory(2, jt_kwargs={'created_by': rando}, spawn=True)
     workflow_job.job_template.execute_role.members.add(rando)

-    # Abilities of user with execute_role for shard workflow job container
+    # Abilities of user with execute_role for split workflow job container
     assert WorkflowJobAccess(rando).can_start(workflow_job)  # relaunch allowed
     for access_cls in (UnifiedJobAccess, WorkflowJobAccess):
         access = access_cls(rando)
         assert access.can_read(workflow_job)
         assert workflow_job in access.get_queryset()

-    # Abilities of user with execute_role for all the shards of the job
+    # Abilities of user with execute_role for all the split of the job
     for node in workflow_job.workflow_nodes.all():
         access = WorkflowJobNodeAccess(rando)
         assert access.can_read(node)
@@ -83,7 +83,7 @@ function ListJobsController (
         return null;
     }

-    const splitJobDetails = internalLimitDetails.shard;
+    const splitJobDetails = internalLimitDetails.split;

     if (!splitJobDetails) {
         return null;
@@ -133,7 +133,7 @@ function getSplitJobDetails () {
         return null;
     }

-    const splitJobDetails = resource.model.get('summary_fields.internal_limit.shard');
+    const splitJobDetails = resource.model.get('summary_fields.internal_limit.split');

     if (!splitJobDetails) {
         return null;
@@ -257,7 +257,7 @@ function(NotificationsList, i18n) {
             dataPlacement: 'right',
             control: '<instance-groups-multiselect instance-groups="instance_groups" field-is-disabled="!(job_template_obj.summary_fields.user_capabilities.edit || canAddJobTemplate)"></instance-groups-multiselect>',
         },
-        job_shard_count: {
+        job_split_count: {
             label: i18n._('Job Splitting'),
             type: 'number',
             integer: true,
@@ -39,7 +39,7 @@ export default ['workflowData', 'workflowResultsService', 'workflowDataOptions',
         DELETE: i18n._('Delete'),
         EDIT_USER: i18n._('Edit the user'),
         EDIT_WORKFLOW: i18n._('Edit the workflow job template'),
-        EDIT_SHARD_TEMPLATE: i18n._('Edit the shard job template'),
+        EDIT_SPLIT_TEMPLATE: i18n._('Edit the split job template'),
         EDIT_SCHEDULE: i18n._('Edit the schedule'),
         TOGGLE_STDOUT_FULLSCREEN: i18n._('Expand Output'),
         STATUS: '' // re-assigned elsewhere
@@ -50,7 +50,7 @@ export default ['workflowData', 'workflowResultsService', 'workflowDataOptions',
         STARTED: i18n._('Started'),
         FINISHED: i18n._('Finished'),
         LABELS: i18n._('Labels'),
-        SHARD_TEMPLATE: i18n._('Shard Template'),
+        SPLIT_TEMPLATE: i18n._('Split Template'),
         STATUS: i18n._('Status')
     },
     details: {
@@ -113,7 +113,7 @@ export default ['workflowData', 'workflowResultsService', 'workflowDataOptions',

     if(workflowData.summary_fields && workflowData.summary_fields.job_template &&
         workflowData.summary_fields.job_template.id){
-        $scope.shard_job_template_link = `/#/templates/job_template/${$scope.workflow.summary_fields.job_template.id}`;
+        $scope.split_job_template_link = `/#/templates/job_template/${$scope.workflow.summary_fields.job_template.id}`;
     }

     // turn related api browser routes into front end routes
@@ -149,11 +149,11 @@
         ng-show="workflow.summary_fields.job_template.name">
         <label
             class="WorkflowResults-resultRowLabel">
-            {{ strings.labels.SHARD_TEMPLATE }}
+            {{ strings.labels.SPLIT_TEMPLATE }}
         </label>
         <div class="WorkflowResults-resultRowText">
-            <a href="{{ shard_job_template_link }}"
-                aw-tool-tip="{{ strings.tooltips.EDIT_SHARD_TEMPLATE }}"
+            <a href="{{ split_job_template_link }}"
+                aw-tool-tip="{{ strings.tooltips.EDIT_SPLIT_TEMPLATE }}"
                 data-placement="top">
                 {{ workflow.summary_fields.job_template.name }}
             </a>
@@ -1,13 +0,0 @@
-# Job Sharding Overview
-
-Ansible, by default, runs jobs from a single control instance. At best a single Ansible job can be split up on a single system via forks but this doesn't fully take advantage of AWX's ability to distribute work to multiple nodes in a cluster.
-
-Job Sharding solves this by adding a Job Template field `job_shard_count`. This field specifies the number of **Jobs** to split the Ansible run into. When this number is greater than 1 ``AWX`` will generate a **Workflow** from a **JobTemplate** instead of a **Job**. The **Inventory** will be split evenly amongst the sharded jobs. The workflow job is then started and proceeds as though it were a normal workflow. The API will return either a **Job** resource (if `job_shard_count` < 2) or a **WorkflowJob** resource otherwise. Likewise, the UI will redirect to the appropriate screen to display the status of the run.
-
-## Implications for Job execution
-
-When jobs are split they can run on any Tower node and some may not run at the same time. Because of this, anything that relies on setting/sharing state (using modules such as ``set_fact``) will not work as expected. It's reasonable to expect that not all jobs will actually run at the same time (if there is not enough capacity in the system for example)
-
-## Simultaneous Execution Behavior
-
-By default Job Templates aren't normally configured to execute simultaneously (``allow_simultaneous`` must be checked). Sharding overrides this behavior and implies ``allow_simultaneous`` even if that setting is unchecked.
docs/job_splitting.md (new file, 13 lines)
@@ -0,0 +1,13 @@
+# Job Splitting Overview
+
+Ansible, by default, runs jobs from a single control instance. At best a single Ansible job can be split up on a single system via forks but this doesn't fully take advantage of AWX's ability to distribute work to multiple nodes in a cluster.
+
+Job Splitting solves this by adding a Job Template field `job_split_count`. This field specifies the number of **Jobs** to split the Ansible run into. When this number is greater than 1 ``AWX`` will generate a **Workflow** from a **JobTemplate** instead of a **Job**. The **Inventory** will be split evenly amongst the split jobs. The workflow job is then started and proceeds as though it were a normal workflow. The API will return either a **Job** resource (if `job_split_count` < 2) or a **WorkflowJob** resource otherwise. Likewise, the UI will redirect to the appropriate screen to display the status of the run.
+
+## Implications for Job execution
+
+When jobs are split they can run on any Tower node and some may not run at the same time. Because of this, anything that relies on setting/split state (using modules such as ``set_fact``) will not work as expected. It's reasonable to expect that not all jobs will actually run at the same time (if there is not enough capacity in the system for example)
+
+## Simultaneous Execution Behavior
+
+By default Job Templates aren't normally configured to execute simultaneously (``allow_simultaneous`` must be checked). Splitting overrides this behavior and implies ``allow_simultaneous`` even if that setting is unchecked.
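To make the "split evenly amongst the split jobs" statement in the new document concrete, here is a standalone sketch (hypothetical host names and counts) of the round-robin distribution implied by the `split<offset>of<step>` subset handling elsewhere in this commit:

```python
# Distribution of 7 hypothetical hosts across job_split_count=3 split jobs.
hosts = ['web1', 'web2', 'web3', 'db1', 'db2', 'lb1', 'lb2']
job_split_count = 3
for idx in range(min(job_split_count, len(hosts))):  # one workflow node per split
    print('split{}of{}'.format(idx, job_split_count), hosts[idx::job_split_count])
# split0of3 ['web1', 'db1', 'lb2']
# split1of3 ['web2', 'db2']
# split2of3 ['web3', 'lb1']
```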