Fix all instances of can not
parent 75546b0e98
commit 20840b9867
@@ -1910,7 +1910,7 @@ class JobTemplateSerializer(UnifiedJobTemplateSerializer, JobOptionsSerializer):
             raise serializers.ValidationError({'project': _("Job types 'run' and 'check' must have assigned a project.")})

         if survey_enabled and job_type == PERM_INVENTORY_SCAN:
-            raise serializers.ValidationError({'survey_enabled': _('Survey Enabled can not be used with scan jobs.')})
+            raise serializers.ValidationError({'survey_enabled': _('Survey Enabled cannot be used with scan jobs.')})

         return super(JobTemplateSerializer, self).validate(attrs)
@@ -1095,7 +1095,7 @@ class ProjectUpdateDetail(RetrieveDestroyAPIView):
     def destroy(self, request, *args, **kwargs):
         obj = self.get_object()
         if obj.unified_job_nodes.filter(workflow_job__status__in=ACTIVE_STATES).exists():
-            raise PermissionDenied(detail=_('Can not delete job resource when associated workflow job is running.'))
+            raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
         return super(ProjectUpdateDetail, self).destroy(request, *args, **kwargs)

 class ProjectUpdateCancel(RetrieveAPIView):
@@ -2179,7 +2179,7 @@ class InventoryUpdateDetail(RetrieveDestroyAPIView):
     def destroy(self, request, *args, **kwargs):
         obj = self.get_object()
         if obj.unified_job_nodes.filter(workflow_job__status__in=ACTIVE_STATES).exists():
-            raise PermissionDenied(detail=_('Can not delete job resource when associated workflow job is running.'))
+            raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
         return super(InventoryUpdateDetail, self).destroy(request, *args, **kwargs)

 class InventoryUpdateCancel(RetrieveAPIView):
@@ -2991,7 +2991,7 @@ class JobDetail(RetrieveUpdateDestroyAPIView):
     def destroy(self, request, *args, **kwargs):
         obj = self.get_object()
         if obj.unified_job_nodes.filter(workflow_job__status__in=ACTIVE_STATES).exists():
-            raise PermissionDenied(detail=_('Can not delete job resource when associated workflow job is running.'))
+            raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
         return super(JobDetail, self).destroy(request, *args, **kwargs)

 class JobLabelList(SubListAPIView):
@@ -354,17 +354,17 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
     def _extra_job_type_errors(self, data):
         """
         Used to enforce 2 special cases around scan jobs and prompting
-        - the inventory can not be changed on a scan job template
-        - scan jobs can not be switched to run/check type and vice versa
+        - the inventory cannot be changed on a scan job template
+        - scan jobs cannot be switched to run/check type and vice versa
         """
         errors = {}
         if 'job_type' in data and self.ask_job_type_on_launch:
             if ((self.job_type == PERM_INVENTORY_SCAN and not data['job_type'] == PERM_INVENTORY_SCAN) or
                     (data['job_type'] == PERM_INVENTORY_SCAN and not self.job_type == PERM_INVENTORY_SCAN)):
-                errors['job_type'] = _('Can not override job_type to or from a scan job.')
+                errors['job_type'] = _('Cannot override job_type to or from a scan job.')
         if (self.job_type == PERM_INVENTORY_SCAN and ('inventory' in data) and self.ask_inventory_on_launch and
                 self.inventory != data['inventory']):
-            errors['inventory'] = _('Inventory can not be changed at runtime for scan jobs.')
+            errors['inventory'] = _('Inventory cannot be changed at runtime for scan jobs.')
         return errors

     @property
@@ -117,7 +117,7 @@ class WorkflowNodeBase(CreatedModifiedModel):
         prompts_dict = self.prompts_dict()
         if not hasattr(ujt_obj, '_ask_for_vars_dict'):
             if prompts_dict:
-                return {'ignored': {'all': 'Can not use prompts on unified_job_template that is not type of job template'}}
+                return {'ignored': {'all': 'Cannot use prompts on unified_job_template that is not type of job template'}}
             else:
                 return {}
@@ -61,7 +61,7 @@ def apply_roles(roles, objects, persisted):
         return None

     if not persisted:
-        raise RuntimeError('roles can not be used when persisted=False')
+        raise RuntimeError('roles cannot be used when persisted=False')

     for role in roles:
         obj_role, sep, member_role = role.partition(':')
@@ -352,7 +352,7 @@ def generate_workflow_job_template_nodes(workflow_job_template,

     workflow_job_template_nodes = kwargs.get('workflow_job_template_nodes', [])
     if len(workflow_job_template_nodes) > 0 and not persisted:
-        raise RuntimeError('workflow job template nodes can not be used when persisted=False')
+        raise RuntimeError('workflow job template nodes cannot be used when persisted=False')

     new_nodes = []
@@ -89,7 +89,7 @@ def test_rbac_stream_user_roles(activity_stream_entry, organization, org_admin,
 def test_stream_access_cant_change(activity_stream_entry, organization, org_admin, settings):
     settings.ACTIVITY_STREAM_ENABLED = True
     access = ActivityStreamAccess(org_admin)
-    # These should always return false because the activity stream can not be edited
+    # These should always return false because the activity stream cannot be edited
     assert not access.can_add(activity_stream_entry)
     assert not access.can_change(activity_stream_entry, {'organization': None})
     assert not access.can_delete(activity_stream_entry)
@@ -263,11 +263,11 @@ def test_job_relaunch_resource_access(job_with_links, user):
     job_with_links.inventory.use_role.members.add(both_user)
     assert both_user.can_access(Job, 'start', job_with_links)

-    # Confirm that a user with credential access alone can not launch
+    # Confirm that a user with credential access alone cannot launch
     job_with_links.credential.use_role.members.add(credential_user)
     assert not credential_user.can_access(Job, 'start', job_with_links)

-    # Confirm that a user with inventory access alone can not launch
+    # Confirm that a user with inventory access alone cannot launch
     job_with_links.inventory.use_role.members.add(inventory_user)
     assert not inventory_user.can_access(Job, 'start', job_with_links)
@@ -124,7 +124,7 @@ class TestJobTemplateCopyEdit:
         SHOULD be able to edit that job template, for nonsensitive changes
         """

-        # Attach credential to JT that org admin can not use
+        # Attach credential to JT that org admin cannot use
        jt_copy_edit.credential = machine_credential
        jt_copy_edit.save()
@@ -222,7 +222,7 @@ class TestAccessListCapabilities:
         assert direct_access_list[0]['role']['user_capabilities']['unattach'] == 'foobar'

     def test_user_access_list_direct_access_capability(self, rando, get):
-        "When a user views their own access list, they can not unattach their admin role"
+        "When a user views their own access list, they cannot unattach their admin role"
         response = get(reverse('api:user_access_list', args=(rando.id,)), rando)
         direct_access_list = response.data['results'][0]['summary_fields']['direct_access']
         assert not direct_access_list[0]['role']['user_capabilities']['unattach']
@@ -267,7 +267,7 @@ def test_user_roles_unattach_functional(organization, alice, bob, get):
     organization.member_role.members.add(alice)
     organization.member_role.members.add(bob)
     response = get(reverse('api:user_roles_list', args=(alice.id,)), bob)
-    # Org members can not revoke the membership of other members
+    # Org members cannot revoke the membership of other members
     assert not response.data['results'][0]['summary_fields']['user_capabilities']['unattach']
@@ -157,7 +157,7 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase):
             self.post(launch_url, {'credential_id': self.cred_sue.pk}, expect=403)

     def test_no_project_fail(self):
-        # Job Templates without projects can not be launched
+        # Job Templates without projects cannot be launched
         with self.current_user(self.user_sue):
             self.data['name'] = "missing proj"
             response = self.post(self.url, self.data, expect=201)
@@ -169,7 +169,7 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase):
             self.post(launch_url2, {}, expect=400)

     def test_no_inventory_fail(self):
-        # Job Templates without inventory can not be launched
+        # Job Templates without inventory cannot be launched
         with self.current_user(self.user_sue):
             self.data['name'] = "missing inv"
             response = self.post(self.url, self.data, expect=201)
@@ -485,7 +485,7 @@ class JobTemplateTest(BaseJobTestMixin, django.test.TransactionTestCase):
             data['credential'] = self.cred_sue.pk
             response = self.post(url, data, expect=402)
         self.create_test_license_file(features=dict(system_tracking=True))
-        # Scan Jobs can not be created with survey enabled
+        # Scan Jobs cannot be created with survey enabled
         with self.current_user(self.user_sue):
             data['credential'] = self.cred_sue.pk
             data['survey_enabled'] = True
@@ -161,7 +161,7 @@ When verifying acceptance we should ensure the following statements are true
   Job failures during the time period should be predictable and not catastrophic.
 * Node downtime testing should also include recoverability testing. Killing single services and ensuring the system can
   return itself to a working state
-* Persistent failure should be tested by killing single services in such a way that the cluster node can not be recovered
+* Persistent failure should be tested by killing single services in such a way that the cluster node cannot be recovered
   and ensuring that the node is properly taken offline
 * Network partitioning failures will be important also. In order to test this
   - Disallow a single node from communicating with the other nodes but allow it to communicate with the database
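The network-partition scenario in the documentation hunk above is the least obvious one to reproduce, so here is a minimal sketch of how it could be simulated from the node under test. The addresses and helper names are illustrative assumptions, not values taken from the repository, and the commands need root privileges.

# Minimal sketch of the partition described above: block traffic to the other
# cluster nodes while keeping the database reachable. PEER_NODES and DATABASE
# are placeholder addresses, not values from the AWX docs or code base.
import subprocess

PEER_NODES = ["10.0.0.11", "10.0.0.12"]  # the other cluster nodes (placeholders)
DATABASE = "10.0.0.20"                   # the shared database host (placeholder)

def partition_node():
    """Drop outbound traffic to the peer nodes; the database stays reachable."""
    # Explicit ACCEPT for the database first, so it would stay reachable even if
    # a broader DROP rule were used instead of the per-peer rules below.
    subprocess.check_call(["iptables", "-A", "OUTPUT", "-d", DATABASE, "-j", "ACCEPT"])
    for peer in PEER_NODES:
        subprocess.check_call(["iptables", "-A", "OUTPUT", "-d", peer, "-j", "DROP"])

def heal_partition():
    """Remove the rules added by partition_node()."""
    subprocess.check_call(["iptables", "-D", "OUTPUT", "-d", DATABASE, "-j", "ACCEPT"])
    for peer in PEER_NODES:
        subprocess.check_call(["iptables", "-D", "OUTPUT", "-d", peer, "-j", "DROP"])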
@@ -6,7 +6,7 @@ Independent jobs are ran in order of creation time, earliest first. Jobs with de

 ## Task Manager Architecture

-The task manager has a single entry point, `Scheduler().schedule()`. The method may be called in parallel, at any time, as many times as the user wants. The `schedule()` function tries to aquire a single, global, lock using the Instance table first record in the database. If the lock can not be aquired the method returns. The failure to aquire the lock indicates that there is another instance currently running `schedule()`.
+The task manager has a single entry point, `Scheduler().schedule()`. The method may be called in parallel, at any time, as many times as the user wants. The `schedule()` function tries to aquire a single, global, lock using the Instance table first record in the database. If the lock cannot be aquired the method returns. The failure to aquire the lock indicates that there is another instance currently running `schedule()`.

 ### Hybrid Scheduler: Periodic + Event
 The `schedule()` function is ran (a) periodically by a celery task and (b) on job creation or completion. The task manager system would behave correctly if ran, exclusively, via (a) or (b). We chose to trigger `schedule()` via both mechanisms because of the nice properties I will now mention. (b) reduces the time from launch to running, resulting a better user experience. (a) is a fail-safe in case we miss code-paths, in the present and future, that change the 3 scheduling considerations for which we should call `schedule()` (i.e. adding new nodes to tower changes the capacity, obscure job error handling that fails a job)
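The locking pattern described in the documentation hunk above ("lock the first Instance row or return immediately") can be illustrated with a short sketch. This is not the actual AWX implementation: the `awx.main.models` import path and the `do_scheduling()` stub are assumptions for illustration only.

# Minimal sketch of a single global lock taken on the first Instance record,
# as the text describes. Assumes a configured Django project; the import path
# and do_scheduling() are placeholders, not the real AWX code.
from django.db import DatabaseError, transaction

from awx.main.models import Instance  # assumed import path

def do_scheduling():
    """Placeholder for the real scheduling pass."""
    pass

def schedule():
    try:
        with transaction.atomic():
            # nowait=True makes the row lock fail immediately instead of
            # blocking when another schedule() call already holds it.
            Instance.objects.select_for_update(nowait=True).order_by('id').first()
            do_scheduling()
    except DatabaseError:
        # Another process is already running schedule(); simply return.
        return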