Merge pull request #740 from AlanCoding/configs_rebased5

Feature: saved launchtime configurations
This commit is contained in:
Alan Rominger 2017-12-08 16:55:00 -05:00 committed by GitHub
commit 2135291f35
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
50 changed files with 2413 additions and 904 deletions

View File

@ -25,7 +25,7 @@ from django.utils.timezone import now
from django.utils.functional import cached_property
# Django REST Framework
from rest_framework.exceptions import ValidationError, PermissionDenied, ParseError
from rest_framework.exceptions import ValidationError, PermissionDenied
from rest_framework import fields
from rest_framework import serializers
from rest_framework import validators
@ -38,6 +38,7 @@ from polymorphic.models import PolymorphicModel
from awx.main.constants import SCHEDULEABLE_PROVIDERS, ANSI_SGR_PATTERN
from awx.main.models import * # noqa
from awx.main.models.unified_jobs import ACTIVE_STATES
from awx.main.models.base import NEW_JOB_TYPE_CHOICES
from awx.main.access import get_user_capabilities
from awx.main.fields import ImplicitRoleField
from awx.main.utils import (
@ -445,10 +446,6 @@ class BaseSerializer(serializers.ModelSerializer):
else:
field_class = CharNullField
# Update verbosity choices from settings (for job templates, jobs, ad hoc commands).
if field_name == 'verbosity' and 'choices' in field_kwargs:
field_kwargs['choices'] = getattr(settings, 'VERBOSITY_CHOICES', field_kwargs['choices'])
# Update the message used for the unique validator to use capitalized
# verbose name; keeps unique message the same as with DRF 2.x.
opts = self.Meta.model._meta.concrete_model._meta
@ -486,7 +483,7 @@ class BaseSerializer(serializers.ModelSerializer):
# from model validation.
cls = self.Meta.model
opts = cls._meta.concrete_model._meta
exclusions = [field.name for field in opts.fields + opts.many_to_many]
exclusions = [field.name for field in opts.fields]
for field_name, field in self.fields.items():
field_name = field.source or field_name
if field_name not in exclusions:
@ -496,6 +493,8 @@ class BaseSerializer(serializers.ModelSerializer):
if isinstance(field, serializers.Serializer):
continue
exclusions.remove(field_name)
# The clean_ methods cannot be ran on many-to-many models
exclusions.extend([field.name for field in opts.many_to_many])
return exclusions
def validate(self, attrs):
@ -2617,6 +2616,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
if obj.project_update:
res['project_update'] = self.reverse('api:project_update_detail', kwargs={'pk': obj.project_update.pk})
res['create_schedule'] = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
return res
@ -2766,6 +2766,42 @@ class JobRelaunchSerializer(BaseSerializer):
return attrs
class JobCreateScheduleSerializer(BaseSerializer):
    """Read-only serializer for the job `create_schedule` endpoint.

    Reports whether a schedule can be created from this job and summarizes
    the saved launch-time prompts that would be copied onto that schedule.
    """

    can_schedule = serializers.SerializerMethodField()
    prompts = serializers.SerializerMethodField()

    class Meta:
        model = Job
        fields = ('can_schedule', 'prompts',)

    def get_can_schedule(self, obj):
        '''
        Need both a job template and job prompts to schedule
        '''
        return obj.can_schedule

    @staticmethod
    def _summarize(res_name, obj):
        # Reduce a related object to its standard summary-field dict,
        # tolerating missing attributes (None placeholder).
        summary = {}
        for field in SUMMARIZABLE_FK_FIELDS[res_name]:
            summary[field] = getattr(obj, field, None)
        return summary

    def get_prompts(self, obj):
        # Jobs launched before saved launch configurations existed have no
        # related JobLaunchConfig row at all — report that explicitly
        # instead of raising.
        try:
            config = obj.launch_config
            ret = config.prompts_dict(display=True)
            if 'inventory' in ret:
                ret['inventory'] = self._summarize('inventory', ret['inventory'])
            if 'credentials' in ret:
                all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
                ret['credentials'] = all_creds
            return ret
        except JobLaunchConfig.DoesNotExist:
            # Fixed grammar in user-facing message ("been ran" -> "been run").
            return {'all': _('Unknown, job may have been run before launch configurations were saved.')}
class AdHocCommandSerializer(UnifiedJobSerializer):
class Meta:
@ -2905,7 +2941,8 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',)
fields = ('*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',
'ask_variables_on_launch',)
def get_related(self, obj):
res = super(WorkflowJobTemplateSerializer, self).get_related(obj)
@ -2982,104 +3019,160 @@ class WorkflowJobCancelSerializer(WorkflowJobSerializer):
fields = ('can_cancel',)
class WorkflowNodeBaseSerializer(BaseSerializer):
job_type = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
class LaunchConfigurationBaseSerializer(BaseSerializer):
    """Base serializer for models that persist a saved launch-time
    configuration (schedules, workflow job template/job nodes).

    Exposes the prompted "char" fields as explicit serializer fields and
    validates submitted values against the related template's prompting
    rules via a mock model instance.
    """

    job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None,
                                       choices=NEW_JOB_TYPE_CHOICES)
    job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
    limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
    skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
    diff_mode = serializers.NullBooleanField(required=False, default=None)
    verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None,
                                        choices=VERBOSITY_CHOICES)

    class Meta:
        # Saved launch-time config fields.
        # Bug fix: 'skip_tags' was listed twice; the duplicate is removed.
        fields = ('*', 'extra_data', 'inventory',
                  'job_type', 'job_tags', 'skip_tags', 'limit', 'diff_mode', 'verbosity')

    def get_related(self, obj):
        res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
        if obj.inventory_id:
            res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
        res['credentials'] = self.reverse(
            'api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)),
            kwargs={'pk': obj.pk}
        )
        return res

    def _build_mock_obj(self, attrs):
        # Build a throwaway model instance combining the current instance's
        # values with the submitted attrs so model-level properties (e.g.
        # char_prompts) can be evaluated for validation.  Also strips keys
        # that are not real model fields from attrs, in place.
        mock_obj = self.Meta.model()
        if self.instance:
            for field in self.instance._meta.fields:
                setattr(mock_obj, field.name, getattr(self.instance, field.name))
        field_names = set(field.name for field in self.Meta.model._meta.fields)
        # Iterate over a snapshot: attrs is mutated (popped) inside the loop,
        # which would raise RuntimeError on a live dict view in Python 3.
        for field_name, value in list(attrs.items()):
            setattr(mock_obj, field_name, value)
            if field_name not in field_names:
                attrs.pop(field_name)
        return mock_obj

    def validate(self, attrs):
        attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)
        # Verify that fields do not violate template's prompting rules
        attrs['char_prompts'] = self._build_mock_obj(attrs).char_prompts
        return attrs
class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
credential = models.PositiveIntegerField(
blank=True, null=True, default=None,
help_text='This resource has been deprecated and will be removed in a future release')
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
fields = ('*', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'inventory', 'credential', 'job_type', 'job_tags', 'skip_tags', 'limit', 'skip_tags')
def get_related(self, obj):
res = super(WorkflowNodeBaseSerializer, self).get_related(obj)
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
return res
def validate(self, attrs):
# char_prompts go through different validation, so remove them here
for fd in ['job_type', 'job_tags', 'skip_tags', 'limit']:
if fd in attrs:
attrs.pop(fd)
return super(WorkflowNodeBaseSerializer, self).validate(attrs)
class WorkflowJobTemplateNodeSerializer(WorkflowNodeBaseSerializer):
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'workflow_job_template',)
fields = ('*', 'credential', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_template_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_template_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_template_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.workflow_job_template:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
return res
def to_internal_value(self, data):
internal_value = super(WorkflowNodeBaseSerializer, self).to_internal_value(data)
view = self.context.get('view', None)
request_method = None
if view and view.request:
request_method = view.request.method
if request_method in ['PATCH']:
obj = self.instance
char_prompts = copy.copy(obj.char_prompts)
char_prompts.update(self.extract_char_prompts(data))
else:
char_prompts = self.extract_char_prompts(data)
for fd in copy.copy(char_prompts):
if char_prompts[fd] is None:
char_prompts.pop(fd)
internal_value['char_prompts'] = char_prompts
return internal_value
def extract_char_prompts(self, data):
char_prompts = {}
for fd in ['job_type', 'job_tags', 'skip_tags', 'limit']:
# Accept null values, if given
if fd in data:
char_prompts[fd] = data[fd]
return char_prompts
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name,
self.credential)
return super(WorkflowJobTemplateNodeSerializer, self).build_field(field_name, info, model_class, nested_depth)
def validate(self, attrs):
if 'char_prompts' in attrs:
if 'job_type' in attrs['char_prompts']:
job_types = [t for t, v in JOB_TYPE_CHOICES]
if attrs['char_prompts']['job_type'] not in job_types:
raise serializers.ValidationError({
"job_type": _("%(job_type)s is not a valid job type. The choices are %(choices)s.") % {
'job_type': attrs['char_prompts']['job_type'], 'choices': job_types}})
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when v2 API is deprecated
deprecated_fields['credential'] = attrs.pop('credential')
view = self.context.get('view')
if self.instance is None and ('workflow_job_template' not in attrs or
attrs['workflow_job_template'] is None):
raise serializers.ValidationError({
"workflow_job_template": _("Workflow job template is missing during creation.")
})
ujt_obj = attrs.get('unified_job_template', None)
if 'unified_job_template' in attrs:
ujt_obj = attrs['unified_job_template']
ujt_obj = None
if self.instance:
ujt_obj = self.instance.unified_job_template
if isinstance(ujt_obj, (WorkflowJobTemplate, SystemJobTemplate)):
raise serializers.ValidationError({
"unified_job_template": _("Cannot nest a %s inside a WorkflowJobTemplate") % ujt_obj.__class__.__name__})
return super(WorkflowJobTemplateNodeSerializer, self).validate(attrs)
attrs = super(WorkflowJobTemplateNodeSerializer, self).validate(attrs)
if ujt_obj is None:
ujt_obj = attrs.get('unified_job_template')
accepted, rejected, errors = ujt_obj._accept_or_ignore_job_kwargs(**self._build_mock_obj(attrs).prompts_dict())
# Do not raise survey validation errors
errors.pop('variables_needed_to_start', None)
if errors:
raise serializers.ValidationError(errors)
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
return attrs
def create(self, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).create(validated_data)
if 'credential' in deprecated_fields:
if deprecated_fields['credential']:
obj.credentials.add(deprecated_fields['credential'])
return obj
def update(self, obj, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).update(obj, validated_data)
if 'credential' in deprecated_fields:
for cred in obj.credentials.filter(credential_type__kind='ssh'):
obj.credentials.remove(cred)
if deprecated_fields['credential']:
obj.credentials.add(deprecated_fields['credential'])
return obj
class WorkflowJobNodeSerializer(WorkflowNodeBaseSerializer):
class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
credential = models.PositiveIntegerField(
blank=True, null=True, default=None,
help_text='This resource has been deprecated and will be removed in a future release')
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = WorkflowJobNode
fields = ('*', 'job', 'workflow_job',)
fields = ('*', 'credential', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.job:
res['job'] = obj.job.get_absolute_url(self.context.get('request'))
if obj.workflow_job:
@ -3111,10 +3204,6 @@ class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer)
return field_class, field_kwargs
class WorkflowJobTemplateNodeListSerializer(WorkflowJobTemplateNodeSerializer):
pass
class JobListSerializer(JobSerializer, UnifiedJobListSerializer):
pass
@ -3294,21 +3383,39 @@ class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
class JobLaunchSerializer(BaseSerializer):
# Representational fields
passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True)
variables_needed_to_start = serializers.ReadOnlyField()
credential_needed_to_start = serializers.SerializerMethodField()
inventory_needed_to_start = serializers.SerializerMethodField()
survey_enabled = serializers.SerializerMethodField()
extra_vars = VerbatimField(required=False, write_only=True)
job_template_data = serializers.SerializerMethodField()
defaults = serializers.SerializerMethodField()
# Accepted on launch fields
extra_vars = serializers.JSONField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
credentials = serializers.PrimaryKeyRelatedField(
many=True, queryset=Credential.objects.all(),
required=False, write_only=True
)
credential_passwords = VerbatimField(required=False, write_only=True)
diff_mode = serializers.BooleanField(required=False, write_only=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
class Meta:
model = JobTemplate
fields = ('can_start_without_user_input', 'passwords_needed_to_start',
'extra_vars', 'limit', 'job_tags', 'skip_tags', 'job_type', 'inventory',
'credentials', 'ask_variables_on_launch', 'ask_tags_on_launch',
'extra_vars', 'inventory', 'limit', 'job_tags', 'skip_tags', 'job_type', 'verbosity', 'diff_mode',
'credentials', 'credential_passwords', 'ask_variables_on_launch', 'ask_tags_on_launch',
'ask_diff_mode_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_limit_on_launch',
'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch',
'survey_enabled', 'variables_needed_to_start', 'credential_needed_to_start',
@ -3317,15 +3424,6 @@ class JobLaunchSerializer(BaseSerializer):
'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_credential_on_launch',)
extra_kwargs = {
'credentials': {'write_only': True, 'default': [], 'allow_empty': True},
'limit': {'write_only': True,},
'job_tags': {'write_only': True,},
'skip_tags': {'write_only': True,},
'job_type': {'write_only': True,},
'inventory': {'write_only': True,},
'verbosity': {'write_only': True,}
}
def get_credential_needed_to_start(self, obj):
return False
@ -3339,114 +3437,85 @@ class JobLaunchSerializer(BaseSerializer):
return False
def get_defaults(self, obj):
ask_for_vars_dict = obj._ask_for_vars_dict()
ask_for_vars_dict['vault_credential'] = False
defaults_dict = {}
for field in ask_for_vars_dict:
if field == 'inventory':
defaults_dict[field] = dict(
name=getattrd(obj, '%s.name' % field, None),
id=getattrd(obj, '%s.pk' % field, None))
elif field in ('credential', 'vault_credential', 'extra_credentials'):
# don't prefill legacy defaults; encourage API users to specify
# credentials at launch time using the new `credentials` key
pass
elif field == 'credentials':
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'credentials':
if self.version > 1:
defaults_dict[field] = [
defaults_dict[field_name] = [
dict(
id=cred.id,
name=cred.name,
credential_type=cred.credential_type.pk
credential_type=cred.credential_type.pk,
passwords_needed=cred.passwords_needed
)
for cred in obj.credentials.all()
]
else:
defaults_dict[field] = getattr(obj, field)
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
errors = {}
obj = self.context.get('obj')
data = self.context.get('data')
template = self.context.get('template')
for field in obj.resources_needed_to_start:
if not (attrs.get(field, False) and obj._ask_for_vars_dict().get(field, False)):
errors[field] = _("Job Template '%s' is missing or undefined.") % field
template._is_manual_launch = True # signal to make several error types non-blocking
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
self._ignored_fields = rejected
if obj.inventory and obj.inventory.pending_deletion is True:
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
extra_vars = attrs.get('extra_vars', {})
try:
extra_vars = parse_yaml_or_json(extra_vars, silent_failure=False)
except ParseError as e:
# Catch known user variable formatting errors
errors['extra_vars'] = str(e)
if self.get_survey_enabled(obj):
validation_errors = obj.survey_variable_validation(extra_vars)
if validation_errors:
errors['variables_needed_to_start'] = validation_errors
# Prohibit credential assign of the same CredentialType.kind
# Note: when multi-vault is supported, we'll have to carve out an
# exception to this logic
# Prohibit providing multiple credentials of the same CredentialType.kind
# or multiples of same vault id
distinct_cred_kinds = []
for cred in data.get('credentials', []):
cred = Credential.objects.get(id=cred)
if cred.credential_type.pk in distinct_cred_kinds:
for cred in accepted.get('credentials', []):
if cred.unique_hash() in distinct_cred_kinds:
errors['credentials'] = _('Cannot assign multiple %s credentials.' % cred.credential_type.name)
distinct_cred_kinds.append(cred.credential_type.pk)
distinct_cred_kinds.append(cred.unique_hash())
# Special prohibited cases for scan jobs
errors.update(obj._extra_job_type_errors(data))
# verify that credentials (either provided or existing) don't
# require launch-time passwords that have not been provided
if 'credentials' in accepted:
launch_credentials = accepted['credentials']
else:
launch_credentials = template.credentials.all()
passwords = attrs.get('credential_passwords', {}) # get from original attrs
passwords_lacking = []
for cred in launch_credentials:
for p in cred.passwords_needed:
if p not in passwords:
passwords_lacking.append(p)
else:
accepted.setdefault('credential_passwords', {})
accepted['credential_passwords'][p] = passwords[p]
if len(passwords_lacking):
errors['passwords_needed_to_start'] = passwords_lacking
if errors:
raise serializers.ValidationError(errors)
JT_extra_vars = obj.extra_vars
JT_limit = obj.limit
JT_job_type = obj.job_type
JT_job_tags = obj.job_tags
JT_skip_tags = obj.skip_tags
JT_inventory = obj.inventory
JT_verbosity = obj.verbosity
credentials = attrs.pop('credentials', None)
attrs = super(JobLaunchSerializer, self).validate(attrs)
obj.extra_vars = JT_extra_vars
obj.limit = JT_limit
obj.job_type = JT_job_type
obj.skip_tags = JT_skip_tags
obj.job_tags = JT_job_tags
obj.inventory = JT_inventory
obj.verbosity = JT_verbosity
if credentials is not None:
attrs['credentials'] = credentials
if 'extra_vars' in accepted:
extra_vars_save = accepted['extra_vars']
else:
extra_vars_save = None
# Validate job against JobTemplate clean_ methods
accepted = super(JobLaunchSerializer, self).validate(accepted)
# Preserve extra_vars as dictionary internally
if extra_vars_save:
accepted['extra_vars'] = extra_vars_save
# if the POST includes a list of credentials, verify that they don't
# require launch-time passwords
# if the POST *does not* include a list of credentials, fall back to
# checking the credentials on the JobTemplate
credentials = attrs['credentials'] if 'credentials' in data else obj.credentials.all()
passwords_needed = []
for cred in credentials:
if cred.passwords_needed:
passwords = self.context.get('passwords')
try:
for p in cred.passwords_needed:
passwords[p] = data[p]
except KeyError:
passwords_needed.extend(cred.passwords_needed)
if len(passwords_needed):
raise serializers.ValidationError({
'passwords_needed_to_start': passwords_needed
})
return attrs
return accepted
class WorkflowJobLaunchSerializer(BaseSerializer):
@ -3473,24 +3542,9 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs):
errors = {}
obj = self.instance
extra_vars = attrs.get('extra_vars', {})
try:
extra_vars = parse_yaml_or_json(extra_vars, silent_failure=False)
except ParseError as e:
# Catch known user variable formatting errors
errors['extra_vars'] = str(e)
if self.get_survey_enabled(obj):
validation_errors = obj.survey_variable_validation(extra_vars)
if validation_errors:
errors['variables_needed_to_start'] = validation_errors
if errors:
raise serializers.ValidationError(errors)
accepted, rejected, errors = obj._accept_or_ignore_job_kwargs(**attrs)
WFJT_extra_vars = obj.extra_vars
attrs = super(WorkflowJobLaunchSerializer, self).validate(attrs)
@ -3613,12 +3667,12 @@ class LabelSerializer(BaseSerializer):
return res
class ScheduleSerializer(BaseSerializer):
class ScheduleSerializer(LaunchConfigurationBaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Schedule
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run', 'extra_data')
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run',)
def get_related(self, obj):
res = super(ScheduleSerializer, self).get_related(obj)
@ -3640,22 +3694,17 @@ class ScheduleSerializer(BaseSerializer):
'Schedule its source project `{}` instead.'.format(value.source_project.name)))
return value
def validate_extra_data(self, value):
if isinstance(value, dict):
return value
return vars_validate_or_raise(value)
def validate(self, attrs):
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
if extra_data:
ujt = None
if 'unified_job_template' in attrs:
ujt = attrs['unified_job_template']
elif self.instance:
ujt = self.instance.unified_job_template
if ujt and isinstance(ujt, (Project, InventorySource)):
raise serializers.ValidationError({'extra_data': _(
'Projects and inventory updates cannot accept extra variables.')})
ujt = None
if 'unified_job_template' in attrs:
ujt = attrs['unified_job_template']
elif self.instance:
ujt = self.instance.unified_job_template
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(**self._build_mock_obj(attrs).prompts_dict())
if 'extra_vars' in errors:
errors['extra_data'] = errors.pop('extra_vars')
if errors:
raise serializers.ValidationError(errors)
return super(ScheduleSerializer, self).validate(attrs)
# We reject rrules if:

View File

@ -0,0 +1,12 @@
Create a schedule based on a job:
Make a POST request to this endpoint to create a schedule that launches
the job template this job was launched from, using the same
parameters the job was launched with. These parameters include all
"prompted" resources such as `extra_vars`, `inventory`, `limit`, etc.
Jobs that were launched with user-provided passwords cannot have a schedule
created from them.
Make a GET request for information about what those prompts are and
whether or not a schedule can be created.

View File

@ -9,6 +9,7 @@ from awx.api.views import (
JobStart,
JobCancel,
JobRelaunch,
JobCreateSchedule,
JobJobHostSummariesList,
JobJobEventsList,
JobActivityStreamList,
@ -25,6 +26,7 @@ urls = [
url(r'^(?P<pk>[0-9]+)/start/$', JobStart.as_view(), name='job_start'), # Todo: Remove In 3.3
url(r'^(?P<pk>[0-9]+)/cancel/$', JobCancel.as_view(), name='job_cancel'),
url(r'^(?P<pk>[0-9]+)/relaunch/$', JobRelaunch.as_view(), name='job_relaunch'),
url(r'^(?P<pk>[0-9]+)/create_schedule/$', JobCreateSchedule.as_view(), name='job_create_schedule'),
url(r'^(?P<pk>[0-9]+)/job_host_summaries/$', JobJobHostSummariesList.as_view(), name='job_job_host_summaries_list'),
url(r'^(?P<pk>[0-9]+)/job_events/$', JobJobEventsList.as_view(), name='job_job_events_list'),
url(r'^(?P<pk>[0-9]+)/activity_stream/$', JobActivityStreamList.as_view(), name='job_activity_stream_list'),

View File

@ -7,6 +7,7 @@ from awx.api.views import (
ScheduleList,
ScheduleDetail,
ScheduleUnifiedJobsList,
ScheduleCredentialsList,
)
@ -14,6 +15,7 @@ urls = [
url(r'^$', ScheduleList.as_view(), name='schedule_list'),
url(r'^(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'),
url(r'^(?P<pk>[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'),
url(r'^(?P<pk>[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'),
]
__all__ = ['urls']

View File

@ -9,6 +9,7 @@ from awx.api.views import (
WorkflowJobNodeSuccessNodesList,
WorkflowJobNodeFailureNodesList,
WorkflowJobNodeAlwaysNodesList,
WorkflowJobNodeCredentialsList,
)
@ -18,6 +19,7 @@ urls = [
url(r'^(?P<pk>[0-9]+)/success_nodes/$', WorkflowJobNodeSuccessNodesList.as_view(), name='workflow_job_node_success_nodes_list'),
url(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'),
url(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'),
url(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'),
]
__all__ = ['urls']

View File

@ -9,6 +9,7 @@ from awx.api.views import (
WorkflowJobTemplateNodeSuccessNodesList,
WorkflowJobTemplateNodeFailureNodesList,
WorkflowJobTemplateNodeAlwaysNodesList,
WorkflowJobTemplateNodeCredentialsList,
)
@ -18,6 +19,7 @@ urls = [
url(r'^(?P<pk>[0-9]+)/success_nodes/$', WorkflowJobTemplateNodeSuccessNodesList.as_view(), name='workflow_job_template_node_success_nodes_list'),
url(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'),
url(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'),
url(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'),
]
__all__ = ['urls']

View File

@ -607,6 +607,46 @@ class ScheduleDetail(RetrieveUpdateDestroyAPIView):
new_in_148 = True
class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
    """Common attach/detach view for the credential list of a saved launch
    configuration (e.g. a schedule or workflow node)."""

    model = Credential
    serializer_class = CredentialSerializer
    relationship = 'credentials'

    def is_valid_relation(self, parent, sub, created=False):
        # Validate attaching credential `sub` to launch-config `parent`.
        # Returns an error dict ({"msg": ...}) when invalid, or None when OK.
        if not parent.unified_job_template:
            return {"msg": _("Cannot assign credential when related template is null.")}
        ask_mapping = parent.unified_job_template.get_ask_mapping()
        if self.relationship not in ask_mapping:
            return {"msg": _("Related template cannot accept {} on launch.").format(self.relationship)}
        elif sub.passwords_needed:
            # Saved configurations run unattended, so credentials that
            # require interactive password input cannot be stored.
            return {"msg": _("Credential that requires user input on launch "
                             "cannot be used in saved launch configuration.")}
        ask_field_name = ask_mapping[self.relationship]
        if not getattr(parent.unified_job_template, ask_field_name):
            return {"msg": _("Related template is not configured to accept credentials on launch.")}
        elif sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
            # At most one credential per type/vault-id hash per configuration.
            return {"msg": _("This launch configuration already provides a {credential_type} credential.").format(
                credential_type=sub.unique_hash(display=True))}
        elif sub.pk in parent.unified_job_template.credentials.values_list('pk', flat=True):
            return {"msg": _("Related template already uses {credential_type} credential.").format(
                credential_type=sub.name)}
        # None means there were no validation errors
        return None
class ScheduleCredentialsList(LaunchConfigCredentialsBase):
    # Credentials attached to a schedule's saved launch configuration.
    parent_model = Schedule

    new_in_330 = True
    new_in_api_v2 = True
class ScheduleUnifiedJobsList(SubListAPIView):
model = UnifiedJob
@ -2704,16 +2744,21 @@ class JobTemplateLaunch(RetrieveAPIView):
return data
extra_vars = data.pop('extra_vars', None) or {}
if obj:
for p in obj.passwords_needed_to_start:
data[p] = u''
needed_passwords = obj.passwords_needed_to_start
if needed_passwords:
data['credential_passwords'] = {}
for p in needed_passwords:
data['credential_passwords'][p] = u''
else:
data.pop('credential_passwords')
for v in obj.variables_needed_to_start:
extra_vars.setdefault(v, u'')
if extra_vars:
data['extra_vars'] = extra_vars
ask_for_vars_dict = obj._ask_for_vars_dict()
ask_for_vars_dict.pop('extra_vars')
for field in ask_for_vars_dict:
if not ask_for_vars_dict[field]:
modified_ask_mapping = JobTemplate.get_ask_mapping()
modified_ask_mapping.pop('extra_vars')
for field, ask_field_name in modified_ask_mapping.items():
if not getattr(obj, ask_field_name):
data.pop(field, None)
elif field == 'inventory':
data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
@ -2723,39 +2768,41 @@ class JobTemplateLaunch(RetrieveAPIView):
data[field] = getattr(obj, field)
return data
def post(self, request, *args, **kwargs):
obj = self.get_object()
def modernize_launch_payload(self, data, obj):
'''
Steps to do simple translations of request data to support
old field structure to launch endpoint
TODO: delete this method with future API version changes
'''
ignored_fields = {}
modern_data = data.copy()
for fd in ('credential', 'vault_credential', 'inventory'):
id_fd = '{}_id'.format(fd)
if fd not in request.data and id_fd in request.data:
request.data[fd] = request.data[id_fd]
if fd not in modern_data and id_fd in modern_data:
modern_data[fd] = modern_data[id_fd]
# This block causes `extra_credentials` to _always_ be ignored for
# the launch endpoint if we're accessing `/api/v1/`
if get_request_version(self.request) == 1 and 'extra_credentials' in request.data:
if hasattr(request.data, '_mutable') and not request.data._mutable:
request.data._mutable = True
extra_creds = request.data.pop('extra_credentials', None)
if get_request_version(self.request) == 1 and 'extra_credentials' in modern_data:
extra_creds = modern_data.pop('extra_credentials', None)
if extra_creds is not None:
ignored_fields['extra_credentials'] = extra_creds
# Automatically convert legacy launch credential arguments into a list of `.credentials`
if 'credentials' in request.data and (
'credential' in request.data or
'vault_credential' in request.data or
'extra_credentials' in request.data
if 'credentials' in modern_data and (
'credential' in modern_data or
'vault_credential' in modern_data or
'extra_credentials' in modern_data
):
return Response(dict(
error=_("'credentials' cannot be used in combination with 'credential', 'vault_credential', or 'extra_credentials'.")), # noqa
status=status.HTTP_400_BAD_REQUEST
)
raise ParseError({"error": _(
"'credentials' cannot be used in combination with 'credential', 'vault_credential', or 'extra_credentials'."
)})
if (
'credential' in request.data or
'vault_credential' in request.data or
'extra_credentials' in request.data
'credential' in modern_data or
'vault_credential' in modern_data or
'extra_credentials' in modern_data
):
# make a list of the current credentials
existing_credentials = obj.credentials.all()
@ -2765,49 +2812,56 @@ class JobTemplateLaunch(RetrieveAPIView):
('vault_credential', lambda cred: cred.credential_type.kind != 'vault'),
('extra_credentials', lambda cred: cred.credential_type.kind not in ('cloud', 'net'))
):
if key in request.data:
if key in modern_data:
# if a specific deprecated key is specified, remove all
# credentials of _that_ type from the list of current
# credentials
existing_credentials = filter(conditional, existing_credentials)
prompted_value = request.data.pop(key)
prompted_value = modern_data.pop(key)
# add the deprecated credential specified in the request
if not isinstance(prompted_value, Iterable):
if not isinstance(prompted_value, Iterable) or isinstance(prompted_value, basestring):
prompted_value = [prompted_value]
# If user gave extra_credentials, special case to use exactly
# the given list without merging with JT credentials
if key == 'extra_credentials' and prompted_value:
obj._deprecated_credential_launch = True # signal to not merge credentials
new_credentials.extend(prompted_value)
# combine the list of "new" and the filtered list of "old"
new_credentials.extend([cred.pk for cred in existing_credentials])
if new_credentials:
request.data['credentials'] = new_credentials
modern_data['credentials'] = new_credentials
passwords = {}
serializer = self.serializer_class(instance=obj, data=request.data, context={'obj': obj, 'data': request.data, 'passwords': passwords})
# credential passwords were historically provided as top-level attributes
if 'credential_passwords' not in modern_data:
modern_data['credential_passwords'] = data.copy()
return (modern_data, ignored_fields)
def post(self, request, *args, **kwargs):
obj = self.get_object()
try:
modern_data, ignored_fields = self.modernize_launch_payload(
data=request.data, obj=obj
)
except ParseError as exc:
return Response(exc.detail, status=status.HTTP_400_BAD_REQUEST)
serializer = self.serializer_class(data=modern_data, context={'template': obj})
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
_accepted_or_ignored = obj._accept_or_ignore_job_kwargs(**request.data)
prompted_fields = _accepted_or_ignored[0]
ignored_fields.update(_accepted_or_ignored[1])
ignored_fields.update(serializer._ignored_fields)
fd = 'inventory'
if fd in prompted_fields and prompted_fields[fd] != getattrd(obj, '{}.pk'.format(fd), None):
new_res = get_object_or_400(Inventory, pk=get_pk_from_dict(prompted_fields, fd))
use_role = getattr(new_res, 'use_role')
if request.user not in use_role:
raise PermissionDenied()
if not request.user.can_access(JobLaunchConfig, 'add', serializer.validated_data, template=obj):
raise PermissionDenied()
# For credentials that are _added_ via launch parameters, ensure the
# launching user has access
current_credentials = set(obj.credentials.values_list('id', flat=True))
for new_cred in Credential.objects.filter(id__in=prompted_fields.get('credentials', [])):
if new_cred.pk not in current_credentials and request.user not in new_cred.use_role:
raise PermissionDenied(_(
"You do not have access to credential {}".format(new_cred.name)
))
new_job = obj.create_unified_job(**prompted_fields)
passwords = serializer.validated_data.pop('credential_passwords', {})
new_job = obj.create_unified_job(**serializer.validated_data)
result = new_job.signal_start(**passwords)
if not result:
@ -2817,11 +2871,35 @@ class JobTemplateLaunch(RetrieveAPIView):
else:
data = OrderedDict()
data['job'] = new_job.id
data['ignored_fields'] = ignored_fields
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
data.update(JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
return Response(data, status=status.HTTP_201_CREATED)
def sanitize_for_response(self, data):
'''
Model objects cannot be serialized by DRF,
this replaces objects with their ids for inclusion in response
'''
def display_value(val):
if hasattr(val, 'id'):
return val.id
else:
return val
sanitized_data = {}
for field_name, value in data.items():
if isinstance(value, (set, list)):
sanitized_data[field_name] = []
for sub_value in value:
sanitized_data[field_name].append(display_value(sub_value))
else:
sanitized_data[field_name] = display_value(value)
return sanitized_data
class JobTemplateSchedulesList(SubListCreateAPIView):
view_name = _("Job Template Schedules")
@ -2984,9 +3062,9 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
return sublist_qs
def is_valid_relation(self, parent, sub, created=False):
current_extra_types = [cred.credential_type.pk for cred in parent.credentials.all()]
if sub.credential_type.pk in current_extra_types:
return {'error': _('Cannot assign multiple %s credentials.' % sub.credential_type.name)}
if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
return {"error": _("Cannot assign multiple {credential_type} credentials.".format(
credential_type=sub.unique_hash(display=True)))}
return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)
@ -3238,10 +3316,20 @@ class WorkflowJobNodeDetail(WorkflowsEnforcementMixin, RetrieveAPIView):
new_in_310 = True
class WorkflowJobNodeCredentialsList(SubListAPIView):
    """Credentials recorded on a workflow job node via its 'credentials' relationship."""
    model = Credential
    serializer_class = CredentialSerializer
    parent_model = WorkflowJobNode
    relationship = 'credentials'
    new_in_330 = True
    new_in_api_v2 = True
class WorkflowJobTemplateNodeList(WorkflowsEnforcementMixin, ListCreateAPIView):
model = WorkflowJobTemplateNode
serializer_class = WorkflowJobTemplateNodeListSerializer
serializer_class = WorkflowJobTemplateNodeSerializer
new_in_310 = True
@ -3251,21 +3339,18 @@ class WorkflowJobTemplateNodeDetail(WorkflowsEnforcementMixin, RetrieveUpdateDes
serializer_class = WorkflowJobTemplateNodeDetailSerializer
new_in_310 = True
def update_raw_data(self, data):
for fd in ['job_type', 'job_tags', 'skip_tags', 'limit', 'skip_tags']:
data[fd] = None
try:
obj = self.get_object()
data.update(obj.char_prompts)
except Exception:
pass
return super(WorkflowJobTemplateNodeDetail, self).update_raw_data(data)
class WorkflowJobTemplateNodeCredentialsList(LaunchConfigCredentialsBase):
    """Credentials saved on a WorkflowJobTemplateNode's launch configuration."""
    parent_model = WorkflowJobTemplateNode
    new_in_330 = True
    new_in_api_v2 = True
class WorkflowJobTemplateNodeChildrenBaseList(WorkflowsEnforcementMixin, EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
model = WorkflowJobTemplateNode
serializer_class = WorkflowJobTemplateNodeListSerializer
serializer_class = WorkflowJobTemplateNodeSerializer
always_allow_superuser = True
parent_model = WorkflowJobTemplateNode
relationship = ''
@ -3447,7 +3532,7 @@ class WorkflowJobTemplateLaunch(WorkflowsEnforcementMixin, RetrieveAPIView):
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
prompted_fields, ignored_fields = obj._accept_or_ignore_job_kwargs(**request.data)
prompted_fields, ignored_fields, errors = obj._accept_or_ignore_job_kwargs(**request.data)
new_job = obj.create_unified_job(**prompted_fields)
new_job.signal_start()
@ -3489,17 +3574,12 @@ class WorkflowJobRelaunch(WorkflowsEnforcementMixin, GenericAPIView):
class WorkflowJobTemplateWorkflowNodesList(WorkflowsEnforcementMixin, SubListCreateAPIView):
model = WorkflowJobTemplateNode
serializer_class = WorkflowJobTemplateNodeListSerializer
serializer_class = WorkflowJobTemplateNodeSerializer
parent_model = WorkflowJobTemplate
relationship = 'workflow_job_template_nodes'
parent_key = 'workflow_job_template'
new_in_310 = True
def update_raw_data(self, data):
for fd in ['job_type', 'job_tags', 'skip_tags', 'limit', 'skip_tags']:
data[fd] = None
return super(WorkflowJobTemplateWorkflowNodesList, self).update_raw_data(data)
def get_queryset(self):
return super(WorkflowJobTemplateWorkflowNodesList, self).get_queryset().order_by('id')
@ -3936,6 +4016,52 @@ class JobRelaunch(RetrieveAPIView):
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class JobCreateSchedule(RetrieveAPIView):
    """Create a (disabled) Schedule that re-runs this job from its saved launch config."""

    model = Job
    obj_permission_type = 'start'
    serializer_class = JobCreateScheduleSerializer
    new_in_330 = True

    def post(self, request, *args, **kwargs):
        obj = self.get_object()

        # The job must have a complete, reproducible launch configuration.
        if not obj.can_schedule:
            return Response({"error": _('Information needed to schedule this job is missing.')},
                            status=status.HTTP_400_BAD_REQUEST)

        config = obj.launch_config

        # The requesting user needs access to the prompted resources captured
        # in the job's launch config (checked via JobLaunchConfig 'add').
        if not request.user.can_access(JobLaunchConfig, 'add', {'reference_obj': obj}):
            raise PermissionDenied()

        # Make up a name for the schedule, guarantee that it is unique
        name = 'Auto-generated schedule from job {}'.format(obj.id)
        existing_names = Schedule.objects.filter(name__startswith=name).values_list('name', flat=True)
        if name in existing_names:
            idx = 1
            alt_name = '{} - number {}'.format(name, idx)
            while alt_name in existing_names:
                idx += 1
                alt_name = '{} - number {}'.format(name, idx)
            name = alt_name

        # Schedule is created disabled, with a monthly rrule anchored at now;
        # prompts are copied verbatim from the job's launch config.
        schedule = Schedule.objects.create(
            name=name,
            unified_job_template=obj.unified_job_template,
            enabled=False,
            rrule='{}Z RRULE:FREQ=MONTHLY;INTERVAL=1'.format(now().strftime('DTSTART:%Y%m%dT%H%M%S')),
            extra_data=config.extra_data,
            survey_passwords=config.survey_passwords,
            inventory=config.inventory,
            char_prompts=config.char_prompts
        )
        schedule.credentials.add(*config.credentials.all())

        data = ScheduleSerializer(schedule, context=self.get_serializer_context()).data
        headers = {'Location': schedule.get_absolute_url(request=request)}
        return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class JobNotificationsList(SubListAPIView):
model = Notification

View File

@ -341,8 +341,7 @@ class BaseAccess(object):
# Actions not possible for reason unrelated to RBAC
# Cannot copy with validation errors, or update a manual group/project
if display_method == 'copy' and isinstance(obj, JobTemplate):
validation_errors, resources_needed_to_start = obj.resource_validation_data()
if validation_errors:
if obj.validation_errors:
user_capabilities[display_method] = False
continue
elif isinstance(obj, (WorkflowJobTemplate, WorkflowJob)):
@ -1150,6 +1149,7 @@ class JobTemplateAccess(BaseAccess):
model = JobTemplate
select_related = ('created_by', 'modified_by', 'inventory', 'project',
'next_schedule',)
prefetch_related = ('credentials__credential_type',)
    def filtered_queryset(self):
        # Job templates the user can read via read_role.
        return self.model.accessible_objects(self.user, 'read_role')
@ -1189,8 +1189,7 @@ class JobTemplateAccess(BaseAccess):
# If credentials is provided, the user should have use access to them.
for pk in data.get('credentials', []):
if self.user not in get_object_or_400(Credential, pk=pk).use_role:
return False
raise Exception('Credentials must be attached through association method.')
# If an inventory is provided, the user should have use access.
inventory = get_value(Inventory, 'inventory')
@ -1317,6 +1316,7 @@ class JobAccess(BaseAccess):
prefetch_related = (
'unified_job_template',
'instance_group',
'credentials__credential_type',
Prefetch('labels', queryset=Label.objects.all().order_by('name')),
)
@ -1396,60 +1396,42 @@ class JobAccess(BaseAccess):
if self.user.is_superuser:
return True
credential_access = all([self.user in cred.use_role for cred in obj.credentials.all()])
inventory_access = obj.inventory and self.user in obj.inventory.use_role
job_credentials = set(obj.credentials.all())
# Obtain prompts used to start original job
JobLaunchConfig = obj._meta.get_field('launch_config').related_model
try:
config = obj.launch_config
except JobLaunchConfig.DoesNotExist:
config = None
# Check if JT execute access (and related prompts) is sufficient
if obj.job_template is not None:
prompts_access = True
job_fields = {}
jt_credentials = set(obj.job_template.credentials.all())
for fd in obj.job_template._ask_for_vars_dict():
if fd == 'credentials':
job_fields[fd] = job_credentials
job_fields[fd] = getattr(obj, fd)
accepted_fields, ignored_fields = obj.job_template._accept_or_ignore_job_kwargs(**job_fields)
# Check if job fields are not allowed by current _on_launch settings
for fd in ignored_fields:
if fd == 'extra_vars':
continue # we cannot yet validate validity of prompted extra_vars
elif fd == 'credentials':
if job_credentials != jt_credentials:
# Job has credentials that are not promptable
prompts_access = False
break
elif job_fields[fd] != getattr(obj.job_template, fd):
# Job has field that is not promptable
prompts_access = False
break
# For those fields that are allowed by prompting, but differ
# from JT, assure that user has explicit access to them
if prompts_access:
if obj.inventory != obj.job_template.inventory and not inventory_access:
prompts_access = False
if prompts_access and job_credentials != jt_credentials:
for cred in job_credentials:
if self.user not in cred.use_role:
prompts_access = False
break
if prompts_access and self.user in obj.job_template.execute_role:
if config is None:
prompts_access = False
else:
prompts_access = (
JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}) and
not config.has_unprompted(obj.job_template)
)
jt_access = self.user in obj.job_template.execute_role
if prompts_access and jt_access:
return True
elif not jt_access:
return False
org_access = obj.inventory and self.user in obj.inventory.organization.admin_role
project_access = obj.project is None or self.user in obj.project.admin_role
credential_access = all([self.user in cred.use_role for cred in obj.credentials.all()])
# job can be relaunched if user could make an equivalent JT
ret = inventory_access and credential_access and (org_access or project_access)
ret = org_access and credential_access and project_access
if not ret and self.save_messages:
if not obj.job_template:
pretext = _('Job has been orphaned from its job template.')
elif prompts_access:
self.messages['detail'] = _('You do not have execute permission to related job template.')
return False
elif config is None:
pretext = _('Job was launched with unknown prompted fields.')
else:
pretext = _('Job was launched with prompted fields.')
if inventory_access and credential_access:
if credential_access:
self.messages['detail'] = '{} {}'.format(pretext, _(' Organization level permissions required.'))
else:
self.messages['detail'] = '{} {}'.format(pretext, _(' You do not have permission to related resources.'))
@ -1495,6 +1477,74 @@ class SystemJobAccess(BaseAccess):
return False # no relaunching of system jobs
class JobLaunchConfigAccess(BaseAccess):
    '''
    Launch configs must have permissions checked for
     - relaunching
     - rescheduling

    In order to create a new object with a copy of this launch config, I need:
     - use access to related inventory (if present)
     - use role to many-related credentials (if any present)
    '''
    model = JobLaunchConfig
    # BUGFIX: ('job') without a trailing comma is a plain string, not a
    # 1-tuple; iterating it would yield characters 'j', 'o', 'b'.
    select_related = ('job',)
    prefetch_related = ('credentials', 'inventory')

    def _unusable_creds_exist(self, qs):
        # True if qs contains any credential outside the user's use_role grants.
        return qs.exclude(
            pk__in=Credential._accessible_pk_qs(Credential, self.user, 'use_role')
        ).exists()

    def has_credentials_access(self, obj):
        # user has access if no related credentials exist that the user lacks use role for
        return not self._unusable_creds_exist(obj.credentials)

    @check_superuser
    def can_add(self, data, template=None):
        # This is a special case, we don't check related many-to-many elsewhere
        # launch RBAC checks use this
        # Precedence note: reads as
        # ('credentials' in data and data['credentials']) or ('reference_obj' in data)
        if 'credentials' in data and data['credentials'] or 'reference_obj' in data:
            if 'reference_obj' in data:
                prompted_cred_qs = data['reference_obj'].credentials.all()
            else:
                # If given model objects, only use the primary key from them
                cred_pks = [cred.pk for cred in data['credentials']]
                if template:
                    # Credentials already on the template are not "prompted",
                    # so they do not need a separate access check.
                    for cred in template.credentials.all():
                        if cred.pk in cred_pks:
                            cred_pks.remove(cred.pk)
                prompted_cred_qs = Credential.objects.filter(pk__in=cred_pks)
            if self._unusable_creds_exist(prompted_cred_qs):
                return False
        return self.check_related('inventory', Inventory, data, role_field='use_role')

    @check_superuser
    def can_use(self, obj):
        return (
            self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True) and
            self.has_credentials_access(obj)
        )

    def can_change(self, obj, data):
        return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role')

    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        if isinstance(sub_obj, Credential) and relationship == 'credentials':
            return self.user in sub_obj.use_role
        else:
            # BUGFIX: `raise NotImplemented(...)` raises the NotImplemented
            # constant (itself a TypeError at raise time); the exception
            # class is NotImplementedError.
            raise NotImplementedError('Only credentials can be attached to launch configurations.')

    def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        if isinstance(sub_obj, Credential) and relationship == 'credentials':
            if skip_sub_obj_read_check:
                return True
            else:
                return self.user in sub_obj.read_role
        else:
            raise NotImplementedError('Only credentials can be attached to launch configurations.')
class WorkflowJobTemplateNodeAccess(BaseAccess):
'''
I can see/use a WorkflowJobTemplateNode if I have read permission
@ -1503,13 +1553,13 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
In order to add a node, I need:
- admin access to parent WFJT
- execute access to the unified job template being used
- access to any credential or inventory provided as the prompted fields
- access prompted fields via. launch config access
In order to do anything to a node, I need admin access to its WFJT
In order to edit fields on a node, I need:
- execute access to the unified job template of the node
- access to BOTH credential and inventory post-change, if present
- access to prompted fields
In order to delete a node, I only need the admin access its WFJT
@ -1518,18 +1568,13 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
'''
model = WorkflowJobTemplateNode
prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes',
'unified_job_template',)
'unified_job_template', 'credentials',)
    def filtered_queryset(self):
        # A node is visible whenever its parent workflow job template is readable.
        return self.model.objects.filter(
            workflow_job_template__in=WorkflowJobTemplate.accessible_objects(
                self.user, 'read_role'))
def can_use_prompted_resources(self, data):
return (
self.check_related('credential', Credential, data, role_field='use_role') and
self.check_related('inventory', Inventory, data, role_field='use_role'))
@check_superuser
def can_add(self, data):
if not data: # So the browseable API will work
@ -1537,7 +1582,7 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
return (
self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True) and
self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role') and
self.can_use_prompted_resources(data))
JobLaunchConfigAccess(self.user).can_add(data))
def wfjt_admin(self, obj):
if not obj.workflow_job_template:
@ -1547,26 +1592,20 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
def ujt_execute(self, obj):
if not obj.unified_job_template:
return self.wfjt_admin(obj)
else:
return self.user in obj.unified_job_template.execute_role and self.wfjt_admin(obj)
return True
return self.check_related('unified_job_template', UnifiedJobTemplate, {}, obj=obj,
role_field='execute_role', mandatory=True)
def can_change(self, obj, data):
if not data:
return True
if not self.ujt_execute(obj):
# should not be able to edit the prompts if lacking access to UJT
return False
if 'credential' in data or 'inventory' in data:
new_data = data
if 'credential' not in data:
new_data['credential'] = self.credential
if 'inventory' not in data:
new_data['inventory'] = self.inventory
return self.can_use_prompted_resources(new_data)
return True
# should not be able to edit the prompts if lacking access to UJT or WFJT
return (
self.ujt_execute(obj) and
self.wfjt_admin(obj) and
JobLaunchConfigAccess(self.user).can_change(obj, data)
)
def can_delete(self, obj):
return self.wfjt_admin(obj)
@ -1579,10 +1618,35 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
return True
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj)
if not self.wfjt_admin(obj):
return False
if relationship == 'credentials':
# Need permission to related template to attach a credential
if not self.ujt_execute(obj):
return False
return JobLaunchConfigAccess(self.user).can_attach(
obj, sub_obj, relationship, data,
skip_sub_obj_read_check=skip_sub_obj_read_check
)
elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return self.check_same_WFJT(obj, sub_obj)
else:
raise NotImplemented('Relationship {} not understood for WFJT nodes.'.format(relationship))
def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj)
if not self.wfjt_admin(obj):
return False
if relationship == 'credentials':
if not self.ujt_execute(obj):
return False
return JobLaunchConfigAccess(self.user).can_unattach(
obj, sub_obj, relationship, data,
skip_sub_obj_read_check=skip_sub_obj_read_check
)
elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return self.check_same_WFJT(obj, sub_obj)
else:
raise NotImplemented('Relationship {} not understood for WFJT nodes.'.format(relationship))
class WorkflowJobNodeAccess(BaseAccess):
@ -1597,7 +1661,8 @@ class WorkflowJobNodeAccess(BaseAccess):
'''
model = WorkflowJobNode
select_related = ('unified_job_template', 'job',)
prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes',)
prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes',
'credentials',)
def filtered_queryset(self):
return self.model.objects.filter(
@ -1610,8 +1675,7 @@ class WorkflowJobNodeAccess(BaseAccess):
return False
return (
self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role') and
self.check_related('credential', Credential, data, role_field='use_role') and
self.check_related('inventory', Inventory, data, role_field='use_role'))
JobLaunchConfigAccess(self.user).can_add(data))
def can_change(self, obj, data):
return False
@ -1949,8 +2013,6 @@ class UnifiedJobTemplateAccess(BaseAccess):
#qs = qs.prefetch_related(
# 'project',
# 'inventory',
# 'credential',
# 'credential__credential_type',
#)
def filtered_queryset(self):
@ -1993,8 +2055,6 @@ class UnifiedJobAccess(BaseAccess):
#qs = qs.prefetch_related(
# 'project',
# 'inventory',
# 'credential',
# 'credential__credential_type',
# 'job_template',
# 'inventory_source',
# 'project___credential',
@ -2002,7 +2062,6 @@ class UnifiedJobAccess(BaseAccess):
# 'inventory_source___inventory',
# 'job_template__inventory',
# 'job_template__project',
# 'job_template__credential',
#)
def filtered_queryset(self):
@ -2027,7 +2086,7 @@ class ScheduleAccess(BaseAccess):
model = Schedule
select_related = ('created_by', 'modified_by',)
prefetch_related = ('unified_job_template',)
prefetch_related = ('unified_job_template', 'credentials',)
def filtered_queryset(self):
qs = self.model.objects.all()
@ -2038,20 +2097,16 @@ class ScheduleAccess(BaseAccess):
Q(unified_job_template_id__in=unified_pk_qs) |
Q(unified_job_template_id__in=inv_src_qs.values_list('pk', flat=True)))
@check_superuser
def can_read(self, obj):
if obj and obj.unified_job_template:
job_class = obj.unified_job_template
return self.user.can_access(type(job_class), 'read', obj.unified_job_template)
else:
return False
    @check_superuser
    def can_add(self, data):
        # Creating a schedule requires access to any prompted launch-config
        # resources plus execute permission on the target template.
        if not JobLaunchConfigAccess(self.user).can_add(data):
            return False
        return self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role', mandatory=True)
@check_superuser
def can_change(self, obj, data):
if not JobLaunchConfigAccess(self.user).can_change(obj, data):
return False
if self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, mandatory=True):
return True
# Users with execute role can modify the schedules they created
@ -2059,10 +2114,21 @@ class ScheduleAccess(BaseAccess):
obj.created_by == self.user and
self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, role_field='execute_role', mandatory=True))
    def can_delete(self, obj):
        # Deleting a schedule requires the same access as modifying it.
        return self.can_change(obj, {})
    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        # Credential (de)attachment rules are delegated to launch-config access.
        return JobLaunchConfigAccess(self.user).can_attach(
            obj, sub_obj, relationship, data,
            skip_sub_obj_read_check=skip_sub_obj_read_check
        )
    def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        # Mirror of can_attach: delegate to launch-config access rules.
        return JobLaunchConfigAccess(self.user).can_unattach(
            obj, sub_obj, relationship, data,
            skip_sub_obj_read_check=skip_sub_obj_read_check
        )
class NotificationTemplateAccess(BaseAccess):
'''

View File

@ -761,3 +761,22 @@ class CredentialTypeInjectorField(JSONSchemaField):
code='invalid',
params={'value': value},
)
class AskForField(models.BooleanField):
    """
    Boolean model field marking that a sibling field on the same template
    may be prompted for at launch time.
    """
    def __init__(self, allows_field=None, **kwargs):
        super(AskForField, self).__init__(**kwargs)
        self._allows_field = allows_field

    @property
    def allows_field(self):
        # An explicit override always wins.
        if self._allows_field is not None:
            return self._allows_field
        # Otherwise derive the target from this field's own name, which
        # follows the ask_<field>_on_launch convention.
        try:
            field_name = self.name
        except AttributeError:
            # self.name will be set by the model metaclass, not this field
            raise Exception('Corresponding allows_field cannot be accessed until model is initialized.')
        return field_name[len('ask_'):-len('_on_launch')]

View File

@ -400,10 +400,10 @@ class Command(BaseCommand):
overwrite_vars=self.overwrite_vars,
)
self.inventory_update = self.inventory_source.create_inventory_update(
job_args=json.dumps(sys.argv),
job_env=dict(os.environ.items()),
job_cwd=os.getcwd(),
_eager_fields=dict(
job_args=json.dumps(sys.argv),
job_env=dict(os.environ.items()),
job_cwd=os.getcwd(),
execution_node=settings.CLUSTER_HOST_ID,
instance_group=InstanceGroup.objects.get(name='tower'))
)

View File

@ -0,0 +1,144 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import awx.main.fields
from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations._multi_cred import migrate_workflow_cred, migrate_workflow_cred_reverse
from awx.main.migrations._scan_jobs import remove_scan_type_nodes
class Migration(migrations.Migration):
    """Schema changes backing saved launch-time configurations.

    Adds prompt-storage fields (char_prompts, credentials, inventory,
    survey_passwords, extra_data) to Schedule and the workflow node models,
    migrates the legacy single-credential workflow field into the new
    many-to-many relation, removes 'scan'-type nodes, creates the
    JobLaunchConfig model, and converts ask_*_on_launch flags to AskForField.
    """

    dependencies = [
        ('main', '0009_v330_multi_credential'),
    ]

    operations = [
        # --- Prompt-storage fields on Schedule ---
        migrations.AddField(
            model_name='schedule',
            name='char_prompts',
            field=awx.main.fields.JSONField(default={}, blank=True),
        ),
        migrations.AddField(
            model_name='schedule',
            name='credentials',
            field=models.ManyToManyField(related_name='schedules', to='main.Credential'),
        ),
        migrations.AddField(
            model_name='schedule',
            name='inventory',
            field=models.ForeignKey(related_name='schedules', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True),
        ),
        migrations.AddField(
            model_name='schedule',
            name='survey_passwords',
            field=awx.main.fields.JSONField(default={}, editable=False, blank=True),
        ),
        # --- Prompt-storage fields on workflow job nodes ---
        migrations.AddField(
            model_name='workflowjobnode',
            name='credentials',
            field=models.ManyToManyField(related_name='workflowjobnodes', to='main.Credential'),
        ),
        migrations.AddField(
            model_name='workflowjobnode',
            name='extra_data',
            field=awx.main.fields.JSONField(default={}, blank=True),
        ),
        migrations.AddField(
            model_name='workflowjobnode',
            name='survey_passwords',
            field=awx.main.fields.JSONField(default={}, editable=False, blank=True),
        ),
        # --- Prompt-storage fields on workflow job template nodes ---
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='credentials',
            field=models.ManyToManyField(related_name='workflowjobtemplatenodes', to='main.Credential'),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='extra_data',
            field=awx.main.fields.JSONField(default={}, blank=True),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='survey_passwords',
            field=awx.main.fields.JSONField(default={}, editable=False, blank=True),
        ),
        # Run data migration before removing the old credential field
        migrations.RunPython(migration_utils.set_current_apps_for_migrations, migrations.RunPython.noop),
        migrations.RunPython(migrate_workflow_cred, migrate_workflow_cred_reverse),
        migrations.RunPython(remove_scan_type_nodes, migrations.RunPython.noop),
        migrations.RemoveField(
            model_name='workflowjobnode',
            name='credential',
        ),
        migrations.RemoveField(
            model_name='workflowjobtemplatenode',
            name='credential',
        ),
        # --- New model holding a job's saved launch configuration ---
        migrations.CreateModel(
            name='JobLaunchConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('extra_data', awx.main.fields.JSONField(blank=True, default={})),
                ('survey_passwords', awx.main.fields.JSONField(blank=True, default={}, editable=False)),
                ('char_prompts', awx.main.fields.JSONField(blank=True, default={})),
                ('credentials', models.ManyToManyField(related_name='joblaunchconfigs', to='main.Credential')),
                ('inventory', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='joblaunchconfigs', to='main.Inventory')),
                ('job', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='launch_config', to='main.UnifiedJob')),
            ],
        ),
        # --- Convert ask_*_on_launch flags to the AskForField type ---
        migrations.AddField(
            model_name='workflowjobtemplate',
            name='ask_variables_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_credential_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_diff_mode_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_inventory_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_job_type_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_limit_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_skip_tags_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_tags_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_variables_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_verbosity_on_launch',
            field=awx.main.fields.AskForField(default=False),
        ),
    ]

View File

@ -10,3 +10,25 @@ def migrate_to_multi_cred(app, schema_editor):
j.credentials.add(j.vault_credential)
for cred in j.extra_credentials.all():
j.credentials.add(cred)
def migrate_workflow_cred(app, schema_editor):
    """Copy each workflow node's legacy single `credential` FK into the
    new `credentials` many-to-many relation.

    Runs for both workflow job nodes and workflow job template nodes;
    nodes without a credential are left untouched.
    """
    WorkflowJobTemplateNode = app.get_model('main', 'WorkflowJobTemplateNode')
    WorkflowJobNode = app.get_model('main', 'WorkflowJobNode')
    for cls in (WorkflowJobNode, WorkflowJobTemplateNode):
        for node in cls.objects.iterator():
            if node.credential:
                # BUG FIX: original referenced undefined name `j`
                # (copy-paste from migrate_to_multi_cred above); the
                # node's own credential is what must be added.
                node.credentials.add(node.credential)
def migrate_workflow_cred_reverse(app, schema_editor):
    """Reverse migration: restore the legacy single `credential` FK from
    the first entry of the `credentials` many-to-many relation."""
    wjtn = app.get_model('main', 'WorkflowJobTemplateNode')
    wjn = app.get_model('main', 'WorkflowJobNode')
    for node_model in (wjn, wjtn):
        for node in node_model.objects.iterator():
            first_cred = node.credentials.first()
            if not first_cred:
                continue
            node.credential = first_cred
            node.save()

View File

@ -82,3 +82,21 @@ def _migrate_scan_job_templates(apps):
def migrate_scan_job_templates(apps, schema_editor):
    """Migration entry point with the (apps, schema_editor) signature
    Django's RunPython expects; delegates to the module-level helper."""
    _migrate_scan_job_templates(apps)
def remove_scan_type_nodes(apps, schema_editor):
    """Strip the deprecated 'scan' job_type prompt from workflow nodes.

    Scan jobs were deprecated in 3.2; any node still prompting
    job_type == 'scan' has that prompt removed and is re-saved.
    """
    WorkflowJobTemplateNode = apps.get_model('main', 'WorkflowJobTemplateNode')
    WorkflowJobNode = apps.get_model('main', 'WorkflowJobNode')
    for node_cls in (WorkflowJobNode, WorkflowJobTemplateNode):
        for node in node_cls.objects.iterator():
            node_prompts = node.char_prompts
            if node_prompts.get('job_type', None) != 'scan':
                continue
            log_text = '{} set job_type to scan, which was deprecated in 3.2, removing.'.format(node_cls)
            # Job nodes reflect actual runs, so they log at info;
            # template nodes only at debug.
            if node_cls == WorkflowJobNode:
                logger.info(log_text)
            else:
                logger.debug(log_text)
            node_prompts.pop('job_type')
            node.char_prompts = node_prompts
            node.save()

View File

@ -35,6 +35,11 @@ JOB_TYPE_CHOICES = [
(PERM_INVENTORY_SCAN, _('Scan')),
]
NEW_JOB_TYPE_CHOICES = [
(PERM_INVENTORY_DEPLOY, _('Run')),
(PERM_INVENTORY_CHECK, _('Check')),
]
AD_HOC_JOB_TYPE_CHOICES = [
(PERM_INVENTORY_DEPLOY, _('Run')),
(PERM_INVENTORY_CHECK, _('Check')),

View File

@ -326,9 +326,14 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
@property
def passwords_needed(self):
needed = []
for field in ('ssh_password', 'become_password', 'ssh_key_unlock', 'vault_password'):
for field in ('ssh_password', 'become_password', 'ssh_key_unlock'):
if getattr(self, 'needs_%s' % field):
needed.append(field)
if self.needs_vault_password:
if self.inputs.get('vault_id'):
needed.append('vault_password.{}'.format(self.inputs.get('vault_id')))
else:
needed.append('vault_password')
return needed
def _password_field_allows_ask(self, field):
@ -369,6 +374,31 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
field_val[k] = '$encrypted$'
return field_val
def unique_hash(self, display=False):
'''
Credential exclusivity is not defined solely by the related
credential type (due to vault), so this produces a hash
that can be used to evaluate exclusivity
'''
if display:
type_alias = self.credential_type.name
else:
type_alias = self.credential_type_id
if self.kind == 'vault' and self.inputs.get('vault_id', None):
if display:
fmt_str = '{} (id={})'
else:
fmt_str = '{}_{}'
return fmt_str.format(type_alias, self.inputs.get('vault_id'))
return str(type_alias)
@staticmethod
def unique_dict(cred_qs):
ret = {}
for cred in cred_qs:
ret[cred.unique_hash()] = cred
return ret
class CredentialType(CommonModelNameNotUnique):
'''

View File

@ -1339,7 +1339,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
def _get_unified_job_field_names(cls):
return ['name', 'description', 'source', 'source_path', 'source_script', 'source_vars', 'schedule',
'credential', 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
'timeout', 'verbosity', 'launch_type', 'source_project_update',]
'timeout', 'verbosity', 'source_project_update',]
def save(self, *args, **kwargs):
# If update_fields has been specified, add our field names to it,

View File

@ -21,7 +21,7 @@ from dateutil.tz import tzutc
from django.utils.encoding import force_text, smart_str
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from django.core.exceptions import ValidationError, FieldDoesNotExist
# REST Framework
from rest_framework.exceptions import ParseError
@ -40,8 +40,7 @@ from awx.main.utils import (
)
from awx.main.fields import ImplicitRoleField
from awx.main.models.mixins import ResourceMixin, SurveyJobTemplateMixin, SurveyJobMixin, TaskManagerJobMixin
from awx.main.models.base import PERM_INVENTORY_SCAN
from awx.main.fields import JSONField
from awx.main.fields import JSONField, AskForField
from awx.main.consumers import emit_channel_notification
@ -50,7 +49,7 @@ logger = logging.getLogger('awx.main.models.jobs')
analytics_logger = logging.getLogger('awx.analytics.job_events')
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
__all__ = ['JobTemplate', 'Job', 'JobHostSummary', 'JobEvent', 'SystemJobOptions', 'SystemJobTemplate', 'SystemJob']
__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'JobEvent', 'SystemJobTemplate', 'SystemJob']
class JobOptions(BaseModel):
@ -215,6 +214,9 @@ class JobOptions(BaseModel):
def passwords_needed_to_start(self):
'''Return list of password field names needed to start the job.'''
needed = []
# Unsaved credential objects can not require passwords
if not self.pk:
return needed
for cred in self.credentials.all():
needed.extend(cred.passwords_needed)
return needed
@ -236,41 +238,39 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
blank=True,
default='',
)
ask_diff_mode_on_launch = models.BooleanField(
ask_diff_mode_on_launch = AskForField(
blank=True,
default=False,
)
ask_variables_on_launch = models.BooleanField(
ask_limit_on_launch = AskForField(
blank=True,
default=False,
)
ask_limit_on_launch = models.BooleanField(
ask_tags_on_launch = AskForField(
blank=True,
default=False,
allows_field='job_tags'
)
ask_skip_tags_on_launch = AskForField(
blank=True,
default=False,
)
ask_tags_on_launch = models.BooleanField(
ask_job_type_on_launch = AskForField(
blank=True,
default=False,
)
ask_skip_tags_on_launch = models.BooleanField(
ask_verbosity_on_launch = AskForField(
blank=True,
default=False,
)
ask_job_type_on_launch = models.BooleanField(
ask_inventory_on_launch = AskForField(
blank=True,
default=False,
)
ask_verbosity_on_launch = models.BooleanField(
blank=True,
default=False,
)
ask_inventory_on_launch = models.BooleanField(
blank=True,
default=False,
)
ask_credential_on_launch = models.BooleanField(
ask_credential_on_launch = AskForField(
blank=True,
default=False,
allows_field='credentials'
)
admin_role = ImplicitRoleField(
parent_role=['project.organization.admin_role', 'inventory.organization.admin_role']
@ -291,36 +291,27 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
def _get_unified_job_field_names(cls):
return ['name', 'description', 'job_type', 'inventory', 'project',
'playbook', 'credentials', 'forks', 'schedule', 'limit',
'verbosity', 'job_tags', 'extra_vars', 'launch_type',
'verbosity', 'job_tags', 'extra_vars',
'force_handlers', 'skip_tags', 'start_at_task',
'become_enabled', 'labels', 'survey_passwords',
'allow_simultaneous', 'timeout', 'use_fact_cache',
'diff_mode',]
def resource_validation_data(self):
@property
def validation_errors(self):
'''
Process consistency errors and need-for-launch related fields.
Fields needed to start, which cannot be given on launch, invalid state.
'''
resources_needed_to_start = []
validation_errors = {}
# Inventory and Credential related checks
if self.inventory is None:
resources_needed_to_start.append('inventory')
if not self.ask_inventory_on_launch:
validation_errors['inventory'] = [_("Job Template must provide 'inventory' or allow prompting for it."),]
# Job type dependent checks
if self.inventory is None and not self.ask_inventory_on_launch:
validation_errors['inventory'] = [_("Job Template must provide 'inventory' or allow prompting for it."),]
if self.project is None:
resources_needed_to_start.append('project')
validation_errors['project'] = [_("Job types 'run' and 'check' must have assigned a project."),]
return (validation_errors, resources_needed_to_start)
return validation_errors
@property
def resources_needed_to_start(self):
validation_errors, resources_needed_to_start = self.resource_validation_data()
return resources_needed_to_start
return [fd for fd in ['project', 'inventory'] if not getattr(self, '{}_id'.format(fd))]
def create_job(self, **kwargs):
'''
@ -350,66 +341,72 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
# that of job template launch, so prompting_needed should
# not block a provisioning callback from creating/launching jobs.
if callback_extra_vars is None:
for value in self._ask_for_vars_dict().values():
if value:
for ask_field_name in set(self.get_ask_mapping().values()):
if getattr(self, ask_field_name):
prompting_needed = True
break
return (not prompting_needed and
not self.passwords_needed_to_start and
not variables_needed)
def _ask_for_vars_dict(self):
return dict(
diff_mode=self.ask_diff_mode_on_launch,
extra_vars=self.ask_variables_on_launch,
limit=self.ask_limit_on_launch,
job_tags=self.ask_tags_on_launch,
skip_tags=self.ask_skip_tags_on_launch,
job_type=self.ask_job_type_on_launch,
verbosity=self.ask_verbosity_on_launch,
inventory=self.ask_inventory_on_launch,
credentials=self.ask_credential_on_launch,
)
def _accept_or_ignore_job_kwargs(self, **kwargs):
# Sort the runtime fields allowed and disallowed by job template
ignored_fields = {}
prompted_fields = {}
prompted_data = {}
rejected_data = {}
accepted_vars, rejected_vars, errors_dict = self.accept_or_ignore_variables(kwargs.get('extra_vars', {}))
if accepted_vars:
prompted_data['extra_vars'] = accepted_vars
if rejected_vars:
rejected_data['extra_vars'] = rejected_vars
ask_for_vars_dict = self._ask_for_vars_dict()
# Handle all the other fields that follow the simple prompting rule
for field_name, ask_field_name in self.get_ask_mapping().items():
if field_name not in kwargs or field_name == 'extra_vars' or kwargs[field_name] is None:
continue
for field in ask_for_vars_dict:
if field in kwargs:
if field == 'extra_vars':
prompted_fields[field] = {}
ignored_fields[field] = {}
if ask_for_vars_dict[field]:
prompted_fields[field] = kwargs[field]
new_value = kwargs[field_name]
old_value = getattr(self, field_name)
field = self._meta.get_field(field_name)
if isinstance(field, models.ManyToManyField):
old_value = set(old_value.all())
if getattr(self, '_deprecated_credential_launch', False):
# TODO: remove this code branch when support for `extra_credentials` goes away
new_value = set(kwargs[field_name])
else:
if field == 'extra_vars' and self.survey_enabled and self.survey_spec:
# Accept vars defined in the survey and no others
survey_vars = [question['variable'] for question in self.survey_spec.get('spec', [])]
extra_vars = parse_yaml_or_json(kwargs[field])
for key in extra_vars:
if key in survey_vars:
prompted_fields[field][key] = extra_vars[key]
else:
ignored_fields[field][key] = extra_vars[key]
else:
ignored_fields[field] = kwargs[field]
new_value = set(kwargs[field_name]) - old_value
if not new_value:
continue
return prompted_fields, ignored_fields
if new_value == old_value:
# no-op case: Fields the same as template's value
# counted as neither accepted or ignored
continue
elif getattr(self, ask_field_name):
# accepted prompt
prompted_data[field_name] = new_value
else:
# unprompted - template is not configured to accept field on launch
rejected_data[field_name] = new_value
# Not considered an error for manual launch, to support old
# behavior of putting them in ignored_fields and launching anyway
if not getattr(self, '_is_manual_launch', False):
errors_dict[field_name] = _('Field is not configured to prompt on launch.').format(field_name=field_name)
    def _extra_job_type_errors(self, data):
        """
        Used to enforce 2 special cases around scan jobs and prompting
        - the inventory cannot be changed on a scan job template
        - scan jobs cannot be switched to run/check type and vice versa
        """
        errors = {}
        # Only relevant when job_type is both provided and promptable.
        if 'job_type' in data and self.ask_job_type_on_launch:
            # NOTE(review): only the "override TO scan on a non-scan
            # template" direction is checked here; the docstring also
            # claims the reverse ("and vice versa") — confirm the
            # scan-template -> run/check case is handled by the caller.
            if data['job_type'] == PERM_INVENTORY_SCAN and not self.job_type == PERM_INVENTORY_SCAN:
                errors['job_type'] = _('Cannot override job_type to or from a scan job.')
        return errors
if not getattr(self, '_is_manual_launch', False) and self.passwords_needed_to_start:
errors_dict['passwords_needed_to_start'] = _(
'Saved launch configurations cannot provide passwords needed to start.')
needed = self.resources_needed_to_start
if needed:
needed_errors = []
for resource in needed:
if resource in prompted_data:
continue
needed_errors.append(_("Job Template {} is missing or undefined.").format(resource))
if needed_errors:
errors_dict['resources_needed_to_start'] = needed_errors
return prompted_data, rejected_data, errors_dict
@property
def cache_timeout_blocked(self):
@ -804,6 +801,160 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
ansible_facts_modified=host.ansible_facts_modified.isoformat()))
# Add on aliases for the non-related-model fields
class NullablePromptPsuedoField(object):
    """Descriptor exposing one key of the `char_prompts` dict as an attribute.

    Reading returns the stored value or None; assigning None (or an empty
    dict) removes the key so "unset" never leaves a null entry behind.
    Used in LaunchTimeConfig and submodels.  (Class-name spelling kept for
    backward compatibility with existing references.)
    """
    def __init__(self, field_name):
        # Key inside the instance's char_prompts dict that this proxies.
        self.field_name = field_name

    def __get__(self, instance, type=None):
        # A missing key reads as None rather than raising.
        return instance.char_prompts.get(self.field_name, None)

    def __set__(self, instance, value):
        prompts = instance.char_prompts
        if value is None or value == {}:
            # Unsetting: drop the key entirely (no-op if absent).
            prompts.pop(self.field_name, None)
        else:
            prompts[self.field_name] = value
class LaunchTimeConfig(BaseModel):
    '''
    Common model for all objects that save details of a saved launch config
    WFJT / WJ nodes, schedules, and job launch configs (not all implemented yet)
    '''
    class Meta:
        abstract = True

    # Prompting-related fields that have to be handled as special cases
    # NOTE(review): the JSONFields below use a mutable `default={}`; Django
    # convention prefers `default=dict`, but changing it generates a
    # migration -- confirm before altering.
    credentials = models.ManyToManyField(
        'Credential',
        related_name='%(class)ss'
    )
    inventory = models.ForeignKey(
        'Inventory',
        related_name='%(class)ss',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    # Prompted extra_vars; exposed to callers as `extra_vars` via
    # prompts_dict(), stored here as `extra_data`.
    extra_data = JSONField(
        blank=True,
        default={}
    )
    # Masked values for survey password answers; excluded from search.
    survey_passwords = prevent_search(JSONField(
        blank=True,
        default={},
        editable=False,
    ))
    # All standard fields are stored in this dictionary field
    # This is a solution to the nullable CharField problem, specific to prompting
    char_prompts = JSONField(
        blank=True,
        default={}
    )

    def prompts_dict(self, display=False):
        # Build a dict of all saved prompts, keyed by the promptable
        # field names from JobTemplate's ask_* mapping.
        data = {}
        for prompt_name in JobTemplate.get_ask_mapping().keys():
            try:
                field = self._meta.get_field(prompt_name)
            except FieldDoesNotExist:
                # Not a real model field here; handled below via the
                # char_prompts pseudo-fields.
                field = None
            if isinstance(field, models.ManyToManyField):
                if not self.pk:
                    continue  # unsaved object can't have related many-to-many
                prompt_val = set(getattr(self, prompt_name).all())
                if len(prompt_val) > 0:
                    data[prompt_name] = prompt_val
            elif prompt_name == 'extra_vars':
                if self.extra_data:
                    if display:
                        # Masked copy for display purposes.
                        data[prompt_name] = self.display_extra_data()
                    else:
                        data[prompt_name] = self.extra_data
                if self.survey_passwords and not display:
                    data['survey_passwords'] = self.survey_passwords
            else:
                # Plain (pseudo-)field: include only when actually set.
                prompt_val = getattr(self, prompt_name)
                if prompt_val is not None:
                    data[prompt_name] = prompt_val
        return data

    def display_extra_data(self):
        '''
        Hides fields marked as passwords in survey.
        '''
        if self.survey_passwords:
            extra_data = parse_yaml_or_json(self.extra_data)
            # Replace each password answer with its stored masked value.
            for key, value in self.survey_passwords.items():
                if key in extra_data:
                    extra_data[key] = value
            return extra_data
        else:
            return self.extra_data

    @property
    def _credential(self):
        '''
        Only used for workflow nodes to support backward compatibility.
        '''
        # First machine (ssh) credential, if any, mirrors the legacy
        # single-credential field.
        try:
            return [cred for cred in self.credentials.all() if cred.credential_type.kind == 'ssh'][0]
        except IndexError:
            return None

    @property
    def credential(self):
        '''
        Returns an integer so it can be used as IntegerField in serializer
        '''
        cred = self._credential
        if cred is not None:
            return cred.pk
        else:
            return None
# Attach a pseudo-field for every promptable name that LaunchTimeConfig
# does not define as a real model field; these read and write through
# the char_prompts dict.
for field_name in JobTemplate.get_ask_mapping():
    try:
        LaunchTimeConfig._meta.get_field(field_name)
    except FieldDoesNotExist:
        setattr(LaunchTimeConfig, field_name, NullablePromptPsuedoField(field_name))
class JobLaunchConfig(LaunchTimeConfig):
    '''
    Historical record of user launch-time overrides for a job
    Not exposed in the API
    Used for relaunch, scheduling, etc.
    '''
    class Meta:
        app_label = 'main'

    job = models.OneToOneField(
        'UnifiedJob',
        related_name='launch_config',
        on_delete=models.CASCADE,
        editable=False,
    )

    def has_unprompted(self, template):
        """Return True if this config stores a prompt that *template* no
        longer allows (its ask_* flag has since been set to False);
        False otherwise.
        """
        prompts = self.prompts_dict()
        for field_name, ask_field_name in template.get_ask_mapping().items():
            if field_name in prompts and not getattr(template, ask_field_name):
                return True
        # The original used `for ... else` with no break, so the else
        # branch always ran; a plain return is equivalent and clearer.
        return False
class JobHostSummary(CreatedModifiedModel):
'''
Per-host statistics for each job.
@ -1404,6 +1555,50 @@ class SystemJobTemplate(UnifiedJobTemplate, SystemJobOptions):
success=list(success_notification_templates),
any=list(any_notification_templates))
def _accept_or_ignore_job_kwargs(self, **kwargs):
extra_data = kwargs.pop('extra_vars', {})
prompted_data, rejected_data, errors = super(SystemJobTemplate, self)._accept_or_ignore_job_kwargs(**kwargs)
prompted_vars, rejected_vars, errors = self.accept_or_ignore_variables(extra_data, errors)
if prompted_vars:
prompted_data['extra_vars'] = prompted_vars
if rejected_vars:
rejected_data['extra_vars'] = rejected_vars
return (prompted_data, rejected_data, errors)
def _accept_or_ignore_variables(self, data, errors):
'''
Unlike other templates, like project updates and inventory sources,
system job templates can accept a limited number of fields
used as options for the management commands.
'''
rejected = {}
allowed_vars = set(['days', 'older_than', 'granularity'])
given_vars = set(data.keys())
unallowed_vars = given_vars - (allowed_vars & given_vars)
errors_list = []
if unallowed_vars:
errors_list.append(_('Variables {list_of_keys} are not allowed for system jobs.').format(
list_of_keys=', '.join(unallowed_vars)))
for key in unallowed_vars:
rejected[key] = data.pop(key)
if 'days' in data:
try:
if type(data['days']) is bool:
raise ValueError
if float(data['days']) != int(data['days']):
raise ValueError
days = int(data['days'])
if days < 0:
raise ValueError
except ValueError:
errors_list.append(_("days must be a positive integer."))
rejected['days'] = data.pop('days')
if errors_list:
errors['extra_vars'] = errors_list
return (data, rejected, errors)
class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):

View File

@ -6,6 +6,7 @@ from copy import copy
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User # noqa
from django.utils.translation import ugettext_lazy as _
# AWX
from awx.main.models.base import prevent_search
@ -13,7 +14,7 @@ from awx.main.models.rbac import (
Role, RoleAncestorEntry, get_roles_on_resource
)
from awx.main.utils import parse_yaml_or_json
from awx.main.fields import JSONField
from awx.main.fields import JSONField, AskForField
__all__ = ['ResourceMixin', 'SurveyJobTemplateMixin', 'SurveyJobMixin',
@ -92,6 +93,11 @@ class SurveyJobTemplateMixin(models.Model):
blank=True,
default={},
))
ask_variables_on_launch = AskForField(
blank=True,
default=False,
allows_field='extra_vars'
)
def survey_password_variables(self):
vars = []
@ -227,17 +233,44 @@ class SurveyJobTemplateMixin(models.Model):
choice_list))
return errors
def survey_variable_validation(self, data):
errors = []
if not self.survey_enabled:
return errors
if 'name' not in self.survey_spec:
errors.append("'name' missing from survey spec.")
if 'description' not in self.survey_spec:
errors.append("'description' missing from survey spec.")
for survey_element in self.survey_spec.get("spec", []):
errors += self._survey_element_validation(survey_element, data)
return errors
def _accept_or_ignore_variables(self, data, errors=None):
survey_is_enabled = (self.survey_enabled and self.survey_spec)
extra_vars = data.copy()
if errors is None:
errors = {}
rejected = {}
accepted = {}
if survey_is_enabled:
# Check for data violation of survey rules
survey_errors = []
for survey_element in self.survey_spec.get("spec", []):
element_errors = self._survey_element_validation(survey_element, data)
key = survey_element.get('variable', None)
if element_errors:
survey_errors += element_errors
if key is not None and key in extra_vars:
rejected[key] = extra_vars.pop(key)
elif key in extra_vars:
accepted[key] = extra_vars.pop(key)
if survey_errors:
errors['variables_needed_to_start'] = survey_errors
if self.ask_variables_on_launch:
# We can accept all variables
accepted.update(extra_vars)
extra_vars = {}
if extra_vars:
# Leftover extra_vars, keys provided that are not allowed
rejected.update(extra_vars)
# ignored variables does not block manual launch
if not getattr(self, '_is_manual_launch', False):
errors['extra_vars'] = [_('Variables {list_of_keys} are not allowed on launch.').format(
list_of_keys=', '.join(extra_vars.keys()))]
return (accepted, rejected, errors)
class SurveyJobMixin(models.Model):

View File

@ -308,7 +308,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
def _get_unified_job_field_names(cls):
return ['name', 'description', 'local_path', 'scm_type', 'scm_url',
'scm_branch', 'scm_clean', 'scm_delete_on_update',
'credential', 'schedule', 'timeout', 'launch_type',]
'credential', 'schedule', 'timeout',]
def save(self, *args, **kwargs):
new_instance = not bool(self.pk)

View File

@ -5,21 +5,19 @@ import re
import logging
import datetime
import dateutil.rrule
import json
# Django
from django.db import models
from django.db.models.query import QuerySet
from django.utils.timezone import now, make_aware, get_default_timezone
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.jobs import LaunchTimeConfig
from awx.main.utils import ignore_inventory_computed_fields
from awx.main.consumers import emit_channel_notification
from awx.main.fields import JSONField
logger = logging.getLogger('awx.main.models.schedule')
@ -53,7 +51,7 @@ class ScheduleManager(ScheduleFilterMethods, models.Manager):
return ScheduleQuerySet(self.model, using=self._db)
class Schedule(CommonModel):
class Schedule(CommonModel, LaunchTimeConfig):
class Meta:
app_label = 'main'
@ -92,44 +90,6 @@ class Schedule(CommonModel):
editable=False,
help_text=_("The next time that the scheduled action will run.")
)
extra_data = JSONField(
blank=True,
default={}
)
# extra_data is actually a string with a JSON payload in it. This
# is technically OK because a string is a valid JSON. One day we will
# enforce non-string JSON.
def _clean_extra_data_system_jobs(self):
extra_data = self.extra_data
if not isinstance(extra_data, dict):
try:
extra_data = json.loads(self.extra_data)
except Exception:
raise ValidationError(_("Expected JSON"))
if extra_data and 'days' in extra_data:
try:
if type(extra_data['days']) is bool:
raise ValueError
if float(extra_data['days']) != int(extra_data['days']):
raise ValueError
days = int(extra_data['days'])
if days < 0:
raise ValueError
except ValueError:
raise ValidationError(_("days must be a positive integer."))
return self.extra_data
def clean_extra_data(self):
if not self.unified_job_template:
return self.extra_data
# Compare class by string name because it's hard to import SystemJobTemplate
if type(self.unified_job_template).__name__ is not 'SystemJobTemplate':
return self.extra_data
return self._clean_extra_data_system_jobs()
def __unicode__(self):
return u'%s_t%s_%s_%s' % (self.name, self.unified_job_template.id, self.id, self.next_run)
@ -137,6 +97,14 @@ class Schedule(CommonModel):
def get_absolute_url(self, request=None):
return reverse('api:schedule_detail', kwargs={'pk': self.pk}, request=request)
def get_job_kwargs(self):
config_data = self.prompts_dict()
job_kwargs, rejected, errors = self.unified_job_template._accept_or_ignore_job_kwargs(**config_data)
if errors:
logger.info('Errors creating scheduled job: {}'.format(errors))
job_kwargs['_eager_fields'] = {'launch_type': 'scheduled', 'schedule': self}
return job_kwargs
def update_computed_fields(self):
future_rs = dateutil.rrule.rrulestr(self.rrule, forceset=True)
next_run_actual = future_rs.after(now())

View File

@ -21,6 +21,9 @@ from django.utils.encoding import smart_text
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
# REST Framework
from rest_framework.exceptions import ParseError
# Django-Polymorphic
from polymorphic.models import PolymorphicModel
@ -29,7 +32,6 @@ from django_celery_results.models import TaskResult
# AWX
from awx.main.models.base import * # noqa
from awx.main.models.schedules import Schedule
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
from awx.main.utils import (
decrypt_field, _inventory_updates,
@ -38,7 +40,7 @@ from awx.main.utils import (
)
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.consumers import emit_channel_notification
from awx.main.fields import JSONField
from awx.main.fields import JSONField, AskForField
__all__ = ['UnifiedJobTemplate', 'UnifiedJob']
@ -251,6 +253,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
return self.last_job_run
def update_computed_fields(self):
Schedule = self._meta.get_field('schedules').related_model
related_schedules = Schedule.objects.filter(enabled=True, unified_job_template=self, next_run__isnull=False).order_by('-next_run')
if related_schedules.exists():
self.next_schedule = related_schedules[0]
@ -340,11 +343,16 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
'''
Create a new unified job based on this unified job template.
'''
new_job_passwords = kwargs.pop('survey_passwords', {})
eager_fields = kwargs.pop('_eager_fields', None)
unified_job_class = self._get_unified_job_class()
fields = self._get_unified_job_field_names()
unallowed_fields = set(kwargs.keys()) - set(fields)
if unallowed_fields:
raise Exception('Fields {} are not allowed as overrides.'.format(unallowed_fields))
unified_job = copy_model_by_class(self, unified_job_class, fields, kwargs)
eager_fields = kwargs.get('_eager_fields', None)
if eager_fields:
for fd, val in eager_fields.items():
setattr(unified_job, fd, val)
@ -355,20 +363,48 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
# For JobTemplate-based jobs with surveys, add passwords to list for perma-redaction
if hasattr(self, 'survey_spec') and getattr(self, 'survey_enabled', False):
password_list = self.survey_password_variables()
hide_password_dict = getattr(unified_job, 'survey_passwords', {})
for password in password_list:
hide_password_dict[password] = REPLACE_STR
unified_job.survey_passwords = hide_password_dict
for password in self.survey_password_variables():
new_job_passwords[password] = REPLACE_STR
if new_job_passwords:
unified_job.survey_passwords = new_job_passwords
kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch
unified_job.save()
# Labels and extra credentials copied here
# Labels and credentials copied here
if kwargs.get('credentials'):
Credential = UnifiedJob._meta.get_field('credentials').related_model
cred_dict = Credential.unique_dict(self.credentials.all())
prompted_dict = Credential.unique_dict(kwargs['credentials'])
# combine prompted credentials with JT
cred_dict.update(prompted_dict)
kwargs['credentials'] = [cred for cred in cred_dict.values()]
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
copy_m2m_relationships(self, unified_job, fields, kwargs=kwargs)
if 'extra_vars' in kwargs:
unified_job.handle_extra_data(kwargs['extra_vars'])
if not getattr(self, '_deprecated_credential_launch', False):
# Create record of provided prompts for relaunch and rescheduling
unified_job.create_config_from_prompts(kwargs)
return unified_job
@classmethod
def get_ask_mapping(cls):
'''
Creates dictionary that maps the unified job field (keys)
to the field that enables prompting for the field (values)
'''
mapping = {}
for field in cls._meta.fields:
if isinstance(field, AskForField):
mapping[field.allows_field] = field.name
return mapping
@classmethod
def _get_unified_jt_copy_names(cls):
return cls._get_unified_job_field_names()
@ -389,6 +425,41 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
copy_m2m_relationships(self, unified_jt, fields)
return unified_jt
def _accept_or_ignore_job_kwargs(self, **kwargs):
'''
Override in subclass if template accepts _any_ prompted params
'''
errors = {}
if kwargs:
for field_name in kwargs.keys():
errors[field_name] = [_("Field is not allowed on launch.")]
return ({}, kwargs, errors)
def accept_or_ignore_variables(self, data, errors=None):
'''
If subclasses accept any `variables` or `extra_vars`, they should
define _accept_or_ignore_variables to place those variables in the accepted dict,
according to the acceptance rules of the template.
'''
if errors is None:
errors = {}
if not isinstance(data, dict):
try:
data = parse_yaml_or_json(data, silent_failure=False)
except ParseError as exc:
errors['extra_vars'] = [str(exc)]
return ({}, data, errors)
if hasattr(self, '_accept_or_ignore_variables'):
# SurveyJobTemplateMixin cannot override any methods because of
# resolution order, forced by how metaclass processes fields,
# thus the need for hasattr check
return self._accept_or_ignore_variables(data, errors)
elif data:
errors['extra_vars'] = [
_('Variables {list_of_keys} provided, but this template cannot accept variables.'.format(
list_of_keys=', '.join(data.keys())))]
return ({}, data, errors)
class UnifiedJobTypeStringMixin(object):
@classmethod
@ -750,18 +821,67 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
unified_job_class = self.__class__
unified_jt_class = self._get_unified_job_template_class()
parent_field_name = unified_job_class._get_parent_field_name()
fields = unified_jt_class._get_unified_job_field_names() + [parent_field_name]
unified_job = copy_model_by_class(self, unified_job_class, fields, {})
unified_job.launch_type = 'relaunch'
create_data = {"launch_type": "relaunch"}
if limit:
unified_job.limit = limit
unified_job.save()
create_data["limit"] = limit
prompts = self.launch_prompts()
if self.unified_job_template and prompts:
prompts['_eager_fields'] = create_data
unified_job = self.unified_job_template.create_unified_job(**prompts)
else:
unified_job = copy_model_by_class(self, unified_job_class, fields, {})
for fd, val in create_data.items():
setattr(unified_job, fd, val)
unified_job.save()
# Labels coppied here
copy_m2m_relationships(self, unified_job, fields)
return unified_job
def launch_prompts(self):
'''
Return dictionary of prompts job was launched with
returns None if unknown
'''
JobLaunchConfig = self._meta.get_field('launch_config').related_model
try:
config = self.launch_config
return config.prompts_dict()
except JobLaunchConfig.DoesNotExist:
return None
def create_config_from_prompts(self, kwargs):
'''
Create a launch configuration entry for this job, given prompts
returns None if it can not be created
'''
if self.unified_job_template is None:
return None
JobLaunchConfig = self._meta.get_field('launch_config').related_model
config = JobLaunchConfig(job=self)
valid_fields = self.unified_job_template.get_ask_mapping().keys()
if hasattr(self, 'extra_vars'):
valid_fields.extend(['survey_passwords', 'extra_vars'])
for field_name, value in kwargs.items():
if field_name not in valid_fields:
raise Exception('Unrecognized launch config field {}.'.format(field_name))
if field_name == 'credentials':
continue
key = field_name
if key == 'extra_vars':
key = 'extra_data'
setattr(config, key, value)
config.save()
job_creds = (set(kwargs.get('credentials', [])) -
set(self.unified_job_template.credentials.all()))
if job_creds:
config.credentials.add(*job_creds)
return config
def result_stdout_raw_handle(self, attempt=0):
"""Return a file-like object containing the standard out of the
job's result.
@ -908,6 +1028,19 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
def can_start(self):
    # Only jobs that have not yet been dispatched may be started.
    return bool(self.status in ('new', 'waiting'))
@property
def can_schedule(self):
    """
    True when this job saved enough launch-time configuration that it
    can be turned into a schedule; False otherwise.
    """
    if getattr(self, 'passwords_needed_to_start', None):
        # interactive passwords cannot be supplied by a schedule
        return False
    JobLaunchConfig = self._meta.get_field('launch_config').related_model
    try:
        self.launch_config  # raises DoesNotExist when no config was saved
    except JobLaunchConfig.DoesNotExist:
        return False
    return self.unified_job_template is not None
@property
def task_impact(self):
raise NotImplementedError # Implement in subclass.
@ -1025,8 +1158,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
opts = dict([(field, kwargs.get(field, '')) for field in needed])
if not all(opts.values()):
return False
if 'extra_vars' in kwargs:
self.handle_extra_data(kwargs['extra_vars'])
# Sanity check: If we are running unit tests, then run synchronously.
if getattr(settings, 'CELERY_UNIT_TEST', False):

View File

@ -3,10 +3,12 @@
# Python
#import urlparse
import logging
# Django
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
#from django import settings as tower_settings
# AWX
@ -23,8 +25,9 @@ from awx.main.models.rbac import (
)
from awx.main.fields import ImplicitRoleField
from awx.main.models.mixins import ResourceMixin, SurveyJobTemplateMixin, SurveyJobMixin
from awx.main.models.jobs import LaunchTimeConfig
from awx.main.models.credential import Credential
from awx.main.redact import REPLACE_STR
from awx.main.utils import parse_yaml_or_json
from awx.main.fields import JSONField
from copy import copy
@ -32,10 +35,11 @@ from urlparse import urljoin
__all__ = ['WorkflowJobTemplate', 'WorkflowJob', 'WorkflowJobOptions', 'WorkflowJobNode', 'WorkflowJobTemplateNode',]
CHAR_PROMPTS_LIST = ['job_type', 'job_tags', 'skip_tags', 'limit']
logger = logging.getLogger('awx.main.models.workflow')
class WorkflowNodeBase(CreatedModifiedModel):
class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
class Meta:
abstract = True
app_label = 'main'
@ -66,78 +70,6 @@ class WorkflowNodeBase(CreatedModifiedModel):
default=None,
on_delete=models.SET_NULL,
)
# Prompting-related fields
inventory = models.ForeignKey(
'Inventory',
related_name='%(class)ss',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
)
credential = models.ForeignKey(
'Credential',
related_name='%(class)ss',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
)
char_prompts = JSONField(
blank=True,
default={}
)
def prompts_dict(self):
    """Serialize this node's prompted fields; related objects as pks."""
    data = {}
    if self.inventory:
        data['inventory'] = self.inventory.pk
    if self.credential:
        data['credential'] = self.credential.pk
    data.update({fd: self.char_prompts[fd]
                 for fd in CHAR_PROMPTS_LIST if fd in self.char_prompts})
    return data
@property
def job_type(self):
    # prompted value stored in the char_prompts JSON blob; None when unset
    return self.char_prompts.get('job_type', None)

@property
def job_tags(self):
    # prompted value stored in the char_prompts JSON blob; None when unset
    return self.char_prompts.get('job_tags', None)

@property
def skip_tags(self):
    # prompted value stored in the char_prompts JSON blob; None when unset
    return self.char_prompts.get('skip_tags', None)

@property
def limit(self):
    # prompted value stored in the char_prompts JSON blob; None when unset
    return self.char_prompts.get('limit', None)
def get_prompts_warnings(self):
    """
    Return a dict describing prompts on this node that its unified job
    template will not honor; empty dict when there is nothing to warn about.
    """
    ujt_obj = self.unified_job_template
    if ujt_obj is None:
        return {}
    prompts_dict = self.prompts_dict()
    if not hasattr(ujt_obj, '_ask_for_vars_dict'):
        # template type does not support prompting at all
        if prompts_dict:
            return {'ignored': {'all': 'Cannot use prompts on unified_job_template that is not type of job template'}}
        else:
            return {}
    accepted_fields, ignored_fields = ujt_obj._accept_or_ignore_job_kwargs(**prompts_dict)
    ignored_dict = {}
    for fd in ignored_fields:
        ignored_dict[fd] = 'Workflow node provided field, but job template is not set to ask on launch'
    # job-type-specific restrictions add further ignore reasons
    scan_errors = ujt_obj._extra_job_type_errors(accepted_fields)
    ignored_dict.update(scan_errors)
    data = {}
    if ignored_dict:
        data['ignored'] = ignored_dict
    return data
def get_parent_nodes(self):
'''Returns queryset containing all parents of this node'''
@ -152,7 +84,8 @@ class WorkflowNodeBase(CreatedModifiedModel):
Return field names that should be copied from template node to job node.
'''
return ['workflow_job', 'unified_job_template',
'inventory', 'credential', 'char_prompts']
'extra_data', 'survey_passwords',
'inventory', 'credentials', 'char_prompts']
def create_workflow_job_node(self, **kwargs):
'''
@ -160,11 +93,20 @@ class WorkflowNodeBase(CreatedModifiedModel):
'''
create_kwargs = {}
for field_name in self._get_workflow_job_field_names():
if field_name == 'credentials':
continue
if field_name in kwargs:
create_kwargs[field_name] = kwargs[field_name]
elif hasattr(self, field_name):
create_kwargs[field_name] = getattr(self, field_name)
return WorkflowJobNode.objects.create(**create_kwargs)
new_node = WorkflowJobNode.objects.create(**create_kwargs)
if self.pk:
allowed_creds = self.credentials.all()
else:
allowed_creds = []
for cred in allowed_creds:
new_node.credentials.add(cred)
return new_node
class WorkflowJobTemplateNode(WorkflowNodeBase):
@ -186,11 +128,17 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
is not allowed to access
'''
create_kwargs = {}
allowed_creds = []
for field_name in self._get_workflow_job_field_names():
if field_name == 'credentials':
for cred in self.credentials.all():
if user.can_access(Credential, 'use', cred):
allowed_creds.append(cred)
continue
item = getattr(self, field_name, None)
if item is None:
continue
if field_name in ['inventory', 'credential']:
if field_name == 'inventory':
if not user.can_access(item.__class__, 'use', item):
continue
if field_name in ['unified_job_template']:
@ -198,7 +146,10 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
continue
create_kwargs[field_name] = item
create_kwargs['workflow_job_template'] = workflow_job_template
return self.__class__.objects.create(**create_kwargs)
new_node = self.__class__.objects.create(**create_kwargs)
for cred in allowed_creds:
new_node.credentials.add(cred)
return new_node
class WorkflowJobNode(WorkflowNodeBase):
@ -237,10 +188,14 @@ class WorkflowJobNode(WorkflowNodeBase):
# reject/accept prompted fields
data = {}
ujt_obj = self.unified_job_template
if ujt_obj and hasattr(ujt_obj, '_ask_for_vars_dict'):
accepted_fields, ignored_fields = ujt_obj._accept_or_ignore_job_kwargs(**self.prompts_dict())
for fd in ujt_obj._extra_job_type_errors(accepted_fields):
accepted_fields.pop(fd)
if ujt_obj is not None:
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**self.prompts_dict())
if errors:
logger.info(_('Bad launch configuration starting template {template_pk} as part of '
'workflow {workflow_pk}. Errors:\n{error_text}').format(
template_pk=ujt_obj.pk,
workflow_pk=self.pk,
error_text=errors))
data.update(accepted_fields) # missing fields are handled in the scheduler
# build ancestor artifacts, save them to node model for later
aa_dict = {}
@ -251,18 +206,20 @@ class WorkflowJobNode(WorkflowNodeBase):
if aa_dict:
self.ancestor_artifacts = aa_dict
self.save(update_fields=['ancestor_artifacts'])
# process password list
password_dict = {}
if '_ansible_no_log' in aa_dict:
for key in aa_dict:
if key != '_ansible_no_log':
password_dict[key] = REPLACE_STR
workflow_job_survey_passwords = self.workflow_job.survey_passwords
if workflow_job_survey_passwords:
password_dict.update(workflow_job_survey_passwords)
if self.workflow_job.survey_passwords:
password_dict.update(self.workflow_job.survey_passwords)
if self.survey_passwords:
password_dict.update(self.survey_passwords)
if password_dict:
data['survey_passwords'] = password_dict
# process extra_vars
extra_vars = {}
extra_vars = data.get('extra_vars', {})
if aa_dict:
functional_aa_dict = copy(aa_dict)
functional_aa_dict.pop('_ansible_no_log', None)
@ -273,7 +230,7 @@ class WorkflowJobNode(WorkflowNodeBase):
if extra_vars:
data['extra_vars'] = extra_vars
# ensure that unified jobs created by WorkflowJobs are marked
data['launch_type'] = 'workflow'
data['_eager_fields'] = {'launch_type': 'workflow'}
return data
@ -370,7 +327,7 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
base_list = super(WorkflowJobTemplate, cls)._get_unified_jt_copy_names()
base_list.remove('labels')
return (base_list +
['survey_spec', 'survey_enabled', 'organization'])
['survey_spec', 'survey_enabled', 'ask_variables_on_launch', 'organization'])
def get_absolute_url(self, request=None):
return reverse('api:workflow_job_template_detail', kwargs={'pk': self.pk}, request=request)
@ -398,27 +355,26 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
workflow_job.copy_nodes_from_original(original=self)
return workflow_job
def _accept_or_ignore_job_kwargs(self, extra_vars=None, **kwargs):
# Only accept allowed survey variables
ignored_fields = {}
def _accept_or_ignore_job_kwargs(self, **kwargs):
prompted_fields = {}
prompted_fields['extra_vars'] = {}
ignored_fields['extra_vars'] = {}
extra_vars = parse_yaml_or_json(extra_vars)
if self.survey_enabled and self.survey_spec:
survey_vars = [question['variable'] for question in self.survey_spec.get('spec', [])]
for key in extra_vars:
if key in survey_vars:
prompted_fields['extra_vars'][key] = extra_vars[key]
else:
ignored_fields['extra_vars'][key] = extra_vars[key]
else:
prompted_fields['extra_vars'] = extra_vars
rejected_fields = {}
accepted_vars, rejected_vars, errors_dict = self.accept_or_ignore_variables(kwargs.get('extra_vars', {}))
if accepted_vars:
prompted_fields['extra_vars'] = accepted_vars
if rejected_vars:
rejected_fields['extra_vars'] = rejected_vars
return prompted_fields, ignored_fields
# WFJTs do not behave like JTs, it can not accept inventory, credential, etc.
bad_kwargs = kwargs.copy()
bad_kwargs.pop('extra_vars', None)
if bad_kwargs:
rejected_fields.update(bad_kwargs)
for field in bad_kwargs:
errors_dict[field] = _('Field is not allowed for use in workflows.')
return prompted_fields, rejected_fields, errors_dict
def can_start_without_user_input(self):
'''Return whether WFJT can be launched without survey passwords.'''
return not bool(
self.variables_needed_to_start or
self.node_templates_missing() or
@ -431,8 +387,12 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
def node_prompts_rejected(self):
node_list = []
for node in self.workflow_job_template_nodes.prefetch_related('unified_job_template').all():
node_prompts_warnings = node.get_prompts_warnings()
if node_prompts_warnings:
ujt_obj = node.unified_job_template
if ujt_obj is None:
continue
prompts_dict = node.prompts_dict()
accepted_fields, ignored_fields, prompts_errors = ujt_obj._accept_or_ignore_job_kwargs(**prompts_dict)
if prompts_errors:
node_list.append(node.pk)
return node_list

View File

@ -196,7 +196,7 @@ class TaskManager():
spawn_node.job = job
spawn_node.save()
if job._resources_sufficient_for_launch():
can_start = job.signal_start(**kv)
can_start = job.signal_start()
if not can_start:
job.job_explanation = _("Job spawned from workflow could not start because it "
"was not in the right state or required manual credentials")
@ -285,7 +285,8 @@ class TaskManager():
map(lambda task: self.graph[task.instance_group.name]['graph'].add_job(task), running_tasks)
def create_project_update(self, task):
project_task = Project.objects.get(id=task.project_id).create_project_update(launch_type='dependency')
project_task = Project.objects.get(id=task.project_id).create_project_update(
_eager_fields=dict(launch_type='dependency'))
# Project created 1 second behind
project_task.created = task.created - timedelta(seconds=1)
@ -294,7 +295,8 @@ class TaskManager():
return project_task
def create_inventory_update(self, task, inventory_source_task):
inventory_task = InventorySource.objects.get(id=inventory_source_task.id).create_inventory_update(launch_type='dependency')
inventory_task = InventorySource.objects.get(id=inventory_source_task.id).create_inventory_update(
_eager_fields=dict(launch_type='dependency'))
inventory_task.created = task.created - timedelta(seconds=2)
inventory_task.status = 'pending'

View File

@ -56,7 +56,7 @@ from awx.main.expect import run, isolated_manager
from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url,
check_proot_installed, build_proot_temp_dir, get_licenser,
wrap_args_with_proot, get_system_task_capacity, OutputEventFilter,
parse_yaml_or_json, ignore_inventory_computed_fields, ignore_inventory_group_removal,
ignore_inventory_computed_fields, ignore_inventory_group_removal,
get_type_for_model, extract_ansible_vars)
from awx.main.utils.reload import restart_local_services, stop_local_services
from awx.main.utils.handlers import configure_external_logger
@ -306,8 +306,13 @@ def awx_periodic_scheduler(self):
if template.cache_timeout_blocked:
logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
continue
new_unified_job = template.create_unified_job(launch_type='scheduled', schedule=schedule)
can_start = new_unified_job.signal_start(extra_vars=parse_yaml_or_json(schedule.extra_data))
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
can_start = new_unified_job.signal_start()
except Exception:
logger.exception('Error spawning scheduled job.')
continue
if not can_start:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = "Scheduled job could not start because it was not in the right state or required manual credentials"
@ -1226,8 +1231,8 @@ class RunJob(BaseTask):
pu_ig = pu_ig.controller
pu_en = settings.CLUSTER_HOST_ID
local_project_sync = job.project.create_project_update(
launch_type="sync",
_eager_fields=dict(
launch_type="sync",
job_type='run',
status='running',
instance_group = pu_ig,
@ -1485,8 +1490,8 @@ class RunProjectUpdate(BaseTask):
'another update is already active.'.format(inv_src.name))
continue
local_inv_update = inv_src.create_inventory_update(
launch_type='scm',
_eager_fields=dict(
launch_type='scm',
status='running',
instance_group=project_update.instance_group,
execution_node=project_update.execution_node,
@ -1969,8 +1974,8 @@ class RunInventoryUpdate(BaseTask):
if (inventory_update.source=='scm' and inventory_update.launch_type!='scm' and source_project):
request_id = '' if self.request.id is None else self.request.id
local_project_sync = source_project.create_project_update(
launch_type="sync",
_eager_fields=dict(
launch_type="sync",
job_type='run',
status='running',
execution_node=inventory_update.execution_node,

View File

@ -202,7 +202,6 @@ def test_modify_ssh_credential_at_launch(get, post, job_template, admin,
machine_credential, vault_credential, credential):
job_template.credentials.add(vault_credential)
job_template.credentials.add(credential)
job_template.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'credential': machine_credential.pk}, admin, expect=201).data['job']
@ -215,7 +214,6 @@ def test_modify_vault_credential_at_launch(get, post, job_template, admin,
machine_credential, vault_credential, credential):
job_template.credentials.add(machine_credential)
job_template.credentials.add(credential)
job_template.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'vault_credential': vault_credential.pk}, admin, expect=201).data['job']
@ -228,7 +226,6 @@ def test_modify_extra_credentials_at_launch(get, post, job_template, admin,
machine_credential, vault_credential, credential):
job_template.credentials.add(machine_credential)
job_template.credentials.add(vault_credential)
job_template.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'extra_credentials': [credential.pk]}, admin, expect=201).data['job']
@ -239,7 +236,6 @@ def test_modify_extra_credentials_at_launch(get, post, job_template, admin,
@pytest.mark.django_db
def test_overwrite_ssh_credential_at_launch(get, post, job_template, admin, machine_credential):
job_template.credentials.add(machine_credential)
job_template.save()
new_cred = machine_credential
new_cred.pk = None
@ -256,7 +252,6 @@ def test_overwrite_ssh_credential_at_launch(get, post, job_template, admin, mach
@pytest.mark.django_db
def test_ssh_password_prompted_at_launch(get, post, job_template, admin, machine_credential):
job_template.credentials.add(machine_credential)
job_template.save()
machine_credential.inputs['password'] = 'ASK'
machine_credential.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
@ -265,16 +260,17 @@ def test_ssh_password_prompted_at_launch(get, post, job_template, admin, machine
@pytest.mark.django_db
def test_prompted_credential_removed_on_launch(get, post, job_template, admin, machine_credential):
def test_prompted_credential_replaced_on_launch(get, post, job_template, admin, machine_credential):
# If a JT has a credential that needs a password, but the launch POST
# specifies {"credentials": []}, don't require any passwords
job_template.credentials.add(machine_credential)
job_template.save()
machine_credential.inputs['password'] = 'ASK'
machine_credential.save()
# specifies credential that does not require any passwords
cred2 = Credential(name='second-cred', inputs=machine_credential.inputs,
credential_type=machine_credential.credential_type)
cred2.inputs['password'] = 'ASK'
cred2.save()
job_template.credentials.add(cred2)
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = post(url, {}, admin, expect=400)
resp = post(url, {'credentials': []}, admin, expect=201)
resp = post(url, {'credentials': [machine_credential.pk]}, admin, expect=201)
assert 'job' in resp.data
@ -297,7 +293,6 @@ def test_ssh_credential_with_password_at_launch(get, post, job_template, admin,
@pytest.mark.django_db
def test_vault_password_prompted_at_launch(get, post, job_template, admin, vault_credential):
job_template.credentials.add(vault_credential)
job_template.save()
vault_credential.inputs['vault_password'] = 'ASK'
vault_credential.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
@ -337,14 +332,14 @@ def test_extra_creds_prompted_at_launch(get, post, job_template, admin, net_cred
@pytest.mark.django_db
def test_invalid_mixed_credentials_specification(get, post, job_template, admin, net_credential):
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
post(url, {'credentials': [net_credential.pk], 'extra_credentials': [net_credential.pk]}, admin, expect=400)
post(url=url, data={'credentials': [net_credential.pk], 'extra_credentials': [net_credential.pk]},
user=admin, expect=400)
@pytest.mark.django_db
def test_rbac_default_credential_usage(get, post, job_template, alice, machine_credential):
job_template.credentials.add(machine_credential)
job_template.execute_role.members.add(alice)
job_template.save()
# alice can launch; she's not adding any _new_ credentials, and she has
# execute access to the JT
@ -352,9 +347,11 @@ def test_rbac_default_credential_usage(get, post, job_template, alice, machine_c
post(url, {'credential': machine_credential.pk}, alice, expect=201)
# make (copy) a _new_ SSH cred
new_cred = machine_credential
new_cred.pk = None
new_cred.save()
new_cred = Credential.objects.create(
name=machine_credential.name,
credential_type=machine_credential.credential_type,
inputs=machine_credential.inputs
)
# alice is attempting to launch with a *different* SSH cred, but
# she does not have access to it, so she cannot launch

View File

@ -33,7 +33,7 @@ def test_job_relaunch_permission_denied_response(
assert r.data['summary_fields']['user_capabilities']['start']
# Job has prompted extra_credential, launch denied w/ message
job.credentials.add(net_credential)
job.launch_config.credentials.add(net_credential)
r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)
assert 'launched with prompted fields' in r.data['detail']
assert 'do not have permission' in r.data['detail']

View File

@ -1,6 +1,7 @@
import mock
import pytest
import yaml
import json
from awx.api.serializers import JobLaunchSerializer
from awx.main.models.credential import Credential
@ -29,6 +30,8 @@ def runtime_data(organization, credentialtype_ssh):
skip_tags='restart',
inventory=inv_obj.pk,
credentials=[cred_obj.pk],
diff_mode=True,
verbosity=2
)
@ -45,6 +48,10 @@ def job_template_prompts(project, inventory, machine_credential):
project=project,
inventory=inventory,
name='deploy-job-template',
# JT values must differ from prompted vals in order to register
limit='webservers',
job_tags = 'foobar',
skip_tags = 'barfoo',
ask_variables_on_launch=on_off,
ask_tags_on_launch=on_off,
ask_skip_tags_on_launch=on_off,
@ -52,6 +59,7 @@ def job_template_prompts(project, inventory, machine_credential):
ask_inventory_on_launch=on_off,
ask_limit_on_launch=on_off,
ask_credential_on_launch=on_off,
ask_diff_mode_on_launch=on_off,
ask_verbosity_on_launch=on_off,
)
jt.credentials.add(machine_credential)
@ -73,10 +81,26 @@ def job_template_prompts_null(project):
ask_inventory_on_launch=True,
ask_limit_on_launch=True,
ask_credential_on_launch=True,
ask_diff_mode_on_launch=True,
ask_verbosity_on_launch=True,
)
def data_to_internal(data):
    '''
    Convert POST-style launch data (integer primary keys, JSON strings)
    into its internal representation (model objects, dictionaries, sets).
    '''
    internal = dict(data)
    if 'extra_vars' in internal:
        internal['extra_vars'] = json.loads(data['extra_vars'])
    if 'credentials' in internal:
        internal['credentials'] = {Credential.objects.get(pk=cred_pk)
                                   for cred_pk in data['credentials']}
    if 'inventory' in internal:
        internal['inventory'] = Inventory.objects.get(pk=data['inventory'])
    return internal
# End of setup, tests start here
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
@ -87,10 +111,10 @@ def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, ad
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
runtime_data, admin_user, expect=201)
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({'extra_vars':{}},)
assert JobTemplate.create_unified_job.call_args == ()
# Check that job is serialized correctly
job_id = response.data['job']
@ -121,7 +145,8 @@ def test_job_accept_prompted_vars(runtime_data, job_template_prompts, post, admi
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
runtime_data, admin_user, expect=201)
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == (runtime_data,)
called_with = data_to_internal(runtime_data)
JobTemplate.create_unified_job.assert_called_with(**called_with)
job_id = response.data['job']
assert job_id == 968
@ -131,7 +156,7 @@ def test_job_accept_prompted_vars(runtime_data, job_template_prompts, post, admi
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_accept_null_tags(job_template_prompts, post, admin_user, mocker):
def test_job_accept_empty_tags(job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(True)
mock_job = mocker.MagicMock(spec=Job, id=968)
@ -167,7 +192,8 @@ def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null,
response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
runtime_data, rando, expect=201)
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == (runtime_data,)
expected_call = data_to_internal(runtime_data)
assert JobTemplate.create_unified_job.call_args == (expected_call,)
job_id = response.data['job']
assert job_id == 968
@ -211,7 +237,7 @@ def test_job_launch_fails_without_inventory(deploy_jobtemplate, post, admin_user
response = post(reverse('api:job_template_launch',
kwargs={'pk': deploy_jobtemplate.pk}), {}, admin_user, expect=400)
assert response.data['inventory'] == ["Job Template 'inventory' is missing or undefined."]
assert 'inventory' in response.data['resources_needed_to_start'][0]
@pytest.mark.django_db
@ -234,10 +260,8 @@ def test_job_launch_fails_without_credential_access(job_template_prompts, runtim
job_template.execute_role.members.add(rando)
# Assure that giving a credential without access blocks the launch
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
dict(credentials=runtime_data['credentials']), rando, expect=403)
assert response.data['detail'] == u'You do not have access to credential runtime-cred'
post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
dict(credentials=runtime_data['credentials']), rando, expect=403)
@pytest.mark.django_db
@ -253,24 +277,24 @@ def test_job_block_scan_job_type_change(job_template_prompts, post, admin_user):
@pytest.mark.django_db
def test_job_launch_JT_with_validation(machine_credential, deploy_jobtemplate):
def test_job_launch_JT_with_validation(machine_credential, credential, deploy_jobtemplate):
deploy_jobtemplate.extra_vars = '{"job_template_var": 3}'
deploy_jobtemplate.ask_credential_on_launch = True
deploy_jobtemplate.ask_variables_on_launch = True
deploy_jobtemplate.save()
kv = dict(extra_vars={"job_launch_var": 4}, credentials=[machine_credential.id])
serializer = JobLaunchSerializer(
instance=deploy_jobtemplate, data=kv,
context={'obj': deploy_jobtemplate, 'data': kv, 'passwords': {}})
kv = dict(extra_vars={"job_launch_var": 4}, credentials=[machine_credential.pk])
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert validated
assert validated, serializer.errors
kv['credentials'] = [machine_credential] # conversion to internal value
job_obj = deploy_jobtemplate.create_unified_job(**kv)
final_job_extra_vars = yaml.load(job_obj.extra_vars)
assert 'job_template_var' in final_job_extra_vars
assert 'job_launch_var' in final_job_extra_vars
assert [cred.pk for cred in job_obj.credentials.all()] == [machine_credential.id]
assert 'job_template_var' in final_job_extra_vars
assert set([cred.pk for cred in job_obj.credentials.all()]) == set([machine_credential.id, credential.id])
@pytest.mark.django_db
@ -279,34 +303,54 @@ def test_job_launch_with_default_creds(machine_credential, vault_credential, dep
deploy_jobtemplate.credentials.add(machine_credential)
deploy_jobtemplate.credentials.add(vault_credential)
kv = dict()
serializer = JobLaunchSerializer(
instance=deploy_jobtemplate, data=kv,
context={'obj': deploy_jobtemplate, 'data': kv, 'passwords': {}})
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert validated
prompted_fields, ignored_fields = deploy_jobtemplate._accept_or_ignore_job_kwargs(**kv)
prompted_fields, ignored_fields, errors = deploy_jobtemplate._accept_or_ignore_job_kwargs(**kv)
job_obj = deploy_jobtemplate.create_unified_job(**prompted_fields)
assert job_obj.credential == machine_credential.pk
assert job_obj.vault_credential == vault_credential.pk
@pytest.mark.django_db
def test_job_launch_JT_enforces_unique_credentials_kinds(machine_credential, credentialtype_aws, deploy_jobtemplate):
"""
JT launching should require that extra_credentials have distinct CredentialTypes
"""
creds = []
for i in range(2):
aws = Credential.objects.create(
name='cred-%d' % i,
credential_type=credentialtype_aws,
inputs={
'username': 'test_user',
'password': 'pas4word'
}
)
aws.save()
creds.append(aws)
kv = dict(credentials=creds, credential=machine_credential.id)
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert not validated
@pytest.mark.django_db
def test_job_launch_with_empty_creds(machine_credential, vault_credential, deploy_jobtemplate):
deploy_jobtemplate.ask_credential_on_launch = True
deploy_jobtemplate.credentials.add(machine_credential)
deploy_jobtemplate.credentials.add(vault_credential)
kv = dict(credentials=[])
serializer = JobLaunchSerializer(
instance=deploy_jobtemplate, data=kv,
context={'obj': deploy_jobtemplate, 'data': kv, 'passwords': {}})
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert validated
prompted_fields, ignored_fields = deploy_jobtemplate._accept_or_ignore_job_kwargs(**kv)
prompted_fields, ignored_fields, errors = deploy_jobtemplate._accept_or_ignore_job_kwargs(**kv)
job_obj = deploy_jobtemplate.create_unified_job(**prompted_fields)
assert job_obj.credential is None
assert job_obj.vault_credential is None
assert job_obj.credential is deploy_jobtemplate.credential
assert job_obj.vault_credential is deploy_jobtemplate.vault_credential
@pytest.mark.django_db
@ -383,6 +427,28 @@ def test_job_launch_pass_with_prompted_vault_password(machine_credential, vault_
signal_start.assert_called_with(vault_password='vault-me')
@pytest.mark.django_db
def test_job_launch_JT_with_credentials(machine_credential, credential, net_credential, deploy_jobtemplate):
deploy_jobtemplate.ask_credential_on_launch = True
deploy_jobtemplate.save()
kv = dict(credentials=[credential.pk, net_credential.pk, machine_credential.pk])
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert validated, serializer.errors
kv['credentials'] = [credential, net_credential, machine_credential] # convert to internal value
prompted_fields, ignored_fields, errors = deploy_jobtemplate._accept_or_ignore_job_kwargs(**kv)
deploy_jobtemplate._is_manual_launch = True
job_obj = deploy_jobtemplate.create_unified_job(**prompted_fields)
creds = job_obj.credentials.all()
assert len(creds) == 3
assert credential in creds
assert net_credential in creds
assert machine_credential in creds
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_unprompted_vars_with_survey(mocker, survey_spec_factory, job_template_prompts, post, admin_user):
@ -402,7 +468,6 @@ def test_job_launch_unprompted_vars_with_survey(mocker, survey_spec_factory, job
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({'extra_vars':{'survey_var': 4}},)
job_id = response.data['job']
assert job_id == 968

View File

@ -2,11 +2,33 @@ import pytest
from awx.api.versioning import reverse
from awx.main.models import JobTemplate
RRULE_EXAMPLE = 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1'
@pytest.mark.django_db
def test_non_job_extra_vars_prohibited(post, project, admin_user):
rrule = 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1'
url = reverse('api:project_schedules_list', kwargs={'pk': project.id})
r = post(url, {'name': 'test sch', 'rrule': rrule, 'extra_data': '{"a": 5}'},
r = post(url, {'name': 'test sch', 'rrule': RRULE_EXAMPLE, 'extra_data': '{"a": 5}'},
admin_user, expect=400)
assert 'cannot accept extra variables' in r.data['extra_data'][0]
assert 'not allowed on launch' in str(r.data['extra_data'][0])
@pytest.mark.django_db
def test_valid_survey_answer(post, admin_user, project, inventory, survey_spec_factory):
    # A schedule may provide extra_data answering an enabled survey even
    # when the template does not ask for variables on launch.
    job_template = JobTemplate.objects.create(
        name='test-jt',
        project=project,
        playbook='helloworld.yml',
        inventory=inventory
    )
    job_template.ask_variables_on_launch = False
    job_template.survey_enabled = True
    job_template.survey_spec = survey_spec_factory('var1')
    # sanity check on the factory: the answer below must be an integer
    assert job_template.survey_spec['spec'][0]['type'] == 'integer'
    job_template.save()
    url = reverse('api:job_template_schedules_list', kwargs={'pk': job_template.id})
    post(url, {'name': 'test sch', 'rrule': RRULE_EXAMPLE, 'extra_data': '{"var1": 54}'},
         admin_user, expect=201)

View File

@ -200,7 +200,7 @@ def test_delete_survey_spec_without_license(job_template_with_survey, delete, ad
@mock.patch('awx.main.access.BaseAccess.check_license', lambda self, **kwargs: True)
@mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.create_unified_job',
lambda self, extra_vars: mock.MagicMock(spec=Job, id=968))
lambda self, **kwargs: mock.MagicMock(spec=Job, id=968))
@mock.patch('awx.api.serializers.JobSerializer.to_representation', lambda self, obj: {})
@pytest.mark.django_db
@pytest.mark.survey

View File

@ -0,0 +1,188 @@
import pytest
from awx.api.versioning import reverse
from awx.main.models.jobs import JobTemplate
from awx.main.models.workflow import WorkflowJobTemplateNode
from awx.main.models.credential import Credential
@pytest.fixture
def job_template(inventory, project):
# need related resources set for these tests
return JobTemplate.objects.create(
name='test-job_template',
inventory=inventory,
project=project
)
@pytest.fixture
def node(workflow_job_template, post, admin_user, job_template):
return WorkflowJobTemplateNode.objects.create(
workflow_job_template=workflow_job_template,
unified_job_template=job_template
)
@pytest.mark.django_db
def test_blank_UJT_unallowed(workflow_job_template, post, admin_user):
url = reverse('api:workflow_job_template_workflow_nodes_list',
kwargs={'pk': workflow_job_template.pk})
r = post(url, {}, user=admin_user, expect=400)
assert 'unified_job_template' in r.data
@pytest.mark.django_db
def test_cannot_remove_UJT(node, patch, admin_user):
r = patch(
node.get_absolute_url(),
data={'unified_job_template': None},
user=admin_user,
expect=400
)
assert 'unified_job_template' in r.data
@pytest.mark.django_db
def test_node_rejects_unprompted_fields(inventory, project, workflow_job_template, post, admin_user):
job_template = JobTemplate.objects.create(
inventory = inventory,
project = project,
playbook = 'helloworld.yml',
ask_limit_on_launch = False
)
url = reverse('api:workflow_job_template_workflow_nodes_list',
kwargs={'pk': workflow_job_template.pk, 'version': 'v1'})
r = post(url, {'unified_job_template': job_template.pk, 'limit': 'webservers'},
user=admin_user, expect=400)
assert 'limit' in r.data
assert 'not configured to prompt on launch' in r.data['limit'][0]
@pytest.mark.django_db
def test_node_accepts_prompted_fields(inventory, project, workflow_job_template, post, admin_user):
job_template = JobTemplate.objects.create(
inventory = inventory,
project = project,
playbook = 'helloworld.yml',
ask_limit_on_launch = True
)
url = reverse('api:workflow_job_template_workflow_nodes_list',
kwargs={'pk': workflow_job_template.pk, 'version': 'v1'})
post(url, {'unified_job_template': job_template.pk, 'limit': 'webservers'},
user=admin_user, expect=201)
@pytest.mark.django_db
class TestNodeCredentials:
'''
The supported way to provide credentials on launch is through a list
under the "credentials" key - WFJT nodes have a many-to-many relationship
corresponding to this, and it must follow rules consistent with other prompts
'''
def test_not_allows_non_job_models(self, post, admin_user, workflow_job_template,
project, machine_credential):
node = WorkflowJobTemplateNode.objects.create(
workflow_job_template=workflow_job_template,
unified_job_template=project
)
r = post(
reverse(
'api:workflow_job_template_node_credentials_list',
kwargs = {'pk': node.pk}
),
data = {'id': machine_credential.pk},
user = admin_user,
expect = 400
)
assert 'cannot accept credentials on launch' in str(r.data['msg'])
@pytest.mark.django_db
class TestOldCredentialField:
'''
The field `credential` on JTs & WFJT nodes is deprecated, but still supported
TODO: remove tests when JT vault_credential / credential / other stuff
is removed
'''
def test_credential_accepted_create(self, workflow_job_template, post, admin_user,
job_template, machine_credential):
r = post(
reverse(
'api:workflow_job_template_workflow_nodes_list',
kwargs = {'pk': workflow_job_template.pk}
),
data = {'credential': machine_credential.pk, 'unified_job_template': job_template.pk},
user = admin_user,
expect = 201
)
assert r.data['credential'] == machine_credential.pk
node = WorkflowJobTemplateNode.objects.get(pk=r.data['id'])
assert list(node.credentials.all()) == [machine_credential]
@pytest.mark.parametrize('role,code', [
['use_role', 201],
['read_role', 403]
])
def test_credential_rbac(self, role, code, workflow_job_template, post, rando,
job_template, machine_credential):
role_obj = getattr(machine_credential, role)
role_obj.members.add(rando)
job_template.execute_role.members.add(rando)
workflow_job_template.admin_role.members.add(rando)
post(
reverse(
'api:workflow_job_template_workflow_nodes_list',
kwargs = {'pk': workflow_job_template.pk}
),
data = {'credential': machine_credential.pk, 'unified_job_template': job_template.pk},
user = rando,
expect = code
)
def test_credential_add_remove(self, node, patch, machine_credential, admin_user):
node.unified_job_template.ask_credential_on_launch = True
node.unified_job_template.save()
url = node.get_absolute_url()
patch(
url,
data = {'credential': machine_credential.pk},
user = admin_user,
expect = 200
)
node.refresh_from_db()
assert node.credential == machine_credential.pk
patch(
url,
data = {'credential': None},
user = admin_user,
expect = 200
)
node.refresh_from_db()
assert list(node.credentials.values_list('pk', flat=True)) == []
def test_credential_replace(self, node, patch, credentialtype_ssh, admin_user):
node.unified_job_template.ask_credential_on_launch = True
node.unified_job_template.save()
cred1 = Credential.objects.create(
credential_type=credentialtype_ssh,
name='machine-cred1',
inputs={'username': 'test_user', 'password': 'pas4word'})
cred2 = Credential.objects.create(
credential_type=credentialtype_ssh,
name='machine-cred2',
inputs={'username': 'test_user', 'password': 'pas4word'})
node.credentials.add(cred1)
assert node.credential == cred1.pk
url = node.get_absolute_url()
patch(
url,
data = {'credential': cred2.pk},
user = admin_user,
expect = 200
)
assert node.credential == cred2.pk

View File

@ -0,0 +1,69 @@
import pytest
# AWX
from awx.main.models import JobTemplate, JobLaunchConfig
@pytest.fixture
def full_jt(inventory, project, machine_credential):
jt = JobTemplate.objects.create(
name='my-jt',
inventory=inventory,
project=project,
playbook='helloworld.yml'
)
jt.credentials.add(machine_credential)
return jt
@pytest.fixture
def config_factory(full_jt):
def return_config(data):
job = full_jt.create_unified_job(**data)
try:
return job.launch_config
except JobLaunchConfig.DoesNotExist:
return None
return return_config
@pytest.mark.django_db
class TestConfigCreation:
'''
Checks cases for the auto-creation of a job configuration with the
creation of a unified job
'''
def test_null_configuration(self, full_jt):
job = full_jt.create_unified_job()
assert job.launch_config.prompts_dict() == {}
def test_char_field_change(self, full_jt):
job = full_jt.create_unified_job(limit='foobar')
config = job.launch_config
assert config.limit == 'foobar'
assert config.char_prompts == {'limit': 'foobar'}
def test_added_credential(self, full_jt, credential):
job = full_jt.create_unified_job(credentials=[credential])
config = job.launch_config
assert set(config.credentials.all()) == set([credential])
@pytest.mark.django_db
class TestConfigReversibility:
'''
Checks that a blob of saved prompts will be re-created in the
prompts_dict for launching new jobs
'''
def test_char_field_only(self, config_factory):
config = config_factory({'limit': 'foobar'})
assert config.prompts_dict() == {'limit': 'foobar'}
def test_related_objects(self, config_factory, inventory, credential):
prompts = {
'limit': 'foobar',
'inventory': inventory,
'credentials': set([credential])
}
config = config_factory(prompts)
assert config.prompts_dict() == prompts

View File

@ -38,25 +38,20 @@ class TestCreateUnifiedJob:
'''
def test_many_to_many_kwargs(self, mocker, job_template_labels):
jt = job_template_labels
mocked = mocker.MagicMock()
mocked.__class__.__name__ = 'ManyRelatedManager'
kwargs = {
'labels': mocked
}
_get_unified_job_field_names = mocker.patch('awx.main.models.jobs.JobTemplate._get_unified_job_field_names', return_value=['labels'])
jt.create_unified_job(**kwargs)
jt.create_unified_job()
_get_unified_job_field_names.assert_called_with()
mocked.all.assert_called_with()
'''
Ensure that credentials m2m field is copied to new relaunched job
'''
def test_job_relaunch_copy_vars(self, machine_credential, inventory,
deploy_jobtemplate, post, mocker, net_credential):
job_with_links = Job.objects.create(name='existing-job', inventory=inventory)
job_with_links = Job(name='existing-job', inventory=inventory)
job_with_links.job_template = deploy_jobtemplate
job_with_links.limit = "my_server"
job_with_links.save()
job_with_links.credentials.add(machine_credential)
job_with_links.credentials.add(net_credential)
with mocker.patch('awx.main.models.unified_jobs.UnifiedJobTemplate._get_unified_job_field_names',

View File

@ -1,4 +1,7 @@
from awx.main.models import Job, Instance
from awx.main.models import (
Job,
Instance
)
from django.test.utils import override_settings
import pytest
@ -36,3 +39,31 @@ def test_job_notification_data(inventory):
)
notification_data = job.notification_data(block=0)
assert json.loads(notification_data['extra_vars'])['SSN'] == encrypted_str
@pytest.mark.django_db
class TestLaunchConfig:
def test_null_creation_from_prompts(self):
job = Job.objects.create()
data = {
"credentials": [],
"extra_vars": {},
"limit": None,
"job_type": None
}
config = job.create_config_from_prompts(data)
assert config is None
def test_only_limit_defined(self, job_template):
job = Job.objects.create(job_template=job_template)
data = {
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": ""
}
config = job.create_config_from_prompts(data)
assert config.char_prompts == {"limit": ""}
assert not config.credentials.exists()
assert config.prompts_dict() == {"limit": ""}

View File

@ -2,18 +2,21 @@ import pytest
from awx.main.access import (
JobAccess,
JobLaunchConfigAccess,
AdHocCommandAccess,
InventoryUpdateAccess,
ProjectUpdateAccess
)
from awx.main.models import (
Job,
JobLaunchConfig,
JobTemplate,
AdHocCommand,
InventoryUpdate,
InventorySource,
ProjectUpdate,
User
User,
Credential
)
@ -137,25 +140,32 @@ def test_project_org_admin_delete_allowed(normal_job, org_admin):
@pytest.mark.django_db
class TestJobRelaunchAccess:
def test_job_relaunch_normal_resource_access(self, user, inventory, machine_credential):
job_with_links = Job.objects.create(name='existing-job', inventory=inventory)
@pytest.mark.parametrize("inv_access,cred_access,can_start", [
(True, True, True), # Confirm that a user with inventory & credential access can launch
(False, True, False), # Confirm that a user with credential access alone cannot launch
(True, False, False), # Confirm that a user with inventory access alone cannot launch
])
def test_job_relaunch_resource_access(self, user, inventory, machine_credential,
inv_access, cred_access, can_start):
job_template = JobTemplate.objects.create(
ask_inventory_on_launch=True,
ask_credential_on_launch=True
)
job_with_links = Job.objects.create(name='existing-job', inventory=inventory, job_template=job_template)
job_with_links.credentials.add(machine_credential)
inventory_user = user('user1', False)
credential_user = user('user2', False)
both_user = user('user3', False)
JobLaunchConfig.objects.create(job=job_with_links, inventory=inventory)
job_with_links.launch_config.credentials.add(machine_credential) # credential was prompted
u = user('user1', False)
job_template.execute_role.members.add(u)
if inv_access:
job_with_links.inventory.use_role.members.add(u)
if cred_access:
machine_credential.use_role.members.add(u)
# Confirm that a user with inventory & credential access can launch
machine_credential.use_role.members.add(both_user)
job_with_links.inventory.use_role.members.add(both_user)
assert both_user.can_access(Job, 'start', job_with_links, validate_license=False)
# Confirm that a user with credential access alone cannot launch
machine_credential.use_role.members.add(credential_user)
assert not credential_user.can_access(Job, 'start', job_with_links, validate_license=False)
# Confirm that a user with inventory access alone cannot launch
job_with_links.inventory.use_role.members.add(inventory_user)
assert not inventory_user.can_access(Job, 'start', job_with_links, validate_license=False)
access = JobAccess(u)
assert access.can_start(job_with_links, validate_license=False) == can_start, (
"Inventory access: {}\nCredential access: {}\n Expected access: {}".format(inv_access, cred_access, can_start)
)
def test_job_relaunch_credential_access(
self, inventory, project, credential, net_credential):
@ -166,11 +176,10 @@ class TestJobRelaunchAccess:
# Job is unchanged from JT, user has ability to launch
jt_user = User.objects.create(username='jobtemplateuser')
jt.execute_role.members.add(jt_user)
assert jt_user in job.job_template.execute_role
assert jt_user.can_access(Job, 'start', job, validate_license=False)
# Job has prompted net credential, launch denied w/ message
job.credentials.add(net_credential)
job = jt.create_unified_job(credentials=[net_credential])
assert not jt_user.can_access(Job, 'start', job, validate_license=False)
def test_prompted_credential_relaunch_denied(
@ -180,9 +189,10 @@ class TestJobRelaunchAccess:
ask_credential_on_launch=True)
job = jt.create_unified_job()
jt.execute_role.members.add(rando)
assert rando.can_access(Job, 'start', job, validate_license=False)
# Job has prompted net credential, rando lacks permission to use it
job.credentials.add(net_credential)
job = jt.create_unified_job(credentials=[net_credential])
assert not rando.can_access(Job, 'start', job, validate_license=False)
def test_prompted_credential_relaunch_allowed(
@ -269,3 +279,50 @@ class TestJobAndUpdateCancels:
project_update = ProjectUpdate(project=project, created_by=admin_user)
access = ProjectUpdateAccess(proj_updater)
assert not access.can_cancel(project_update)
@pytest.mark.django_db
class TestLaunchConfigAccess:
def _make_two_credentials(self, cred_type):
return (
Credential.objects.create(
credential_type=cred_type, name='machine-cred-1',
inputs={'username': 'test_user', 'password': 'pas4word'}),
Credential.objects.create(
credential_type=cred_type, name='machine-cred-2',
inputs={'username': 'test_user', 'password': 'pas4word'})
)
def test_new_credentials_access(self, credentialtype_ssh, rando):
access = JobLaunchConfigAccess(rando)
cred1, cred2 = self._make_two_credentials(credentialtype_ssh)
assert not access.can_add({'credentials': [cred1, cred2]}) # can't add either
cred1.use_role.members.add(rando)
assert not access.can_add({'credentials': [cred1, cred2]}) # can't add 1
cred2.use_role.members.add(rando)
assert access.can_add({'credentials': [cred1, cred2]}) # can add both
def test_obj_credentials_access(self, credentialtype_ssh, rando):
job = Job.objects.create()
config = JobLaunchConfig.objects.create(job=job)
access = JobLaunchConfigAccess(rando)
cred1, cred2 = self._make_two_credentials(credentialtype_ssh)
assert access.has_credentials_access(config) # has access if 0 creds
config.credentials.add(cred1, cred2)
assert not access.has_credentials_access(config) # lacks access to both
cred1.use_role.members.add(rando)
assert not access.has_credentials_access(config) # lacks access to 1
cred2.use_role.members.add(rando)
assert access.has_credentials_access(config) # has access to both
def test_can_use_minor(self, rando):
# Config object only has flat-field overrides, no RBAC restrictions
job = Job.objects.create()
config = JobLaunchConfig.objects.create(job=job)
access = JobLaunchConfigAccess(rando)
assert access.can_use(config)
assert rando.can_access(JobLaunchConfig, 'use', config)

View File

@ -69,7 +69,7 @@ class TestJobRelaunchAccess:
)
new_cred.save()
new_inv = Inventory.objects.create(name='new-inv', organization=organization)
return jt.create_unified_job(credentials=[new_cred.pk], inventory=new_inv)
return jt.create_unified_job(credentials=[new_cred], inventory=new_inv)
def test_normal_relaunch_via_job_template(self, job_no_prompts, rando):
"Has JT execute_role, job unchanged relative to JT"
@ -89,12 +89,15 @@ class TestJobRelaunchAccess:
job_with_prompts.inventory.use_role.members.add(rando)
assert rando.can_access(Job, 'start', job_with_prompts)
def test_no_relaunch_after_limit_change(self, job_no_prompts, rando):
"State of the job contradicts the JT state - deny relaunch"
job_no_prompts.job_template.execute_role.members.add(rando)
job_no_prompts.limit = 'webservers'
job_no_prompts.save()
assert not rando.can_access(Job, 'start', job_no_prompts)
def test_no_relaunch_after_limit_change(self, inventory, machine_credential, rando):
"State of the job contradicts the JT state - deny relaunch based on JT execute"
jt = JobTemplate.objects.create(name='test-job_template', inventory=inventory, ask_limit_on_launch=True)
jt.credentials.add(machine_credential)
job_with_prompts = jt.create_unified_job(limit='webservers')
jt.ask_limit_on_launch = False
jt.save()
jt.execute_role.members.add(rando)
assert not rando.can_access(Job, 'start', job_with_prompts)
def test_can_relaunch_if_limit_was_prompt(self, job_with_prompts, rando):
"Job state differs from JT, but only on prompted fields - allow relaunch"

View File

@ -59,6 +59,16 @@ class TestWorkflowJobTemplateNodeAccess:
access = WorkflowJobTemplateNodeAccess(org_admin)
assert not access.can_change(wfjt_node, {'job_type': 'scan'})
def test_access_to_edit_non_JT(self, rando, workflow_job_template, organization, project):
workflow_job_template.admin_role.members.add(rando)
node = workflow_job_template.workflow_job_template_nodes.create(
unified_job_template=project
)
assert not WorkflowJobTemplateNodeAccess(rando).can_change(node, {'limit': ''})
project.update_role.members.add(rando)
assert WorkflowJobTemplateNodeAccess(rando).can_change(node, {'limit': ''})
def test_add_JT_no_start_perm(self, wfjt, job_template, rando):
wfjt.admin_role.members.add(rando)
access = WorkflowJobTemplateNodeAccess(rando)

View File

@ -1121,8 +1121,6 @@ class JobTemplateSurveyTest(BaseJobTestMixin, django.test.TransactionTestCase):
response = self.get(launch_url)
self.assertTrue('favorite_color' in response['variables_needed_to_start'])
response = self.post(launch_url, dict(extra_vars=dict()), expect=400)
# Note: The below assertion relies on how survey_variable_validation() crafts
# the error message
self.assertIn("'favorite_color' value missing", response['variables_needed_to_start'])
# launch job template with required survey without providing survey data and without
@ -1132,8 +1130,6 @@ class JobTemplateSurveyTest(BaseJobTestMixin, django.test.TransactionTestCase):
response = self.get(launch_url)
self.assertTrue('favorite_color' in response['variables_needed_to_start'])
response = self.post(launch_url, {}, expect=400)
# Note: The below assertion relies on how survey_variable_validation() crafts
# the error message
self.assertIn("'favorite_color' value missing", response['variables_needed_to_start'])
with self.current_user(self.user_sue):

View File

@ -16,13 +16,13 @@ from awx.main.models import (
def mock_JT_resource_data():
return ({}, [])
return {}
@pytest.fixture
def job_template(mocker):
mock_jt = mocker.MagicMock(pk=5)
mock_jt.resource_validation_data = mock_JT_resource_data
mock_jt.validation_errors = mock_JT_resource_data
return mock_jt

View File

@ -20,7 +20,7 @@ from rest_framework import serializers
def mock_JT_resource_data():
return ({}, [])
return {}
@pytest.fixture
@ -28,7 +28,7 @@ def job_template(mocker):
mock_jt = mocker.MagicMock(spec=JobTemplate)
mock_jt.pk = 5
mock_jt.host_config_key = '9283920492'
mock_jt.resource_validation_data = mock_JT_resource_data
mock_jt.validation_errors = mock_JT_resource_data
return mock_jt

View File

@ -5,7 +5,6 @@ import mock
# AWX
from awx.api.serializers import (
WorkflowJobTemplateSerializer,
WorkflowNodeBaseSerializer,
WorkflowJobTemplateNodeSerializer,
WorkflowJobNodeSerializer,
)
@ -54,7 +53,7 @@ class TestWorkflowNodeBaseSerializerGetRelated():
return WorkflowJobTemplateNode(pk=1)
def test_workflow_unified_job_template_present(self, get_related_mock_and_run, workflow_job_template_node_related):
related = get_related_mock_and_run(WorkflowNodeBaseSerializer, workflow_job_template_node_related)
related = get_related_mock_and_run(WorkflowJobTemplateNodeSerializer, workflow_job_template_node_related)
assert 'unified_job_template' in related
assert related['unified_job_template'] == '/api/v2/%s/%d/' % ('job_templates', workflow_job_template_node_related.unified_job_template.pk)
@ -63,7 +62,7 @@ class TestWorkflowNodeBaseSerializerGetRelated():
assert 'unified_job_template' not in related
@mock.patch('awx.api.serializers.WorkflowNodeBaseSerializer.get_related', lambda x,y: {})
@mock.patch('awx.api.serializers.BaseSerializer.get_related', lambda x,y: {})
class TestWorkflowJobTemplateNodeSerializerGetRelated():
@pytest.fixture
def workflow_job_template_node(self):
@ -139,17 +138,19 @@ class TestWorkflowJobTemplateNodeSerializerCharPrompts():
def test_change_single_field(self, WFJT_serializer):
"Test that a single prompt field can be changed without affecting other fields"
internal_value = WFJT_serializer.to_internal_value({'job_type': 'check'})
assert internal_value['char_prompts']['job_type'] == 'check'
assert internal_value['char_prompts']['limit'] == 'webservers'
assert internal_value['job_type'] == 'check'
WFJT_serializer.instance.job_type = 'check'
assert WFJT_serializer.instance.limit == 'webservers'
def test_null_single_field(self, WFJT_serializer):
"Test that a single prompt field can be removed without affecting other fields"
internal_value = WFJT_serializer.to_internal_value({'job_type': None})
assert 'job_type' not in internal_value['char_prompts']
assert internal_value['char_prompts']['limit'] == 'webservers'
assert internal_value['job_type'] is None
WFJT_serializer.instance.job_type = None
assert WFJT_serializer.instance.limit == 'webservers'
@mock.patch('awx.api.serializers.WorkflowNodeBaseSerializer.get_related', lambda x,y: {})
@mock.patch('awx.api.serializers.WorkflowJobTemplateNodeSerializer.get_related', lambda x,y: {})
class TestWorkflowJobNodeSerializerGetRelated():
@pytest.fixture
def workflow_job_node(self):

View File

@ -1,5 +1,7 @@
import pytest
import json
# AWX
from awx.main.models.jobs import JobTemplate
import mock
@ -12,8 +14,7 @@ def test_missing_project_error(job_template_factory):
persisted=False)
obj = objects.job_template
assert 'project' in obj.resources_needed_to_start
validation_errors, resources_needed_to_start = obj.resource_validation_data()
assert 'project' in validation_errors
assert 'project' in obj.validation_errors
def test_inventory_need_to_start(job_template_factory):
@ -32,19 +33,7 @@ def test_inventory_contradictions(job_template_factory):
persisted=False)
obj = objects.job_template
obj.ask_inventory_on_launch = False
validation_errors, resources_needed_to_start = obj.resource_validation_data()
assert 'inventory' in validation_errors
def test_survey_answers_as_string(job_template_factory):
objects = job_template_factory(
'job-template-with-survey',
survey=['var1'],
persisted=False)
jt = objects.job_template
user_extra_vars = json.dumps({'var1': 'asdf'})
accepted, ignored = jt._accept_or_ignore_job_kwargs(extra_vars=user_extra_vars)
assert 'var1' in accepted['extra_vars']
assert 'inventory' in obj.validation_errors
@pytest.mark.survey
@ -54,36 +43,6 @@ def test_job_template_survey_password_redaction(job_template_with_survey_passwor
assert job_template_with_survey_passwords_unit.survey_password_variables() == ['secret_key', 'SSN']
def test_job_template_survey_variable_validation(job_template_factory):
objects = job_template_factory(
'survey_variable_validation',
organization='org1',
inventory='inventory1',
credential='cred1',
persisted=False,
)
obj = objects.job_template
obj.survey_spec = {
"description": "",
"spec": [
{
"required": True,
"min": 0,
"default": "5",
"max": 1024,
"question_description": "",
"choices": "",
"variable": "a",
"question_name": "Whosyourdaddy",
"type": "text"
}
],
"name": ""
}
obj.survey_enabled = True
assert obj.survey_variable_validation({"a": 5}) == ["Value 5 for 'a' expected to be a string."]
def test_job_template_survey_mixin(job_template_factory):
objects = job_template_factory(
'survey_mixin_test',
@ -142,3 +101,8 @@ def test_job_template_can_start_with_callback_extra_vars_provided(job_template_f
obj.ask_variables_on_launch = True
with mock.patch.object(obj.__class__, 'passwords_needed_to_start', []):
assert obj.can_start_without_user_input(callback_extra_vars='{"foo": "bar"}') is True
def test_ask_mapping_integrity():
assert 'credentials' in JobTemplate.get_ask_mapping()
assert JobTemplate.get_ask_mapping()['job_tags'] == 'ask_tags_on_launch'

View File

@ -1,68 +0,0 @@
import pytest
import json
from django.core.exceptions import ValidationError
from awx.main.models import (
Schedule,
SystemJobTemplate,
JobTemplate,
)
def test_clean_extra_data_system_job(mocker):
jt = SystemJobTemplate()
schedule = Schedule(unified_job_template=jt)
schedule._clean_extra_data_system_jobs = mocker.MagicMock()
schedule.clean_extra_data()
schedule._clean_extra_data_system_jobs.assert_called_once()
def test_clean_extra_data_other_job(mocker):
jt = JobTemplate()
schedule = Schedule(unified_job_template=jt)
schedule._clean_extra_data_system_jobs = mocker.MagicMock()
schedule.clean_extra_data()
schedule._clean_extra_data_system_jobs.assert_not_called()
@pytest.mark.parametrize("extra_data", [
'{ "days": 1 }',
'{ "days": 100 }',
'{ "days": 0 }',
{"days": 0},
{"days": 1},
{"days": 13435},
])
def test_valid__clean_extra_data_system_jobs(extra_data):
schedule = Schedule()
schedule.extra_data = extra_data
schedule._clean_extra_data_system_jobs()
@pytest.mark.parametrize("extra_data", [
'{ "days": 1.2 }',
'{ "days": -1.2 }',
'{ "days": -111 }',
'{ "days": "-111" }',
'{ "days": false }',
'{ "days": "foobar" }',
{"days": 1.2},
{"days": -1.2},
{"days": -111},
{"days": "-111"},
{"days": False},
{"days": "foobar"},
])
def test_invalid__clean_extra_data_system_jobs(extra_data):
schedule = Schedule()
schedule.extra_data = extra_data
with pytest.raises(ValidationError) as e:
schedule._clean_extra_data_system_jobs()
assert json.dumps(str(e.value)) == json.dumps(str([u'days must be a positive integer.']))

View File

@ -11,6 +11,53 @@ from awx.main.models import (
)
@pytest.mark.survey
class SurveyVariableValidation:
def test_survey_answers_as_string(self, job_template_factory):
objects = job_template_factory(
'job-template-with-survey',
survey=[{'variable': 'var1', 'type': 'text'}],
persisted=False)
jt = objects.job_template
user_extra_vars = json.dumps({'var1': 'asdf'})
accepted, ignored, errors = jt._accept_or_ignore_job_kwargs(extra_vars=user_extra_vars)
assert ignored.get('extra_vars', {}) == {}, [str(element) for element in errors]
assert 'var1' in accepted['extra_vars']
def test_job_template_survey_variable_validation(self, job_template_factory):
objects = job_template_factory(
'survey_variable_validation',
organization='org1',
inventory='inventory1',
credential='cred1',
persisted=False,
)
obj = objects.job_template
obj.survey_spec = {
"description": "",
"spec": [
{
"required": True,
"min": 0,
"default": "5",
"max": 1024,
"question_description": "",
"choices": "",
"variable": "a",
"question_name": "Whosyourdaddy",
"type": "text"
}
],
"name": ""
}
obj.survey_enabled = True
accepted, rejected, errors = obj.accept_or_ignore_variables({"a": 5})
assert rejected == {"a": 5}
assert accepted == {}
assert str(errors[0]) == "Value 5 for 'a' expected to be a string."
@pytest.fixture
def job(mocker):
ret = mocker.MagicMock(**{

View File

@ -0,0 +1,65 @@
import pytest
from awx.main.models import SystemJobTemplate
@pytest.mark.parametrize("extra_data", [
'{ "days": 1 }',
'{ "days": 100 }',
'{ "days": 0 }',
{"days": 0},
{"days": 1},
{"days": 13435},
])
def test_valid__clean_extra_data_system_jobs(extra_data):
accepted, rejected, errors = SystemJobTemplate().accept_or_ignore_variables(extra_data)
assert not rejected
assert not errors
@pytest.mark.parametrize("extra_data", [
'{ "days": 1.2 }',
'{ "days": -1.2 }',
'{ "days": -111 }',
'{ "days": "-111" }',
'{ "days": false }',
'{ "days": "foobar" }',
{"days": 1.2},
{"days": -1.2},
{"days": -111},
{"days": "-111"},
{"days": False},
{"days": "foobar"},
])
def test_invalid__extra_data_system_jobs(extra_data):
accepted, rejected, errors = SystemJobTemplate().accept_or_ignore_variables(extra_data)
assert str(errors['extra_vars'][0]) == u'days must be a positive integer.'
def test_unallowed_system_job_data():
sjt = SystemJobTemplate()
accepted, ignored, errors = sjt.accept_or_ignore_variables({
'days': 34,
'foobar': 'baz'
})
assert 'foobar' in ignored
assert 'days' in accepted
def test_reject_other_prommpts():
sjt = SystemJobTemplate()
accepted, ignored, errors = sjt._accept_or_ignore_job_kwargs(limit="")
assert accepted == {}
assert 'not allowed on launch' in errors['limit'][0]
def test_reject_some_accept_some():
sjt = SystemJobTemplate()
accepted, ignored, errors = sjt._accept_or_ignore_job_kwargs(limit="", extra_vars={
'days': 34,
'foobar': 'baz'
})
assert accepted == {"extra_vars": {"days": 34}}
assert ignored == {"limit": "", "extra_vars": {"foobar": "baz"}}
assert 'not allowed on launch' in errors['limit'][0]

View File

@ -3,6 +3,7 @@ import mock
from awx.main.models import (
UnifiedJob,
UnifiedJobTemplate,
WorkflowJob,
WorkflowJobNode,
Job,
@ -12,6 +13,14 @@ from awx.main.models import (
)
def test_incorrectly_formatted_variables():
bad_data = '{"bar":"foo'
accepted, ignored, errors = UnifiedJobTemplate().accept_or_ignore_variables(bad_data)
assert not accepted
assert ignored == bad_data
assert 'Cannot parse as JSON' in str(errors['extra_vars'][0])
def test_unified_job_workflow_attributes():
with mock.patch('django.db.ConnectionRouter.db_for_write'):
job = UnifiedJob(id=1, name="job-1", launch_type="workflow")

View File

@ -9,6 +9,17 @@ from awx.main.models.workflow import (
import mock
@pytest.fixture
def credential():
ssh_type = CredentialType.defaults['ssh']()
return Credential(
id=43,
name='example-cred',
credential_type=ssh_type,
inputs={'username': 'asdf', 'password': 'asdf'}
)
class TestWorkflowJobInheritNodesMixin():
class TestCreateWorkflowJobNodes():
@pytest.fixture
@ -101,6 +112,7 @@ def workflow_job_template_unit():
def jt_ask(job_template_factory):
# note: factory sets ask_xxxx_on_launch to true for inventory & credential
jt = job_template_factory(name='example-jt', persisted=False).job_template
jt.ask_variables_on_launch = True
jt.ask_job_type_on_launch = True
jt.ask_skip_tags_on_launch = True
jt.ask_limit_on_launch = True
@ -123,55 +135,60 @@ def job_node_no_prompts(workflow_job_unit, jt_ask):
@pytest.fixture
def job_node_with_prompts(job_node_no_prompts):
def job_node_with_prompts(job_node_no_prompts, mocker):
job_node_no_prompts.char_prompts = example_prompts
job_node_no_prompts.inventory = Inventory(name='example-inv')
ssh_type = CredentialType.defaults['ssh']()
job_node_no_prompts.credential = Credential(
name='example-inv',
credential_type=ssh_type,
inputs={'username': 'asdf', 'password': 'asdf'}
)
job_node_no_prompts.inventory = Inventory(name='example-inv', id=45)
job_node_no_prompts.inventory_id = 45
return job_node_no_prompts
@pytest.fixture
def wfjt_node_no_prompts(workflow_job_template_unit, jt_ask):
return WorkflowJobTemplateNode(workflow_job_template=workflow_job_template_unit, unified_job_template=jt_ask)
node = WorkflowJobTemplateNode(
workflow_job_template=workflow_job_template_unit,
unified_job_template=jt_ask
)
return node
@pytest.fixture
def wfjt_node_with_prompts(wfjt_node_no_prompts):
def wfjt_node_with_prompts(wfjt_node_no_prompts, mocker):
wfjt_node_no_prompts.char_prompts = example_prompts
wfjt_node_no_prompts.inventory = Inventory(name='example-inv')
ssh_type = CredentialType.defaults['ssh']()
wfjt_node_no_prompts.credential = Credential(
name='example-inv',
credential_type=ssh_type,
inputs={'username': 'asdf', 'password': 'asdf'}
)
return wfjt_node_no_prompts
def test_node_getter_and_setters():
    """Char-prompt properties on a WFJT node round-trip through char_prompts."""
    wfjt_node = WorkflowJobTemplateNode()
    wfjt_node.job_type = 'check'
    # The property setter stores the value inside the char_prompts dict...
    assert wfjt_node.char_prompts['job_type'] == 'check'
    # ...and the property getter reads the same value back out.
    assert wfjt_node.job_type == 'check'
class TestWorkflowJobCreate:
def test_create_no_prompts(self, wfjt_node_no_prompts, workflow_job_unit, mocker):
mock_create = mocker.MagicMock()
with mocker.patch('awx.main.models.WorkflowJobNode.objects.create', mock_create):
wfjt_node_no_prompts.create_workflow_job_node(workflow_job=workflow_job_unit)
mock_create.assert_called_once_with(
extra_data={},
survey_passwords={},
char_prompts=wfjt_node_no_prompts.char_prompts,
inventory=None, credential=None,
inventory=None,
unified_job_template=wfjt_node_no_prompts.unified_job_template,
workflow_job=workflow_job_unit)
def test_create_with_prompts(self, wfjt_node_with_prompts, workflow_job_unit, mocker):
def test_create_with_prompts(self, wfjt_node_with_prompts, workflow_job_unit, credential, mocker):
mock_create = mocker.MagicMock()
with mocker.patch('awx.main.models.WorkflowJobNode.objects.create', mock_create):
wfjt_node_with_prompts.create_workflow_job_node(workflow_job=workflow_job_unit)
wfjt_node_with_prompts.create_workflow_job_node(
workflow_job=workflow_job_unit
)
mock_create.assert_called_once_with(
extra_data={},
survey_passwords={},
char_prompts=wfjt_node_with_prompts.char_prompts,
inventory=wfjt_node_with_prompts.inventory,
credential=wfjt_node_with_prompts.credential,
unified_job_template=wfjt_node_with_prompts.unified_job_template,
workflow_job=workflow_job_unit)
@ -182,21 +199,22 @@ class TestWorkflowJobNodeJobKWARGS:
Tests for building the keyword arguments that go into creating and
launching a new job that corresponds to a workflow node.
"""
kwargs_base = {'launch_type': 'workflow'}
kwargs_base = {'_eager_fields': {'launch_type': 'workflow'}}
# With no prompts configured on the node, get_job_kwargs() yields only the
# base kwargs shared by all workflow-spawned jobs (see kwargs_base above).
def test_null_kwargs(self, job_node_no_prompts):
assert job_node_no_prompts.get_job_kwargs() == self.kwargs_base
def test_inherit_workflow_job_extra_vars(self, job_node_no_prompts):
def test_inherit_workflow_job_and_node_extra_vars(self, job_node_no_prompts):
job_node_no_prompts.extra_data = {"b": 98}
workflow_job = job_node_no_prompts.workflow_job
workflow_job.extra_vars = '{"a": 84}'
assert job_node_no_prompts.get_job_kwargs() == dict(
extra_vars={'a': 84}, **self.kwargs_base)
extra_vars={'a': 84, 'b': 98}, **self.kwargs_base)
def test_char_prompts_and_res_node_prompts(self, job_node_with_prompts):
# TBD: properly handle multicred credential assignment
expect_kwargs = dict(
inventory=job_node_with_prompts.inventory.pk,
inventory=job_node_with_prompts.inventory,
**example_prompts)
expect_kwargs.update(self.kwargs_base)
assert job_node_with_prompts.get_job_kwargs() == expect_kwargs
@ -205,7 +223,7 @@ class TestWorkflowJobNodeJobKWARGS:
# TBD: properly handle multicred credential assignment
job_node_with_prompts.unified_job_template.ask_inventory_on_launch = False
job_node_with_prompts.unified_job_template.ask_job_type_on_launch = False
expect_kwargs = dict(inventory=job_node_with_prompts.inventory.pk,
expect_kwargs = dict(inventory=job_node_with_prompts.inventory,
**example_prompts)
expect_kwargs.update(self.kwargs_base)
expect_kwargs.pop('inventory')
@ -217,27 +235,5 @@ class TestWorkflowJobNodeJobKWARGS:
assert job_node_no_prompts.get_job_kwargs() == self.kwargs_base
class TestWorkflowWarnings:
"""
Tests of warnings that show user errors in the construction of a workflow
"""
# A node pointing at a project (which has no promptable fields) but carrying
# no prompt data should produce no warnings.
def test_no_warn_project_node_no_prompts(self, job_node_no_prompts, project_unit):
job_node_no_prompts.unified_job_template = project_unit
assert job_node_no_prompts.get_prompts_warnings() == {}
# A node with prompt data pointed at a project should warn that every
# prompt is ignored ('all' key under 'ignored').
def test_warn_project_node_reject_all_prompts(self, job_node_with_prompts, project_unit):
job_node_with_prompts.unified_job_template = project_unit
assert 'ignored' in job_node_with_prompts.get_prompts_warnings()
assert 'all' in job_node_with_prompts.get_prompts_warnings()['ignored']
# When the template accepts all prompts (jt_ask fixture), no warnings appear.
def test_no_warn_accept_all_prompts(self, job_node_with_prompts):
assert job_node_with_prompts.get_prompts_warnings() == {}
# When the template rejects only some prompts, exactly those fields are
# reported under 'ignored' (credential rejection not counted here; only
# job_type shows, per the final length assertion).
def test_warn_reject_some_prompts(self, job_node_with_prompts):
job_node_with_prompts.unified_job_template.ask_credential_on_launch = False
job_node_with_prompts.unified_job_template.ask_job_type_on_launch = False
assert 'ignored' in job_node_with_prompts.get_prompts_warnings()
assert 'job_type' in job_node_with_prompts.get_prompts_warnings()['ignored']
assert len(job_node_with_prompts.get_prompts_warnings()['ignored']) == 1
def test_get_ask_mapping_integrity():
    """Workflow JTs should expose exactly one promptable field: extra_vars."""
    ask_mapping = WorkflowJobTemplate.get_ask_mapping()
    assert ask_mapping.keys() == ['extra_vars']

View File

@ -27,6 +27,7 @@ from django.core.exceptions import ObjectDoesNotExist
from django.db import DatabaseError
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.query import QuerySet
# Django REST Framework
from rest_framework.exceptions import ParseError, PermissionDenied
@ -477,7 +478,7 @@ def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
src_field_value = getattr(obj1, field_name)
if kwargs and field_name in kwargs:
override_field_val = kwargs[field_name]
if isinstance(override_field_val, list):
if isinstance(override_field_val, (set, list, QuerySet)):
getattr(obj2, field_name).add(*override_field_val)
continue
if override_field_val.__class__.__name__ is 'ManyRelatedManager':

View File

@ -54,3 +54,11 @@
which backward compatibility support for 3.1 use pattern
[[#6915](https://github.com/ansible/ansible-tower/issues/6915)]
* Allow relaunching jobs on a subset of hosts, by status.[[#219](https://github.com/ansible/awx/issues/219)]
* Added `ask_variables_on_launch` to workflow JTs.[[#497](https://github.com/ansible/awx/issues/497)]
* Added `diff_mode` and `verbosity` fields to WFJT nodes.[[#555](https://github.com/ansible/awx/issues/555)]
* Block creation of schedules when variables not allowed are given.
Block similar cases for WFJT nodes.[[#478](https://github.com/ansible/awx/issues/478)]
* Changed WFJT node `credential` to many-to-many `credentials`.
* Saved Launch-time configurations feature - added WFJT node promptable fields to schedules,
added `extra_data` to WFJT nodes, added "schedule this job" endpoint.
[[#169](https://github.com/ansible/awx/issues/169)]

253
docs/prompting.md Normal file
View File

@ -0,0 +1,253 @@
## Launch-time Configurations / Prompting
Admins of templates in AWX have the option to allow fields to be over-written
by user-provided values at the time of launch. The job that runs will
then use the launch-time values in lieu of the template values.
Fields that can be prompted for, and corresponding "ask_" variables
(which exist on the template and must be set to `true` to enable prompting)
are the following.
##### Standard Pattern with Character Fields
- `ask_<variable>_on_launch` allows use of
- `<variable>`
The standard pattern applies to fields
- `job_type`
- `skip_tags`
- `limit`
- `diff_mode`
- `verbosity`
##### Non-Standard Cases (Credentials Changing in Tower 3.3)
- `ask_variables_on_launch` allows unrestricted use of
- `extra_vars`
- `ask_tags_on_launch` allows use of
- `job_tags`
- Enabled survey allows restricted use of
- `extra_vars`, only for variables in survey (with qualifiers)
- `ask_credential_on_launch` allows use of
- `credential`
- `vault_credential` / `extra_credentials` / `credentials`
(version-dependent, see notes below)
- `ask_inventory_on_launch` allows use of
- `inventory`
Surveys are a special-case of prompting for variables - applying a survey to
a template white-lists variable names in the survey spec (requires the survey
spec to exist and `survey_enabled` to be true). On the other hand,
if `ask_variables_on_launch` is true, users can provide any variables in
extra_vars.
Prompting enablement for several types of credentials is controlled by a single
field. On launch, multiple types of credentials can be provided in their respective fields
inside of `credential`, `vault_credential`, and `extra_credentials`. Providing
credentials that require password input from the user on launch is
allowed, and the password must be provided alongside the credential, of course.
If the job is being spawned using a saved launch configuration, however,
all non-machine credential types are managed by a many-to-many relationship
called `credentials` relative to the launch configuration object.
When the job is spawned, the credentials in that relationship will be
sorted into the job's many-to-many credential fields according to their
type (cloud vs. vault).
### Manual use of Prompts
Fields enabled as prompts in the template can be used for the following
actions in the API.
- POST to `/api/v2/job_templates/N/launch/`
- can accept all prompt-able fields
- POST to `/api/v2/workflow_job_templates/N/launch/`
- can only accept extra_vars
- POST to `/api/v2/system_job_templates/N/launch/`
- can accept certain fields, with no user configuration
#### Data Rules for Prompts
For the POST action to launch, data for "prompts" are provided as top-level
keys in the request data. There is a special case to allow a list to be
provided for `credentials`, which is otherwise not possible in AWX API design.
The list of credentials will either add extra credentials, or replace
existing credentials in the job template if a provided credential is of
the same type.
Values of `null` are not allowed, if the field is not being over-ridden,
the key should not be given in the payload. A 400 should be returned if
this is done.
Example:
POST to `/api/v2/job_templates/N/launch/` with data:
```json
{
"job_type": "check",
"limit": "",
"credentials": [1, 2, 4],
"extra_vars": {}
}
```
where the job template has credentials `[2, 3, 5]`, and the credential type
are the following:
- 1 - gce
- 2 - ssh
- 3 - gce
- 4 - aws
- 5 - openstack
Assuming that the job template is configured to prompt for all of these
fields, here is what happens in this action:
- `job_type` of the job takes the value of "check"
- `limit` of the job takes the value of `""`, which means that Ansible will
target all hosts in the inventory, even though the job template may have
been targeted to a smaller subset of hosts
- The job uses the `credentials` with primary keys 1, 2, 4, and 5
- `extra_vars` of the job template will be used without any overrides
If `extra_vars` in the request data contains some keys, these will
be combined with the job template extra_vars dictionary, with the
request data taking precedence.
Provided credentials will replace any job template credentials of the same
exclusive type, but combine with any others. In the example, the job template
credential 3 was replaced with the provided credential 1, because a job
may only use 1 gce credential because these two credentials define the
same environment variables and configuration file.
### Saved Launch-time Configurations
Several other mechanisms which automatically launch jobs can apply prompts
at launch-time that are saved in advance.
- Workflow nodes
- Schedules
- Job relaunch / re-scheduling
In the case of workflow nodes and schedules, the prompted fields are saved
directly on the model. Those models include Workflow Job Template Nodes,
Workflow Job Nodes (a copy of the first), and Schedules.
Jobs, themselves, have a configuration object stored in a related model,
and only used to prepare the correct launch-time configuration for subsequent
re-launch and re-scheduling of the job. To see these prompts for a particular
job, do a GET to `/api/v2/jobs/N/create_schedule/`.
#### Workflow Node Launch Configuration (Changing in Tower 3.3)
Workflow job nodes will combine `extra_vars` from their parent
workflow job with the variables that they provide in
`extra_data`, as well as artifacts from prior job runs. Both of these
sources of variables have higher precedence than the variables defined in
the node.
All prompts that a workflow node passes to a spawned job abides by the
rules of the related template.
That means that if the node's job template has `ask_variables_on_launch` set
to false with no survey, neither the workflow JT nor the artifacts will take effect
in the job that is spawned.
If the node's job template has `ask_inventory_on_launch` set to false and
the node provides an inventory, this resource will not be used in the spawned
job. If a user creates a node that would do this, a 400 response will be returned.
Behavior before the 3.3 release cycle was less-restrictive with passing
workflow variables to the jobs it spawned, allowing variables to take effect
even when the job template was not configured to allow it.
#### Job Relaunch and Re-scheduling
Job relaunch does not allow the user to provide any prompted fields at the time of relaunch.
Relaunching will re-apply all the prompts used at the
time of the original launch. This means that:
- all prompts restrictions apply as-if the job was being launched with the
current job template (even if it has been modified)
- RBAC rules for prompted resources still apply
Those same rules apply when creating a schedule from the
`/api/v2/schedule_job/` endpoint.
Jobs orphaned by a deleted job template can be relaunched,
but only with organization or system administrator privileges.
#### Credential Password Prompting Restriction
If a job template uses a credential that is configured to prompt for a
password at launch, these passwords cannot be saved for later as part
of a saved launch-time configuration. This is for security reasons.
Credential passwords _can_ be provided at time of relaunch.
### Validation
The general rule for validation:
> When a job is created from a template, only fields specifically configured
to be prompt-able are allowed to differ from the template to the job.
In other words, if no prompts (including surveys) are configured, a job
must be identical to the template it was created from, for all fields
that become `ansible-playbook` options.
#### Disallowed Fields
If a manual launch provides fields not allowed by the rules of the template,
the behavior is:
- Launches without those fields, ignores fields
- lists fields in `ignored_fields` in POST response
#### Data Type Validation
All fields provided on launch, or saved in a launch-time configuration
for later, should be subject to the same validation that they would be
if saving to the job template model. For example, only certain values of
`job_type` are valid.
Surveys impose additional restrictions, and violations of the survey
validation rules will prevent launch from proceeding.
#### Fields Required on Launch
Failing to provide required variables also results in a validation error
when manually launching. It will also result in a 400 error if the user
fails to provide those fields when saving a WFJT node or schedule.
#### Broken Saved Configurations
If a job is spawned from schedule or a workflow in a state that has rejected
prompts, this should be logged, but the job should still be launched, without
those prompts applied.
If the job is spawned from a schedule or workflow in a state that cannot be
launched (typical example is a null `inventory`), then the job should be
created in an "error" state with `job_explanation` containing a summary
of what happened.
### Scenarios to have Coverage for
- variable precedence
- schedule has survey answers for WFJT survey
- WFJT has node that has answers to JT survey
- on launch, the schedule answers override all others
- survey password durability
- schedule has survey password answers from WFJT survey
- WFJT node has answers to different password questions from JT survey
- final job it spawns has both answers encrypted
- POST to associate credential to WFJT node
- requires admin to WFJT and execute to JT
- this is in addition to the restriction of `ask_credential_on_launch`
- credentials merge behavior
- JT has machine & cloud credentials, set to prompt for credential on launch
- schedule for JT provides no credentials
- spawned job still uses all JT credentials
- credentials deprecated behavior
- manual launch providing `"extra_credentials": []` should launch with no job credentials
- such jobs cannot have schedules created from them

View File

@ -15,10 +15,17 @@ Workflow Nodes are containers of workflow spawned job resources and function as
Workflow job template nodes are listed and created under endpoint `/workflow_job_templates/\d+/workflow_nodes/` to be associated with underlying workflow job template, or directly under endpoint `/workflow_job_template_nodes/`. The most important fields of a workflow job template node are `success_nodes`, `failure_nodes`, `always_nodes`, `unified_job_template` and `workflow_job_template`. The former three are lists of workflow job template nodes that, in union, form the set of all its child nodes; specifically, `success_nodes` are triggered when the parent node job succeeds, `failure_nodes` are triggered when the parent node job fails, and `always_nodes` are triggered regardless of whether the parent job succeeds or fails. The latter two reference the job template resource it contains and the workflow job template it belongs to.
Apart from the core fields, workflow job template nodes have optional fields `credential`, `inventory`, `job_type`, `job_tags`, `skip_tags` and `limit`. These fields will be passed on to corresponding fields of underlying jobs if those fields are set prompted at runtime.
#### Workflow Node Launch Configuration
Workflow nodes may also contain the launch-time configuration for the job it will spawn.
As such, they share all the properties common to all saved launch configurations.
When a workflow job template is launched a workflow job is created. A workflow job node is created for each WFJT node and all fields from the WFJT node are copied. Note that workflow job nodes contain all fields that a workflow job template node contains plus an additional field, `job`, which is a reference to the to-be-spawned job resource.
See the document on saved launch configurations for how these are processed
when the job is launched, and the API validation involved in building
the launch configurations on workflow nodes.
### Tree-Graph Formation and Restrictions
The tree-graph structure of a workflow is enforced by associating workflow job template nodes via endpoints `/workflow_job_template_nodes/\d+/*_nodes/`, where `*` has options `success`, `failure` and `always`. However there are restrictions that must be enforced when setting up new connections. Here are the three restrictions that will raise validation error when break:
* Cycle restriction: According to tree definition, no cycle is allowed.