Merge branch 'release_3.2.0' into devel

* release_3.2.0: (138 commits)
  Pull Dutch and Spanish translations
  Increase verbosity of CTiT Logging test error handling
  fix to console error of conditional toggle showing
  Fix error message when calling remove on undefined DOM element
  fix ctit logging toggle from being shown for log types other than https
  Remove delete and edit buttons from smart inventory host list.  Only option should be view.
  feedback from PR
  Enhance query string in ad hoc command event save to consider smart inventory
  Fixed host filter clearall
  fuller validation for host_filter
  On JT form, Show credential tags from summary_fields if user doesn't have view permission on the credential
  Align key toggle button to role dropdown in user team permissions modal
  Removed rogue console.logs
  Removed extra refresh call
  Enhance query string in job event save to consider smart inventory
  Fix typo in scan_packages plugin
  Switch running_jobs and capacity table columns
  Disable insights cred when user doesn't have edit permissions
  Disallow changing credential_type of an existing credential
  fix bug with host_filter RBAC check
  ...
This commit is contained in:
Matthew Jones 2017-09-06 16:10:08 -04:00
commit 67d1a86d81
No known key found for this signature in database
GPG Key ID: 76A4C17A97590C1C
161 changed files with 20384 additions and 6959 deletions

View File

@ -89,8 +89,8 @@ SUMMARIZABLE_FK_FIELDS = {
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud'),
'vault_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud'),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'vault_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
@ -779,10 +779,10 @@ class UserSerializer(BaseSerializer):
'username', 'first_name', 'last_name',
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'external_account')
def to_representation(self, obj):
def to_representation(self, obj): # TODO: Remove in 3.3
ret = super(UserSerializer, self).to_representation(obj)
ret.pop('password', None)
if obj:
if obj and type(self) is UserSerializer or self.version == 1:
ret['auth'] = obj.social_auth.values('provider', 'uid')
return ret
@ -1171,24 +1171,41 @@ class InventorySerializer(BaseSerializerWithVariables):
ret['organization'] = None
return ret
def validate_host_filter(self, host_filter):
if host_filter:
try:
SmartFilter().query_from_string(host_filter)
except RuntimeError, e:
raise models.base.ValidationError(e)
return host_filter
def validate(self, attrs):
kind = attrs.get('kind', 'standard')
if kind == 'smart':
host_filter = attrs.get('host_filter')
if host_filter is not None:
try:
SmartFilter().query_from_string(host_filter)
except RuntimeError, e:
raise models.base.ValidationError(e)
kind = None
if 'kind' in attrs:
kind = attrs['kind']
elif self.instance:
kind = self.instance.kind
host_filter = None
if 'host_filter' in attrs:
host_filter = attrs['host_filter']
elif self.instance:
host_filter = self.instance.host_filter
if kind == 'smart' and not host_filter:
raise serializers.ValidationError({'host_filter': _(
'Smart inventories must specify host_filter')})
return super(InventorySerializer, self).validate(attrs)
# TODO: Remove entire serializer in 3.3, replace with normal serializer
class InventoryDetailSerializer(InventorySerializer):
class Meta:
fields = ('*', 'can_run_ad_hoc_commands')
can_run_ad_hoc_commands = serializers.SerializerMethodField()
def get_fields(self):
fields = super(InventoryDetailSerializer, self).get_fields()
if self.version == 1:
fields['can_run_ad_hoc_commands'] = serializers.SerializerMethodField()
return fields
def get_can_run_ad_hoc_commands(self, obj):
view = self.context.get('view', None)
@ -1551,11 +1568,11 @@ class InventorySourceOptionsSerializer(BaseSerializer):
errors['inventory'] = _("Must provide an inventory.")
else:
dest_inventory = self.instance.inventory
if source_script.organization != dest_inventory.organization:
if dest_inventory and source_script.organization != dest_inventory.organization:
errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.")
except Exception as exc:
except Exception:
errors['source_script'] = _("'source_script' doesn't exist.")
logger.error(str(exc))
logger.exception('Problem processing source_script validation.')
if errors:
raise serializers.ValidationError(errors)
@ -1670,7 +1687,7 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
return value
def validate_inventory(self, value):
if value.kind == 'smart':
if value and value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
return value
@ -2141,6 +2158,14 @@ class CredentialSerializer(BaseSerializer):
return value
return super(CredentialSerializer, self).to_internal_value(data)
def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality'
' of the resources using it.'),
)
return credential_type
class CredentialSerializerCreate(CredentialSerializer):
@ -2490,6 +2515,22 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def get_summary_fields(self, obj):
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
if 'pk' in self.context['view'].kwargs and self.version > 1: # TODO: remove version check in 3.3
extra_creds = []
for cred in obj.extra_credentials.all():
extra_creds.append({
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'credential_type_id': cred.credential_type_id
})
summary_fields['extra_credentials'] = extra_creds
return summary_fields
class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
@ -2525,7 +2566,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
if obj.job_template:
res['job_template'] = self.reverse('api:job_template_detail',
kwargs={'pk': obj.job_template.pk})
if obj.can_start or True:
if (obj.can_start or True) and self.version == 1: # TODO: remove in 3.3
res['start'] = self.reverse('api:job_start', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
@ -2577,6 +2618,21 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
ret['extra_vars'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(JobSerializer, self).get_summary_fields(obj)
if 'pk' in self.context['view'].kwargs and self.version > 1: # TODO: remove version check in 3.3
extra_creds = []
for cred in obj.extra_credentials.all():
extra_creds.append({
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'credential_type_id': cred.credential_type_id
})
summary_fields['extra_credentials'] = extra_creds
return summary_fields
class JobCancelSerializer(JobSerializer):
@ -2630,7 +2686,7 @@ class JobRelaunchSerializer(JobSerializer):
raise serializers.ValidationError(dict(credential=[_("Credential not found or deleted.")]))
if obj.project is None:
raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
if obj.inventory is None:
if obj.inventory is None or obj.inventory.pending_deletion:
raise serializers.ValidationError(dict(errors=[_("Job Template Inventory is missing or undefined.")]))
attrs = super(JobRelaunchSerializer, self).validate(attrs)
return attrs
@ -3564,6 +3620,7 @@ class InstanceSerializer(BaseSerializer):
class InstanceGroupSerializer(BaseSerializer):
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.SerializerMethodField()
instances = serializers.SerializerMethodField()
@ -3581,17 +3638,37 @@ class InstanceGroupSerializer(BaseSerializer):
res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})
return res
def get_jobs_qs(self):
# Store running jobs queryset in context, so it will be shared in ListView
if 'running_jobs' not in self.context:
self.context['running_jobs'] = UnifiedJob.objects.filter(
status__in=('running', 'waiting'))
return self.context['running_jobs']
def get_capacity_dict(self):
# Store capacity values (globally computed) in the context
if 'capacity_map' not in self.context:
ig_qs = None
if self.parent: # Is ListView:
ig_qs = self.parent.instance
self.context['capacity_map'] = InstanceGroup.objects.capacity_values(
qs=ig_qs, tasks=self.get_jobs_qs(), breakdown=True)
return self.context['capacity_map']
def get_consumed_capacity(self, obj):
return obj.consumed_capacity
return self.get_capacity_dict()[obj.name]['consumed_capacity']
def get_percent_capacity_remaining(self, obj):
if not obj.capacity or obj.consumed_capacity == obj.capacity:
if not obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
return float("{0:.2f}".format(
((float(obj.capacity) - float(self.get_consumed_capacity(obj))) / (float(obj.capacity))) * 100)
)
def get_jobs_running(self, obj):
return UnifiedJob.objects.filter(instance_group=obj, status__in=('running', 'waiting',)).count()
jobs_qs = self.get_jobs_qs()
return sum(1 for job in jobs_qs if job.instance_group_id == obj.id)
def get_instances(self, obj):
return obj.instances.count()

View File

@ -1,19 +0,0 @@
ANSIBLE TOWER BY RED HAT END USER LICENSE AGREEMENT
This end user license agreement (“EULA”) governs the use of the Ansible Tower software and any related updates, upgrades, versions, appearance, structure and organization (the “Ansible Tower Software”), regardless of the delivery mechanism.
1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. and its affiliates (“Red Hat”) grant to you (“You”) a non-transferable, non-exclusive, worldwide, non-sublicensable, limited, revocable license to use the Ansible Tower Software for the term of the associated Red Hat Software Subscription(s) and in a quantity equal to the number of Red Hat Software Subscriptions purchased from Red Hat for the Ansible Tower Software (“License”), each as set forth on the applicable Red Hat ordering document. You acquire only the right to use the Ansible Tower Software and do not acquire any rights of ownership. Red Hat reserves all rights to the Ansible Tower Software not expressly granted to You. This License grant pertains solely to Your use of the Ansible Tower Software and is not intended to limit Your rights under, or grant You rights that supersede, the license terms of any software packages which may be made available with the Ansible Tower Software that are subject to an open source software license.
2. Intellectual Property Rights. Title to the Ansible Tower Software and each component, copy and modification, including all derivative works whether made by Red Hat, You or on Red Hat's behalf, including those made at Your suggestion and all associated intellectual property rights, are and shall remain the sole and exclusive property of Red Hat and/or it licensors. The License does not authorize You (nor may You allow any third party, specifically non-employees of Yours) to: (a) copy, distribute, reproduce, use or allow third party access to the Ansible Tower Software except as expressly authorized hereunder; (b) decompile, disassemble, reverse engineer, translate, modify, convert or apply any procedure or process to the Ansible Tower Software in order to ascertain, derive, and/or appropriate for any reason or purpose, including the Ansible Tower Software source code or source listings or any trade secret information or process contained in the Ansible Tower Software (except as permitted under applicable law); (c) execute or incorporate other software (except for approved software as appears in the Ansible Tower Software documentation or specifically approved by Red Hat in writing) into Ansible Tower Software, or create a derivative work of any part of the Ansible Tower Software; (d) remove any trademarks, trade names or titles, copyrights legends or any other proprietary marking on the Ansible Tower Software; (e) disclose the results of any benchmarking of the Ansible Tower Software (whether or not obtained with Red Hats assistance) to any third party; (f) attempt to circumvent any user limits or other license, timing or use restrictions that are built into, defined or agreed upon, regarding the Ansible Tower Software. You are hereby notified that the Ansible Tower Software may contain time-out devices, counter devices, and/or other devices intended to ensure the limits of the License will not be exceeded (“Limiting Devices”). 
If the Ansible Tower Software contains Limiting Devices, Red Hat will provide You materials necessary to use the Ansible Tower Software to the extent permitted. You may not tamper with or otherwise take any action to defeat or circumvent a Limiting Device or other control measure, including but not limited to, resetting the unit amount or using false host identification number for the purpose of extending any term of the License.
3. Evaluation Licenses. Unless You have purchased Ansible Tower Software Subscriptions from Red Hat or an authorized reseller under the terms of a commercial agreement with Red Hat, all use of the Ansible Tower Software shall be limited to testing purposes and not for production use (“Evaluation”). Unless otherwise agreed by Red Hat, Evaluation of the Ansible Tower Software shall be limited to an evaluation environment and the Ansible Tower Software shall not be used to manage any systems or virtual machines on networks being used in the operation of Your business or any other non-evaluation purpose. Unless otherwise agreed by Red Hat, You shall limit all Evaluation use to a single 30 day evaluation period and shall not download or otherwise obtain additional copies of the Ansible Tower Software or license keys for Evaluation.
4. Limited Warranty. Except as specifically stated in this Section 4, to the maximum extent permitted under applicable law, the Ansible Tower Software and the components are provided and licensed “as is” without warranty of any kind, expressed or implied, including the implied warranties of merchantability, non-infringement or fitness for a particular purpose. Red Hat warrants solely to You that the media on which the Ansible Tower Software may be furnished will be free from defects in materials and manufacture under normal use for a period of thirty (30) days from the date of delivery to You. Red Hat does not warrant that the functions contained in the Ansible Tower Software will meet Your requirements or that the operation of the Ansible Tower Software will be entirely error free, appear precisely as described in the accompanying documentation, or comply with regulatory requirements.
5. Limitation of Remedies and Liability. To the maximum extent permitted by applicable law, Your exclusive remedy under this EULA is to return any defective media within thirty (30) days of delivery along with a copy of Your payment receipt and Red Hat, at its option, will replace it or refund the money paid by You for the media. To the maximum extent permitted under applicable law, neither Red Hat nor any Red Hat authorized distributor will be liable to You for any incidental or consequential damages, including lost profits or lost savings arising out of the use or inability to use the Ansible Tower Software or any component, even if Red Hat or the authorized distributor has been advised of the possibility of such damages. In no event shall Red Hat's liability or an authorized distributors liability exceed the amount that You paid to Red Hat for the Ansible Tower Software during the twelve months preceding the first event giving rise to liability.
6. Export Control. In accordance with the laws of the United States and other countries, You represent and warrant that You: (a) understand that the Ansible Tower Software and its components may be subject to export controls under the U.S. Commerce Departments Export Administration Regulations (“EAR”); (b) are not located in any country listed in Country Group E:1 in Supplement No. 1 to part 740 of the EAR; (c) will not export, re-export, or transfer the Ansible Tower Software to any prohibited destination or to any end user who has been prohibited from participating in US export transactions by any federal agency of the US government; (d) will not use or transfer the Ansible Tower Software for use in connection with the design, development or production of nuclear, chemical or biological weapons, or rocket systems, space launch vehicles, or sounding rockets or unmanned air vehicle systems; (e) understand and agree that if you are in the United States and you export or transfer the Ansible Tower Software to eligible end users, you will, to the extent required by EAR Section 740.17 obtain a license for such export or transfer and will submit semi-annual reports to the Commerce Departments Bureau of Industry and Security, which include the name and address (including country) of each transferee; and (f) understand that countries including the United States may restrict the import, use, or export of encryption products (which may include the Ansible Tower Software) and agree that you shall be solely responsible for compliance with any such import, use, or export restrictions.
7. General. If any provision of this EULA is held to be unenforceable, that shall not affect the enforceability of the remaining provisions. This agreement shall be governed by the laws of the State of New York and of the United States, without regard to any conflict of laws provisions. The rights and obligations of the parties to this EULA shall not be governed by the United Nations Convention on the International Sale of Goods.
Copyright © 2015 Red Hat, Inc. All rights reserved. "Red Hat" and “Ansible Tower” are registered trademarks of Red Hat, Inc. All other trademarks are the property of their respective owners.

View File

@ -215,7 +215,7 @@ job_template_urls = patterns('awx.api.views',
job_urls = patterns('awx.api.views',
url(r'^$', 'job_list'),
url(r'^(?P<pk>[0-9]+)/$', 'job_detail'),
url(r'^(?P<pk>[0-9]+)/start/$', 'job_start'),
url(r'^(?P<pk>[0-9]+)/start/$', 'job_start'), # TODO: remove in 3.3
url(r'^(?P<pk>[0-9]+)/cancel/$', 'job_cancel'),
url(r'^(?P<pk>[0-9]+)/relaunch/$', 'job_relaunch'),
url(r'^(?P<pk>[0-9]+)/job_host_summaries/$', 'job_job_host_summaries_list'),

View File

@ -122,7 +122,7 @@ class WorkflowsEnforcementMixin(object):
Mixin to check that license supports workflows.
'''
def check_permissions(self, request):
if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS'):
if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS', 'DELETE'):
raise LicenseForbids(_('Your license does not allow use of workflows.'))
return super(WorkflowsEnforcementMixin, self).check_permissions(request)
@ -3739,6 +3739,13 @@ class JobList(ListCreateAPIView):
methods.remove('POST')
return methods
# NOTE: Remove in 3.3, switch ListCreateAPIView to ListAPIView
def post(self, request, *args, **kwargs):
if get_request_version(self.request) > 1:
return Response({"error": _("POST not allowed for Job launching in version 2 of the api")},
status=status.HTTP_405_METHOD_NOT_ALLOWED)
return super(JobList, self).post(request, *args, **kwargs)
class JobDetail(UnifiedJobDeletionMixin, RetrieveUpdateDestroyAPIView):
@ -3788,6 +3795,7 @@ class JobActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
new_in_145 = True
# TODO: remove endpoint in 3.3
class JobStart(GenericAPIView):
model = Job
@ -3795,7 +3803,13 @@ class JobStart(GenericAPIView):
is_job_start = True
deprecated = True
def v2_not_allowed(self):
return Response({'detail': 'Action only possible through v1 API.'},
status=status.HTTP_404_NOT_FOUND)
def get(self, request, *args, **kwargs):
if get_request_version(request) > 1:
return self.v2_not_allowed()
obj = self.get_object()
data = dict(
can_start=obj.can_start,
@ -3806,6 +3820,8 @@ class JobStart(GenericAPIView):
return Response(data)
def post(self, request, *args, **kwargs):
if get_request_version(request) > 1:
return self.v2_not_allowed()
obj = self.get_object()
if obj.can_start:
result = obj.signal_start(**request.data)

View File

@ -74,6 +74,10 @@ class Setting(CreatedModifiedModel):
def get_cache_key(self, key):
return key
@classmethod
def get_cache_id_key(self, key):
return '{}_ID'.format(key)
import awx.conf.signals # noqa

View File

@ -69,6 +69,12 @@ def _log_database_error():
pass
def filter_sensitive(registry, key, value):
if registry.is_setting_encrypted(key):
return '$encrypted$'
return value
class EncryptedCacheProxy(object):
def __init__(self, cache, registry, encrypter=None, decrypter=None):
@ -105,9 +111,13 @@ class EncryptedCacheProxy(object):
six.text_type(value)
except UnicodeDecodeError:
value = value.decode('utf-8')
logger.debug('cache get(%r, %r) -> %r', key, empty, filter_sensitive(self.registry, key, value))
return value
def set(self, key, value, **kwargs):
def set(self, key, value, log=True, **kwargs):
if log is True:
logger.debug('cache set(%r, %r, %r)', key, filter_sensitive(self.registry, key, value),
SETTING_CACHE_TIMEOUT)
self.cache.set(
key,
self._handle_encryption(self.encrypter, key, value),
@ -115,8 +125,13 @@ class EncryptedCacheProxy(object):
)
def set_many(self, data, **kwargs):
filtered_data = dict(
(key, filter_sensitive(self.registry, key, value))
for key, value in data.items()
)
logger.debug('cache set_many(%r, %r)', filtered_data, SETTING_CACHE_TIMEOUT)
for key, value in data.items():
self.set(key, value, **kwargs)
self.set(key, value, log=False, **kwargs)
def _handle_encryption(self, method, key, value):
TransientSetting = namedtuple('TransientSetting', ['pk', 'value'])
@ -124,9 +139,16 @@ class EncryptedCacheProxy(object):
if value is not empty and self.registry.is_setting_encrypted(key):
# If the setting exists in the database, we'll use its primary key
# as part of the AES key when encrypting/decrypting
obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty)
if obj_id is empty:
logger.info('Efficiency notice: Corresponding id not stored in cache %s',
Setting.get_cache_id_key(key))
obj_id = getattr(self._get_setting_from_db(key), 'pk', None)
elif obj_id == SETTING_CACHE_NONE:
obj_id = None
return method(
TransientSetting(
pk=getattr(self._get_setting_from_db(key), 'pk', None),
pk=obj_id,
value=value
),
'value'
@ -241,11 +263,13 @@ class SettingsWrapper(UserSettingsHolder):
# to indicate from the cache that the setting is not configured without
# a database lookup.
settings_to_cache = get_settings_to_cache(self.registry)
setting_ids = {}
# Load all settings defined in the database.
for setting in Setting.objects.filter(key__in=settings_to_cache.keys(), user__isnull=True).order_by('pk'):
if settings_to_cache[setting.key] != SETTING_CACHE_NOTSET:
continue
if self.registry.is_setting_encrypted(setting.key):
setting_ids[setting.key] = setting.id
try:
value = decrypt_field(setting, 'value')
except ValueError, e:
@ -264,12 +288,18 @@ class SettingsWrapper(UserSettingsHolder):
field = self.registry.get_setting_field(key)
try:
settings_to_cache[key] = get_cache_value(field.get_default())
if self.registry.is_setting_encrypted(key):
# No database pk, so None will be passed to encryption algorithm
setting_ids[key] = SETTING_CACHE_NOTSET
except SkipField:
pass
# Generate a cache key for each setting and store them all at once.
settings_to_cache = dict([(Setting.get_cache_key(k), v) for k, v in settings_to_cache.items()])
for k, id_val in setting_ids.items():
logger.debug('Saving id in cache for encrypted setting %s, %s',
Setting.get_cache_id_key(k), id_val)
self.cache.cache.set(Setting.get_cache_id_key(k), id_val)
settings_to_cache['_awx_conf_preload_expires'] = self._awx_conf_preload_expires
logger.debug('cache set_many(%r, %r)', settings_to_cache, SETTING_CACHE_TIMEOUT)
self.cache.set_many(settings_to_cache, timeout=SETTING_CACHE_TIMEOUT)
def _get_local(self, name):
@ -279,7 +309,6 @@ class SettingsWrapper(UserSettingsHolder):
cache_value = self.cache.get(cache_key, default=empty)
except ValueError:
cache_value = empty
logger.debug('cache get(%r, %r) -> %r', cache_key, empty, cache_value)
if cache_value == SETTING_CACHE_NOTSET:
value = empty
elif cache_value == SETTING_CACHE_NONE:
@ -293,6 +322,7 @@ class SettingsWrapper(UserSettingsHolder):
field = self.registry.get_setting_field(name)
if value is empty:
setting = None
setting_id = None
if not field.read_only or name in (
# these two values are read-only - however - we *do* want
# to fetch their value from the database
@ -303,6 +333,7 @@ class SettingsWrapper(UserSettingsHolder):
if setting:
if getattr(field, 'encrypted', False):
value = decrypt_field(setting, 'value')
setting_id = setting.id
else:
value = setting.value
else:
@ -310,15 +341,17 @@ class SettingsWrapper(UserSettingsHolder):
if SETTING_CACHE_DEFAULTS:
try:
value = field.get_default()
if getattr(field, 'encrypted', False):
setting_id = SETTING_CACHE_NONE
except SkipField:
pass
# If None implies not set, convert when reading the value.
if value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
value = SETTING_CACHE_NOTSET
if cache_value != value:
logger.debug('cache set(%r, %r, %r)', cache_key,
get_cache_value(value),
SETTING_CACHE_TIMEOUT)
if setting_id:
logger.debug('Saving id in cache for encrypted setting %s', cache_key)
self.cache.cache.set(Setting.get_cache_id_key(cache_key), setting_id)
self.cache.set(cache_key, get_cache_value(value), timeout=SETTING_CACHE_TIMEOUT)
if value == SETTING_CACHE_NOTSET and not SETTING_CACHE_DEFAULTS:
try:

View File

@ -391,7 +391,20 @@ def test_charfield_properly_sets_none(settings, mocker):
)
def test_settings_use_an_encrypted_cache(settings):
def test_settings_use_cache(settings, mocker):
settings.registry.register(
'AWX_VAR',
field_class=fields.CharField,
category=_('System'),
category_slug='system'
)
settings.cache.set('AWX_VAR', 'foobar')
settings.cache.set('_awx_conf_preload_expires', 100)
# Will fail test if database is used
getattr(settings, 'AWX_VAR')
def test_settings_use_an_encrypted_cache(settings, mocker):
settings.registry.register(
'AWX_ENCRYPTED',
field_class=fields.CharField,
@ -402,6 +415,11 @@ def test_settings_use_an_encrypted_cache(settings):
assert isinstance(settings.cache, EncryptedCacheProxy)
assert settings.cache.__dict__['encrypter'] == encrypt_field
assert settings.cache.__dict__['decrypter'] == decrypt_field
settings.cache.set('AWX_ENCRYPTED_ID', 402)
settings.cache.set('AWX_ENCRYPTED', 'foobar')
settings.cache.set('_awx_conf_preload_expires', 100)
# Will fail test if database is used
getattr(settings, 'AWX_ENCRYPTED')
def test_sensitive_cache_data_is_encrypted(settings, mocker):

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -377,9 +377,11 @@ class InstanceAccess(BaseAccess):
def get_queryset(self):
if self.user.is_superuser or self.user.is_system_auditor:
return Instance.objects.all().distinct()
qs = Instance.objects.all().distinct()
else:
return Instance.objects.filter(rampart_groups__in=self.user.get_queryset(InstanceGroup)).distinct()
qs = Instance.objects.filter(
rampart_groups__in=self.user.get_queryset(InstanceGroup)).distinct()
return qs.prefetch_related('rampart_groups')
def can_add(self, data):
return False
@ -397,9 +399,11 @@ class InstanceGroupAccess(BaseAccess):
def get_queryset(self):
if self.user.is_superuser or self.user.is_system_auditor:
return InstanceGroup.objects.all()
qs = InstanceGroup.objects.all()
else:
return InstanceGroup.objects.filter(organization__in=Organization.accessible_objects(self.user, 'admin_role'))
qs = InstanceGroup.objects.filter(
organization__in=Organization.accessible_pk_qs(self.user, 'admin_role'))
return qs.prefetch_related('instances')
def can_add(self, data):
return False
@ -506,6 +510,8 @@ class OrganizationAccess(BaseAccess):
I can change or delete organizations when:
- I am a superuser.
- I'm an admin of that organization.
I can associate/disassociate instance groups when:
- I am a superuser.
'''
model = Organization
@ -537,7 +543,7 @@ class OrganizationAccess(BaseAccess):
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
if relationship == "instance_groups":
if self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.admin_role:
if self.user.is_superuser:
return True
return False
return super(OrganizationAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
@ -596,9 +602,18 @@ class InventoryAccess(BaseAccess):
@check_superuser
def can_admin(self, obj, data):
# Host filter may only be modified by org admin level
org_admin_mandatory = False
new_host_filter = data.get('host_filter', None) if data else None
if new_host_filter and new_host_filter != obj.host_filter:
org_admin_mandatory = True
# Verify that the user has access to the new organization if moving an
# inventory to a new organization. Otherwise, just check for admin permission.
return self.check_related('organization', Organization, data, obj=obj) and self.user in obj.admin_role
return (
self.check_related('organization', Organization, data, obj=obj,
mandatory=org_admin_mandatory) and
self.user in obj.admin_role
)
@check_superuser
def can_update(self, obj):
@ -834,6 +849,10 @@ class InventoryUpdateAccess(BaseAccess):
def get_queryset(self):
qs = InventoryUpdate.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory_source__inventory')
qs = qs.prefetch_related(
'unified_job_template',
'instance_group'
)
inventory_sources_qs = self.user.get_queryset(InventorySource)
return qs.filter(inventory_source__in=inventory_sources_qs)
@ -1080,11 +1099,17 @@ class ProjectUpdateAccess(BaseAccess):
def get_queryset(self):
if self.user.is_superuser or self.user.is_system_auditor:
return self.model.objects.all()
qs = ProjectUpdate.objects.distinct()
qs = self.model.objects.all()
else:
qs = self.model.objects.filter(
project__in=Project.accessible_pk_qs(self.user, 'read_role')
)
qs = qs.select_related('created_by', 'modified_by', 'project')
project_ids = set(self.user.get_queryset(Project).values_list('id', flat=True))
return qs.filter(project_id__in=project_ids)
qs = qs.prefetch_related(
'unified_job_template',
'instance_group'
)
return qs
@check_superuser
def can_cancel(self, obj):
@ -1304,7 +1329,11 @@ class JobAccess(BaseAccess):
qs = self.model.objects
qs = qs.select_related('created_by', 'modified_by', 'job_template', 'inventory',
'project', 'credential', 'job_template')
qs = qs.prefetch_related('unified_job_template')
qs = qs.prefetch_related(
'unified_job_template',
'instance_group',
Prefetch('labels', queryset=Label.objects.all().order_by('name'))
)
if self.user.is_superuser or self.user.is_system_auditor:
return qs.all()
@ -2170,10 +2199,13 @@ class LabelAccess(BaseAccess):
def get_queryset(self):
if self.user.is_superuser or self.user.is_system_auditor:
return self.model.objects.all()
return self.model.objects.all().filter(
organization__in=Organization.accessible_objects(self.user, 'read_role')
)
qs = self.model.objects.all()
else:
qs = self.model.objects.all().filter(
organization__in=Organization.accessible_pk_qs(self.user, 'read_role')
)
qs = qs.prefetch_related('modified_by', 'created_by', 'organization')
return qs
@check_superuser
def can_read(self, obj):

View File

@ -311,7 +311,7 @@ register(
min_value=0,
default=0,
label=_('Default Inventory Update Timeout'),
help_text=_('Maximum time to allow inventory updates to run. Use value of 0 to indicate that no '
help_text=_('Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no '
'timeout should be imposed. A timeout set on an individual inventory source will override this.'),
category=_('Jobs'),
category_slug='jobs',
@ -323,7 +323,7 @@ register(
min_value=0,
default=0,
label=_('Default Project Update Timeout'),
help_text=_('Maximum time to allow project updates to run. Use value of 0 to indicate that no '
help_text=_('Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no '
'timeout should be imposed. A timeout set on an individual project will override this.'),
category=_('Jobs'),
category_slug='jobs',
@ -446,7 +446,7 @@ register(
register(
'LOG_AGGREGATOR_PROTOCOL',
field_class=fields.ChoiceField,
choices=['https', 'tcp', 'udp'],
choices=[('https', 'HTTPS'), ('tcp', 'TCP'), ('udp', 'UDP')],
default='https',
label=_('Logging Aggregator Protocol'),
help_text=_('Protocol used to communicate with log aggregator.'),

View File

@ -6,15 +6,8 @@ from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
"""Return 0 if licensed; 1 if unlicensed
"""
"""Returns license type, e.g., 'enterprise', 'open', 'none'"""
def handle(self, **options):
super(Command, self).__init__()
license_info = get_licenser().validate()
if license_info['valid_key'] is True:
return 0
else:
return 1
return get_licenser().validate().get('license_type', 'none')

View File

@ -83,6 +83,7 @@ class AnsibleInventoryLoader(object):
env = dict(os.environ.items())
env['VIRTUAL_ENV'] = settings.ANSIBLE_VENV_PATH
env['PATH'] = os.path.join(settings.ANSIBLE_VENV_PATH, "bin") + ":" + env['PATH']
env['ANSIBLE_INVENTORY_UNPARSED_FAILED'] = '1'
venv_libdir = os.path.join(settings.ANSIBLE_VENV_PATH, "lib")
env.pop('PYTHONPATH', None) # default to none if no python_ver matches
for python_ver in ["python2.7", "python2.6"]:

View File

@ -3,6 +3,7 @@
import sys
from datetime import timedelta
import logging
from django.db import models
from django.utils.timezone import now
@ -11,7 +12,9 @@ from django.conf import settings
from awx.main.utils.filters import SmartFilter
___all__ = ['HostManager', 'InstanceManager']
___all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager']
logger = logging.getLogger('awx.main.managers')
class HostManager(models.Manager):
@ -34,6 +37,8 @@ class HostManager(models.Manager):
hasattr(self.instance, 'kind')):
if self.instance.kind == 'smart' and self.instance.host_filter is not None:
q = SmartFilter.query_from_string(self.instance.host_filter)
if self.instance.organization_id:
q = q.filter(inventory__organization=self.instance.organization_id)
# If we are using host_filters, disable the core_filters, this allows
# us to access all of the available Host entries, not just the ones associated
# with a specific FK/relation.
@ -41,11 +46,26 @@ class HostManager(models.Manager):
# If we don't disable this, a filter of {'inventory': self.instance} gets automatically
# injected by the related object mapper.
self.core_filters = {}
qs = qs & q
return qs.distinct()
unique_by_name = qs.order_by('name', 'pk').distinct('name')
return qs.filter(pk__in=unique_by_name)
return qs
def get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping):
    """Compute which instance groups overlap with which.

    Given ``ig_instance_mapping`` (group name -> set of member instance
    hostnames) and ``instance_ig_mapping`` (instance hostname -> set of group
    names it belongs to), return a dict mapping each group name to the set of
    group names it shares at least one instance with.  Every group is
    considered to overlap with itself, so a group with no instances maps to a
    set containing only its own name.
    """
    ig_ig_mapping = {}
    for group_name, member_hostnames in ig_instance_mapping.items():
        # A group always overlaps itself; union in the groups of each member.
        overlapping = set([group_name])
        for hostname in member_hostnames:
            overlapping |= instance_ig_mapping[hostname]
        ig_ig_mapping[group_name] = overlapping
    return ig_ig_mapping
class InstanceManager(models.Manager):
"""A custom manager class for the Instance model.
@ -77,3 +97,100 @@ class InstanceManager(models.Manager):
def my_role(self):
# NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
return "tower"
class InstanceGroupManager(models.Manager):
    """A custom manager class for the InstanceGroup model.

    Used for global capacity calculations
    """

    def capacity_mapping(self, qs=None):
        """
        Another entry-point to Instance manager method by same name.

        Returns a 2-tuple ``(instance_ig_mapping, ig_ig_mapping)``:
        ``instance_ig_mapping`` maps instance hostname -> set of group names
        that instance belongs to; ``ig_ig_mapping`` maps group name -> set of
        group names it shares at least one instance with (including itself).
        Instances with zero capacity are excluded from both mappings.
        """
        if qs is None:
            # Prefetch so that iterating group.instances.all() below does not
            # issue one query per group.
            qs = self.all().prefetch_related('instances')
        instance_ig_mapping = {}
        ig_instance_mapping = {}
        # Create dictionaries that represent basic m2m memberships
        for group in qs:
            ig_instance_mapping[group.name] = set(
                instance.hostname for instance in group.instances.all() if
                instance.capacity != 0
            )
            for inst in group.instances.all():
                if inst.capacity == 0:
                    # Zero-capacity instances cannot run work, so they do not
                    # participate in capacity overlap calculations.
                    continue
                instance_ig_mapping.setdefault(inst.hostname, set())
                instance_ig_mapping[inst.hostname].add(group.name)
        # Get IG capacity overlap mapping
        ig_ig_mapping = get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping)
        return instance_ig_mapping, ig_ig_mapping

    @staticmethod
    def zero_out_group(graph, name, breakdown):
        # Initialize (or reset) the capacity counters for one group entry in
        # the graph dict; with breakdown=True, track committed (waiting) and
        # running capacity separately in addition to the combined total.
        if name not in graph:
            graph[name] = {}
        graph[name]['consumed_capacity'] = 0
        if breakdown:
            graph[name]['committed_capacity'] = 0
            graph[name]['running_capacity'] = 0

    def capacity_values(self, qs=None, tasks=None, breakdown=False, graph=None):
        """
        Returns a dictionary of capacity values for all IGs.

        Each running or waiting task's ``task_impact`` is charged against the
        groups it affects: a waiting task (or one without an execution node)
        is charged to its own group plus any groups sharing instances with it;
        a running task is charged to every group containing its execution
        node.  Callers may pass their own queryset, task list, or a
        pre-seeded ``graph`` dict to merge into.
        """
        if qs is None:  # Optionally BYOQS - bring your own queryset
            qs = self.all().prefetch_related('instances')
        instance_ig_mapping, ig_ig_mapping = self.capacity_mapping(qs=qs)
        if tasks is None:
            tasks = self.model.unifiedjob_set.related.related_model.objects.filter(
                status__in=('running', 'waiting'))
        if graph is None:
            graph = {group.name: {} for group in qs}
        for group_name in graph:
            self.zero_out_group(graph, group_name, breakdown)
        for t in tasks:
            # TODO: dock capacity for isolated job management tasks running in queue
            impact = t.task_impact
            if t.status == 'waiting' or not t.execution_node:
                # Subtract capacity from any peer groups that share instances
                if not t.instance_group:
                    logger.warning('Excluded %s from capacity algorithm '
                                   '(missing instance_group).', t.log_format)
                    impacted_groups = []
                elif t.instance_group.name not in ig_ig_mapping:
                    # Waiting job in group with 0 capacity has no collateral impact
                    impacted_groups = [t.instance_group.name]
                else:
                    impacted_groups = ig_ig_mapping[t.instance_group.name]
                for group_name in impacted_groups:
                    if group_name not in graph:
                        self.zero_out_group(graph, group_name, breakdown)
                    graph[group_name]['consumed_capacity'] += impact
                    if breakdown:
                        graph[group_name]['committed_capacity'] += impact
            elif t.status == 'running':
                # Subtract capacity from all groups that contain the instance
                if t.execution_node not in instance_ig_mapping:
                    logger.warning('Detected %s running inside lost instance, '
                                   'may still be waiting for reaper.', t.log_format)
                    if t.instance_group:
                        impacted_groups = [t.instance_group.name]
                    else:
                        impacted_groups = []
                else:
                    impacted_groups = instance_ig_mapping[t.execution_node]
                for group_name in impacted_groups:
                    if group_name not in graph:
                        self.zero_out_group(graph, group_name, breakdown)
                    graph[group_name]['consumed_capacity'] += impact
                    if breakdown:
                        graph[group_name]['running_capacity'] += impact
            else:
                logger.error('Programming error, %s not in ["running", "waiting"]', t.log_format)
        return graph

View File

@ -143,7 +143,8 @@ class URLModificationMiddleware(object):
def _convert_named_url(self, url_path):
url_units = url_path.split('/')
if len(url_units) < 6 or url_units[1] != 'api' or url_units[2] not in ['v2']:
# If the identifier is an empty string, it is always invalid.
if len(url_units) < 6 or url_units[1] != 'api' or url_units[2] not in ['v2'] or not url_units[4]:
return url_path
resource = url_units[3]
if resource in settings.NAMED_URL_MAPPINGS:

View File

@ -347,7 +347,6 @@ class AdHocCommandEvent(CreatedModifiedModel):
return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())
def save(self, *args, **kwargs):
from awx.main.models.inventory import Host
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
@ -364,16 +363,16 @@ class AdHocCommandEvent(CreatedModifiedModel):
self.host_name = self.event_data.get('host', '').strip()
if 'host_name' not in update_fields:
update_fields.append('host_name')
try:
if not self.host_id and self.host_name:
host_qs = Host.objects.filter(inventory__ad_hoc_commands__id=self.ad_hoc_command_id, name=self.host_name)
if not self.host_id and self.host_name:
host_qs = self.ad_hoc_command.inventory.hosts.filter(name=self.host_name)
try:
host_id = host_qs.only('id').values_list('id', flat=True)
if host_id.exists():
self.host_id = host_id[0]
if 'host_id' not in update_fields:
update_fields.append('host_id')
except (IndexError, AttributeError):
pass
except (IndexError, AttributeError):
pass
super(AdHocCommandEvent, self).save(*args, **kwargs)
@classmethod

View File

@ -690,6 +690,7 @@ def vault(cls):
'secret': True,
'ask_at_runtime': True
}],
'required': ['vault_password'],
}
)
@ -735,7 +736,8 @@ def net(cls):
'dependencies': {
'ssh_key_unlock': ['ssh_key_data'],
'authorize_password': ['authorize'],
}
},
'required': ['username'],
}
)
@ -822,7 +824,7 @@ def vmware(cls):
'id': 'host',
'label': 'VCenter Host',
'type': 'string',
'help_text': ('Enter the hostname or IP address which corresponds '
'help_text': ('Enter the hostname or IP address that corresponds '
'to your VMware vCenter.')
}, {
'id': 'username',
@ -850,7 +852,7 @@ def satellite6(cls):
'id': 'host',
'label': 'Satellite 6 URL',
'type': 'string',
'help_text': ('Enter the URL which corresponds to your Red Hat '
'help_text': ('Enter the URL that corresponds to your Red Hat '
'Satellite 6 server. For example, https://satellite.example.org')
}, {
'id': 'username',
@ -861,7 +863,8 @@ def satellite6(cls):
'label': 'Password',
'type': 'string',
'secret': True,
}]
}],
'required': ['host', 'username', 'password'],
}
)
@ -877,7 +880,7 @@ def cloudforms(cls):
'id': 'host',
'label': 'CloudForms URL',
'type': 'string',
'help_text': ('Enter the URL for the virtual machine which '
'help_text': ('Enter the URL for the virtual machine that '
'corresponds to your CloudForm instance. '
'For example, https://cloudforms.example.org')
}, {
@ -889,7 +892,8 @@ def cloudforms(cls):
'label': 'Password',
'type': 'string',
'secret': True,
}]
}],
'required': ['host', 'username', 'password'],
}
)
@ -912,8 +916,9 @@ def gce(cls):
'label': 'Project',
'type': 'string',
'help_text': ('The Project ID is the GCE assigned identification. '
'It is constructed as two words followed by a three '
'digit number. Example: adjective-noun-000')
'It is often constructed as three words or two words '
'followed by a three-digit number. Examples: project-id-000 '
'and another-project-id')
}, {
'id': 'ssh_key_data',
'label': 'RSA Private Key',
@ -923,7 +928,8 @@ def gce(cls):
'multiline': True,
'help_text': ('Paste the contents of the PEM file associated '
'with the service account email.')
}]
}],
'required': ['username', 'ssh_key_data'],
}
)
@ -951,7 +957,8 @@ def azure(cls):
'help_text': ('Paste the contents of the PEM file that corresponds '
'to the certificate you uploaded in the Microsoft '
'Azure console.')
}]
}],
'required': ['username', 'ssh_key_data'],
}
)
@ -991,7 +998,8 @@ def azure_rm(cls):
'id': 'tenant',
'label': 'Tenant ID',
'type': 'string'
}]
}],
'required': ['subscription'],
}
)
@ -1022,4 +1030,3 @@ def insights(cls):
},
},
)

View File

@ -11,7 +11,7 @@ from django.utils.timezone import now, timedelta
from solo.models import SingletonModel
from awx.api.versioning import reverse
from awx.main.managers import InstanceManager
from awx.main.managers import InstanceManager, InstanceGroupManager
from awx.main.models.inventory import InventoryUpdate
from awx.main.models.jobs import Job
from awx.main.models.projects import ProjectUpdate
@ -66,6 +66,8 @@ class Instance(models.Model):
class InstanceGroup(models.Model):
"""A model representing a Queue/Group of AWX Instances."""
objects = InstanceGroupManager()
name = models.CharField(max_length=250, unique=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
@ -89,12 +91,7 @@ class InstanceGroup(models.Model):
@property
def capacity(self):
return sum([x[0] for x in self.instances.values_list('capacity')])
@property
def consumed_capacity(self):
return sum(x.task_impact for x in UnifiedJob.objects.filter(instance_group=self,
status__in=('running', 'waiting')))
return sum([inst.capacity for inst in self.instances.all()])
class Meta:
app_label = 'main'

View File

@ -1143,6 +1143,11 @@ class InventorySourceOptions(BaseModel):
# from the instance metadata instead of those explicitly provided.
elif self.source in CLOUD_PROVIDERS and self.source != 'ec2':
raise ValidationError(_('Credential is required for a cloud source.'))
elif self.source == 'custom' and cred and cred.credential_type.kind in ('scm', 'ssh', 'insights', 'vault'):
raise ValidationError(_(
'Credentials of type machine, source control, insights and vault are '
'disallowed for custom inventory sources.'
))
return cred
def clean_source_regions(self):
@ -1400,7 +1405,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
self.source == 'scm' and \
InventorySource.objects.filter(
Q(inventory=self.inventory,
update_on_project_update=True, source='scm') &
update_on_project_update=True, source='scm') &
~Q(id=self.id)).exists():
raise ValidationError(_("More than one SCM-based inventory source with update on project update per-inventory not allowed."))
return self.update_on_project_update
@ -1409,7 +1414,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
if self.update_on_project_update is True and \
self.source == 'scm' and \
self.update_on_launch is True:
raise ValidationError(_("Cannot update SCM-based inventory source on launch if set to update on project update. "
raise ValidationError(_("Cannot update SCM-based inventory source on launch if set to update on project update. "
"Instead, configure the corresponding source project to update on launch."))
return self.update_on_launch

View File

@ -20,7 +20,7 @@ from django.db.models import Q, Count
from django.utils.dateparse import parse_datetime
from dateutil import parser
from dateutil.tz import tzutc
from django.utils.encoding import force_text
from django.utils.encoding import force_text, smart_str
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
@ -785,10 +785,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
if 'insights' in ansible_facts and 'system_id' in ansible_facts['insights']:
host.insights_system_id = ansible_facts['insights']['system_id']
host.save()
system_tracking_logger.info('New fact for inventory {} host {}'.format(host.inventory.name, host.name),
extra=dict(inventory_id=host.inventory.id, host_name=host.name,
ansible_facts=host.ansible_facts,
ansible_facts_modified=host.ansible_facts_modified.isoformat()))
system_tracking_logger.info(
'New fact for inventory {} host {}'.format(
smart_str(host.inventory.name), smart_str(host.name)),
extra=dict(inventory_id=host.inventory.id, host_name=host.name,
ansible_facts=host.ansible_facts,
ansible_facts_modified=host.ansible_facts_modified.isoformat()))
class JobHostSummary(CreatedModifiedModel):
@ -830,8 +832,9 @@ class JobHostSummary(CreatedModifiedModel):
failed = models.BooleanField(default=False, editable=False)
def __unicode__(self):
hostname = self.host.name if self.host else 'N/A'
return '%s changed=%d dark=%d failures=%d ok=%d processed=%d skipped=%s' % \
(self.host.name, self.changed, self.dark, self.failures, self.ok,
(hostname, self.changed, self.dark, self.failures, self.ok,
self.processed, self.skipped)
def get_absolute_url(self, request=None):
@ -1163,7 +1166,6 @@ class JobEvent(CreatedModifiedModel):
def _update_hosts(self, extra_host_pks=None):
# Update job event hosts m2m from host_name, propagate to parent events.
from awx.main.models.inventory import Host
extra_host_pks = set(extra_host_pks or [])
hostnames = set()
if self.host_name:
@ -1174,7 +1176,7 @@ class JobEvent(CreatedModifiedModel):
hostnames.update(v.keys())
except AttributeError: # In case event_data or v isn't a dict.
pass
qs = Host.objects.filter(inventory__jobs__id=self.job_id)
qs = self.job.inventory.hosts.all()
qs = qs.filter(Q(name__in=hostnames) | Q(pk__in=extra_host_pks))
qs = qs.exclude(job_events__pk=self.id).only('id')
for host in qs:
@ -1185,30 +1187,32 @@ class JobEvent(CreatedModifiedModel):
parent = parent[0]
parent._update_hosts(qs.values_list('id', flat=True))
def _update_host_summary_from_stats(self):
from awx.main.models.inventory import Host
def _hostnames(self):
hostnames = set()
try:
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
hostnames.update(self.event_data.get(stat, {}).keys())
except AttributeError: # In case event_data or v isn't a dict.
except AttributeError: # In case event_data or v isn't a dict.
pass
return hostnames
def _update_host_summary_from_stats(self, hostnames):
with ignore_inventory_computed_fields():
qs = Host.objects.filter(inventory__jobs__id=self.job_id,
name__in=hostnames)
qs = self.job.inventory.hosts.filter(name__in=hostnames)
job = self.job
for host in hostnames:
host_stats = {}
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
try:
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
except AttributeError: # in case event_data[stat] isn't a dict.
except AttributeError: # in case event_data[stat] isn't a dict.
pass
if qs.filter(name=host).exists():
host_actual = qs.get(name=host)
host_summary, created = job.job_host_summaries.get_or_create(host=host_actual, host_name=host_actual.name, defaults=host_stats)
else:
host_summary, created = job.job_host_summaries.get_or_create(host_name=host, defaults=host_stats)
if not created:
update_fields = []
for stat, value in host_stats.items():
@ -1217,11 +1221,8 @@ class JobEvent(CreatedModifiedModel):
update_fields.append(stat)
if update_fields:
host_summary.save(update_fields=update_fields)
job.inventory.update_computed_fields()
emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=job.id))
def save(self, *args, **kwargs):
from awx.main.models.inventory import Host
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
@ -1236,7 +1237,7 @@ class JobEvent(CreatedModifiedModel):
update_fields.append(field)
# Update host related field from host_name.
if not self.host_id and self.host_name:
host_qs = Host.objects.filter(inventory__jobs__id=self.job_id, name=self.host_name)
host_qs = self.job.inventory.hosts.filter(name=self.host_name)
host_id = host_qs.only('id').values_list('id', flat=True).first()
if host_id != self.host_id:
self.host_id = host_id
@ -1249,7 +1250,12 @@ class JobEvent(CreatedModifiedModel):
self._update_hosts()
if self.event == 'playbook_on_stats':
self._update_parents_failed_and_changed()
self._update_host_summary_from_stats()
hostnames = self._hostnames()
self._update_host_summary_from_stats(hostnames)
self.job.inventory.update_computed_fields()
emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=self.job.id))
@classmethod
def create_from_data(self, **kwargs):

View File

@ -9,7 +9,7 @@ import uuid
# Django
from django.conf import settings
from django.db import models
from django.db import models, connection
from django.contrib.auth.models import User
from django.utils.timezone import now as tz_now
from django.utils.translation import ugettext_lazy as _
@ -187,7 +187,7 @@ class AuthToken(BaseModel):
if not self.pk or not self.is_expired(now=now):
self.expires = now + datetime.timedelta(seconds=settings.AUTH_TOKEN_EXPIRATION)
if save:
self.save()
connection.on_commit(lambda: self.save(update_fields=['expires']))
def invalidate(self, reason='timeout_reached', save=True):
if not AuthToken.reason_long(reason):

View File

@ -14,6 +14,7 @@ from django.db import transaction, connection, DatabaseError
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now as tz_now, utc
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
# AWX
from awx.main.models import * # noqa
@ -37,10 +38,10 @@ class TaskManager():
def __init__(self):
self.graph = dict()
for rampart_group in InstanceGroup.objects.all():
for rampart_group in InstanceGroup.objects.prefetch_related('instances'):
self.graph[rampart_group.name] = dict(graph=DependencyGraph(rampart_group.name),
capacity_total=rampart_group.capacity,
capacity_used=0)
consumed_capacity=0)
def is_job_blocked(self, task):
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
@ -77,11 +78,18 @@ class TaskManager():
'''
def get_running_tasks(self):
execution_nodes = {}
waiting_jobs = []
now = tz_now()
jobs = UnifiedJob.objects.filter(Q(status='running') |
Q(status='waiting', modified__lte=now - timedelta(seconds=60)))
[execution_nodes.setdefault(j.execution_node, [j]).append(j) for j in jobs]
return execution_nodes
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter((Q(status='running') |
Q(status='waiting', modified__lte=now - timedelta(seconds=60))) &
~Q(polymorphic_ctype_id=workflow_ctype_id))
for j in jobs:
if j.execution_node:
execution_nodes.setdefault(j.execution_node, [j]).append(j)
else:
waiting_jobs.append(j)
return (execution_nodes, waiting_jobs)
'''
Tasks that are currently running in celery
@ -395,18 +403,45 @@ class TaskManager():
preferred_instance_groups = task.preferred_instance_groups
found_acceptable_queue = False
for rampart_group in preferred_instance_groups:
if self.get_remaining_capacity(rampart_group.name) <= 0:
logger.debug("Skipping group %s capacity <= 0", rampart_group.name)
remaining_capacity = self.get_remaining_capacity(rampart_group.name)
if remaining_capacity <= 0:
logger.debug("Skipping group %s, remaining_capacity %s <= 0",
rampart_group.name, remaining_capacity)
continue
if not self.would_exceed_capacity(task, rampart_group.name):
logger.debug("Starting %s in group %s", task.log_format, rampart_group.name)
logger.debug("Starting %s in group %s (remaining_capacity=%s)",
task.log_format, rampart_group.name, remaining_capacity)
self.graph[rampart_group.name]['graph'].add_job(task)
self.start_task(task, rampart_group, task.get_jobs_fail_chain())
found_acceptable_queue = True
break
else:
logger.debug("Not enough capacity to run %s on %s (remaining_capacity=%s)",
task.log_format, rampart_group.name, remaining_capacity)
if not found_acceptable_queue:
logger.debug("%s couldn't be scheduled on graph, waiting for next cycle", task.log_format)
    def fail_jobs_if_not_in_celery(self, node_jobs, active_tasks, celery_task_start_time):
        """Mark jobs as failed when Celery has no record of them.

        ``node_jobs`` is the list of jobs the Tower DB believes are active on a
        node; ``active_tasks`` is the list of celery task ids actually present
        in the celery queues; ``celery_task_start_time`` is used to skip jobs
        modified after the inspection started (they may not have reached
        celery yet).  Side effects per failed job: DB status update,
        failure-notification templates, and a websocket status emit.
        """
        for task in node_jobs:
            # IGNORE_CELERY_INSPECTOR is a test/debug escape hatch that
            # disables this reconciliation entirely.
            if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
                if isinstance(task, WorkflowJob):
                    # Workflow jobs are not dispatched to celery directly.
                    continue
                if task.modified > celery_task_start_time:
                    # Too recent: may legitimately not be visible in celery yet.
                    continue
                task.status = 'failed'
                task.job_explanation += ' '.join((
                    'Task was marked as running in Tower but was not present in',
                    'Celery, so it has been marked as failed.',
                ))
                try:
                    task.save(update_fields=['status', 'job_explanation'])
                except DatabaseError:
                    # Row may have been deleted out from under us; nothing to fail.
                    logger.error("Task {} DB error in marking failed. Job possibly deleted.".format(task.log_format))
                    continue
                awx_tasks._send_notification_templates(task, 'failed')
                task.websocket_emit_status('failed')
                logger.error("Task {} has no record in celery. Marking as failed".format(task.log_format))
def cleanup_inconsistent_celery_tasks(self):
'''
Rectify tower db <-> celery inconsistent view of jobs state
@ -428,7 +463,13 @@ class TaskManager():
Only consider failing tasks on instances for which we obtained a task
list from celery for.
'''
running_tasks = self.get_running_tasks()
running_tasks, waiting_tasks = self.get_running_tasks()
all_celery_task_ids = []
for node, node_jobs in active_queues.iteritems():
all_celery_task_ids.extend(node_jobs)
self.fail_jobs_if_not_in_celery(waiting_tasks, all_celery_task_ids, celery_task_start_time)
for node, node_jobs in running_tasks.iteritems():
if node in active_queues:
active_tasks = active_queues[node]
@ -445,54 +486,35 @@ class TaskManager():
continue
except Instance.DoesNotExist:
logger.error("Execution node Instance {} not found in database. "
"The node is currently executing jobs {}".format(node, [str(j) for j in node_jobs]))
"The node is currently executing jobs {}".format(node,
[j.log_format for j in node_jobs]))
active_tasks = []
for task in node_jobs:
if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
if isinstance(task, WorkflowJob):
continue
if task.modified > celery_task_start_time:
continue
task.status = 'failed'
task.job_explanation += ' '.join((
'Task was marked as running in Tower but was not present in',
'Celery, so it has been marked as failed.',
))
try:
task.save(update_fields=['status', 'job_explanation'])
except DatabaseError:
logger.error("Task {} DB error in marking failed. Job possibly deleted.".format(task.log_format))
continue
awx_tasks._send_notification_templates(task, 'failed')
task.websocket_emit_status('failed')
logger.error("Task {} has no record in celery. Marking as failed".format(task.log_format))
def calculate_capacity_used(self, tasks):
for rampart_group in self.graph:
self.graph[rampart_group]['capacity_used'] = 0
for t in tasks:
# TODO: dock capacity for isolated job management tasks running in queue
for group_actual in InstanceGroup.objects.filter(instances__hostname=t.execution_node).values_list('name'):
if group_actual[0] in self.graph:
self.graph[group_actual[0]]['capacity_used'] += t.task_impact
self.fail_jobs_if_not_in_celery(node_jobs, active_tasks, celery_task_start_time)
def calculate_capacity_consumed(self, tasks):
self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)
def would_exceed_capacity(self, task, instance_group):
current_capacity = self.graph[instance_group]['capacity_used']
current_capacity = self.graph[instance_group]['consumed_capacity']
capacity_total = self.graph[instance_group]['capacity_total']
if current_capacity == 0:
return False
return (task.task_impact + current_capacity > capacity_total)
def consume_capacity(self, task, instance_group):
self.graph[instance_group]['capacity_used'] += task.task_impact
logger.debug('%s consumed %s capacity units from %s with prior total of %s',
task.log_format, task.task_impact, instance_group,
self.graph[instance_group]['consumed_capacity'])
self.graph[instance_group]['consumed_capacity'] += task.task_impact
def get_remaining_capacity(self, instance_group):
return (self.graph[instance_group]['capacity_total'] - self.graph[instance_group]['capacity_used'])
return (self.graph[instance_group]['capacity_total'] - self.graph[instance_group]['consumed_capacity'])
def process_tasks(self, all_sorted_tasks):
running_tasks = filter(lambda t: t.status in ['waiting', 'running'], all_sorted_tasks)
self.calculate_capacity_used(running_tasks)
self.calculate_capacity_consumed(running_tasks)
self.process_running_tasks(running_tasks)
@ -521,12 +543,13 @@ class TaskManager():
return finished_wfjs
def schedule(self):
logger.debug("Starting Schedule")
with transaction.atomic():
# Lock
with advisory_lock('task_manager_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running scheduler, another task holds lock")
return
logger.debug("Starting Scheduler")
self.cleanup_inconsistent_celery_tasks()
finished_wfjs = self._schedule()

View File

@ -3,7 +3,7 @@
import logging
# Celery
from celery import task
from celery import Task, task
# AWX
from awx.main.scheduler import TaskManager
@ -15,6 +15,12 @@ logger = logging.getLogger('awx.main.scheduler')
# updated model, the call to schedule() may get stale data.
class LogErrorsTask(Task):
    """Celery base task that logs the exception on any task failure.

    Use via ``@task(base=LogErrorsTask)`` so unhandled errors end up in the
    scheduler log instead of being lost, then defer to the default handler.
    """
    def on_failure(self, exc, task_id, args, kwargs, einfo):
        logger.exception('Task {} encountered exception.'.format(self.name), exc_info=exc)
        super(LogErrorsTask, self).on_failure(exc, task_id, args, kwargs, einfo)
@task
def run_job_launch(job_id):
TaskManager().schedule()
@ -25,7 +31,7 @@ def run_job_complete(job_id):
TaskManager().schedule()
@task
@task(base=LogErrorsTask)
def run_task_manager():
logger.debug("Running Tower task manager.")
TaskManager().schedule()

View File

@ -140,6 +140,9 @@ def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwarg
def sync_superuser_status_to_rbac(instance, **kwargs):
'When the is_superuser flag is changed on a user, reflect that in the membership of the System Admnistrator role'
update_fields = kwargs.get('update_fields', None)
if update_fields and 'is_superuser' not in update_fields:
return
if instance.is_superuser:
Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR).members.add(instance)
else:
@ -147,6 +150,8 @@ def sync_superuser_status_to_rbac(instance, **kwargs):
def create_user_role(instance, **kwargs):
if not kwargs.get('created', True):
return
try:
Role.objects.get(
content_type=ContentType.objects.get_for_model(instance),

View File

@ -748,6 +748,7 @@ def test_falsey_field_data(get, post, organization, admin, field_value):
'credential_type': net.pk,
'organization': organization.id,
'inputs': {
'username': 'joe-user', # username is required
'authorize': field_value
}
}
@ -922,6 +923,25 @@ def test_vault_create_ok(post, organization, admin, version, params):
assert decrypt_field(cred, 'vault_password') == 'some_password'
@pytest.mark.django_db
def test_vault_password_required(post, organization, admin):
    """POSTing a v2 vault credential with empty inputs must be rejected
    with a field-level error and must not create any Credential rows."""
    vault_type = CredentialType.defaults['vault']()
    vault_type.save()
    list_url = reverse('api:credential_list', kwargs={'version': 'v2'})
    payload = {
        'credential_type': vault_type.pk,
        'organization': organization.id,
        'name': 'Best credential ever',
        'inputs': {}
    }
    response = post(list_url, payload, admin)
    assert response.status_code == 400
    assert response.data['inputs'] == {'vault_password': ['required for Vault']}
    # Validation failure means nothing was persisted.
    assert Credential.objects.count() == 0
#
# Net Credentials
#
@ -1426,6 +1446,34 @@ def test_field_removal(put, organization, admin, credentialtype_ssh, version, pa
assert 'password' not in cred.inputs
@pytest.mark.django_db
def test_credential_type_immutable_in_v2(patch, organization, admin, credentialtype_ssh, credentialtype_aws):
    """An existing credential's credential_type cannot be switched via PATCH;
    the API must answer 400 with an error keyed on credential_type."""
    existing = Credential(
        credential_type=credentialtype_ssh,
        name='Best credential ever',
        organization=organization,
        inputs={
            'username': u'jim',
            'password': u'pass'
        }
    )
    existing.save()
    detail_url = reverse('api:credential_detail', kwargs={'version': 'v2', 'pk': existing.pk})
    attempted_change = {
        'credential_type': credentialtype_aws.pk,
        'inputs': {
            'username': u'jim',
            'password': u'pass'
        }
    }
    response = patch(detail_url, attempted_change, admin)
    assert response.status_code == 400
    assert 'credential_type' in response.data
@pytest.mark.django_db
@pytest.mark.parametrize('version, params', [
['v1', {

View File

@ -204,22 +204,6 @@ def test_delete_inventory_group(delete, group, alice, role_field, expected_statu
delete(reverse('api:group_detail', kwargs={'pk': group.id}), alice, expect=expected_status_code)
@pytest.mark.django_db
def test_create_inventory_smarthost(post, get, inventory, admin_user, organization):
data = { 'name': 'Host 1', 'description': 'Test Host'}
smart_inventory = Inventory(name='smart',
kind='smart',
organization=organization,
host_filter='inventory_sources__source=ec2')
smart_inventory.save()
post(reverse('api:inventory_hosts_list', kwargs={'pk': smart_inventory.id}), data, admin_user)
resp = get(reverse('api:inventory_hosts_list', kwargs={'pk': smart_inventory.id}), admin_user)
jdata = json.loads(resp.content)
assert getattr(smart_inventory, 'kind') == 'smart'
assert jdata['count'] == 0
@pytest.mark.django_db
def test_create_inventory_smartgroup(post, get, inventory, admin_user, organization):
data = { 'name': 'Group 1', 'description': 'Test Group'}

View File

@ -73,6 +73,7 @@ class TestJobTemplateCopyEdit:
fake_view = FakeView()
fake_view.request = request
fake_view.kwargs = {'pk': '42'}
context = {}
context['view'] = fake_view
context['request'] = request

View File

@ -7,35 +7,21 @@ from awx.main.models import Inventory, Host
@pytest.mark.django_db
def test_empty_inventory(post, get, admin_user, organization, group_factory):
inventory = Inventory(name='basic_inventory',
kind='',
inventory = Inventory(name='basic_inventory',
kind='',
organization=organization)
inventory.save()
resp = get(reverse('api:inventory_script_view', kwargs={'version': 'v2', 'pk': inventory.pk}), admin_user)
jdata = json.loads(resp.content)
assert inventory.hosts.count() == 0
assert jdata == {}
@pytest.mark.django_db
def test_empty_smart_inventory(post, get, admin_user, organization, group_factory):
smart_inventory = Inventory(name='smart',
kind='smart',
organization=organization,
host_filter='enabled=True')
smart_inventory.save()
resp = get(reverse('api:inventory_script_view', kwargs={'version': 'v2', 'pk': smart_inventory.pk}), admin_user)
smartjdata = json.loads(resp.content)
assert smart_inventory.hosts.count() == 0
assert smartjdata == {}
@pytest.mark.django_db
def test_ungrouped_hosts(post, get, admin_user, organization, group_factory):
inventory = Inventory(name='basic_inventory',
kind='',
inventory = Inventory(name='basic_inventory',
kind='',
organization=organization)
inventory.save()
Host.objects.create(name='first_host', inventory=inventory)
@ -44,32 +30,3 @@ def test_ungrouped_hosts(post, get, admin_user, organization, group_factory):
jdata = json.loads(resp.content)
assert inventory.hosts.count() == 2
assert len(jdata['all']['hosts']) == 2
@pytest.mark.django_db
def test_grouped_hosts_smart_inventory(post, get, admin_user, organization, group_factory):
inventory = Inventory(name='basic_inventory',
kind='',
organization=organization)
inventory.save()
groupA = group_factory('test_groupA')
host1 = Host.objects.create(name='first_host', inventory=inventory)
host2 = Host.objects.create(name='second_host', inventory=inventory)
Host.objects.create(name='third_host', inventory=inventory)
groupA.hosts.add(host1)
groupA.hosts.add(host2)
smart_inventory = Inventory(name='smart_inventory',
kind='smart',
organization=organization,
host_filter='enabled=True')
smart_inventory.save()
resp = get(reverse('api:inventory_script_view', kwargs={'version': 'v2', 'pk': inventory.pk}), admin_user)
jdata = json.loads(resp.content)
resp = get(reverse('api:inventory_script_view', kwargs={'version': 'v2', 'pk': smart_inventory.pk}), admin_user)
smartjdata = json.loads(resp.content)
assert getattr(smart_inventory, 'kind') == 'smart'
assert inventory.hosts.count() == 3
assert len(jdata['all']['hosts']) == 1
assert smart_inventory.hosts.count() == 3
assert len(smartjdata['all']['hosts']) == 3

View File

@ -10,6 +10,7 @@ from awx.main.models import (
InventorySource,
InventoryUpdate,
)
from awx.main.utils.filters import SmartFilter
@pytest.mark.django_db
@ -104,40 +105,22 @@ def setup_inventory_groups(inventory, group_factory):
@pytest.mark.django_db
class TestHostManager:
def test_host_filter_change(self, setup_ec2_gce, organization):
smart_inventory = Inventory(name='smart',
kind='smart',
organization=organization,
host_filter='inventory_sources__source=ec2')
smart_inventory.save()
assert len(smart_inventory.hosts.all()) == 2
smart_inventory.host_filter = 'inventory_sources__source=gce'
smart_inventory.save()
assert len(smart_inventory.hosts.all()) == 1
def test_host_filter_not_smart(self, setup_ec2_gce, organization):
smart_inventory = Inventory(name='smart',
organization=organization,
host_filter='inventory_sources__source=ec2')
assert len(smart_inventory.hosts.all()) == 0
def test_host_objects_manager(self, setup_ec2_gce, organization):
smart_inventory = Inventory(kind='smart',
name='smart',
organization=organization,
host_filter='inventory_sources__source=ec2')
smart_inventory.save()
def test_host_distinctness(self, setup_inventory_groups, organization):
"""
two criteria would both yield the same host, check that we only get 1 copy here
"""
assert (
list(SmartFilter.query_from_string('name=single_host or name__startswith=single_')) ==
[Host.objects.get(name='single_host')]
)
hosts = smart_inventory.hosts.all()
assert len(hosts) == 2
assert hosts[0].inventory_sources.first().source == 'ec2'
assert hosts[1].inventory_sources.first().source == 'ec2'
def test_host_objects_no_dupes(self, setup_inventory_groups, organization):
smart_inventory = Inventory(name='smart',
kind='smart',
organization=organization,
host_filter='groups__name=test_groupA or groups__name=test_groupB')
smart_inventory.save()
assert len(smart_inventory.hosts.all()) == 1
# Things we can not easily test due to SQLite backend:
# 2 organizations with host of same name only has 1 entry in smart inventory
# smart inventory in 1 organization does not include host from another
# smart inventory correctly returns hosts in filter in same organization

View File

@ -0,0 +1,34 @@
import pytest
from django.test import TransactionTestCase
from awx.main.models import (
Instance,
InstanceGroup,
)
@pytest.mark.django_db
class TestCapacityMapping(TransactionTestCase):
def sample_cluster(self):
ig_small = InstanceGroup.objects.create(name='ig_small')
ig_large = InstanceGroup.objects.create(name='ig_large')
tower = InstanceGroup.objects.create(name='tower')
i1 = Instance.objects.create(hostname='i1', capacity=200)
i2 = Instance.objects.create(hostname='i2', capacity=200)
i3 = Instance.objects.create(hostname='i3', capacity=200)
ig_small.instances.add(i1)
ig_large.instances.add(i2, i3)
tower.instances.add(i2)
return [tower, ig_large, ig_small]
def test_mapping(self):
self.sample_cluster()
with self.assertNumQueries(2):
inst_map, ig_map = InstanceGroup.objects.capacity_mapping()
assert inst_map['i1'] == set(['ig_small'])
assert inst_map['i2'] == set(['ig_large', 'tower'])
assert ig_map['ig_small'] == set(['ig_small'])
assert ig_map['ig_large'] == set(['ig_large', 'tower'])
assert ig_map['tower'] == set(['ig_large', 'tower'])

View File

@ -9,6 +9,7 @@ from awx.main.scheduler import TaskManager
from awx.main.models import (
Job,
Instance,
WorkflowJob,
)
@ -230,27 +231,29 @@ class TestReaper():
Instance.objects.create(hostname='host4_offline', capacity=0)
j1 = Job.objects.create(status='pending', execution_node='host1')
j2 = Job.objects.create(status='waiting', celery_task_id='considered_j2', execution_node='host1')
j3 = Job.objects.create(status='waiting', celery_task_id='considered_j3', execution_node='host1')
j2 = Job.objects.create(status='waiting', celery_task_id='considered_j2')
j3 = Job.objects.create(status='waiting', celery_task_id='considered_j3')
j3.modified = now - timedelta(seconds=60)
j3.save(update_fields=['modified'])
j4 = Job.objects.create(status='running', celery_task_id='considered_j4', execution_node='host1')
j5 = Job.objects.create(status='waiting', celery_task_id='reapable_j5', execution_node='host1')
j5 = Job.objects.create(status='waiting', celery_task_id='reapable_j5')
j5.modified = now - timedelta(seconds=60)
j5.save(update_fields=['modified'])
j6 = Job.objects.create(status='waiting', celery_task_id='considered_j6', execution_node='host2')
j6 = Job.objects.create(status='waiting', celery_task_id='considered_j6')
j6.modified = now - timedelta(seconds=60)
j6.save(update_fields=['modified'])
j7 = Job.objects.create(status='running', celery_task_id='considered_j7', execution_node='host2')
j8 = Job.objects.create(status='running', celery_task_id='reapable_j7', execution_node='host2')
j9 = Job.objects.create(status='waiting', celery_task_id='host3_j8', execution_node='host3_split')
j9 = Job.objects.create(status='waiting', celery_task_id='reapable_j8')
j9.modified = now - timedelta(seconds=60)
j9.save(update_fields=['modified'])
j10 = Job.objects.create(status='running', execution_node='host3_split')
j10 = Job.objects.create(status='running', celery_task_id='host3_j10', execution_node='host3_split')
j11 = Job.objects.create(status='running', celery_task_id='host4_j11', execution_node='host4_offline')
js = [j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11]
j12 = WorkflowJob.objects.create(status='running', celery_task_id='workflow_job', execution_node='host1')
js = [j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12]
for j in js:
j.save = mocker.Mock(wraps=j.save)
j.websocket_emit_status = mocker.Mock()
@ -263,12 +266,16 @@ class TestReaper():
@pytest.fixture
def running_tasks(self, all_jobs):
return {
'host1': all_jobs[2:5],
'host2': all_jobs[5:8],
'host3_split': all_jobs[8:10],
'host1': [all_jobs[3]],
'host2': [all_jobs[7], all_jobs[8]],
'host3_split': [all_jobs[9]],
'host4_offline': [all_jobs[10]],
}
@pytest.fixture
def waiting_tasks(self, all_jobs):
return [all_jobs[2], all_jobs[4], all_jobs[5], all_jobs[8]]
@pytest.fixture
def reapable_jobs(self, all_jobs):
return [all_jobs[4], all_jobs[7], all_jobs[10]]
@ -287,10 +294,10 @@ class TestReaper():
@pytest.mark.django_db
@mock.patch('awx.main.tasks._send_notification_templates')
@mock.patch.object(TaskManager, 'get_active_tasks', lambda self: ([], []))
def test_cleanup_inconsistent_task(self, notify, active_tasks, considered_jobs, reapable_jobs, running_tasks, mocker):
def test_cleanup_inconsistent_task(self, notify, active_tasks, considered_jobs, reapable_jobs, running_tasks, waiting_tasks, mocker):
tm = TaskManager()
tm.get_running_tasks = mocker.Mock(return_value=running_tasks)
tm.get_running_tasks = mocker.Mock(return_value=(running_tasks, waiting_tasks))
tm.get_active_tasks = mocker.Mock(return_value=active_tasks)
tm.cleanup_inconsistent_celery_tasks()
@ -299,7 +306,7 @@ class TestReaper():
if j not in reapable_jobs:
j.save.assert_not_called()
assert notify.call_count == 3
assert notify.call_count == 4
notify.assert_has_calls([mock.call(j, 'failed') for j in reapable_jobs], any_order=True)
for j in reapable_jobs:
@ -314,20 +321,23 @@ class TestReaper():
tm = TaskManager()
# Ensure the query grabs the expected jobs
execution_nodes_jobs = tm.get_running_tasks()
execution_nodes_jobs, waiting_jobs = tm.get_running_tasks()
assert 'host1' in execution_nodes_jobs
assert 'host2' in execution_nodes_jobs
assert 'host3_split' in execution_nodes_jobs
assert all_jobs[2] in execution_nodes_jobs['host1']
assert all_jobs[3] in execution_nodes_jobs['host1']
assert all_jobs[4] in execution_nodes_jobs['host1']
assert all_jobs[5] in execution_nodes_jobs['host2']
assert all_jobs[6] in execution_nodes_jobs['host2']
assert all_jobs[7] in execution_nodes_jobs['host2']
assert all_jobs[8] in execution_nodes_jobs['host3_split']
assert all_jobs[9] in execution_nodes_jobs['host3_split']
assert all_jobs[10] in execution_nodes_jobs['host4_offline']
assert all_jobs[11] not in execution_nodes_jobs['host1']
assert all_jobs[2] in waiting_jobs
assert all_jobs[4] in waiting_jobs
assert all_jobs[5] in waiting_jobs
assert all_jobs[8] in waiting_jobs

View File

@ -226,7 +226,7 @@ def test_credential_creation_validation_failure(organization_factory, inputs):
[EXAMPLE_PRIVATE_KEY.replace('=', '\u003d'), None, True], # automatically fix JSON-encoded GCE keys
])
def test_ssh_key_data_validation(organization, kind, ssh_key_data, ssh_key_unlock, valid):
inputs = {}
inputs = {'username': 'joe-user'}
if ssh_key_data:
inputs['ssh_key_data'] = ssh_key_data
if ssh_key_unlock:

View File

@ -50,7 +50,7 @@ def test_ig_associability(organization, default_instance_group, admin, system_au
organization.instance_groups.add(default_instance_group)
assert admin_access.can_unattach(organization, default_instance_group, 'instance_groups', None)
assert oadmin_access.can_unattach(organization, default_instance_group, 'instance_groups', None)
assert not oadmin_access.can_unattach(organization, default_instance_group, 'instance_groups', None)
assert not auditor_access.can_unattach(organization, default_instance_group, 'instance_groups', None)
assert not omember_access.can_unattach(organization, default_instance_group, 'instance_groups', None)

View File

@ -174,3 +174,17 @@ def test_inventory_source_org_admin_schedule_access(org_admin, inventory_source)
assert access.get_queryset()
assert access.can_read(schedule)
assert access.can_change(schedule, {'rrule': 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2'})
@pytest.fixture
def smart_inventory(organization):
return organization.inventories.create(name="smart-inv", kind="smart")
@pytest.mark.django_db
class TestSmartInventory:
def test_host_filter_edit(self, smart_inventory, rando, org_admin):
assert InventoryAccess(org_admin).can_admin(smart_inventory, {'host_filter': 'search=foo'})
smart_inventory.admin_role.members.add(rando)
assert not InventoryAccess(rando).can_admin(smart_inventory, {'host_filter': 'search=foo'})

View File

@ -1,4 +1,4 @@
import mock
import pytest
@ -39,6 +39,7 @@ def get_summary_fields_assert():
def get_summary_fields_mock_and_run():
def fn(serializer_class, model_obj):
serializer = serializer_class()
serializer.context['view'] = mock.Mock(kwargs={})
return serializer.get_summary_fields(model_obj)
return fn

View File

@ -108,6 +108,7 @@ class TestJobTemplateSerializerGetSummaryFields():
request.user = user
view = JobTemplateDetail()
view.request = request
view.kwargs = {}
serializer.context['view'] = view
with mocker.patch("awx.api.serializers.role_summary_fields_generator", return_value='Can eat pie'):

View File

@ -220,8 +220,8 @@ class TestHostInsights():
class TestInventoryHostsList(object):
def test_host_list_smart_inventory(self, mocker):
Inventory = namedtuple('Inventory', ['kind', 'host_filter', 'hosts'])
obj = Inventory(kind='smart', host_filter='localhost', hosts=HostManager())
Inventory = namedtuple('Inventory', ['kind', 'host_filter', 'hosts', 'organization_id'])
obj = Inventory(kind='smart', host_filter='localhost', hosts=HostManager(), organization_id=None)
obj.hosts.instance = obj
with mock.patch.object(InventoryHostsList, 'get_parent_object', return_value=obj):

View File

@ -0,0 +1,135 @@
import pytest
from awx.main.models import InstanceGroup
class FakeObject(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class Job(FakeObject):
task_impact = 43
def log_format(self):
return 'job 382 (fake)'
@pytest.fixture
def sample_cluster():
def stand_up_cluster():
class Instances(FakeObject):
def add(self, *args):
for instance in args:
self.obj.instance_list.append(instance)
def all(self):
return self.obj.instance_list
class InstanceGroup(FakeObject):
def __init__(self, **kwargs):
super(InstanceGroup, self).__init__(**kwargs)
self.instance_list = []
@property
def instances(self):
mgr = Instances(obj=self)
return mgr
class Instance(FakeObject):
pass
ig_small = InstanceGroup(name='ig_small')
ig_large = InstanceGroup(name='ig_large')
tower = InstanceGroup(name='tower')
i1 = Instance(hostname='i1', capacity=200)
i2 = Instance(hostname='i2', capacity=200)
i3 = Instance(hostname='i3', capacity=200)
ig_small.instances.add(i1)
ig_large.instances.add(i2, i3)
tower.instances.add(i2)
return [tower, ig_large, ig_small]
return stand_up_cluster
def test_committed_capacity(sample_cluster):
tower, ig_large, ig_small = sample_cluster()
tasks = [
Job(status='waiting', instance_group=tower),
Job(status='waiting', instance_group=ig_large),
Job(status='waiting', instance_group=ig_small)
]
capacities = InstanceGroup.objects.capacity_values(
qs=[tower, ig_large, ig_small], tasks=tasks, breakdown=True
)
# Jobs submitted to either tower or ig_larg must count toward both
assert capacities['tower']['committed_capacity'] == 43 * 2
assert capacities['ig_large']['committed_capacity'] == 43 * 2
assert capacities['ig_small']['committed_capacity'] == 43
def test_running_capacity(sample_cluster):
tower, ig_large, ig_small = sample_cluster()
tasks = [
Job(status='running', execution_node='i1'),
Job(status='running', execution_node='i2'),
Job(status='running', execution_node='i3')
]
capacities = InstanceGroup.objects.capacity_values(
qs=[tower, ig_large, ig_small], tasks=tasks, breakdown=True
)
# Tower is only given 1 instance
assert capacities['tower']['running_capacity'] == 43
# Large IG has 2 instances
assert capacities['ig_large']['running_capacity'] == 43 * 2
assert capacities['ig_small']['running_capacity'] == 43
def test_offline_node_running(sample_cluster):
"""
Assure that algorithm doesn't explode if a job is marked running
in an offline node
"""
tower, ig_large, ig_small = sample_cluster()
ig_small.instance_list[0].capacity = 0
tasks = [Job(status='running', execution_node='i1', instance_group=ig_small)]
capacities = InstanceGroup.objects.capacity_values(
qs=[tower, ig_large, ig_small], tasks=tasks)
assert capacities['ig_small']['consumed_capacity'] == 43
def test_offline_node_waiting(sample_cluster):
"""
Same but for a waiting job
"""
tower, ig_large, ig_small = sample_cluster()
ig_small.instance_list[0].capacity = 0
tasks = [Job(status='waiting', instance_group=ig_small)]
capacities = InstanceGroup.objects.capacity_values(
qs=[tower, ig_large, ig_small], tasks=tasks)
assert capacities['ig_small']['consumed_capacity'] == 43
def test_RBAC_reduced_filter(sample_cluster):
"""
User can see jobs that are running in `ig_small` and `ig_large` IGs,
but user does not have permission to see those actual instance groups.
Verify that this does not blow everything up.
"""
tower, ig_large, ig_small = sample_cluster()
tasks = [
Job(status='waiting', instance_group=tower),
Job(status='waiting', instance_group=ig_large),
Job(status='waiting', instance_group=ig_small)
]
capacities = InstanceGroup.objects.capacity_values(
qs=[tower], tasks=tasks, breakdown=True
)
# Cross-links between groups not visible to current user,
# so a naieve accounting of capacities is returned instead
assert capacities['tower']['committed_capacity'] == 43

View File

@ -19,8 +19,8 @@ from django.core.cache import cache
class TestCleanupInconsistentCeleryTasks():
@mock.patch.object(cache, 'get', return_value=None)
@mock.patch.object(TaskManager, 'get_active_tasks', return_value=([], {}))
@mock.patch.object(TaskManager, 'get_running_tasks', return_value={'host1': [Job(id=2), Job(id=3),]})
@mock.patch.object(InstanceGroup.objects, 'all', return_value=[])
@mock.patch.object(TaskManager, 'get_running_tasks', return_value=({'host1': [Job(id=2), Job(id=3),]}, []))
@mock.patch.object(InstanceGroup.objects, 'prefetch_related', return_value=[])
@mock.patch.object(Instance.objects, 'get', side_effect=Instance.DoesNotExist)
@mock.patch('awx.main.scheduler.logger')
def test_instance_does_not_exist(self, logger_mock, *args):
@ -31,19 +31,19 @@ class TestCleanupInconsistentCeleryTasks():
assert "mocked" in str(excinfo.value)
logger_mock.error.assert_called_once_with("Execution node Instance host1 not found in database. "
"The node is currently executing jobs ['None-2-new', "
"'None-3-new']")
"The node is currently executing jobs ['job 2 (new)', "
"'job 3 (new)']")
@mock.patch.object(cache, 'get', return_value=None)
@mock.patch.object(TaskManager, 'get_active_tasks', return_value=([], {'host1': []}))
@mock.patch.object(InstanceGroup.objects, 'all', return_value=[])
@mock.patch.object(InstanceGroup.objects, 'prefetch_related', return_value=[])
@mock.patch.object(TaskManager, 'get_running_tasks')
@mock.patch('awx.main.scheduler.logger')
def test_save_failed(self, logger_mock, get_running_tasks, *args):
logger_mock.error = mock.MagicMock()
job = Job(id=2, modified=tz_now(), status='running', celery_task_id='blah', execution_node='host1')
job.websocket_emit_status = mock.MagicMock()
get_running_tasks.return_value = {'host1': [job]}
get_running_tasks.return_value = ({'host1': [job]}, [])
tm = TaskManager()
with mock.patch.object(job, 'save', side_effect=DatabaseError):

View File

@ -74,7 +74,7 @@ def rpm_package_list():
def deb_package_list():
import apt
apt_cache = apt.Cache()
installed_packages = []
installed_packages = {}
apt_installed_packages = [pk for pk in apt_cache.keys() if apt_cache[pk].is_installed]
for package in apt_installed_packages:
ac_pkg = apt_cache[package].installed

View File

@ -29,9 +29,9 @@ class SocialAuthCallbackURL(object):
SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT = _('''\
Mapping to organization admins/users from social auth accounts. This setting
controls which users are placed into which Tower organizations based on
their username and email address. Configuration details are available in
Tower documentation.\
controls which users are placed into which Tower organizations based on their
username and email address. Configuration details are available in the Ansible
Tower documentation.'\
''')
# FIXME: /regex/gim (flags)
@ -152,11 +152,9 @@ register(
default='',
validators=[validate_ldap_bind_dn],
label=_('LDAP Bind DN'),
help_text=_('DN (Distinguished Name) of user to bind for all search queries. '
'Normally in the format "CN=Some User,OU=Users,DC=example,DC=com" '
'but may also be specified as "DOMAIN\username" for Active Directory. '
'This is the system user account we will use to login to query LDAP '
'for other user information.'),
help_text=_('DN (Distinguished Name) of user to bind for all search queries. This'
' is the system user account we will use to login to query LDAP for other'
' user information. Refer to the Ansible Tower documentation for example syntax.'),
category=_('LDAP'),
category_slug='ldap',
feature_required='ldap',
@ -213,7 +211,7 @@ register(
label=_('LDAP User Search'),
help_text=_('LDAP search query to find users. Any user that matches the given '
'pattern will be able to login to Tower. The user should also be '
'mapped into an Tower organization (as defined in the '
'mapped into a Tower organization (as defined in the '
'AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries '
'need to be supported use of "LDAPUnion" is possible. See '
'Tower documentation for details.'),
@ -235,7 +233,7 @@ register(
default=None,
label=_('LDAP User DN Template'),
help_text=_('Alternative to user search, if user DNs are all of the same '
'format. This approach will be more efficient for user lookups than '
'format. This approach is more efficient for user lookups than '
'searching if it is usable in your organizational environment. If '
'this setting has a value it will be used instead of '
'AUTH_LDAP_USER_SEARCH.'),
@ -250,11 +248,10 @@ register(
field_class=fields.LDAPUserAttrMapField,
default={},
label=_('LDAP User Attribute Map'),
help_text=_('Mapping of LDAP user schema to Tower API user attributes (key is '
'user attribute name, value is LDAP attribute name). The default '
'setting is valid for ActiveDirectory but users with other LDAP '
'configurations may need to change the values (not the keys) of '
'the dictionary/hash-table.'),
help_text=_('Mapping of LDAP user schema to Tower API user attributes. The default'
' setting is valid for ActiveDirectory but users with other LDAP'
' configurations may need to change the values. Refer to the Ansible'
' Tower documentation for additonal details.'),
category=_('LDAP'),
category_slug='ldap',
placeholder=collections.OrderedDict([
@ -270,10 +267,9 @@ register(
field_class=fields.LDAPSearchField,
default=[],
label=_('LDAP Group Search'),
help_text=_('Users are mapped to organizations based on their '
'membership in LDAP groups. This setting defines the LDAP search '
'query to find groups. Note that this, unlike the user search '
'above, does not support LDAPSearchUnion.'),
help_text=_('Users are mapped to organizations based on their membership in LDAP'
' groups. This setting defines the LDAP search query to find groups. '
'Unlike the user search, group search does not support LDAPSearchUnion.'),
category=_('LDAP'),
category_slug='ldap',
placeholder=(
@ -335,12 +331,9 @@ register(
field_class=fields.LDAPUserFlagsField,
default={},
label=_('LDAP User Flags By Group'),
help_text=_('User profile flags updated from group membership (key is user '
'attribute name, value is group DN). These are boolean fields '
'that are matched based on whether the user is a member of the '
'given group. So far only is_superuser and is_system_auditor '
'are settable via this method. This flag is set both true and '
'false at login time based on current LDAP settings.'),
help_text=_('Retrieve users from a given group. At this time, superuser and system'
' auditors are the only groups supported. Refer to the Ansible Tower'
' documentation for more detail.'),
category=_('LDAP'),
category_slug='ldap',
placeholder=collections.OrderedDict([
@ -355,9 +348,9 @@ register(
default={},
label=_('LDAP Organization Map'),
help_text=_('Mapping between organization admins/users and LDAP groups. This '
'controls what users are placed into what Tower organizations '
'controls which users are placed into which Tower organizations '
'relative to their LDAP group memberships. Configuration details '
'are available in Tower documentation.'),
'are available in the Ansible Tower documentation.'),
category=_('LDAP'),
category_slug='ldap',
placeholder=collections.OrderedDict([
@ -382,8 +375,8 @@ register(
field_class=fields.LDAPTeamMapField,
default={},
label=_('LDAP Team Map'),
help_text=_('Mapping between team members (users) and LDAP groups.'
'Configuration details are available in Tower documentation.'),
help_text=_('Mapping between team members (users) and LDAP groups. Configuration'
' details are available in the Ansible Tower documentation.'),
category=_('LDAP'),
category_slug='ldap',
placeholder=collections.OrderedDict([
@ -411,7 +404,7 @@ register(
allow_blank=True,
default='',
label=_('RADIUS Server'),
help_text=_('Hostname/IP of RADIUS server. RADIUS authentication will be '
help_text=_('Hostname/IP of RADIUS server. RADIUS authentication is '
'disabled if this setting is empty.'),
category=_('RADIUS'),
category_slug='radius',
@ -522,10 +515,9 @@ register(
read_only=True,
default=SocialAuthCallbackURL('google-oauth2'),
label=_('Google OAuth2 Callback URL'),
help_text=_('Create a project at https://console.developers.google.com/ to '
'obtain an OAuth2 key and secret for a web application. Ensure '
'that the Google+ API is enabled. Provide this URL as the '
'callback URL for your application.'),
help_text=_('Provide this URL as the callback URL for your application as part '
'of your registration process. Refer to the Ansible Tower '
'documentation for more detail.'),
category=_('Google OAuth2'),
category_slug='google-oauth2',
depends_on=['TOWER_URL_BASE'],
@ -537,7 +529,7 @@ register(
allow_blank=True,
default='',
label=_('Google OAuth2 Key'),
help_text=_('The OAuth2 key from your web application at https://console.developers.google.com/.'),
help_text=_('The OAuth2 key from your web application.'),
category=_('Google OAuth2'),
category_slug='google-oauth2',
placeholder='528620852399-gm2dt4hrl2tsj67fqamk09k1e0ad6gd8.apps.googleusercontent.com',
@ -549,7 +541,7 @@ register(
allow_blank=True,
default='',
label=_('Google OAuth2 Secret'),
help_text=_('The OAuth2 secret from your web application at https://console.developers.google.com/.'),
help_text=_('The OAuth2 secret from your web application.'),
category=_('Google OAuth2'),
category_slug='google-oauth2',
placeholder='q2fMVCmEregbg-drvebPp8OW',
@ -573,10 +565,10 @@ register(
field_class=fields.DictField,
default={},
label=_('Google OAuth2 Extra Arguments'),
help_text=_('Extra arguments for Google OAuth2 login. When only allowing a '
'single domain to authenticate, set to `{"hd": "yourdomain.com"}` '
'and Google will not display any other accounts even if the user '
'is logged in with multiple Google accounts.'),
help_text=_('Extra arguments for Google OAuth2 login. You can restrict it to'
' only allow a single domain to authenticate, even if the user is'
' logged in with multple Google accounts. Refer to the Ansible Tower'
' documentation for more detail.'),
category=_('Google OAuth2'),
category_slug='google-oauth2',
placeholder={'hd': 'example.com'},
@ -616,10 +608,9 @@ register(
read_only=True,
default=SocialAuthCallbackURL('github'),
label=_('GitHub OAuth2 Callback URL'),
help_text=_('Create a developer application at '
'https://github.com/settings/developers to obtain an OAuth2 '
'key (Client ID) and secret (Client Secret). Provide this URL '
'as the callback URL for your application.'),
help_text=_('Provide this URL as the callback URL for your application as part '
'of your registration process. Refer to the Ansible Tower '
'documentation for more detail.'),
category=_('GitHub OAuth2'),
category_slug='github',
depends_on=['TOWER_URL_BASE'],
@ -682,10 +673,9 @@ register(
read_only=True,
default=SocialAuthCallbackURL('github-org'),
label=_('GitHub Organization OAuth2 Callback URL'),
help_text=_('Create an organization-owned application at '
'https://github.com/organizations/<yourorg>/settings/applications '
'and obtain an OAuth2 key (Client ID) and secret (Client Secret). '
'Provide this URL as the callback URL for your application.'),
help_text=_('Provide this URL as the callback URL for your application as part '
'of your registration process. Refer to the Ansible Tower '
'documentation for more detail.'),
category=_('GitHub Organization OAuth2'),
category_slug='github-org',
depends_on=['TOWER_URL_BASE'],
@ -838,10 +828,9 @@ register(
read_only=True,
default=SocialAuthCallbackURL('azuread-oauth2'),
label=_('Azure AD OAuth2 Callback URL'),
help_text=_('Register an Azure AD application as described by '
'https://msdn.microsoft.com/en-us/library/azure/dn132599.aspx '
'and obtain an OAuth2 key (Client ID) and secret (Client Secret). '
'Provide this URL as the callback URL for your application.'),
help_text=_('Provide this URL as the callback URL for your application as part'
' of your registration process. Refer to the Ansible Tower'
' documentation for more detail. '),
category=_('Azure AD OAuth2'),
category_slug='azuread-oauth2',
depends_on=['TOWER_URL_BASE'],
@ -984,7 +973,8 @@ register(
field_class=fields.SAMLOrgInfoField,
required=True,
label=_('SAML Service Provider Organization Info'),
help_text=_('Configure this setting with information about your app.'),
help_text=_('Provide the URL, display name, and the name of your app. Refer to'
' the Ansible Tower documentation for example syntax.'),
category=_('SAML'),
category_slug='saml',
placeholder=collections.OrderedDict([
@ -1003,7 +993,9 @@ register(
allow_blank=True,
required=True,
label=_('SAML Service Provider Technical Contact'),
help_text=_('Configure this setting with your contact information.'),
help_text=_('Provide the name and email address of the technical contact for'
' your service provider. Refer to the Ansible Tower documentation'
' for example syntax.'),
category=_('SAML'),
category_slug='saml',
placeholder=collections.OrderedDict([
@ -1019,7 +1011,9 @@ register(
allow_blank=True,
required=True,
label=_('SAML Service Provider Support Contact'),
help_text=_('Configure this setting with your contact information.'),
help_text=_('Provide the name and email address of the support contact for your'
' service provider. Refer to the Ansible Tower documentation for'
' example syntax.'),
category=_('SAML'),
category_slug='saml',
placeholder=collections.OrderedDict([
@ -1034,12 +1028,11 @@ register(
field_class=fields.SAMLEnabledIdPsField,
default={},
label=_('SAML Enabled Identity Providers'),
help_text=_('Configure the Entity ID, SSO URL and certificate for each '
'identity provider (IdP) in use. Multiple SAML IdPs are supported. '
'Some IdPs may provide user data using attribute names that differ '
'from the default OIDs '
'(https://github.com/omab/python-social-auth/blob/master/social/backends/saml.py#L16). '
'Attribute names may be overridden for each IdP.'),
help_text=_('Configure the Entity ID, SSO URL and certificate for each identity'
' provider (IdP) in use. Multiple SAML IdPs are supported. Some IdPs'
' may provide user data using attribute names that differ from the'
' default OIDs. Attribute names may be overridden for each IdP. Refer'
' to the Ansible documentation for additional details and syntax.'),
category=_('SAML'),
category_slug='saml',
placeholder=collections.OrderedDict([

View File

@ -53,7 +53,7 @@ class SocialAuthMiddleware(SocialAuthExceptionMiddleware):
if not auth_token and request.user and request.user.is_authenticated():
logout(request)
elif auth_token and request.user != auth_token.user:
elif auth_token and request.user.is_anonymous is False and request.user != auth_token.user:
logout(request)
auth_token.user.backend = ''
login(request, auth_token.user)

View File

@ -43,8 +43,8 @@ function AddCredentialsController (models, $state, strings) {
};
vm.form.save = data => {
data.user = me.getSelf().id;
data.user = me.get('id');
return credential.request('post', data);
};

View File

@ -45,6 +45,14 @@ function EditCredentialsController (models, $state, $scope, strings) {
vm.form.disabled = !isEditable;
}
let isOrgAdmin = _.some(me.get('related.admin_of_organizations.results'), (org) => {return org.id === organization.get('id');});
let isSuperuser = me.get('is_superuser');
let isCurrentAuthor = Boolean(credential.get('summary_fields.created_by.id') === me.get('id'));
vm.form.organization._disabled = true;
if(isSuperuser || isOrgAdmin || (credential.get('organization') === null && isCurrentAuthor)){
vm.form.organization._disabled = false;
}
vm.form.organization._resource = 'organization';
vm.form.organization._model = organization;
vm.form.organization._route = 'credentials.edit.organization';
@ -75,12 +83,12 @@ function EditCredentialsController (models, $state, $scope, strings) {
};
/**
* If a credential's `credential_type` is changed while editing, the inputs associated with
* the old type need to be cleared before saving the inputs associated with the new type.
* If a credential's `credential_type` is changed while editing, the inputs associated with
* the old type need to be cleared before saving the inputs associated with the new type.
* Otherwise inputs are merged together making the request invalid.
*/
vm.form.save = data => {
data.user = me.getSelf().id;
data.user = me.get('id');
credential.unset('inputs');
return credential.request('put', data);

View File

@ -7,7 +7,9 @@ function CredentialsResolve ($q, $stateParams, Me, Credential, CredentialType, O
let id = $stateParams.credential_id;
let promises = {
me: new Me('get')
me: new Me('get').then((me) => {
return me.extend('get', 'admin_of_organizations');
})
};
if (!id) {

View File

@ -149,10 +149,10 @@ function LegacyCredentialsService (pathService) {
'QuerySet',
'$stateParams',
'GetBasePath',
(list, qs, $stateParams, GetBasePath) => {
let path = GetBasePath(list.basePath) || GetBasePath(list.name);
'resourceData',
(list, qs, $stateParams, GetBasePath, resourceData) => {
let path = resourceData.data.organization ? GetBasePath('organizations') + `${resourceData.data.organization}/users` : ((list.basePath) || GetBasePath(list.name));
return qs.search(path, $stateParams.user_search);
}
],
teamsDataset: [
@ -160,9 +160,19 @@ function LegacyCredentialsService (pathService) {
'QuerySet',
'$stateParams',
'GetBasePath',
(list, qs, $stateParams, GetBasePath) => {
'resourceData',
(list, qs, $stateParams, GetBasePath, resourceData) => {
let path = GetBasePath(list.basePath) || GetBasePath(list.name);
return qs.search(path, $stateParams.team_search);
if(!resourceData.data.organization) {
return null;
}
else {
$stateParams[`${list.iterator}_search`].organization = resourceData.data.organization;
return qs.search(path, $stateParams.team_search);
}
}
],
resourceData: ['CredentialModel', '$stateParams', (Credential, $stateParams) => {
@ -198,7 +208,8 @@ function LegacyCredentialsService (pathService) {
teams-dataset='$resolve.teamsDataset'
selected='allSelected'
resource-data='$resolve.resourceData'
title='Add Users / Teams'>
without-team-permissions='{{$resolve.resourceData.data.organization ? null : true}}'
title='{{$resolve.resourceData.data.organization ? "Add Users / Teams" : "Add Users"}}'>
</add-rbac-resource>`
}
},

View File

@ -342,12 +342,6 @@ textarea.allowresize {
}
}
.prepend-asterisk:before {
content: "\002A\00A0";
color: @red;
margin-right: -5px;
}
.subtitle {
font-size: 16px;
}

View File

@ -404,7 +404,7 @@
.select2-dropdown {
border:1px solid @field-border;
z-index: 1030;
}
.select2-container--open .select2-dropdown--below {
@ -700,6 +700,10 @@ input[type='radio']:checked:before {
width: 30px;
}
.Form-requiredAsterisk {
color: @red;
}
@media only screen and (max-width: 650px) {
.Form-formGroup {
flex: 1 0 auto;

View File

@ -346,6 +346,8 @@ table, tbody {
background-color: @default-no-items-bord;
color: @list-no-items-txt;
text-transform: uppercase;
text-align: center;
padding: 10px;
}
.modal-body > .List-noItems {

View File

@ -1,11 +1,11 @@
<label class="at-InputLabel">
<span ng-if="state.required" class="at-InputLabel-required">*</span>
<span class="at-InputLabel-name">{{::state.label}}</span>
<span class="at-InputLabel-name" >{{::state.label | translate}}</span>
<at-popover state="state"></at-popover>
<span ng-if="state._displayHint" class="at-InputLabel-hint">{{::state._hint}}</span>
<span ng-if="state._displayHint" class="at-InputLabel-hint" translate>{{::state._hint}}</span>
<div ng-if="state._displayPromptOnLaunch" class="at-InputLabel-checkbox pull-right">
<label class="at-InputLabel-checkboxLabel">
<input type="checkbox"
<input type="checkbox"
ng-model="state._promptOnLaunch"
ng-change="vm.togglePromptOnLaunch()" />
<p>{{:: vm.strings.get('label.PROMPT_ON_LAUNCH') }}</p>

View File

@ -9,8 +9,8 @@
<i ng-if="popover.position === 'top'" class="fa fa-caret-down fa-2x"></i>
</div>
<div class="at-Popover-content">
<h4 ng-if="popover.title" class="at-Popover-title">{{ popover.title }}</h4>
<p ng-if="popover.text" class="at-Popover-text">{{ popover.text }}</p>
<h4 ng-if="popover.title" class="at-Popover-title">{{ popover.title | translate }}</h4>
<p ng-if="popover.text" class="at-Popover-text">{{ popover.text | translate}}</p>
</div>
</div>
</div>

View File

@ -277,9 +277,35 @@ function has (method, keys) {
return value !== undefined && value !== null;
}
function extend (method, related) {
if (!related) {
related = method
method = 'GET'
} else {
method = method.toUpperCase()
}
if (this.has(method, `related.${related}`)) {
let id = this.get('id')
let req = {
method,
url: this.get(`related.${related}`)
};
return $http(req)
.then(({data}) => {
this.set(method, `related.${related}`, data);
return this;
})
}
return Promise.reject(new Error(`No related property, ${related}, exists`));
}
function normalizePath (resource) {
let version = '/api/v2/';
return `${version}${resource}/`;
}
@ -383,6 +409,7 @@ function BaseModel (path, settings) {
this.search = search;
this.set = set;
this.unset = unset;
this.extend = extend;
this.http = {
get: httpGet.bind(this),

View File

@ -1,16 +1,19 @@
let BaseModel;
function getSelf () {
return this.get('results[0]');
}
function MeModel (method, resource, graft) {
BaseModel.call(this, 'me');
this.Constructor = MeModel;
this.getSelf = getSelf.bind(this);
return this.create(method, resource, graft);
return this.create(method, resource, graft)
.then(() => {
if (this.has('results')) {
_.merge(this.model.GET, this.get('results[0]'));
this.unset('results');
}
return this;
});
}
function MeModelLoader (_BaseModel_) {

View File

@ -13,4 +13,3 @@ angular
.service('CredentialTypeModel', CredentialType)
.service('MeModel', Me)
.service('OrganizationModel', Organization);

View File

@ -80,8 +80,8 @@ export default ['$rootScope', '$scope', 'GetBasePath', 'Rest', '$q', 'Wait', 'Pr
};
scope.removeObject = function(obj){
_.remove(scope.allSelected, {id: obj.id});
obj.isSelected = false;
let resourceType = scope.currentTab();
delete scope.allSelected[resourceType][obj.id];
};
scope.toggleKeyPane = function() {

View File

@ -41,7 +41,6 @@
<div class="Form-tab"
ng-click="selectTab('workflow_templates')"
ng-class="{'is-selected': tab.workflow_templates}"
ng-hide="resolve.workflowTemplatesDataset.status === 402"
translate>
Workflow Templates
</div>
@ -74,7 +73,7 @@
<div id="AddPermissions-jobTemplates" class="AddPermissions-list" ng-show="tab.job_templates">
<rbac-multiselect-list view="JobTemplates" all-selected="allSelected" dataset="resolve.jobTemplatesDataset"></rbac-multiselect-list>
</div>
<div ng-if="resolve.workflowTemplatesDataset.status !== 402" id="AddPermissions-workflowTemplates" class="AddPermissions-list" ng-show="tab.workflow_templates">
<div id="AddPermissions-workflowTemplates" class="AddPermissions-list" ng-show="tab.workflow_templates">
<rbac-multiselect-list view="WorkflowTemplates" all-selected="allSelected" dataset="resolve.workflowTemplatesDataset"></rbac-multiselect-list>
</div>
<div id="AddPermissions-projects" class="AddPermissions-list" ng-show="tab.projects">
@ -99,11 +98,6 @@
2
</span>
<translate>Please assign roles to the selected resources</translate>
<div class="AddPermissions-keyToggle btn"
ng-class="{'is-active': showKeyPane}"
ng-click="toggleKeyPane()" translate>
Key
</div>
</div>
<div class="Form-tabHolder">
<div class="Form-tab"
@ -147,10 +141,25 @@
Organizations
</div>
</div>
<div class="AddPermissions-keyPane"
ng-show="showKeyPane">
<div class="AddPermissions-keyRow"
ng-repeat="key in keys[currentTab()]">
<div class="AddPermissions-roleSet">
<!-- role drop-downs -->
<div class="AddPermissions-roleSet-dropdown" ng-repeat="(type, roleSet) in keys"
ng-show="tab[type] && showSection2Tab(type)">
<select id="{{type}}-role-select" class="form-control"
ng-model="roleSelection[type]"
ng-options="key as value.name for (key , value) in roleSet">
<option value="" selected hidden />
</select>
</div>
<div class="AddPermissions-keyToggle btn"
ng-class="{'is-active': showKeyPane}"
ng-click="toggleKeyPane()" translate>
Key
</div>
</div>
<div class="AddPermissions-keyPane" ng-show="showKeyPane">
<div class="AddPermissions-keyRow" ng-repeat="key in keys[currentTab()]">
<div class="AddPermissions-keyName">
{{ key.name }}
</div>
@ -159,29 +168,17 @@
</div>
</div>
</div>
</div>
<!-- role drop-downs -->
<div ng-repeat="(type, roleSet) in keys"
ng-show="tab[type] && showSection2Tab(type)">
<select
id="{{type}}-role-select" class="form-control"
ng-model="roleSelection[type]"
ng-options="key as value.name for (key , value) in roleSet">
<option value="" selected hidden />
</select>
</div>
<!-- lists of selected resources -->
<!-- (type, collection) => ('resource', {id: {}, ... }) -->
<div ng-repeat="(type, collection) in allSelected">
<rbac-selected-list
resource-type="type"
collection="collection"
selected="allSelected"
ng-show="tab[type]">
</rbac-selected-list>
</div>
<!-- lists of selected resources -->
<!-- (type, collection) => ('resource', {id: {}, ... }) -->
<div ng-repeat="(type, collection) in allSelected">
<rbac-selected-list
resource-type="type"
collection="collection"
selected="allSelected"
ng-show="tab[type]">
</rbac-selected-list>
</div>
<!-- end section 2 -->
</div>

View File

@ -220,3 +220,12 @@
.AddPermissions-keyDescription {
flex: 1 0 auto;
}
.AddPermissions-roleSet {
display: flex;
.AddPermissions-roleSet-dropdown {
flex: 1;
margin-right: 20px;
}
}

View File

@ -138,6 +138,10 @@ export default ['addPermissionsTeamsList', 'addPermissionsUsersList', 'TemplateL
scope[`${list.iterator}_dataset`] = scope.dataset.data;
scope[`${list.name}`] = scope[`${list.iterator}_dataset`].results;
scope.$watch(`allSelected.${list.name}`, function(){
_.forEach(scope[`${list.name}`], isSelected);
}, true);
scope.$watch(list.name, function(){
_.forEach(scope[`${list.name}`], isSelected);
optionsRequestDataProcessing();
@ -172,6 +176,7 @@ export default ['addPermissionsTeamsList', 'addPermissionsUsersList', 'TemplateL
}
function isSelected(item){
item.isSelected = false;
_.forEach(scope.allSelected[list.name], (selectedRow) => {
if(selectedRow.id === item.id) {
item.isSelected = true;

View File

@ -161,5 +161,8 @@ input#filePickerText {
border-style: solid;
border-color: @default-interface-txt transparent transparent transparent;
}
}
.LogAggregator-failedNotification{
max-width: 300px;
}

View File

@ -394,7 +394,9 @@ export default [
// Default AD_HOC_COMMANDS to an empty list
payload[key] = $scope[key].value || [];
} else {
payload[key] = $scope[key].value;
if ($scope[key]) {
payload[key] = $scope[key].value;
}
}
}
} else if($scope.configDataResolve[key].type === 'list' && $scope[key] !== null) {

View File

@ -18,6 +18,7 @@ export default [
'Rest',
'ProcessErrors',
'ngToast',
'$filter',
function(
$rootScope, $scope, $state, $stateParams, $timeout,
AngularCodeMirror,
@ -31,7 +32,8 @@ export default [
i18n,
Rest,
ProcessErrors,
ngToast
ngToast,
$filter
) {
var systemVm = this;
@ -166,6 +168,10 @@ export default [
populateLogAggregator(flag);
});
$scope.$on('LOG_AGGREGATOR_PROTOCOL_populated', function(e, data, flag) {
populateLogAggregator(flag);
});
function populateLogAggregator(flag){
if($scope.$parent.LOG_AGGREGATOR_TYPE !== null) {
$scope.$parent.LOG_AGGREGATOR_TYPE = _.find($scope.$parent.LOG_AGGREGATOR_TYPE_options, { value: $scope.$parent.LOG_AGGREGATOR_TYPE });
@ -215,10 +221,9 @@ export default [
.catch(({data, status}) => {
if (status === 500) {
ngToast.danger({
content: `<i class="fa fa-exclamation-triangle
Toast-successIcon"></i>` +
i18n._('Log aggregator test failed.<br />Detail: ') +
data.error
content: '<i class="fa fa-exclamation-triangle Toast-successIcon"></i>' +
i18n._('Log aggregator test failed. <br> Detail: ') + $filter('sanitize')(data.error),
additionalClasses: "LogAggregator-failedNotification"
});
} else {
ProcessErrors($scope, data, status, null,

View File

@ -62,7 +62,8 @@
disableChooseOption: true
},
LOG_AGGREGATOR_VERIFY_CERT: {
type: 'toggleSwitch'
type: 'toggleSwitch',
ngShow: "LOG_AGGREGATOR_PROTOCOL.value === 'https'"
}
},

View File

@ -8,7 +8,7 @@ export default ['templateUrl',
restrict: 'E',
link: function(scope) {
scope.$watch('capacity', function() {
scope.PercentRemainingStyle = {
scope.CapacityStyle = {
'flex-grow': scope.capacity * 0.01
};
}, true);

View File

@ -1,4 +1,4 @@
<div class="CapacityBar">
<div class="CapacityBar-remaining" ng-style="PercentRemainingStyle"></div>
<div class="CapacityBar-remaining" ng-style="CapacityStyle"></div>
<div class="CapacityBar-consumed"></div>
</div>

View File

@ -22,6 +22,7 @@
.Capacity-details--label {
color: @default-interface-txt;
margin: 0 10px 0 0;
width: 100px;
}
.Capacity-details--percentage {
@ -38,7 +39,7 @@
}
}
.List-tableCell--capacityRemainingColumn {
.List-tableCell--capacityColumn {
display: flex;
height: 40px;
align-items: center;

View File

@ -7,7 +7,7 @@
</div>
<div class="List-details">
<div class="Capacity-details">
<p class="Capacity-details--label" translate>Capacity</p>
<p class="Capacity-details--label" translate>Used Capacity</p>
<capacity-bar capacity="instanceGroupCapacity"></capacity-bar>
<span class="Capacity-details--percentage">{{ instanceGroupCapacity }}%</span>
</div>

View File

@ -5,6 +5,7 @@ export default ['i18n', function(i18n) {
iterator: 'instance_group',
editTitle: i18n._('INSTANCE GROUPS'),
listTitle: i18n._('INSTANCE GROUPS'),
emptyListText: i18n._('THERE ARE CURRENTLY NO INSTANCE GROUPS DEFINED'),
index: false,
hover: false,
@ -17,7 +18,7 @@ export default ['i18n', function(i18n) {
uiSref: 'instanceGroups.instances.list({instance_group_id: instance_group.id})',
ngClass: "{'isActive' : isActive()}"
},
percent_capacity_remaining: {
consumed_capacity: {
label: i18n._('Capacity'),
nosort: true,
},

View File

@ -7,7 +7,7 @@
</div>
<div class="List-details">
<div class="Capacity-details">
<p class="Capacity-details--label" translate>Capacity</p>
<p class="Capacity-details--label" translate>Used Capacity</p>
<capacity-bar capacity="instanceCapacity"></capacity-bar>
<span class="Capacity-details--percentage">{{ instanceCapacity }}%</span>
</div>

View File

@ -12,7 +12,7 @@ export default {
templateUrl: templateUrl('./instance-groups/instances/instance-jobs/instance-jobs'),
controller: function($scope, $rootScope, instance) {
$scope.instanceName = instance.hostname;
$scope.instanceCapacity = instance.percent_capacity_remaining;
$scope.instanceCapacity = instance.consumed_capacity;
$scope.instanceJobsRunning = instance.jobs_running;
$rootScope.breadcrumb.instance_name = instance.hostname;
}

View File

@ -15,27 +15,28 @@
"{{'Name' | translate}}"
<i ng-if="columnNoSort !== 'true'" class="fa columnSortIcon fa-sort-up" ng-class="orderByIcon()"></i>
</th>
<th id="instance-percent_capacity_remaining-header" class="List-tableHeader list-header list-header-noSort" translate>
Capacity
</th>
<th id="instance-jobs_running-header" class="List-tableHeader list-header list-header-noSort" translate>
Running Jobs
</th>
<th id="instance-consumed_capacity-header" class="List-tableHeader list-header list-header-noSort" translate>
Used Capacity
</th>
</tr>
</thead>
<tbody>
<!-- ngRepeat: instance in instances -->
<tr ng-class="{isActive: isActive(instance.id)}" id="instance.id" class="List-tableRow instance_class ng-scope" ng-repeat="instance in instances">
<td class="List-tableCell hostname-column col-md-5 col-sm-5 col-xs-5">
<a ui-sref="instanceGroups.instances.list.job.list({instance_id: instance.id})" class="ng-binding">{{ instance.hostname }}</a></td>
<td class="List-tableCell List-tableCell--capacityRemainingColumn ng-binding">
<capacity-bar capacity="instance.percent_capacity_remaining"></capacity-bar><span>{{ instance.percent_capacity_remaining }}%</span>
<a ui-sref="instanceGroups.instances.list.job.list({instance_id: instance.id})" class="ng-binding">{{ instance.hostname }}</a>
</td>
<td class="List-tableCell jobs_running-column ng-binding">
<a ui-sref="instanceGroups.instances.jobs({instance_group_id: $stateParams.instance_group_id})">
{{ instance.jobs_running }}
</a>
</td>
<td class="List-tableCell List-tableCell--capacityColumn ng-binding">
<capacity-bar capacity="instance.consumed_capacity"></capacity-bar><span>{{ instance.consumed_capacity }}%</span>
</td>
</tr>
</tbody>
</table>

View File

@ -16,7 +16,7 @@ export default ['i18n', function(i18n) {
modalColumnClass: 'col-md-8',
uiSref: 'instanceGroups.instances.list.job({instance_id: instance.id})'
},
percent_capacity_remaining: {
consumed_capacity: {
label: i18n._('Capacity'),
nosort: true,
},

View File

@ -9,7 +9,7 @@ export default {
templateUrl: templateUrl('./instance-groups/instance-group'),
controller: function($scope, $rootScope, instanceGroup) {
$scope.instanceGroupName = instanceGroup.name;
$scope.instanceGroupCapacity = instanceGroup.percent_capacity_remaining;
$scope.instanceGroupCapacity = instanceGroup.consumed_capacity;
$scope.instanceGroupJobsRunning = instanceGroup.jobs_running;
$rootScope.breadcrumb.instance_group_name = instanceGroup.name;
}

View File

@ -26,12 +26,12 @@
"{{'Name' | translate}}"
<i ng-if="columnNoSort !== 'true'" class="fa columnSortIcon fa-sort-up" ng-class="orderByIcon()"></i>
</th>
<th id="instance_group-percent_capacity_remaining-header" class="List-tableHeader list-header list-header-noSort" translate>
Capacity
</th>
<th id="instance_group-jobs_running-header" class="List-tableHeader list-header list-header-noSort" translate>
Running Jobs
</th>
<th id="instance_group-consumed_capacity-header" class="List-tableHeader list-header list-header-noSort" translate>
Used Capacity
</th>
</tr>
</thead>
<tbody>
@ -41,14 +41,14 @@
<a ui-sref="instanceGroups.instances.list({instance_group_id: instance_group.id})" class="ng-binding" >{{ instance_group.name }}</a>
<span class="badge List-titleBadge">{{ instance_group.instances }}</span>
</td>
<td class="List-tableCell List-tableCell--capacityRemainingColumn ng-binding">
<capacity-bar capacity="instance_group.percent_capacity_remaining"></capacity-bar><span>{{ instance_group.percent_capacity_remaining }}%</span>
</td>
<td class="List-tableCell jobs_running-column ng-binding">
<a ui-sref="instanceGroups.instances.jobs({instance_group_id: instance_group.id})">
{{ instance_group.jobs_running }}
</a>
</td>
<td class="List-tableCell List-tableCell--capacityColumn ng-binding">
<capacity-bar capacity="instance_group.consumed_capacity"></capacity-bar><span>{{ instance_group.consumed_capacity }}%</span>
</td>
</tr>
</tbody>
</table>

View File

@ -56,7 +56,7 @@
$state.go('inventories.edit.groups.edit', {inventory_id: $scope.inventory_id, group_id: id});
};
$scope.goToGroupGroups = function(id){console.log();
$scope.goToGroupGroups = function(id){
$state.go('inventories.edit.groups.edit.nested_groups', {inventory_id: $scope.inventory_id, group_id: id});
};

View File

@ -117,7 +117,7 @@ export default ['i18n', function(i18n) {
dataTitle: i18n._('Show Changes'),
dataPlacement: 'right',
dataContainer: 'body',
awPopOver: "<p>" + i18n._("If enabled, show the changes made by Ansible tasks, where supported. This is equivalent to Ansible's --diff mode.") + "</p>",
awPopOver: "<p>" + i18n._("If enabled, show the changes made by Ansible tasks, where supported. This is equivalent to Ansible&#x2019;s --diff mode.") + "</p>",
},
become_enabled: {
label: i18n._('Enable Privilege Escalation'),

View File

@ -98,8 +98,7 @@ export default ['i18n', function(i18n) {
ngClick: 'editInventory(inventory)',
awToolTip: i18n._('Edit inventory'),
dataPlacement: 'top',
ngShow: 'inventory.summary_fields.user_capabilities.edit',
ngHide: 'inventory.pending_deletion'
ngShow: '!inventory.pending_deletion && inventory.summary_fields.user_capabilities.edit'
},
view: {
label: i18n._('View'),
@ -113,8 +112,7 @@ export default ['i18n', function(i18n) {
ngClick: "deleteInventory(inventory.id, inventory.name)",
awToolTip: i18n._('Delete inventory'),
dataPlacement: 'top',
ngShow: 'inventory.summary_fields.user_capabilities.delete',
ngHide: 'inventory.pending_deletion'
ngShow: '!inventory.pending_deletion && inventory.summary_fields.user_capabilities.delete'
},
pending_deletion: {

View File

@ -58,13 +58,15 @@ export default ['templateUrl', 'Wait', '$filter', '$compile', 'i18n',
html += "<tbody>\n";
data.results.forEach(function(row) {
html += "<tr>\n";
html += "<td><a href=\"\" ng-click=\"viewJob(" + row.id + ")\" " + "aw-tool-tip=\"" + row.status.charAt(0).toUpperCase() + row.status.slice(1) +
". Click for details\" aw-tip-placement=\"top\" data-tooltip-outer-class=\"Tooltip-secondary\"><i class=\"fa SmartStatus-tooltip--" + row.status + " icon-job-" + row.status + "\"></i></a></td>\n";
html += "<td>" + ($filter('longDate')(row.finished)) + "</td>";
html += "<td><a href=\"\" ng-click=\"viewJob(" + row.id + ")\" " + "aw-tool-tip=\"" + row.status.charAt(0).toUpperCase() + row.status.slice(1) +
". Click for details\" aw-tip-placement=\"top\" data-tooltip-outer-class=\"Tooltip-secondary\">" + $filter('sanitize')(ellipsis(row.name)) + "</a></td>";
html += "</tr>\n";
if ((scope.inventory.has_active_failures && row.status === 'failed') || (!scope.inventory.has_active_failures && row.status === 'successful')) {
html += "<tr>\n";
html += "<td><a href=\"\" ng-click=\"viewJob(" + row.id + ")\" " + "aw-tool-tip=\"" + row.status.charAt(0).toUpperCase() + row.status.slice(1) +
". Click for details\" aw-tip-placement=\"top\" data-tooltip-outer-class=\"Tooltip-secondary\"><i class=\"fa SmartStatus-tooltip--" + row.status + " icon-job-" + row.status + "\"></i></a></td>\n";
html += "<td>" + ($filter('longDate')(row.finished)) + "</td>";
html += "<td><a href=\"\" ng-click=\"viewJob(" + row.id + ")\" " + "aw-tool-tip=\"" + row.status.charAt(0).toUpperCase() + row.status.slice(1) +
". Click for details\" aw-tip-placement=\"top\" data-tooltip-outer-class=\"Tooltip-secondary\">" + $filter('sanitize')(ellipsis(row.name)) + "</a></td>";
html += "</tr>\n";
}
});
html += "</tbody>\n";
html += "</table>\n";

View File

@ -33,7 +33,8 @@ import inventoryHosts from './related/hosts/related-host.route';
import smartInventoryHosts from './smart-inventory/smart-inventory-hosts.route';
import inventoriesList from './inventories.route';
import inventoryHostsAdd from './related/hosts/add/host-add.route';
import inventoryHostsEdit from './related/hosts/edit/host-edit.route';
import inventoryHostsEdit from './related/hosts/edit/standard-host-edit.route';
import smartInventoryHostsEdit from './related/hosts/edit/smart-host-edit.route';
import ansibleFactsRoute from '../shared/ansible-facts/ansible-facts.route';
import insightsRoute from './insights/insights.route';
import inventorySourcesCredentialRoute from './related/sources/lookup/sources-lookup-credential.route';
@ -130,13 +131,15 @@ angular.module('inventory', [
});
});
}],
checkProjectPermission: ['resourceData', '$stateParams', 'Rest', 'GetBasePath',
function(resourceData, $stateParams, Rest, GetBasePath){
checkProjectPermission: ['resourceData', '$stateParams', 'Rest', 'GetBasePath', 'credentialTypesLookup',
function(resourceData, $stateParams, Rest, GetBasePath, credentialTypesLookup){
if(_.has(resourceData, 'data.summary_fields.insights_credential')){
let credential_id = resourceData.data.summary_fields.insights_credential.id,
path = `${GetBasePath('projects')}?credential__id=${credential_id}&role_level=use_role`;
Rest.setUrl(path);
return Rest.get().then(({data}) => {
return credentialTypesLookup()
.then(kinds => {
let insightsKind = kinds.Insights;
let path = `${GetBasePath('projects')}?credential__credential_type=${insightsKind}&role_level=use_role`;
Rest.setUrl(path);
return Rest.get().then(({data}) => {
if (data.results.length > 0){
return true;
}
@ -146,6 +149,7 @@ angular.module('inventory', [
}).catch(() => {
return false;
});
});
}
else {
return false;
@ -319,6 +323,7 @@ angular.module('inventory', [
stateExtender.buildDefinition(smartInventoryHosts),
stateExtender.buildDefinition(inventoryHostsAdd),
stateExtender.buildDefinition(inventoryHostsEdit),
stateExtender.buildDefinition(smartInventoryHostsEdit),
stateExtender.buildDefinition(hostNestedGroupsRoute),
stateExtender.buildDefinition(inventorySourceListRoute),
stateExtender.buildDefinition(inventorySourceAddRoute),

View File

@ -6,10 +6,10 @@
export default ['$scope', 'NestedHostsListDefinition', '$rootScope', 'GetBasePath',
'rbacUiControlService', 'Dataset', '$state', '$filter', 'Prompt', 'Wait',
'HostsService', 'SetStatus', 'canAdd', 'GroupsService', 'ProcessErrors', 'groupData',
'HostsService', 'SetStatus', 'canAdd', 'GroupsService', 'ProcessErrors', 'groupData', 'inventoryData',
function($scope, NestedHostsListDefinition, $rootScope, GetBasePath,
rbacUiControlService, Dataset, $state, $filter, Prompt, Wait,
HostsService, SetStatus, canAdd, GroupsService, ProcessErrors, groupData) {
HostsService, SetStatus, canAdd, GroupsService, ProcessErrors, groupData, inventoryData) {
let list = NestedHostsListDefinition;
@ -25,6 +25,8 @@ export default ['$scope', 'NestedHostsListDefinition', '$rootScope', 'GetBasePat
$scope[`${list.iterator}_dataset`] = Dataset.data;
$scope[list.name] = $scope[`${list.iterator}_dataset`].results;
$scope.inventory_obj = inventoryData;
$rootScope.flashMessage = null;
$scope.$watchCollection(list.name, function() {

View File

@ -21,7 +21,7 @@ export default ['i18n', function(i18n) {
fields: {
toggleHost: {
ngDisabled: 'host.has_inventory_sources',
ngDisabled: '!nested_host.summary_fields.user_capabilities.edit || nested_host.has_inventory_sources',
label: '',
columnClass: 'List-staticColumn--toggle',
type: "toggle",
@ -102,7 +102,7 @@ export default ['i18n', function(i18n) {
showTipWhenDisabled: true,
tooltipInnerClass: "Tooltip-wide",
// TODO: we don't always want to show this
ngShow: true
ngShow: 'inventory_obj.summary_fields.user_capabilities.adhoc'
},
refresh: {
mode: 'all',

View File

@ -7,6 +7,7 @@
export default
['$scope', '$state', '$stateParams', 'GenerateForm', 'ParseTypeChange', 'HostsService', 'host', '$rootScope',
function($scope, $state, $stateParams, GenerateForm, ParseTypeChange, HostsService, host, $rootScope){
$scope.isSmartInvHost = $state.includes('inventories.editSmartInventory.hosts.edit');
$scope.parseType = 'yaml';
$scope.formCancel = function(){
$state.go('^', null, {reload: true});

View File

@ -0,0 +1,29 @@
export default {
name: "inventories.editSmartInventory.hosts.edit",
url: "/edit/:host_id",
ncyBreadcrumb: {
parent: "inventories.editSmartInventory.hosts",
label: "{{breadcrumb.host_name}}"
},
views: {
'hostForm@inventories': {
templateProvider: function(GenerateForm, RelatedHostsFormDefinition) {
let form = _.cloneDeep(RelatedHostsFormDefinition);
form.stateTree = 'inventories.editSmartInventory.hosts';
delete form.related;
return GenerateForm.buildHTML(form, {
mode: 'edit',
related: false
});
},
controller: 'RelatedHostEditController'
}
},
resolve: {
host: ['$stateParams', 'InventoriesService', function($stateParams, InventoriesService) {
return InventoriesService.getHost($stateParams.smartinventory_id, $stateParams.host_id).then(function(res) {
return res.data.results[0];
});
}]
}
};

View File

@ -89,7 +89,7 @@ export default ['$scope', 'ListDefinition', '$rootScope', 'GetBasePath',
$state.go('inventories.edit.hosts.add');
};
$scope.editHost = function(host){
$state.go('inventories.edit.hosts.edit', {inventory_id: host.inventory_id, host_id: host.id});
$state.go('.edit', {inventory_id: host.inventory_id, host_id: host.id});
};
$scope.goToInsights = function(host){
$state.go('inventories.edit.hosts.edit.insights', {inventory_id: host.inventory_id, host_id:host.id});

View File

@ -37,7 +37,7 @@ function(i18n) {
" set by the inventory sync process.") +
"</p>",
dataTitle: i18n._('Host Enabled'),
ngDisabled: 'host.has_inventory_sources'
ngDisabled: '!host.summary_fields.user_capabilities.edit || host.has_inventory_sources || isSmartInvHost'
}
},
fields: {
@ -56,11 +56,11 @@ function(i18n) {
dataTitle: i18n._('Host Name'),
dataPlacement: 'right',
dataContainer: 'body',
ngDisabled: '!(host.summary_fields.user_capabilities.edit || canAdd)'
ngDisabled: '!(host.summary_fields.user_capabilities.edit || canAdd) || isSmartInvHost'
},
description: {
label: i18n._('Description'),
ngDisabled: '!(host.summary_fields.user_capabilities.edit || canAdd)',
ngDisabled: '!(host.summary_fields.user_capabilities.edit || canAdd) || isSmartInvHost',
type: 'text'
},
host_variables: {
@ -78,23 +78,24 @@ function(i18n) {
'<p>' + i18n.sprintf(i18n._('View YAML examples at %s'), '<a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a>') + '</p>',
dataTitle: i18n._('Host Variables'),
dataPlacement: 'right',
dataContainer: 'body'
dataContainer: 'body',
ngDisabled: '!(host.summary_fields.user_capabilities.edit || canAdd) || isSmartInvHost'
}
},
buttons: {
cancel: {
ngClick: 'formCancel()',
ngShow: '(host.summary_fields.user_capabilities.edit || canAdd)'
ngShow: '(host.summary_fields.user_capabilities.edit || canAdd) && !isSmartInvHost'
},
close: {
ngClick: 'formCancel()',
ngShow: '!(host.summary_fields.user_capabilities.edit || canAdd)'
ngShow: '!(host.summary_fields.user_capabilities.edit || canAdd) || isSmartInvHost'
},
save: {
ngClick: 'formSave()',
ngDisabled: true,
ngShow: '(host.summary_fields.user_capabilities.edit || canAdd)'
ngShow: '(host.summary_fields.user_capabilities.edit || canAdd) && !isSmartInvHost'
}
},

View File

@ -20,7 +20,7 @@ export default ['i18n', function(i18n) {
fields: {
toggleHost: {
ngDisabled: 'host.has_inventory_sources',
ngDisabled: '!host.summary_fields.user_capabilities.edit || host.has_inventory_sources',
label: '',
columnClass: 'List-staticColumn--toggle',
type: "toggle",
@ -115,7 +115,7 @@ export default ['i18n', function(i18n) {
showTipWhenDisabled: true,
tooltipInnerClass: "Tooltip-wide",
// TODO: we don't always want to show this
ngShow: true
ngShow: 'inventory_obj.summary_fields.user_capabilities.adhoc'
},
create: {
mode: 'all',

View File

@ -9,12 +9,12 @@ export default ['$state', '$stateParams', '$scope', 'SourcesFormDefinition',
'GetChoices', 'GetBasePath', 'CreateSelect2', 'GetSourceTypeOptions',
'rbacUiControlService', 'ToJSON', 'SourcesService', 'Empty',
'Wait', 'Rest', 'Alert', 'ProcessErrors', 'inventorySourcesOptions',
'$rootScope',
'$rootScope', 'i18n',
function($state, $stateParams, $scope, SourcesFormDefinition, ParseTypeChange,
GenerateForm, inventoryData, GroupsService, GetChoices,
GetBasePath, CreateSelect2, GetSourceTypeOptions, rbacUiControlService,
ToJSON, SourcesService, Empty, Wait, Rest, Alert, ProcessErrors,
inventorySourcesOptions,$rootScope) {
inventorySourcesOptions,$rootScope, i18n) {
let form = SourcesFormDefinition;
init();
@ -165,43 +165,40 @@ export default ['$state', '$stateParams', '$scope', 'SourcesFormDefinition',
function initGroupBySelect(){
let add_new = false;
if($scope && $scope.source && $scope.source === 'ec2' || $scope && $scope.source && $scope.source.value && $scope.source.value === 'ec2'){
if( _.get($scope, 'source') === 'ec2' || _.get($scope.source, 'value') === 'ec2') {
$scope.group_by_choices = $scope.ec2_group_by;
$scope.groupByPopOver = "<p>Select which groups to create automatically. " +
$rootScope.BRAND_NAME + " will create group names similar to the following examples based on the options selected:</p><ul>" +
"<li>Availability Zone: <strong>zones &raquo; us-east-1b</strong></li>" +
"<li>Image ID: <strong>images &raquo; ami-b007ab1e</strong></li>" +
"<li>Instance ID: <strong>instances &raquo; i-ca11ab1e</strong></li>" +
"<li>Instance Type: <strong>types &raquo; type_m1_medium</strong></li>" +
"<li>Key Name: <strong>keys &raquo; key_testing</strong></li>" +
"<li>Region: <strong>regions &raquo; us-east-1</strong></li>" +
"<li>Security Group: <strong>security_groups &raquo; security_group_default</strong></li>" +
"<li>Tags: <strong>tags &raquo; tag_Name &raquo; tag_Name_host1</strong></li>" +
"<li>VPC ID: <strong>vpcs &raquo; vpc-5ca1ab1e</strong></li>" +
"<li>Tag None: <strong>tags &raquo; tag_none</strong></li>" +
"</ul><p>If blank, all groups above are created except <em>Instance ID</em>.</p>";
$scope.instanceFilterPopOver = "<p>Provide a comma-separated list of filter expressions. " +
"Hosts are imported to " + $rootScope.BRAND_NAME + " when <em>ANY</em> of the filters match.</p>" +
"Limit to hosts having a tag:<br />\n" +
"<blockquote>tag-key=TowerManaged</blockquote>\n" +
"Limit to hosts using either key pair:<br />\n" +
"<blockquote>key-name=staging, key-name=production</blockquote>\n" +
"Limit to hosts where the Name tag begins with <em>test</em>:<br />\n" +
"<blockquote>tag:Name=test*</blockquote>\n" +
"<p>View the <a href=\"http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html\" target=\"_blank\">Describe Instances documentation</a> " +
"for a complete list of supported filters.</p>";
$scope.groupByPopOver = "<p>" + i18n._("Select which groups to create automatically. ") +
$rootScope.BRAND_NAME + i18n._(" will create group names similar to the following examples based on the options selected:") + "</p><ul>" +
"<li>" + i18n._("Availability Zone:") + "<strong>zones &raquo; us-east-1b</strong></li>" +
"<li>" + i18n._("Image ID:") + "<strong>images &raquo; ami-b007ab1e</strong></li>" +
"<li>" + i18n._("Instance ID:") + "<strong>instances &raquo; i-ca11ab1e</strong></li>" +
"<li>" + i18n._("Instance Type:") + "<strong>types &raquo; type_m1_medium</strong></li>" +
"<li>" + i18n._("Key Name:") + "<strong>keys &raquo; key_testing</strong></li>" +
"<li>" + i18n._("Region:") + "<strong>regions &raquo; us-east-1</strong></li>" +
"<li>" + i18n._("Security Group:") + "<strong>security_groups &raquo; security_group_default</strong></li>" +
"<li>" + i18n._("Tags:") + "<strong>tags &raquo; tag_Name &raquo; tag_Name_host1</strong></li>" +
"<li>" + i18n._("VPC ID:") + "<strong>vpcs &raquo; vpc-5ca1ab1e</strong></li>" +
"<li>" + i18n._("Tag None:") + "<strong>tags &raquo; tag_none</strong></li>" +
"</ul><p>" + i18n._("If blank, all groups above are created except") + "<em>" + i18n._("Instance ID") + "</em>.</p>";
$scope.instanceFilterPopOver = "<p>" + i18n._("Provide a comma-separated list of filter expressions. ") +
i18n._("Hosts are imported to ") + $rootScope.BRAND_NAME + i18n._(" when ") + "<em>" + i18n._("ANY") + "</em>" + i18n._(" of the filters match.") + "</p>" +
i18n._("Limit to hosts having a tag:") + "<br />\n" +
"<blockquote>tag-key=TowerManaged</blockquote>\n" +
i18n._("Limit to hosts using either key pair:") + "<br />\n" +
"<blockquote>key-name=staging, key-name=production</blockquote>\n" +
i18n._("Limit to hosts where the Name tag begins with ") + "<em>" + i18n._("test") + "</em>:<br />\n" +
"<blockquote>tag:Name=test*</blockquote>\n" +
"<p>" + i18n._("View the ") + "<a href=\"http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html\" target=\"_blank\">" + i18n._("Describe Instances documentation") + "</a> " +
i18n._("for a complete list of supported filters.") + "</p>";
}
if($scope && $scope.source && $scope.source === 'vmware' || $scope && $scope.source && $scope.source.value && $scope.source.value === 'vmware'){
if( _.get($scope, 'source') === 'vmware' || _.get($scope.source, 'value') === 'vmware') {
add_new = true;
$scope.group_by_choices = [];
$scope.group_by = $scope.group_by_choices;
$scope.groupByPopOver = `Specify which groups to create automatically.
Group names will be created similar to the options selected.
If blank, all groups above are created. Refer to Ansible Tower documentation for more detail.`;
$scope.instanceFilterPopOver = `Provide a comma-separated list of filter expressions.
Hosts are imported when <em>ANY</em> of the filters match.
Refer to Ansible Tower documentation for more detail.`;
}
$scope.groupByPopOver = i18n._("Specify which groups to create automatically. Group names will be created similar to the options selected. If blank, all groups above are created. Refer to Ansible Tower documentation for more detail.");
$scope.instanceFilterPopOver = i18n._("Provide a comma-separated list of filter expressions. Hosts are imported when all of the filters match. Refer to Ansible Tower documentation for more detail.");
}
CreateSelect2({
element: '#inventory_source_group_by',
multiple: true,

View File

@ -8,12 +8,12 @@ export default ['$state', '$stateParams', '$scope', 'ParseVariableString',
'rbacUiControlService', 'ToJSON', 'ParseTypeChange', 'GroupsService',
'GetChoices', 'GetBasePath', 'CreateSelect2', 'GetSourceTypeOptions',
'inventorySourceData', 'SourcesService', 'inventoryData', 'inventorySourcesOptions', 'Empty',
'Wait', 'Rest', 'Alert', '$rootScope',
'Wait', 'Rest', 'Alert', '$rootScope', 'i18n',
function($state, $stateParams, $scope, ParseVariableString,
rbacUiControlService, ToJSON,ParseTypeChange, GroupsService,
GetChoices, GetBasePath, CreateSelect2, GetSourceTypeOptions,
inventorySourceData, SourcesService, inventoryData, inventorySourcesOptions, Empty,
Wait, Rest, Alert, $rootScope) {
Wait, Rest, Alert, $rootScope, i18n) {
function init() {
$scope.projectBasePath = GetBasePath('projects') + '?not__status=never updated';
@ -243,46 +243,45 @@ export default ['$state', '$stateParams', '$scope', 'ParseVariableString',
function initGroupBySelect(){
let add_new = false;
if($scope && $scope.source && $scope.source === 'ec2' || $scope && $scope.source && $scope.source.value && $scope.source.value === 'ec2'){
if( _.get($scope, 'source') === 'ec2' || _.get($scope.source, 'value') === 'ec2') {
$scope.group_by_choices = $scope.ec2_group_by;
let group_by = inventorySourceData.group_by.split(',');
$scope.group_by = _.map(group_by, (item) => _.find($scope.ec2_group_by, { value: item }));
$scope.groupByPopOver = "<p>Select which groups to create automatically. " +
$rootScope.BRAND_NAME + " will create group names similar to the following examples based on the options selected:</p><ul>" +
"<li>Availability Zone: <strong>zones &raquo; us-east-1b</strong></li>" +
"<li>Image ID: <strong>images &raquo; ami-b007ab1e</strong></li>" +
"<li>Instance ID: <strong>instances &raquo; i-ca11ab1e</strong></li>" +
"<li>Instance Type: <strong>types &raquo; type_m1_medium</strong></li>" +
"<li>Key Name: <strong>keys &raquo; key_testing</strong></li>" +
"<li>Region: <strong>regions &raquo; us-east-1</strong></li>" +
"<li>Security Group: <strong>security_groups &raquo; security_group_default</strong></li>" +
"<li>Tags: <strong>tags &raquo; tag_Name &raquo; tag_Name_host1</strong></li>" +
"<li>VPC ID: <strong>vpcs &raquo; vpc-5ca1ab1e</strong></li>" +
"<li>Tag None: <strong>tags &raquo; tag_none</strong></li>" +
"</ul><p>If blank, all groups above are created except <em>Instance ID</em>.</p>";
$scope.instanceFilterPopOver = "<p>Provide a comma-separated list of filter expressions. " +
"Hosts are imported to " + $rootScope.BRAND_NAME + " when <em>ANY</em> of the filters match.</p>" +
"Limit to hosts having a tag:<br />\n" +
$scope.groupByPopOver = "<p>" + i18n._("Select which groups to create automatically. ") +
$rootScope.BRAND_NAME + i18n._(" will create group names similar to the following examples based on the options selected:") + "</p><ul>" +
"<li>" + i18n._("Availability Zone:") + "<strong>zones &raquo; us-east-1b</strong></li>" +
"<li>" + i18n._("Image ID:") + "<strong>images &raquo; ami-b007ab1e</strong></li>" +
"<li>" + i18n._("Instance ID:") + "<strong>instances &raquo; i-ca11ab1e</strong></li>" +
"<li>" + i18n._("Instance Type:") + "<strong>types &raquo; type_m1_medium</strong></li>" +
"<li>" + i18n._("Key Name:") + "<strong>keys &raquo; key_testing</strong></li>" +
"<li>" + i18n._("Region:") + "<strong>regions &raquo; us-east-1</strong></li>" +
"<li>" + i18n._("Security Group:") + "<strong>security_groups &raquo; security_group_default</strong></li>" +
"<li>" + i18n._("Tags:") + "<strong>tags &raquo; tag_Name &raquo; tag_Name_host1</strong></li>" +
"<li>" + i18n._("VPC ID:") + "<strong>vpcs &raquo; vpc-5ca1ab1e</strong></li>" +
"<li>" + i18n._("Tag None:") + "<strong>tags &raquo; tag_none</strong></li>" +
"</ul><p>" + i18n._("If blank, all groups above are created except") + "<em>" + i18n._("Instance ID") + "</em>.</p>";
$scope.instanceFilterPopOver = "<p>" + i18n._("Provide a comma-separated list of filter expressions. ") +
i18n._("Hosts are imported to ") + $rootScope.BRAND_NAME + i18n._(" when ") + "<em>" + i18n._("ANY") + "</em>" + i18n._(" of the filters match.") + "</p>" +
i18n._("Limit to hosts having a tag:") + "<br />\n" +
"<blockquote>tag-key=TowerManaged</blockquote>\n" +
"Limit to hosts using either key pair:<br />\n" +
i18n._("Limit to hosts using either key pair:") + "<br />\n" +
"<blockquote>key-name=staging, key-name=production</blockquote>\n" +
"Limit to hosts where the Name tag begins with <em>test</em>:<br />\n" +
i18n._("Limit to hosts where the Name tag begins with ") + "<em>" + i18n._("test") + "</em>:<br />\n" +
"<blockquote>tag:Name=test*</blockquote>\n" +
"<p>View the <a href=\"http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html\" target=\"_blank\">Describe Instances documentation</a> " +
"for a complete list of supported filters.</p>";
"<p>" + i18n._("View the ") + "<a href=\"http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html\" target=\"_blank\">" + i18n._("Describe Instances documentation") + "</a> " +
i18n._("for a complete list of supported filters.") + "</p>";
}
if($scope && $scope.source && $scope.source === 'vmware' || $scope && $scope.source && $scope.source.value && $scope.source.value === 'vmware'){
if( _.get($scope, 'source') === 'vmware' || _.get($scope.source, 'value') === 'vmware') {
add_new = true;
$scope.group_by_choices = (inventorySourceData.group_by) ? inventorySourceData.group_by.split(',')
.map((i) => ({name: i, label: i, value: i})) : [];
$scope.group_by = $scope.group_by_choices;
$scope.groupByPopOver = `Specify which groups to create automatically.
Group names will be created similar to the options selected.
If blank, all groups above are created. Refer to Ansible Tower documentation for more detail.`;
$scope.instanceFilterPopOver = `Provide a comma-separated list of filter expressions.
Hosts are imported when <em>ANY</em> of the filters match.
Refer to Ansible Tower documentation for more detail.`;
$scope.groupByPopOver = i18n._(`Specify which groups to create automatically. Group names will be created similar to the options selected. If blank, all groups above are created. Refer to Ansible Tower documentation for more detail.`);
$scope.instanceFilterPopOver = i18n._(`Provide a comma-separated list of filter expressions. Hosts are imported when all of the filters match. Refer to Ansible Tower documentation for more detail.`);
}
CreateSelect2({
element: '#inventory_source_group_by',

View File

@ -75,7 +75,7 @@ return {
type: 'lookup',
list: 'CredentialList',
basePath: 'credentials',
ngShow: "source && source.value !== '' && source.value !== 'custom'",
ngShow: "source && source.value !== ''",
sourceModel: 'credential',
sourceField: 'name',
ngClick: 'lookupCredential()',
@ -109,7 +109,7 @@ return {
inventory_file: {
label: i18n._('Inventory File'),
type:'select',
defaultText: 'Choose an inventory file',
defaultText: i18n._('Choose an inventory file'),
ngOptions: 'file for file in inventory_files track by file',
ngShow: "source && source.value === 'scm'",
ngDisabled: "!(inventory_source_obj.summary_fields.user_capabilities.edit || canAdd) || disableInventoryFileBecausePermissionDenied",
@ -119,7 +119,8 @@ return {
init: "true"
},
column: 1,
awPopOver: "<p>" + i18n._("Select the inventory file to be synced by this source. You can select from the dropdown or enter a file within the input.") + "</p>",
awPopOver: "<p>" + i18n._("Select the inventory file to be synced by this source. " +
"You can select from the dropdown or enter a file within the input.") + "</p>",
dataTitle: i18n._('Inventory File'),
dataPlacement: 'right',
dataContainer: "body",
@ -141,10 +142,10 @@ return {
subForm: 'sourceSubForm'
},
instance_filters: {
label: i18n._('Instance Filters'),
label: i18n._("Instance Filters"),
type: 'text',
ngShow: "source && (source.value == 'ec2' || source.value == 'vmware')",
dataTitle: 'Instance Filters',
dataTitle: i18n._('Instance Filters'),
dataPlacement: 'right',
awPopOverWatch: 'instanceFilterPopOver',
awPopOver: '{{ instanceFilterPopOver }}',
@ -158,7 +159,7 @@ return {
ngShow: "source && (source.value == 'ec2' || source.value == 'vmware')",
ngOptions: 'source.label for source in group_by_choices track by source.value',
multiSelect: true,
dataTitle: 'Only Group By',
dataTitle: i18n._("Only Group By"),
dataPlacement: 'right',
awPopOverWatch: 'groupByPopOver',
awPopOver: '{{ groupByPopOver }}',
@ -192,14 +193,14 @@ return {
parseTypeName: 'envParseType',
dataTitle: i18n._("Environment Variables"),
dataPlacement: 'right',
awPopOver: "<p>Provide environment variables to pass to the custom inventory script.</p>" +
"<p>Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.</p>" +
"JSON:<br />\n" +
awPopOver: "<p>" + i18n._("Provide environment variables to pass to the custom inventory script.") + "</p>" +
"<p>" + i18n._("Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.") + "</p>" +
i18n._("JSON:") + "<br />\n" +
"<blockquote>{<br />&emsp;\"somevar\": \"somevalue\",<br />&emsp;\"password\": \"magic\"<br /> }</blockquote>\n" +
"YAML:<br />\n" +
i18n._("YAML:") + "<br />\n" +
"<blockquote>---<br />somevar: somevalue<br />password: magic<br /></blockquote>\n" +
'<p>View JSON examples at <a href="http://www.json.org" target="_blank">www.json.org</a></p>' +
'<p>View YAML examples at <a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a></p>',
"<p>" + i18n._("View JSON examples at ") + '<a href="http://www.json.org" target="_blank">www.json.org</a></p>' +
"<p>" + i18n._("View YAML examples at ") + '<a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a></p>',
dataContainer: 'body',
subForm: 'sourceSubForm'
},
@ -214,16 +215,16 @@ return {
parseTypeName: 'envParseType',
dataTitle: i18n._("Source Variables"),
dataPlacement: 'right',
awPopOver: "<p>Override variables found in ec2.ini and used by the inventory update script. For a detailed description of these variables " +
awPopOver: "<p>" + i18n._("Override variables found in ec2.ini and used by the inventory update script. For a detailed description of these variables ") +
"<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.ini\" target=\"_blank\">" +
"view ec2.ini in the Ansible github repo.</a></p>" +
"<p>Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.</p>" +
"JSON:<br />\n" +
i18n._("view ec2.ini in the Ansible github repo.") + "</a></p>" +
"<p>" + i18n._("Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.") + "</p>" +
i18n._("JSON:") + "<br />\n" +
"<blockquote>{<br />&emsp;\"somevar\": \"somevalue\",<br />&emsp;\"password\": \"magic\"<br /> }</blockquote>\n" +
"YAML:<br />\n" +
i18n._("YAML:") + "<br />\n" +
"<blockquote>---<br />somevar: somevalue<br />password: magic<br /></blockquote>\n" +
'<p>View JSON examples at <a href="http://www.json.org" target="_blank">www.json.org</a></p>' +
'<p>View YAML examples at <a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a></p>',
"<p>" + i18n._("View JSON examples at ") + '<a href="http://www.json.org" target="_blank">www.json.org</a></p>' +
"<p>" + i18n._("View YAML examples at ") + '<a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a></p>',
dataContainer: 'body',
subForm: 'sourceSubForm'
},
@ -236,18 +237,18 @@ return {
rows: 6,
'default': '---',
parseTypeName: 'envParseType',
dataTitle: "Source Variables",
dataTitle: i18n._("Source Variables"),
dataPlacement: 'right',
awPopOver: "<p>Override variables found in vmware.ini and used by the inventory update script. For a detailed description of these variables " +
awPopOver: "<p>" + i18n._("Override variables found in vmware.ini and used by the inventory update script. For a detailed description of these variables ") +
"<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/vmware_inventory.ini\" target=\"_blank\">" +
"view vmware_inventory.ini in the Ansible github repo.</a></p>" +
"<p>Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.</p>" +
"JSON:<br />\n" +
i18n._("view vmware_inventory.ini in the Ansible github repo.") + "</a></p>" +
"<p>" + i18n._("Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.") + "</p>" +
i18n._("JSON:") + "<br />\n" +
"<blockquote>{<br />&emsp;\"somevar\": \"somevalue\",<br />&emsp;\"password\": \"magic\"<br /> }</blockquote>\n" +
"YAML:<br />\n" +
i18n._("YAML:") + "<br />\n" +
"<blockquote>---<br />somevar: somevalue<br />password: magic<br /></blockquote>\n" +
'<p>View JSON examples at <a href="http://www.json.org" target="_blank">www.json.org</a></p>' +
'<p>View YAML examples at <a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a></p>',
"<p>" + i18n._("View JSON examples at ") + '<a href="http://www.json.org" target="_blank">www.json.org</a></p>' +
"<p>" + i18n._("View YAML examples at ") + '<a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a></p>',
dataContainer: 'body',
subForm: 'sourceSubForm'
},
@ -260,18 +261,11 @@ return {
rows: 6,
'default': '---',
parseTypeName: 'envParseType',
dataTitle: "Source Variables",
dataTitle: i18n._("Source Variables"),
dataPlacement: 'right',
awPopOver: "<p>Override variables found in openstack.yml and used by the inventory update script. For an example variable configuration " +
"<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/openstack.yml\" target=\"_blank\">" +
"view openstack.yml in the Ansible github repo.</a></p>" +
"<p>Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.</p>" +
"JSON:<br />\n" +
"<blockquote>{<br />&emsp;\"somevar\": \"somevalue\",<br />&emsp;\"password\": \"magic\"<br /> }</blockquote>\n" +
"YAML:<br />\n" +
"<blockquote>---<br />somevar: somevalue<br />password: magic<br /></blockquote>\n" +
'<p>View JSON examples at <a href="http://www.json.org" target="_blank">www.json.org</a></p>' +
'<p>View YAML examples at <a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a></p>',
awPopOver: i18n._(`Override variables found in openstack.yml and used by the inventory update script. For an example variable configuration
<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/openstack.yml\" target=\"_blank\">
view openstack.yml in the Ansible github repo.</a> Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.`),
dataContainer: 'body',
subForm: 'sourceSubForm'
},
@ -284,18 +278,11 @@ return {
rows: 6,
'default': '---',
parseTypeName: 'envParseType',
dataTitle: "Source Variables",
dataTitle: i18n._("Source Variables"),
dataPlacement: 'right',
awPopOver: "<p>Override variables found in openstack.yml and used by the inventory update script. For an example variable configuration " +
"<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/cloudforms.ini\" target=\"_blank\">" +
"view openstack.yml in the Ansible github repo.</a></p>" +
"<p>Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.</p>" +
"JSON:<br />\n" +
"<blockquote>{<br />&emsp;\"somevar\": \"somevalue\",<br />&emsp;\"password\": \"magic\"<br /> }</blockquote>\n" +
"YAML:<br />\n" +
"<blockquote>---<br />somevar: somevalue<br />password: magic<br /></blockquote>\n" +
'<p>View JSON examples at <a href="http://www.json.org" target="_blank">www.json.org</a></p>' +
'<p>View YAML examples at <a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a></p>',
awPopOver: i18n._(`Override variables found in cloudforms.ini and used by the inventory update script. For an example variable configuration
<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/cloudforms.ini\" target=\"_blank\">
view cloudforms.ini in the Ansible github repo.</a> Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.`),
dataContainer: 'body',
subForm: 'sourceSubForm'
},
@ -308,18 +295,11 @@ return {
rows: 6,
'default': '---',
parseTypeName: 'envParseType',
dataTitle: "Source Variables",
dataTitle: i18n._("Source Variables"),
dataPlacement: 'right',
awPopOver: "<p>Override variables found in openstack.yml and used by the inventory update script. For an example variable configuration " +
"<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/foreman.ini\" target=\"_blank\">" +
"view openstack.yml in the Ansible github repo.</a></p>" +
"<p>Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.</p>" +
"JSON:<br />\n" +
"<blockquote>{<br />&emsp;\"somevar\": \"somevalue\",<br />&emsp;\"password\": \"magic\"<br /> }</blockquote>\n" +
"YAML:<br />\n" +
"<blockquote>---<br />somevar: somevalue<br />password: magic<br /></blockquote>\n" +
'<p>View JSON examples at <a href="http://www.json.org" target="_blank">www.json.org</a></p>' +
'<p>View YAML examples at <a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a></p>',
awPopOver: i18n._(`Override variables found in foreman.ini and used by the inventory update script. For an example variable configuration
<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/foreman.ini\" target=\"_blank\">
view foreman.ini in the Ansible github repo.</a> Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.`),
dataContainer: 'body',
subForm: 'sourceSubForm'
},
@ -348,9 +328,8 @@ return {
label: i18n._('Overwrite'),
type: 'checkbox',
ngShow: "source.value !== '' && source.value !== null",
awPopOver: '<p>If checked, all child groups and hosts not found on the external source will be deleted from ' +
'the local inventory.</p><p>When not checked, local child hosts and groups not found on the external source will ' +
'remain untouched by the inventory update process.</p>',
awPopOver: "<p>" + i18n._("If checked, all child groups and hosts not found on the external source will be deleted from the local inventory.") + '</p><p>' +
i18n._("When not checked, local child hosts and groups not found on the external source will remain untouched by the inventory update process.") + "</p>",
dataTitle: i18n._('Overwrite'),
dataContainer: 'body',
dataPlacement: 'right',
@ -361,9 +340,8 @@ return {
label: i18n._('Overwrite Variables'),
type: 'checkbox',
ngShow: "source.value !== '' && source.value !== null",
awPopOver: '<p>If checked, all variables for child groups and hosts will be removed and replaced by those ' +
'found on the external source.</p><p>When not checked, a merge will be performed, combining local variables with ' +
'those found on the external source.</p>',
awPopOver: "<p>" + i18n._("If checked, all variables for child groups and hosts will be removed and replaced by those found on the external source.") + '</p><p>' +
i18n._("When not checked, a merge will be performed, combining local variables with those found on the external source.") + "</p>",
dataTitle: i18n._('Overwrite Variables'),
dataContainer: 'body',
dataPlacement: 'right',
@ -374,8 +352,8 @@ return {
label: i18n._('Update on Launch'),
type: 'checkbox',
ngShow: "source.value !== '' && source.value !== null",
awPopOver: '<p>Each time a job runs using this inventory, refresh the inventory from the selected source before ' +
'executing job tasks.</p>',
awPopOver: "<p>" + i18n._("Each time a job runs using this inventory, " +
"refresh the inventory from the selected source before executing job tasks.") + "</p>",
dataTitle: i18n._('Update on Launch'),
dataContainer: 'body',
dataPlacement: 'right',
@ -386,9 +364,9 @@ return {
label: i18n._('Update on Project Change'),
type: 'checkbox',
ngShow: "source.value === 'scm'",
awPopOver: '<p>After every project update where the SCM revision changes, refresh the inventory ' +
'from the selected source before executing job tasks. This is intended for ' +
'static content, like the Ansible inventory .ini file format.</p>',
awPopOver: "<p>" + i18n._("After every project update where the SCM revision changes, " +
"refresh the inventory from the selected source before executing job tasks. " +
"This is intended for static content, like the Ansible inventory .ini file format.") + "</p>",
dataTitle: i18n._('Update on Project Update'),
dataContainer: 'body',
dataPlacement: 'right',
@ -406,9 +384,9 @@ return {
ngShow: "source && source.value !== '' && update_on_launch",
spinner: true,
"default": 0,
awPopOver: '<p>Time in seconds to consider an inventory sync to be current. During job runs and callbacks the task system will ' +
'evaluate the timestamp of the latest sync. If it is older than Cache Timeout, it is not considered current, ' +
'and a new inventory sync will be performed.</p>',
awPopOver: "<p>" + i18n._("Time in seconds to consider an inventory sync to be current. " +
"During job runs and callbacks the task system will evaluate the timestamp of the latest sync. " +
"If it is older than Cache Timeout, it is not considered current, and a new inventory sync will be performed.") + "</p>",
dataTitle: i18n._('Cache Timeout'),
dataPlacement: 'right',
dataContainer: "body",

View File

@ -54,10 +54,7 @@ function SmartInventoryAdd($scope, $location,
data.variables = ToJSON($scope.parseType, $scope.smartinventory_variables, true);
let decodedHostFilter = decodeURIComponent($scope.smart_hosts.host_filter);
decodedHostFilter = decodedHostFilter.replace(/__icontains_DEFAULT/g, "__icontains");
decodedHostFilter = decodedHostFilter.replace(/__search_DEFAULT/g, "__search");
data.host_filter = decodedHostFilter;
data.host_filter = decodeURIComponent($scope.smart_hosts.host_filter);
data.kind = "smart";

View File

@ -2,7 +2,8 @@ export default ['templateUrl', function(templateUrl) {
return {
restrict: 'E',
scope: {
hostFilter: '='
hostFilter: '=',
organization: '='
},
templateUrl: templateUrl('inventories-hosts/inventories/smart-inventory/smart-inventory-host-filter/host-filter-modal/host-filter-modal'),
link: function(scope, element) {
@ -27,12 +28,14 @@ export default ['templateUrl', function(templateUrl) {
$scope.host_default_params = {
order_by: 'name',
page_size: 5
page_size: 5,
inventory__organization: $scope.organization
};
$scope.host_queryset = _.merge({
order_by: 'name',
page_size: 5
page_size: 5,
inventory__organization: $scope.organization
}, $scope.hostFilter ? $scope.hostFilter : {});
// Fire off the initial search
@ -44,11 +47,13 @@ export default ['templateUrl', function(templateUrl) {
let hostList = _.cloneDeep(HostsList);
delete hostList.fields.toggleHost;
delete hostList.fields.active_failures;
delete hostList.fields.inventory;
delete hostList.fields.name.ngClick;
hostList.fields.name.class += " HostFilterModal-tableRow";
hostList.fields.name.noLink = true;
hostList.well = false;
delete hostList.fields.inventory.ngClick;
hostList.fields.inventory.ngBind = 'host.summary_fields.inventory.name';
hostList.emptyListText = 'You must have access to at least one host in order to create a smart inventory host filter';
let html = GenerateList.build({
list: hostList,
input_type: 'host-filter-modal-body',
@ -79,7 +84,6 @@ export default ['templateUrl', function(templateUrl) {
$scope.destroyModal();
};
}]
};
}];

View File

@ -8,7 +8,14 @@ export default ['$scope', 'QuerySet', 'InventoryHostsStrings',
function($scope, qs, InventoryHostsStrings) {
$scope.hostFilterTags = [];
$scope.filterTooltip = InventoryHostsStrings.get('smartinventories.TOOLTIP');
$scope.$watch('organization', function(){
if($scope.hasEditPermissions) {
$scope.filterTooltip = $scope.organization ? InventoryHostsStrings.get('smartinventories.hostfilter.INSTRUCTIONS') : InventoryHostsStrings.get('smartinventories.hostfilter.MISSING_ORG');
}
else {
$scope.filterTooltip = InventoryHostsStrings.get('smartinventories.hostfilter.MISSING_PERMISSIONS');
}
});
$scope.$watch('hostFilter', function(){
$scope.hostFilterTags = [];
@ -21,10 +28,7 @@ export default ['$scope', 'QuerySet', 'InventoryHostsStrings',
$.each(searchParam, function(index, param) {
let paramParts = decodeURIComponent(param).split(/=(.+)/);
paramParts[0] = paramParts[0].replace(/__icontains(_DEFAULT)?/g, "");
paramParts[0] = paramParts[0].replace(/__search(_DEFAULT)?/g, "");
let reconstructedSearchString = qs.decodeParam(paramParts[1], paramParts[0]);
$scope.hostFilterTags.push(reconstructedSearchString);
$scope.hostFilterTags.push(qs.decodeParam(paramParts[1], paramParts[0]));
});
$scope.hostFilterTags = $scope.hostFilterTags.concat(qs.stripDefaultParams(hostFilterCopy));

View File

@ -10,14 +10,16 @@ export default ['templateUrl', '$compile',
function(templateUrl, $compile) {
return {
scope: {
hostFilter: '='
hostFilter: '=',
hasEditPermissions: '=',
organization: '='
},
restrict: 'E',
templateUrl: templateUrl('inventories-hosts/inventories/smart-inventory/smart-inventory-host-filter/smart-inventory-host-filter'),
controller: smartInventoryHostFilterController,
link: function(scope) {
scope.openHostFilterModal = function() {
$('#content-container').append($compile('<host-filter-modal host-filter="hostFilter"></host-filter-modal>')(scope));
$('#content-container').append($compile('<host-filter-modal host-filter="hostFilter" organization="organization"></host-filter-modal>')(scope));
};
}
};

View File

@ -1,10 +1,10 @@
<div class="input-group Form-mixedInputGroup">
<span class="input-group-btn Form-variableHeightButtonGroup">
<button type="button" class="Form-lookupButton Form-lookupButton--variableHeight btn btn-default" ng-click="openHostFilterModal()">
<button type="button" class="Form-lookupButton Form-lookupButton--variableHeight btn btn-default" ng-click="openHostFilterModal()" ng-disabled="!hasEditPermissions || !organization">
<i class="fa fa-search"></i>
</button>
</span>
<span class="form-control Form-textInput Form-textInput--variableHeight input-medium lookup LabelList-lookupTags LabelList-lookupTags--disabled" aw-tool-tip="{{::filterTooltip}}" data-placement="top">
<span class="form-control Form-textInput Form-textInput--variableHeight input-medium lookup LabelList-lookupTags LabelList-lookupTags--disabled" aw-tool-tip="{{filterTooltip}}" data-tip-watch="filterTooltip" data-placement="top">
<span class="LabelList-tag" ng-repeat="tag in hostFilterTags">
<span class="LabelList-name">{{tag}}</span>
</span>

View File

@ -34,6 +34,11 @@ export default {
let list = _.cloneDeep(RelatedHostsListDefinition);
list.basePath = GetBasePath('inventory') + $stateParams.smartinventory_id + '/hosts';
delete list.actions.create;
delete list.fields.groups;
delete list.fieldActions.delete;
delete list.fieldActions.edit;
delete list.fieldActions.view.ngShow;
list.fields.name.columnClass = 'col-lg-8 col-md-11 col-sm-8 col-xs-7';
return list;
}],
Dataset: ['ListDefinition', 'QuerySet', '$stateParams', 'GetBasePath', '$interpolate', '$rootScope',

View File

@ -54,18 +54,15 @@ export default ['i18n', 'InventoryCompletedJobsList', function(i18n, InventoryCo
list: 'OrganizationList',
sourceModel: 'organization',
sourceField: 'name',
awRequiredWhen: {
reqExpression: "organizationrequired",
init: "true"
},
required: true,
ngDisabled: '!(inventory_obj.summary_fields.user_capabilities.edit || canAdd) || !canEditOrg',
awLookupWhen: '(inventory_obj.summary_fields.user_capabilities.edit || canAdd) && canEditOrg'
},
smart_hosts: {
label: i18n._('Smart Host Filter'),
type: 'custom',
control: '<smart-inventory-host-filter host-filter="smart_hosts"></smart-inventory-host-filter>',
awPopOver: "<p>" + i18n._("Populate the hosts for this inventory by using a search filter.") + "</p><p>" + i18n._("Example: ansible_facts.ansible_distribution:\"RHEL\"") + "</p><p>" + i18n._("Refer to the Ansible Tower documentation for further syntax and examples.") + "</p>",
control: '<smart-inventory-host-filter host-filter="smart_hosts" has-edit-permissions="inventory_obj.summary_fields.user_capabilities.edit || canAdd" organization="organization"></smart-inventory-host-filter>',
awPopOver: "<p>" + i18n._("Populate the hosts for this inventory by using a search filter.") + "</p><p>" + i18n._("Example: ansible_facts.ansible_distribution:\"RedHat\"") + "</p><p>" + i18n._("Refer to the Ansible Tower documentation for further syntax and examples.") + "</p>",
dataTitle: i18n._('Smart Host Filter'),
dataPlacement: 'right',
dataContainer: 'body',

View File

@ -61,10 +61,7 @@ function(i18n, InventoryCompletedJobsList) {
list: 'OrganizationList',
sourceModel: 'organization',
sourceField: 'name',
awRequiredWhen: {
reqExpression: "organizationrequired",
init: "true"
},
required: true,
ngDisabled: '!(inventory_obj.summary_fields.user_capabilities.edit || canAdd) || !canEditOrg',
awLookupWhen: '(inventory_obj.summary_fields.user_capabilities.edit || canAdd) && canEditOrg'
},
@ -77,12 +74,13 @@ function(i18n, InventoryCompletedJobsList) {
sourceField: 'name',
search: {
credential_type: '13' //insights
}
},
ngDisabled: '!(inventory_obj.summary_fields.user_capabilities.edit || canAdd) || !canEditOrg',
},
instance_groups: {
label: i18n._('Instance Groups'),
type: 'custom',
awPopOver: "<p>" + i18n._("Select the Instance Groups for this Inventory to run on.") + "</p>",
awPopOver: i18n._('Select the Instance Groups for this Inventory to run on. Refer to the Ansible Tower documentation for more detail.'),
dataTitle: i18n._('Instance Groups'),
dataPlacement: 'right',
dataContainer: 'body',
@ -95,14 +93,8 @@ function(i18n, InventoryCompletedJobsList) {
class: 'Form-formGroup--fullWidth',
rows: 6,
"default": "---",
awPopOver: "<p>" + i18n._("Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two.") + "</p>" +
"JSON:<br />\n" +
"<blockquote>{<br />&emsp;\"somevar\": \"somevalue\",<br />&emsp;\"password\": \"magic\"<br /> }</blockquote>\n" +
"YAML:<br />\n" +
"<blockquote>---<br />somevar: somevalue<br />password: magic<br /></blockquote>\n" +
'<p>' + i18n.sprintf(i18n._('View JSON examples at %s'), '<a href="http://www.json.org" target="_blank">www.json.org</a>') + '</p>' +
'<p>' + i18n.sprintf(i18n._('View YAML examples at %s'), '<a href="http://docs.ansible.com/YAMLSyntax.html" target="_blank">docs.ansible.com</a>') + '</p>',
dataTitle: i18n._('Inventory Variables'),
awPopOver: i18n._('Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.'),
dataTitle: i18n._('Variables'),
dataPlacement: 'right',
dataContainer: 'body',
ngDisabled: '!(inventory_obj.summary_fields.user_capabilities.edit || canAdd)' // TODO: get working

Some files were not shown because too many files have changed in this diff Show More