Merge pull request #1233 from anoek/active-flag-removal

Active flag removal
This commit is contained in:
Wayne Witzel III 2016-03-15 11:52:23 -04:00
commit dd647b2cad
44 changed files with 527 additions and 902 deletions

View File

@ -26,19 +26,6 @@ class MongoFilterBackend(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
return queryset
class ActiveOnlyBackend(BaseFilterBackend):
'''
Filter to show only objects where is_active/active is True.
'''
def filter_queryset(self, request, queryset, view):
for field in queryset.model._meta.fields:
if field.name == 'is_active':
queryset = queryset.filter(is_active=True)
elif field.name == 'active':
queryset = queryset.filter(active=True)
return queryset
class TypeFilterBackend(BaseFilterBackend):
'''
Filter on type field now returned with all objects.
@ -166,12 +153,12 @@ class FieldLookupBackend(BaseFilterBackend):
for key, values in request.query_params.lists():
if key in self.RESERVED_NAMES:
continue
# HACK: Make job event filtering by host name mostly work even
# when not capturing job event hosts M2M.
if queryset.model._meta.object_name == 'JobEvent' and key.startswith('hosts__name'):
key = key.replace('hosts__name', 'or__host__name')
or_filters.append((False, 'host__name__isnull', True))
or_filters.append((False, 'host__name__isnull', True))
# Custom __int filter suffix (internal use only).
q_int = False

View File

@ -7,7 +7,6 @@ import logging
import time
# Django
from django.http import Http404
from django.conf import settings
from django.db import connection
from django.shortcuts import get_object_or_404
@ -415,9 +414,7 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
raise PermissionDenied()
if parent_key:
# sub object has a ForeignKey to the parent, so we can't remove it
# from the set, only mark it as inactive.
sub.mark_inactive()
sub.delete()
else:
relationship.remove(sub)
@ -457,17 +454,9 @@ class RetrieveDestroyAPIView(RetrieveAPIView, generics.RetrieveDestroyAPIView):
def destroy(self, request, *args, **kwargs):
# somewhat lame that delete has to call its own permissions check
obj = self.get_object()
# FIXME: Why isn't the active check being caught earlier by RBAC?
if not getattr(obj, 'active', True):
raise Http404()
if not getattr(obj, 'is_active', True):
raise Http404()
if not request.user.can_access(self.model, 'delete', obj):
raise PermissionDenied()
if hasattr(obj, 'mark_inactive'):
obj.mark_inactive()
else:
raise NotImplementedError('destroy() not implemented yet for %s' % obj)
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, RetrieveDestroyAPIView):

View File

@ -103,11 +103,7 @@ class ModelAccessPermission(permissions.BasePermission):
if not request.user or request.user.is_anonymous():
return False
# Don't allow inactive users (and respond with a 403).
if not request.user.is_active:
raise PermissionDenied('your account is inactive')
# Always allow superusers (as long as they are active).
# Always allow superusers
if getattr(view, 'always_allow_superuser', True) and request.user.is_superuser:
return True
@ -161,8 +157,6 @@ class JobTemplateCallbackPermission(ModelAccessPermission):
raise PermissionDenied()
elif not host_config_key:
raise PermissionDenied()
elif obj and not obj.active:
raise PermissionDenied()
elif obj and obj.host_config_key != host_config_key:
raise PermissionDenied()
else:
@ -182,7 +176,7 @@ class TaskPermission(ModelAccessPermission):
# Verify that the ID present in the auth token is for a valid, running
# unified job.
try:
unified_job = UnifiedJob.objects.get(active=True, status='running',
unified_job = UnifiedJob.objects.get(status='running',
pk=int(request.auth.split('-')[0]))
except (UnifiedJob.DoesNotExist, TypeError):
return False

View File

@ -252,7 +252,6 @@ class BaseSerializer(serializers.ModelSerializer):
# make certain fields read only
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
active = serializers.SerializerMethodField()
def get_type(self, obj):
@ -288,9 +287,9 @@ class BaseSerializer(serializers.ModelSerializer):
def get_related(self, obj):
res = OrderedDict()
if getattr(obj, 'created_by', None) and obj.created_by.is_active:
if getattr(obj, 'created_by', None):
res['created_by'] = reverse('api:user_detail', args=(obj.created_by.pk,))
if getattr(obj, 'modified_by', None) and obj.modified_by.is_active:
if getattr(obj, 'modified_by', None):
res['modified_by'] = reverse('api:user_detail', args=(obj.modified_by.pk,))
return res
@ -315,10 +314,6 @@ class BaseSerializer(serializers.ModelSerializer):
continue
if fkval == obj:
continue
if hasattr(fkval, 'active') and not fkval.active:
continue
if hasattr(fkval, 'is_active') and not fkval.is_active:
continue
summary_fields[fk] = OrderedDict()
for field in related_fields:
fval = getattr(fkval, field, None)
@ -334,11 +329,11 @@ class BaseSerializer(serializers.ModelSerializer):
# Can be raised by the reverse accessor for a OneToOneField.
except ObjectDoesNotExist:
pass
if getattr(obj, 'created_by', None) and obj.created_by.is_active:
if getattr(obj, 'created_by', None):
summary_fields['created_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['created_by'][field] = getattr(obj.created_by, field)
if getattr(obj, 'modified_by', None) and obj.modified_by.is_active:
if getattr(obj, 'modified_by', None):
summary_fields['modified_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['modified_by'][field] = getattr(obj.modified_by, field)
@ -378,14 +373,6 @@ class BaseSerializer(serializers.ModelSerializer):
else:
return obj.modified
def get_active(self, obj):
if obj is None:
return False
elif isinstance(obj, User):
return obj.is_active
else:
return obj.active
def build_standard_field(self, field_name, model_field):
# DRF 3.3 serializers.py::build_standard_field() -> utils/field_mapping.py::get_field_kwargs() short circuits
@ -564,11 +551,11 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
def get_related(self, obj):
res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
if obj.current_job and obj.current_job.active:
if obj.current_job:
res['current_job'] = obj.current_job.get_absolute_url()
if obj.last_job and obj.last_job.active:
if obj.last_job:
res['last_job'] = obj.last_job.get_absolute_url()
if obj.next_schedule and obj.next_schedule.active:
if obj.next_schedule:
res['next_schedule'] = obj.next_schedule.get_absolute_url()
return res
@ -623,9 +610,9 @@ class UnifiedJobSerializer(BaseSerializer):
def get_related(self, obj):
res = super(UnifiedJobSerializer, self).get_related(obj)
if obj.unified_job_template and obj.unified_job_template.active:
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url()
if obj.schedule and obj.schedule.active:
if obj.schedule:
res['schedule'] = obj.schedule.get_absolute_url()
if isinstance(obj, ProjectUpdate):
res['stdout'] = reverse('api:project_update_stdout', args=(obj.pk,))
@ -874,7 +861,7 @@ class ProjectOptionsSerializer(BaseSerializer):
def get_related(self, obj):
res = super(ProjectOptionsSerializer, self).get_related(obj)
if obj.credential and obj.credential.active:
if obj.credential:
res['credential'] = reverse('api:credential_detail',
args=(obj.credential.pk,))
return res
@ -903,7 +890,7 @@ class ProjectOptionsSerializer(BaseSerializer):
def to_representation(self, obj):
ret = super(ProjectOptionsSerializer, self).to_representation(obj)
if obj is not None and 'credential' in ret and (not obj.credential or not obj.credential.active):
if obj is not None and 'credential' in ret and not obj.credential:
ret['credential'] = None
return ret
@ -1039,13 +1026,13 @@ class InventorySerializer(BaseSerializerWithVariables):
access_list = reverse('api:inventory_access_list', args=(obj.pk,)),
#single_fact = reverse('api:inventory_single_fact_view', args=(obj.pk,)),
))
if obj.organization and obj.organization.active:
if obj.organization:
res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,))
return res
def to_representation(self, obj):
ret = super(InventorySerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and (not obj.organization or not obj.organization.active):
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
@ -1100,11 +1087,11 @@ class HostSerializer(BaseSerializerWithVariables):
fact_versions = reverse('api:host_fact_versions_list', args=(obj.pk,)),
#single_fact = reverse('api:host_single_fact_view', args=(obj.pk,)),
))
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.last_job and obj.last_job.active:
if obj.last_job:
res['last_job'] = reverse('api:job_detail', args=(obj.last_job.pk,))
if obj.last_job_host_summary and obj.last_job_host_summary.job.active:
if obj.last_job_host_summary:
res['last_job_host_summary'] = reverse('api:job_host_summary_detail', args=(obj.last_job_host_summary.pk,))
return res
@ -1120,7 +1107,7 @@ class HostSerializer(BaseSerializerWithVariables):
'name': j.job.job_template.name if j.job.job_template is not None else "",
'status': j.job.status,
'finished': j.job.finished,
} for j in obj.job_host_summaries.filter(job__active=True).select_related('job__job_template').order_by('-created')[:5]]})
} for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created')[:5]]})
return d
def _get_host_port_from_name(self, name):
@ -1169,11 +1156,11 @@ class HostSerializer(BaseSerializerWithVariables):
ret = super(HostSerializer, self).to_representation(obj)
if not obj:
return ret
if 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'last_job' in ret and (not obj.last_job or not obj.last_job.active):
if 'last_job' in ret and not obj.last_job:
ret['last_job'] = None
if 'last_job_host_summary' in ret and (not obj.last_job_host_summary or not obj.last_job_host_summary.job.active):
if 'last_job_host_summary' in ret and not obj.last_job_host_summary:
ret['last_job_host_summary'] = None
return ret
@ -1210,7 +1197,7 @@ class GroupSerializer(BaseSerializerWithVariables):
access_list = reverse('api:group_access_list', args=(obj.pk,)),
#single_fact = reverse('api:group_single_fact_view', args=(obj.pk,)),
))
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.inventory_source:
res['inventory_source'] = reverse('api:inventory_source_detail', args=(obj.inventory_source.pk,))
@ -1223,7 +1210,7 @@ class GroupSerializer(BaseSerializerWithVariables):
def to_representation(self, obj):
ret = super(GroupSerializer, self).to_representation(obj)
if obj is not None and 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if obj is not None and 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
@ -1239,7 +1226,7 @@ class GroupTreeSerializer(GroupSerializer):
def get_children(self, obj):
if obj is None:
return {}
children_qs = obj.children.filter(active=True)
children_qs = obj.children
children_qs = children_qs.select_related('inventory')
children_qs = children_qs.prefetch_related('inventory_source')
return GroupTreeSerializer(children_qs, many=True).data
@ -1304,7 +1291,7 @@ class CustomInventoryScriptSerializer(BaseSerializer):
def get_related(self, obj):
res = super(CustomInventoryScriptSerializer, self).get_related(obj)
if obj.organization and obj.organization.active:
if obj.organization:
res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,))
return res
@ -1317,10 +1304,10 @@ class InventorySourceOptionsSerializer(BaseSerializer):
def get_related(self, obj):
res = super(InventorySourceOptionsSerializer, self).get_related(obj)
if obj.credential and obj.credential.active:
if obj.credential:
res['credential'] = reverse('api:credential_detail',
args=(obj.credential.pk,))
if obj.source_script and obj.source_script.active:
if obj.source_script:
res['source_script'] = reverse('api:inventory_script_detail', args=(obj.source_script.pk,))
return res
@ -1365,7 +1352,7 @@ class InventorySourceOptionsSerializer(BaseSerializer):
ret = super(InventorySourceOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'credential' in ret and (not obj.credential or not obj.credential.active):
if 'credential' in ret and not obj.credential:
ret['credential'] = None
return ret
@ -1396,9 +1383,9 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
notifiers_success = reverse('api:inventory_source_notifiers_success_list', args=(obj.pk,)),
notifiers_error = reverse('api:inventory_source_notifiers_error_list', args=(obj.pk,)),
))
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.group and obj.group.active:
if obj.group:
res['group'] = reverse('api:group_detail', args=(obj.group.pk,))
# Backwards compatibility.
if obj.current_update:
@ -1413,9 +1400,9 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
ret = super(InventorySourceSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'group' in ret and (not obj.group or not obj.group.active):
if 'group' in ret and not obj.group:
ret['group'] = None
return ret
@ -1473,13 +1460,13 @@ class TeamSerializer(BaseSerializer):
activity_stream = reverse('api:team_activity_stream_list', args=(obj.pk,)),
access_list = reverse('api:team_access_list', args=(obj.pk,)),
))
if obj.organization and obj.organization.active:
if obj.organization:
res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,))
return res
def to_representation(self, obj):
ret = super(TeamSerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and (not obj.organization or not obj.organization.active):
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
@ -1563,9 +1550,9 @@ class CredentialSerializer(BaseSerializer):
def to_representation(self, obj):
ret = super(CredentialSerializer, self).to_representation(obj)
if obj is not None and 'user' in ret and (not obj.user or not obj.user.is_active):
if obj is not None and 'user' in ret and not obj.user:
ret['user'] = None
if obj is not None and 'team' in ret and (not obj.team or not obj.team.active):
if obj is not None and 'team' in ret and not obj.team:
ret['team'] = None
return ret
@ -1604,13 +1591,13 @@ class JobOptionsSerializer(BaseSerializer):
def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.project and obj.project.active:
if obj.project:
res['project'] = reverse('api:project_detail', args=(obj.project.pk,))
if obj.credential and obj.credential.active:
if obj.credential:
res['credential'] = reverse('api:credential_detail', args=(obj.credential.pk,))
if obj.cloud_credential and obj.cloud_credential.active:
if obj.cloud_credential:
res['cloud_credential'] = reverse('api:credential_detail',
args=(obj.cloud_credential.pk,))
return res
@ -1619,15 +1606,15 @@ class JobOptionsSerializer(BaseSerializer):
ret = super(JobOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'project' in ret and (not obj.project or not obj.project.active):
if 'project' in ret and not obj.project:
ret['project'] = None
if 'playbook' in ret:
ret['playbook'] = ''
if 'credential' in ret and (not obj.credential or not obj.credential.active):
if 'credential' in ret and not obj.credential:
ret['credential'] = None
if 'cloud_credential' in ret and (not obj.cloud_credential or not obj.cloud_credential.active):
if 'cloud_credential' in ret and not obj.cloud_credential:
ret['cloud_credential'] = None
return ret
@ -1690,7 +1677,7 @@ class JobTemplateSerializer(UnifiedJobTemplateSerializer, JobOptionsSerializer):
else:
d['can_copy'] = False
d['can_edit'] = False
d['recent_jobs'] = [{'id': x.id, 'status': x.status, 'finished': x.finished} for x in obj.jobs.filter(active=True).order_by('-created')[:10]]
d['recent_jobs'] = [{'id': x.id, 'status': x.status, 'finished': x.finished} for x in obj.jobs.order_by('-created')[:10]]
return d
def validate(self, attrs):
@ -1721,7 +1708,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
activity_stream = reverse('api:job_activity_stream_list', args=(obj.pk,)),
notifications = reverse('api:job_notifications_list', args=(obj.pk,)),
))
if obj.job_template and obj.job_template.active:
if obj.job_template:
res['job_template'] = reverse('api:job_template_detail',
args=(obj.job_template.pk,))
if obj.can_start or True:
@ -1766,7 +1753,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
ret = super(JobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'job_template' in ret and (not obj.job_template or not obj.job_template.active):
if 'job_template' in ret and not obj.job_template:
ret['job_template'] = None
if obj.job_template and obj.job_template.survey_enabled:
@ -1830,11 +1817,11 @@ class JobRelaunchSerializer(JobSerializer):
def validate(self, attrs):
obj = self.context.get('obj')
if not obj.credential or obj.credential.active is False:
if not obj.credential:
raise serializers.ValidationError(dict(credential=["Credential not found or deleted."]))
if obj.job_type != PERM_INVENTORY_SCAN and (obj.project is None or not obj.project.active):
if obj.job_type != PERM_INVENTORY_SCAN and obj.project is None:
raise serializers.ValidationError(dict(errors=["Job Template Project is missing or undefined"]))
if obj.inventory is None or not obj.inventory.active:
if obj.inventory is None:
raise serializers.ValidationError(dict(errors=["Job Template Inventory is missing or undefined"]))
attrs = super(JobRelaunchSerializer, self).validate(attrs)
return attrs
@ -1874,9 +1861,9 @@ class AdHocCommandSerializer(UnifiedJobSerializer):
def get_related(self, obj):
res = super(AdHocCommandSerializer, self).get_related(obj)
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.credential and obj.credential.active:
if obj.credential:
res['credential'] = reverse('api:credential_detail', args=(obj.credential.pk,))
res.update(dict(
events = reverse('api:ad_hoc_command_ad_hoc_command_events_list', args=(obj.pk,)),
@ -1888,9 +1875,9 @@ class AdHocCommandSerializer(UnifiedJobSerializer):
def to_representation(self, obj):
ret = super(AdHocCommandSerializer, self).to_representation(obj)
if 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'credential' in ret and (not obj.credential or not obj.credential.active):
if 'credential' in ret and not obj.credential:
ret['credential'] = None
# For the UI, only module_name is returned for name, instead of the
# longer module name + module_args format.
@ -1942,7 +1929,7 @@ class SystemJobSerializer(UnifiedJobSerializer):
def get_related(self, obj):
res = super(SystemJobSerializer, self).get_related(obj)
if obj.system_job_template and obj.system_job_template.active:
if obj.system_job_template:
res['system_job_template'] = reverse('api:system_job_template_detail',
args=(obj.system_job_template.pk,))
if obj.can_cancel or True:
@ -2080,7 +2067,7 @@ class JobLaunchSerializer(BaseSerializer):
}
def get_credential_needed_to_start(self, obj):
return not (obj and obj.credential and obj.credential.active)
return not (obj and obj.credential)
def get_survey_enabled(self, obj):
if obj:
@ -2093,7 +2080,7 @@ class JobLaunchSerializer(BaseSerializer):
data = self.context.get('data')
credential = attrs.get('credential', obj and obj.credential or None)
if not credential or not credential.active:
if not credential:
errors['credential'] = 'Credential not provided'
# fill passwords dict with request data passwords
@ -2124,9 +2111,9 @@ class JobLaunchSerializer(BaseSerializer):
if validation_errors:
errors['variables_needed_to_start'] = validation_errors
if obj.job_type != PERM_INVENTORY_SCAN and (obj.project is None or not obj.project.active):
if obj.job_type != PERM_INVENTORY_SCAN and (obj.project is None):
errors['project'] = 'Job Template Project is missing or undefined'
if obj.inventory is None or not obj.inventory.active:
if obj.inventory is None:
errors['inventory'] = 'Job Template Inventory is missing or undefined'
if errors:
@ -2162,7 +2149,7 @@ class NotifierSerializer(BaseSerializer):
test = reverse('api:notifier_test', args=(obj.pk,)),
notifications = reverse('api:notifier_notification_list', args=(obj.pk,)),
))
if obj.organization and obj.organization.active:
if obj.organization:
res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,))
return res
@ -2220,7 +2207,7 @@ class ScheduleSerializer(BaseSerializer):
res.update(dict(
unified_jobs = reverse('api:schedule_unified_jobs_list', args=(obj.pk,)),
))
if obj.unified_job_template and obj.unified_job_template.active:
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url()
return res
@ -2447,8 +2434,6 @@ class AuthTokenSerializer(serializers.Serializer):
if username and password:
user = authenticate(username=username, password=password)
if user:
if not user.is_active:
raise serializers.ValidationError('User account is disabled.')
attrs['user'] = user
return attrs
else:

View File

@ -214,7 +214,7 @@ class ApiV1ConfigView(APIView):
user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
data['user_ldap_fields'] = user_ldap_fields
if request.user.is_superuser or request.user.admin_of_organizations.filter(active=True).count():
if request.user.is_superuser or request.user.admin_of_organizations.count():
data.update(dict(
project_base_dir = settings.PROJECTS_ROOT,
project_local_paths = Project.get_local_path_choices(),
@ -609,7 +609,7 @@ class OrganizationList(ListCreateAPIView):
# by the license, then we are only willing to create this organization
# if no organizations exist in the system.
if (not feature_enabled('multiple_organizations') and
self.model.objects.filter(active=True).count() > 0):
self.model.objects.count() > 0):
raise LicenseForbids('Your Tower license only permits a single '
'organization to exist.')
@ -804,7 +804,7 @@ class ProjectList(ListCreateAPIView):
def get(self, request, *args, **kwargs):
# Not optimal, but make sure the project status and last_updated fields
# are up to date here...
projects_qs = Project.objects.filter(active=True)
projects_qs = Project.objects
projects_qs = projects_qs.select_related('current_job', 'last_job')
for project in projects_qs:
project._set_status_and_last_job_run()
@ -1093,8 +1093,6 @@ class UserDetail(RetrieveUpdateDestroyAPIView):
can_delete = request.user.can_access(User, 'delete', obj)
if not can_delete:
raise PermissionDenied('Cannot delete user')
for own_credential in Credential.objects.filter(user=obj):
own_credential.mark_inactive()
return super(UserDetail, self).destroy(request, *args, **kwargs)
class UserAccessList(ResourceAccessList):
@ -1400,7 +1398,7 @@ class GroupChildrenList(SubListCreateAttachDetachAPIView):
if sub_id is not None:
return super(GroupChildrenList, self).unattach(request, *args, **kwargs)
parent = self.get_parent_object()
parent.mark_inactive()
parent.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def _unattach(self, request, *args, **kwargs): # FIXME: Disabled for now for UI support.
@ -1423,8 +1421,8 @@ class GroupChildrenList(SubListCreateAttachDetachAPIView):
sub, self.relationship):
raise PermissionDenied()
if sub.parents.filter(active=True).exclude(pk=parent.pk).count() == 0:
sub.mark_inactive()
if sub.parents.exclude(pk=parent.pk).count() == 0:
sub.delete()
else:
relationship.remove(sub)
@ -1526,15 +1524,9 @@ class GroupDetail(RetrieveUpdateDestroyAPIView):
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
# FIXME: Why isn't the active check being caught earlier by RBAC?
if not getattr(obj, 'active', True):
raise Http404()
if not getattr(obj, 'is_active', True):
raise Http404()
if not request.user.can_access(self.model, 'delete', obj):
raise PermissionDenied()
if hasattr(obj, 'mark_inactive'):
obj.mark_inactive_recursive()
obj.delete_recursive()
return Response(status=status.HTTP_204_NO_CONTENT)
class GroupAccessList(ResourceAccessList):
@ -1601,9 +1593,9 @@ class InventoryScriptView(RetrieveAPIView):
hostvars = bool(request.query_params.get('hostvars', ''))
show_all = bool(request.query_params.get('all', ''))
if show_all:
hosts_q = dict(active=True)
hosts_q = dict()
else:
hosts_q = dict(active=True, enabled=True)
hosts_q = dict(enabled=True)
if hostname:
host = get_object_or_404(obj.hosts, name=hostname, **hosts_q)
data = host.variables_dict
@ -1621,8 +1613,7 @@ class InventoryScriptView(RetrieveAPIView):
all_group['hosts'] = groupless_hosts
# Build in-memory mapping of groups and their hosts.
group_hosts_kw = dict(group__inventory_id=obj.id, group__active=True,
host__inventory_id=obj.id, host__active=True)
group_hosts_kw = dict(group__inventory_id=obj.id, host__inventory_id=obj.id)
if 'enabled' in hosts_q:
group_hosts_kw['host__enabled'] = hosts_q['enabled']
group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw)
@ -1635,8 +1626,8 @@ class InventoryScriptView(RetrieveAPIView):
# Build in-memory mapping of groups and their children.
group_parents_qs = Group.parents.through.objects.filter(
from_group__inventory_id=obj.id, from_group__active=True,
to_group__inventory_id=obj.id, to_group__active=True,
from_group__inventory_id=obj.id,
to_group__inventory_id=obj.id,
)
group_parents_qs = group_parents_qs.order_by('from_group__name')
group_parents_qs = group_parents_qs.values_list('from_group_id', 'from_group__name', 'to_group_id')
@ -1646,7 +1637,7 @@ class InventoryScriptView(RetrieveAPIView):
group_children.append(from_group_name)
# Now use in-memory maps to build up group info.
for group in obj.groups.filter(active=True):
for group in obj.groups.all():
group_info = OrderedDict()
group_info['hosts'] = group_hosts_map.get(group.id, [])
group_info['children'] = group_children_map.get(group.id, [])
@ -1692,9 +1683,9 @@ class InventoryTreeView(RetrieveAPIView):
def retrieve(self, request, *args, **kwargs):
inventory = self.get_object()
group_children_map = inventory.get_group_children_map(active=True)
root_group_pks = inventory.root_groups.filter(active=True).order_by('name').values_list('pk', flat=True)
groups_qs = inventory.groups.filter(active=True)
group_children_map = inventory.get_group_children_map()
root_group_pks = inventory.root_groups.order_by('name').values_list('pk', flat=True)
groups_qs = inventory.groups
groups_qs = groups_qs.select_related('inventory')
groups_qs = groups_qs.prefetch_related('inventory_source')
all_group_data = GroupSerializer(groups_qs, many=True).data
@ -1898,7 +1889,7 @@ class JobTemplateLaunch(RetrieveAPIView, GenericAPIView):
if obj:
for p in obj.passwords_needed_to_start:
data[p] = u''
if obj.credential and obj.credential.active:
if obj.credential:
data.pop('credential', None)
else:
data['credential'] = None
@ -2095,7 +2086,7 @@ class JobTemplateCallback(GenericAPIView):
return set()
# Find the host objects to search for a match.
obj = self.get_object()
qs = obj.inventory.hosts.filter(active=True)
qs = obj.inventory.hosts
# First try for an exact match on the name.
try:
return set([qs.get(name__in=remote_hosts)])
@ -2155,7 +2146,7 @@ class JobTemplateCallback(GenericAPIView):
# match again.
inventory_sources_already_updated = []
if len(matching_hosts) != 1:
inventory_sources = job_template.inventory.inventory_sources.filter(active=True, update_on_launch=True)
inventory_sources = job_template.inventory.inventory_sources.filter(update_on_launch=True)
inventory_update_pks = set()
for inventory_source in inventory_sources:
if inventory_source.needs_update_on_launch:

View File

@ -245,7 +245,7 @@ class UserAccess(BaseAccess):
return False
if self.user.is_superuser:
return True
return Organization.accessible_objects(self.user, ALL_PERMISSIONS).filter(active=True).exists()
return Organization.accessible_objects(self.user, ALL_PERMISSIONS).exists()
def can_change(self, obj, data):
if data is not None and 'is_superuser' in data:
@ -266,7 +266,7 @@ class UserAccess(BaseAccess):
if obj == self.user:
# cannot delete yourself
return False
super_users = User.objects.filter(is_active=True, is_superuser=True)
super_users = User.objects.filter(is_superuser=True)
if obj.is_superuser and super_users.count() == 1:
# cannot delete the last active superuser
return False
@ -527,7 +527,7 @@ class InventoryUpdateAccess(BaseAccess):
model = InventoryUpdate
def get_queryset(self):
qs = InventoryUpdate.objects.filter(active=True).distinct()
qs = InventoryUpdate.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory_source__group',
'inventory_source__inventory')
inventory_sources_qs = self.user.get_queryset(InventorySource)
@ -677,7 +677,7 @@ class ProjectUpdateAccess(BaseAccess):
model = ProjectUpdate
def get_queryset(self):
qs = ProjectUpdate.objects.filter(active=True).distinct()
qs = ProjectUpdate.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'project')
project_ids = set(self.user.get_queryset(Project).values_list('id', flat=True))
return qs.filter(project_id__in=project_ids)
@ -821,7 +821,7 @@ class JobAccess(BaseAccess):
model = Job
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'job_template', 'inventory',
'project', 'credential', 'cloud_credential', 'job_template')
qs = qs.prefetch_related('unified_job_template')
@ -843,12 +843,10 @@ class JobAccess(BaseAccess):
# TODO: I think the below queries can be combined
deploy_permissions_ids = Permission.objects.filter(
Q(user=self.user) | Q(team__in=team_ids),
active=True,
permission_type__in=allowed_deploy,
)
check_permissions_ids = Permission.objects.filter(
Q(user=self.user) | Q(team__in=team_ids),
active=True,
permission_type__in=allowed_check,
)
@ -947,18 +945,17 @@ class AdHocCommandAccess(BaseAccess):
model = AdHocCommand
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory',
'credential')
if self.user.is_superuser:
return qs
credential_ids = set(self.user.get_queryset(Credential).values_list('id', flat=True))
team_ids = set(Team.objects.filter(active=True, users__in=[self.user]).values_list('id', flat=True))
team_ids = set(Team.objects.filter(users__in=[self.user]).values_list('id', flat=True))
permission_ids = set(Permission.objects.filter(
Q(user=self.user) | Q(team__in=team_ids),
active=True,
permission_type__in=PERMISSION_TYPES_ALLOWING_INVENTORY_READ,
run_ad_hoc_commands=True,
).values_list('id', flat=True))
@ -982,7 +979,7 @@ class AdHocCommandAccess(BaseAccess):
# If a credential is provided, the user should have read access to it.
credential_pk = get_pk_from_dict(data, 'credential')
if credential_pk:
credential = get_object_or_400(Credential, pk=credential_pk, active=True)
credential = get_object_or_400(Credential, pk=credential_pk)
if not credential.accessible_by(self.user, {'read':True}):
return False
@ -990,7 +987,7 @@ class AdHocCommandAccess(BaseAccess):
# given inventory.
inventory_pk = get_pk_from_dict(data, 'inventory')
if inventory_pk:
inventory = get_object_or_400(Inventory, pk=inventory_pk, active=True)
inventory = get_object_or_400(Inventory, pk=inventory_pk)
if not inventory.accessible_by(self.user, {'execute': True}):
return False

View File

@ -111,8 +111,6 @@ class Command(BaseCommand):
n_deleted_items = 0
n_deleted_items += self.cleanup_model(User)
for model in self.get_models(PrimordialModel):
n_deleted_items += self.cleanup_model(model)
if not self.dry_run:
self.logger.log(99, "Removed %d items", n_deleted_items)

View File

@ -19,7 +19,7 @@ class Command(BaseCommand):
# Create a default organization as the first superuser found.
try:
superuser = User.objects.filter(is_superuser=True, is_active=True).order_by('pk')[0]
superuser = User.objects.filter(is_superuser=True).order_by('pk')[0]
except IndexError:
superuser = None
with impersonate(superuser):

View File

@ -53,13 +53,13 @@ class MemObject(object):
'''
Common code shared between in-memory groups and hosts.
'''
def __init__(self, name, source_dir):
assert name, 'no name'
assert source_dir, 'no source dir'
self.name = name
self.source_dir = source_dir
def load_vars(self, base_path):
all_vars = {}
files_found = 0
@ -107,7 +107,7 @@ class MemGroup(MemObject):
group_vars = os.path.join(source_dir, 'group_vars', self.name)
self.variables = self.load_vars(group_vars)
logger.debug('Loaded group: %s', self.name)
def child_group_by_name(self, name, loader):
if name == 'all':
return
@ -266,7 +266,7 @@ class BaseLoader(object):
logger.debug('Filtering group %s', name)
return None
if name not in self.all_group.all_groups:
group = MemGroup(name, self.source_dir)
group = MemGroup(name, self.source_dir)
if not child:
all_group.add_child_group(group)
self.all_group.all_groups[name] = group
@ -315,7 +315,7 @@ class IniLoader(BaseLoader):
for t in tokens[1:]:
k,v = t.split('=', 1)
host.variables[k] = v
group.add_host(host)
group.add_host(host)
elif input_mode == 'children':
group.child_group_by_name(line, self)
elif input_mode == 'vars':
@ -328,7 +328,7 @@ class IniLoader(BaseLoader):
# from API documentation:
#
# if called with --list, inventory outputs like so:
#
#
# {
# "databases" : {
# "hosts" : [ "host1.example.com", "host2.example.com" ],
@ -581,7 +581,7 @@ class Command(NoArgsCommand):
def _get_instance_id(self, from_dict, default=''):
'''
Retrieve the instance ID from the given dict of host variables.
The instance ID variable may be specified as 'foo.bar', in which case
the lookup will traverse into nested dicts, equivalent to:
@ -633,7 +633,7 @@ class Command(NoArgsCommand):
else:
q = dict(name=self.inventory_name)
try:
self.inventory = Inventory.objects.filter(active=True).get(**q)
self.inventory = Inventory.objects.get(**q)
except Inventory.DoesNotExist:
raise CommandError('Inventory with %s = %s cannot be found' % q.items()[0])
except Inventory.MultipleObjectsReturned:
@ -648,8 +648,7 @@ class Command(NoArgsCommand):
if inventory_source_id:
try:
self.inventory_source = InventorySource.objects.get(pk=inventory_source_id,
inventory=self.inventory,
active=True)
inventory=self.inventory)
except InventorySource.DoesNotExist:
raise CommandError('Inventory source with id=%s not found' %
inventory_source_id)
@ -669,7 +668,6 @@ class Command(NoArgsCommand):
source_path=os.path.abspath(self.source),
overwrite=self.overwrite,
overwrite_vars=self.overwrite_vars,
active=True,
)
self.inventory_update = self.inventory_source.create_inventory_update(
job_args=json.dumps(sys.argv),
@ -703,7 +701,7 @@ class Command(NoArgsCommand):
host_qs = self.inventory_source.group.all_hosts
else:
host_qs = self.inventory.hosts.all()
host_qs = host_qs.filter(active=True, instance_id='',
host_qs = host_qs.filter(instance_id='',
variables__contains=self.instance_id_var.split('.')[0])
for host in host_qs:
instance_id = self._get_instance_id(host.variables_dict)
@ -740,7 +738,7 @@ class Command(NoArgsCommand):
hosts_qs = self.inventory_source.group.all_hosts
# FIXME: Also include hosts from inventory_source.managed_hosts?
else:
hosts_qs = self.inventory.hosts.filter(active=True)
hosts_qs = self.inventory.hosts
# Build list of all host pks, remove all that should not be deleted.
del_host_pks = set(hosts_qs.values_list('pk', flat=True))
if self.instance_id_var:
@ -765,7 +763,7 @@ class Command(NoArgsCommand):
del_pks = all_del_pks[offset:(offset + self._batch_size)]
for host in hosts_qs.filter(pk__in=del_pks):
host_name = host.name
host.mark_inactive()
host.delete()
self.logger.info('Deleted host "%s"', host_name)
if settings.SQL_DEBUG:
self.logger.warning('host deletions took %d queries for %d hosts',
@ -785,7 +783,7 @@ class Command(NoArgsCommand):
groups_qs = self.inventory_source.group.all_children
# FIXME: Also include groups from inventory_source.managed_groups?
else:
groups_qs = self.inventory.groups.filter(active=True)
groups_qs = self.inventory.groups
# Build list of all group pks, remove those that should not be deleted.
del_group_pks = set(groups_qs.values_list('pk', flat=True))
all_group_names = self.all_group.all_groups.keys()
@ -799,7 +797,8 @@ class Command(NoArgsCommand):
del_pks = all_del_pks[offset:(offset + self._batch_size)]
for group in groups_qs.filter(pk__in=del_pks):
group_name = group.name
group.mark_inactive(recompute=False)
with ignore_inventory_computed_fields():
group.delete()
self.logger.info('Group "%s" deleted', group_name)
if settings.SQL_DEBUG:
self.logger.warning('group deletions took %d queries for %d groups',
@ -821,10 +820,10 @@ class Command(NoArgsCommand):
if self.inventory_source.group:
db_groups = self.inventory_source.group.all_children
else:
db_groups = self.inventory.groups.filter(active=True)
db_groups = self.inventory.groups
for db_group in db_groups:
# Delete child group relationships not present in imported data.
db_children = db_group.children.filter(active=True)
db_children = db_group.children
db_children_name_pk_map = dict(db_children.values_list('name', 'pk'))
mem_children = self.all_group.all_groups[db_group.name].children
for mem_group in mem_children:
@ -839,7 +838,7 @@ class Command(NoArgsCommand):
db_child.name, db_group.name)
# FIXME: Inventory source group relationships
# Delete group/host relationships not present in imported data.
db_hosts = db_group.hosts.filter(active=True)
db_hosts = db_group.hosts
del_host_pks = set(db_hosts.values_list('pk', flat=True))
mem_hosts = self.all_group.all_groups[db_group.name].hosts
all_mem_host_names = [h.name for h in mem_hosts if not h.instance_id]
@ -860,7 +859,7 @@ class Command(NoArgsCommand):
del_pks = del_host_pks[offset:(offset + self._batch_size)]
for db_host in db_hosts.filter(pk__in=del_pks):
group_host_count += 1
if db_host not in db_group.hosts.filter(active=True):
if db_host not in db_group.hosts:
continue
db_group.hosts.remove(db_host)
self.logger.info('Host "%s" removed from group "%s"',
@ -1036,7 +1035,7 @@ class Command(NoArgsCommand):
all_host_pks = sorted(mem_host_pk_map.keys())
for offset in xrange(0, len(all_host_pks), self._batch_size):
host_pks = all_host_pks[offset:(offset + self._batch_size)]
for db_host in self.inventory.hosts.filter(active=True, pk__in=host_pks):
for db_host in self.inventory.hosts.filter( pk__in=host_pks):
if db_host.pk in host_pks_updated:
continue
mem_host = mem_host_pk_map[db_host.pk]
@ -1048,7 +1047,7 @@ class Command(NoArgsCommand):
all_instance_ids = sorted(mem_host_instance_id_map.keys())
for offset in xrange(0, len(all_instance_ids), self._batch_size):
instance_ids = all_instance_ids[offset:(offset + self._batch_size)]
for db_host in self.inventory.hosts.filter(active=True, instance_id__in=instance_ids):
for db_host in self.inventory.hosts.filter( instance_id__in=instance_ids):
if db_host.pk in host_pks_updated:
continue
mem_host = mem_host_instance_id_map[db_host.instance_id]
@ -1060,7 +1059,7 @@ class Command(NoArgsCommand):
all_host_names = sorted(mem_host_name_map.keys())
for offset in xrange(0, len(all_host_names), self._batch_size):
host_names = all_host_names[offset:(offset + self._batch_size)]
for db_host in self.inventory.hosts.filter(active=True, name__in=host_names):
for db_host in self.inventory.hosts.filter( name__in=host_names):
if db_host.pk in host_pks_updated:
continue
mem_host = mem_host_name_map[db_host.name]
@ -1297,7 +1296,7 @@ class Command(NoArgsCommand):
except CommandError as e:
self.mark_license_failure(save=True)
raise e
if self.inventory_source.group:
inv_name = 'group "%s"' % (self.inventory_source.group.name)
else:
@ -1336,7 +1335,7 @@ class Command(NoArgsCommand):
self.inventory_update.result_traceback = tb
self.inventory_update.status = status
self.inventory_update.save(update_fields=['status', 'result_traceback'])
if exc and isinstance(exc, CommandError):
sys.exit(1)
elif exc:

View File

@ -13,9 +13,9 @@ class HostManager(models.Manager):
def active_count(self):
"""Return count of active, unique hosts for licensing."""
try:
return self.filter(active=True, inventory__active=True).order_by('name').distinct('name').count()
return self.order_by('name').distinct('name').count()
except NotImplementedError: # For unit tests only, SQLite doesn't support distinct('name')
return len(set(self.filter(active=True, inventory__active=True).values_list('name', flat=True)))
return len(set(self.values_list('name', flat=True)))
class InstanceManager(models.Manager):
"""A custom manager class for the Instance model.

View File

@ -0,0 +1,65 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from awx.main.migrations import _cleanup_deleted as cleanup_deleted
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the soft-delete 'active' flag from every model that carried it.
    # The RunPython data migration executes first so that rows already
    # soft-deleted (active/is_active False with a '_deleted_'/'_d_' name
    # prefix — see _cleanup_deleted.cleanup_deleted) are hard-deleted
    # before the column that marks them is removed.

    dependencies = [
        ('main', '0004_v300_changes'),
    ]

    operations = [
        # Purge previously soft-deleted rows while the flag still exists.
        migrations.RunPython(cleanup_deleted.cleanup_deleted),
        # Schema changes: remove the now-unused 'active' boolean everywhere.
        migrations.RemoveField(
            model_name='credential',
            name='active',
        ),
        migrations.RemoveField(
            model_name='custominventoryscript',
            name='active',
        ),
        migrations.RemoveField(
            model_name='group',
            name='active',
        ),
        migrations.RemoveField(
            model_name='host',
            name='active',
        ),
        migrations.RemoveField(
            model_name='inventory',
            name='active',
        ),
        migrations.RemoveField(
            model_name='notifier',
            name='active',
        ),
        migrations.RemoveField(
            model_name='organization',
            name='active',
        ),
        migrations.RemoveField(
            model_name='permission',
            name='active',
        ),
        migrations.RemoveField(
            model_name='schedule',
            name='active',
        ),
        migrations.RemoveField(
            model_name='team',
            name='active',
        ),
        migrations.RemoveField(
            model_name='unifiedjob',
            name='active',
        ),
        migrations.RemoveField(
            model_name='unifiedjobtemplate',
            name='active',
        ),
    ]

View File

@ -1,16 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from awx.main.migrations import _rbac as rbac
from django.db import migrations
class Migration(migrations.Migration):
    # Empty placeholder migration: it performs no schema or data changes and
    # exists only to hold a spot in the dependency chain.

    dependencies = [
        ('main', '0005_v300_changes'),
    ]

    operations = [
        # This is a placeholder for our future active flag removal work
    ]

View File

@ -14,7 +14,7 @@ class Migration(migrations.Migration):
('taggit', '0002_auto_20150616_2121'),
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0006_v300_active_flag_removal'),
('main', '0005_v300_active_flag_removal'),
]
operations = [
@ -41,7 +41,6 @@ class Migration(migrations.Migration):
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('description', models.TextField(default=b'', blank=True)),
('active', models.BooleanField(default=True, editable=False)),
('name', models.CharField(max_length=512)),
('singleton_name', models.TextField(default=None, unique=True, null=True, db_index=True)),
('object_id', models.PositiveIntegerField(default=None, null=True)),

View File

@ -8,7 +8,7 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0007_v300_rbac_changes'),
('main', '0006_v300_rbac_changes'),
]
operations = [

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db import migrations
from django.utils.timezone import now
from awx.api.license import feature_enabled
@ -107,7 +107,7 @@ def create_system_job_templates(apps, schema_editor):
class Migration(migrations.Migration):
dependencies = [
('main', '0004_v300_changes'),
('main', '0007_v300_rbac_migrations'),
]
operations = [

View File

@ -0,0 +1,85 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Python
import logging
# Django
from django.db import transaction
from django.utils.dateparse import parse_datetime
def cleanup_deleted(apps, schema_editor):
    """Hard-delete rows that were previously soft-deleted via an active flag.

    Intended to run as a RunPython data migration before the 'active' /
    'is_active' columns are dropped.  Soft-deleted rows are identified by
    the flag being False together with a renamed name field:
    '_deleted_<timestamp>_...' for most models, '_d_<timestamp>_...' for the
    User model (which uses 'username' / 'is_active').

    ``schema_editor`` is unused; it is part of the RunPython signature.
    """
    logger = logging.getLogger('awx.main.migrations.cleanup_deleted')

    def cleanup_model(model):
        '''
        Presume the '_deleted_' string to be in the 'name' field unless considering the User model.
        When considering the User model, presume the '_d_' string to be in the 'username' field.
        '''
        logger.debug('cleaning up model %s', model)
        name_field = 'name'
        name_prefix = '_deleted_'
        active_field = None
        n_deleted_items = 0
        # Detect which flavor of active flag (if any) this model carries.
        for field in model._meta.fields:
            if field.name in ('is_active', 'active'):
                active_field = field.name
            if field.name == 'is_active':  # is User model
                name_field = 'username'
                name_prefix = '_d_'
        if not active_field:
            logger.warning('skipping model %s, no active field', model)
            return n_deleted_items
        qs = model.objects.filter(**{
            active_field: False,
            '%s__startswith' % name_field: name_prefix,
        })
        pks_to_delete = set()
        for instance in qs.iterator():
            # The soft-delete rename embeds a timestamp as the third
            # '_'-separated token; rows without one are left untouched.
            dt = parse_datetime(getattr(instance, name_field).split('_')[2])
            if not dt:
                logger.warning('unable to find deleted timestamp in %s field', name_field)
            else:
                action_text = 'deleting'
                logger.info('%s %s', action_text, instance)
                n_deleted_items += 1
                # BUGFIX: previously each instance was deleted individually
                # and this set was never populated, leaving the batch-delete
                # path below as dead code.  Accumulate PKs instead so the
                # deletes really happen in batches.
                pks_to_delete.add(instance.pk)
            # Cleanup objects in batches instead of deleting each one individually.
            if len(pks_to_delete) >= 50:
                model.objects.filter(pk__in=pks_to_delete).delete()
                pks_to_delete.clear()
        # Flush any remainder smaller than one full batch.
        if len(pks_to_delete):
            model.objects.filter(pk__in=pks_to_delete).delete()
        return n_deleted_items

    # Rebind the logger the nested closure uses so migration output goes
    # straight to stderr without propagating to the root logger.
    logger = logging.getLogger('awx.main.commands.cleanup_deleted')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(handler)
    logger.propagate = False

    # One transaction for the whole purge: either every model is cleaned
    # or none are.
    with transaction.atomic():
        n_deleted_items = 0
        models = [
            apps.get_model('auth', "User"),
            apps.get_model('main', 'Credential'),
            apps.get_model('main', 'CustomInventoryScript'),
            apps.get_model('main', 'Group'),
            apps.get_model('main', 'Host'),
            apps.get_model('main', 'Inventory'),
            apps.get_model('main', 'Notifier'),
            apps.get_model('main', 'Organization'),
            apps.get_model('main', 'Permission'),
            apps.get_model('main', 'Schedule'),
            apps.get_model('main', 'Team'),
            apps.get_model('main', 'UnifiedJob'),
            apps.get_model('main', 'UnifiedJobTemplate'),
        ]
        for model in models:
            n_deleted_items += cleanup_model(model)
        # Level 99 guarantees the summary is emitted regardless of level.
        logger.log(99, "Removed %d items", n_deleted_items)

View File

@ -199,16 +199,16 @@ class UserAccess(BaseAccess):
model = User
def get_queryset(self):
qs = self.model.objects.filter(is_active=True).distinct()
qs = self.model.objects.distinct()
if self.user.is_superuser:
return qs
if tower_settings.ORG_ADMINS_CAN_SEE_ALL_USERS and self.user.admin_of_organizations.filter(active=True).exists():
if tower_settings.ORG_ADMINS_CAN_SEE_ALL_USERS and self.user.admin_of_organizations.all().exists():
return qs
return qs.filter(
Q(pk=self.user.pk) |
Q(organizations__in=self.user.admin_of_organizations.filter(active=True)) |
Q(organizations__in=self.user.organizations.filter(active=True)) |
Q(teams__in=self.user.teams.filter(active=True))
Q(organizations__in=self.user.admin_of_organizations) |
Q(organizations__in=self.user.organizations) |
Q(teams__in=self.user.teams)
).distinct()
def can_add(self, data):
@ -216,7 +216,7 @@ class UserAccess(BaseAccess):
if to_python_boolean(data['is_superuser'], allow_none=True) and not self.user.is_superuser:
return False
return bool(self.user.is_superuser or
self.user.admin_of_organizations.filter(active=True).exists())
self.user.admin_of_organizations.exists())
def can_change(self, obj, data):
if data is not None and 'is_superuser' in data:
@ -231,18 +231,18 @@ class UserAccess(BaseAccess):
# Admin implies changing all user fields.
if self.user.is_superuser:
return True
return bool(obj.organizations.filter(active=True, deprecated_admins__in=[self.user]).exists())
return bool(obj.organizations.filter(deprecated_admins__in=[self.user]).exists())
def can_delete(self, obj):
if obj == self.user:
# cannot delete yourself
return False
super_users = User.objects.filter(is_active=True, is_superuser=True)
super_users = User.objects.filter(is_superuser=True)
if obj.is_superuser and super_users.count() == 1:
# cannot delete the last active superuser
return False
return bool(self.user.is_superuser or
obj.organizations.filter(active=True, deprecated_admins__in=[self.user]).exists())
obj.organizations.filter(deprecated_admins__in=[self.user]).exists())
class OrganizationAccess(BaseAccess):
'''
@ -257,7 +257,7 @@ class OrganizationAccess(BaseAccess):
model = Organization
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by')
if self.user.is_superuser:
return qs
@ -295,25 +295,21 @@ class InventoryAccess(BaseAccess):
def get_queryset(self, allowed=None, ad_hoc=None):
allowed = allowed or PERMISSION_TYPES_ALLOWING_INVENTORY_READ
qs = Inventory.objects.filter(active=True).distinct()
qs = Inventory.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'organization')
if self.user.is_superuser:
return qs
qs = qs.filter(organization__active=True)
admin_of = qs.filter(organization__deprecated_admins__in=[self.user]).distinct()
has_user_kw = dict(
permissions__user__in=[self.user],
permissions__permission_type__in=allowed,
permissions__active=True,
)
if ad_hoc is not None:
has_user_kw['permissions__run_ad_hoc_commands'] = ad_hoc
has_user_perms = qs.filter(**has_user_kw).distinct()
has_team_kw = dict(
permissions__team__deprecated_users__in=[self.user],
permissions__team__active=True,
permissions__permission_type__in=allowed,
permissions__active=True,
)
if ad_hoc is not None:
has_team_kw['permissions__run_ad_hoc_commands'] = ad_hoc
@ -330,7 +326,7 @@ class InventoryAccess(BaseAccess):
# If no data is specified, just checking for generic add permission?
if not data:
return bool(self.user.is_superuser or
self.user.admin_of_organizations.filter(active=True).exists())
self.user.admin_of_organizations.exists())
# Otherwise, verify that the user has access to change the parent
# organization of this inventory.
if self.user.is_superuser:
@ -379,7 +375,7 @@ class HostAccess(BaseAccess):
model = Host
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory',
'last_job__job_template',
'last_job_host_summary__job')
@ -435,7 +431,7 @@ class GroupAccess(BaseAccess):
model = Group
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory')
qs = qs.prefetch_related('parents', 'children', 'inventory_source')
inventory_ids = set(self.user.get_queryset(Inventory).values_list('id', flat=True))
@ -466,9 +462,6 @@ class GroupAccess(BaseAccess):
if not super(GroupAccess, self).can_attach(obj, sub_obj, relationship,
data, skip_sub_obj_read_check):
return False
# Don't allow attaching if the sub obj is not active
if not obj.active:
return False
# Prevent assignments between different inventories.
if obj.inventory != sub_obj.inventory:
raise ParseError('Cannot associate two items from different inventories')
@ -495,7 +488,7 @@ class InventorySourceAccess(BaseAccess):
model = InventorySource
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'group', 'inventory')
inventory_ids = set(self.user.get_queryset(Inventory).values_list('id', flat=True))
return qs.filter(Q(inventory_id__in=inventory_ids) |
@ -535,7 +528,7 @@ class InventoryUpdateAccess(BaseAccess):
model = InventoryUpdate
def get_queryset(self):
qs = InventoryUpdate.objects.filter(active=True).distinct()
qs = InventoryUpdate.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory_source__group',
'inventory_source__inventory')
inventory_sources_qs = self.user.get_queryset(InventorySource)
@ -569,19 +562,19 @@ class CredentialAccess(BaseAccess):
# Create a base queryset.
# If the user is a superuser, and therefore can see everything, this
# is also sufficient, and we are done.
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'user', 'team')
if self.user.is_superuser:
return qs
# Get the list of organizations for which the user is an admin
orgs_as_admin_ids = set(self.user.admin_of_organizations.filter(active=True).values_list('id', flat=True))
orgs_as_admin_ids = set(self.user.admin_of_organizations.values_list('id', flat=True))
return qs.filter(
Q(user=self.user) |
Q(user__organizations__id__in=orgs_as_admin_ids) |
Q(user__admin_of_organizations__id__in=orgs_as_admin_ids) |
Q(team__organization__id__in=orgs_as_admin_ids, team__active=True) |
Q(team__deprecated_users__in=[self.user], team__active=True)
Q(team__organization__id__in=orgs_as_admin_ids) |
Q(team__deprecated_users__in=[self.user])
)
def can_add(self, data):
@ -607,12 +600,12 @@ class CredentialAccess(BaseAccess):
if obj.user:
if self.user == obj.user:
return True
if obj.user.organizations.filter(active=True, deprecated_admins__in=[self.user]).exists():
if obj.user.organizations.filter(deprecated_admins__in=[self.user]).exists():
return True
if obj.user.admin_of_organizations.filter(active=True, deprecated_admins__in=[self.user]).exists():
if obj.user.admin_of_organizations.filter(deprecated_admins__in=[self.user]).exists():
return True
if obj.team:
if self.user in obj.team.organization.deprecated_admins.filter(is_active=True):
if self.user in obj.team.organization.deprecated_admins.all():
return True
return False
@ -637,12 +630,12 @@ class TeamAccess(BaseAccess):
model = Team
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'organization')
if self.user.is_superuser:
return qs
return qs.filter(
Q(organization__deprecated_admins__in=[self.user], organization__active=True) |
Q(organization__deprecated_admins__in=[self.user]) |
Q(deprecated_users__in=[self.user])
)
@ -689,26 +682,24 @@ class ProjectAccess(BaseAccess):
model = Project
def get_queryset(self):
qs = Project.objects.filter(active=True).distinct()
qs = Project.objects.distinct()
qs = qs.select_related('modified_by', 'credential', 'current_job', 'last_job')
if self.user.is_superuser:
return qs
team_ids = set(Team.objects.filter(deprecated_users__in=[self.user]).values_list('id', flat=True))
qs = qs.filter(Q(created_by=self.user, deprecated_organizations__isnull=True) |
Q(deprecated_organizations__deprecated_admins__in=[self.user], deprecated_organizations__active=True) |
Q(deprecated_organizations__deprecated_users__in=[self.user], deprecated_organizations__active=True) |
Q(deprecated_organizations__deprecated_admins__in=[self.user]) |
Q(deprecated_organizations__deprecated_users__in=[self.user]) |
Q(teams__in=team_ids))
allowed_deploy = [PERM_JOBTEMPLATE_CREATE, PERM_INVENTORY_DEPLOY]
allowed_check = [PERM_JOBTEMPLATE_CREATE, PERM_INVENTORY_DEPLOY, PERM_INVENTORY_CHECK]
deploy_permissions_ids = set(Permission.objects.filter(
Q(user=self.user) | Q(team_id__in=team_ids),
active=True,
permission_type__in=allowed_deploy,
).values_list('id', flat=True))
check_permissions_ids = set(Permission.objects.filter(
Q(user=self.user) | Q(team_id__in=team_ids),
active=True,
permission_type__in=allowed_check,
).values_list('id', flat=True))
@ -719,16 +710,16 @@ class ProjectAccess(BaseAccess):
def can_add(self, data):
if self.user.is_superuser:
return True
if self.user.admin_of_organizations.filter(active=True).exists():
if self.user.admin_of_organizations.exists():
return True
return False
def can_change(self, obj, data):
if self.user.is_superuser:
return True
if obj.created_by == self.user and not obj.deprecated_organizations.filter(active=True).count():
if obj.created_by == self.user and not obj.deprecated_organizations.count():
return True
if obj.deprecated_organizations.filter(active=True, deprecated_admins__in=[self.user]).exists():
if obj.deprecated_organizations.filter(deprecated_admins__in=[self.user]).exists():
return True
return False
@ -748,7 +739,7 @@ class ProjectUpdateAccess(BaseAccess):
model = ProjectUpdate
def get_queryset(self):
qs = ProjectUpdate.objects.filter(active=True).distinct()
qs = ProjectUpdate.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'project')
project_ids = set(self.user.get_queryset(Project).values_list('id', flat=True))
return qs.filter(project_id__in=project_ids)
@ -776,18 +767,18 @@ class PermissionAccess(BaseAccess):
model = Permission
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'user', 'team', 'inventory',
'project')
if self.user.is_superuser:
return qs
orgs_as_admin_ids = set(self.user.admin_of_organizations.filter(active=True).values_list('id', flat=True))
orgs_as_admin_ids = set(self.user.admin_of_organizations.values_list('id', flat=True))
return qs.filter(
Q(user__organizations__in=orgs_as_admin_ids) |
Q(user__admin_of_organizations__in=orgs_as_admin_ids) |
Q(team__organization__in=orgs_as_admin_ids, team__active=True) |
Q(team__organization__in=orgs_as_admin_ids) |
Q(user=self.user) |
Q(team__deprecated_users__in=[self.user], team__active=True)
Q(team__deprecated_users__in=[self.user])
)
def can_add(self, data):
@ -868,7 +859,7 @@ class JobTemplateAccess(BaseAccess):
model = JobTemplate
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory', 'project',
'credential', 'cloud_credential', 'next_schedule')
if self.user.is_superuser:
@ -892,12 +883,10 @@ class JobTemplateAccess(BaseAccess):
# TODO: I think the below queries can be combined
deploy_permissions_ids = Permission.objects.filter(
Q(user=self.user) | Q(team_id__in=team_ids),
active=True,
permission_type__in=allowed_deploy,
)
check_permissions_ids = Permission.objects.filter(
Q(user=self.user) | Q(team_id__in=team_ids),
active=True,
permission_type__in=allowed_check,
)
@ -986,7 +975,6 @@ class JobTemplateAccess(BaseAccess):
Q(user=self.user) | Q(team__deprecated_users__in=[self.user]),
inventory=inventory,
project=project,
active=True,
#permission_type__in=[PERM_INVENTORY_CHECK, PERM_INVENTORY_DEPLOY],
permission_type=PERM_JOBTEMPLATE_CREATE,
)
@ -1044,7 +1032,6 @@ class JobTemplateAccess(BaseAccess):
Q(user=self.user) | Q(team__deprecated_users__in=[self.user]),
inventory=obj.inventory,
project=obj.project,
active=True,
permission_type__in=[PERM_JOBTEMPLATE_CREATE, PERM_INVENTORY_CHECK, PERM_INVENTORY_DEPLOY],
)
@ -1086,7 +1073,7 @@ class JobAccess(BaseAccess):
model = Job
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'job_template', 'inventory',
'project', 'credential', 'cloud_credential', 'job_template')
qs = qs.prefetch_related('unified_job_template')
@ -1108,12 +1095,10 @@ class JobAccess(BaseAccess):
# TODO: I think the below queries can be combined
deploy_permissions_ids = Permission.objects.filter(
Q(user=self.user) | Q(team__in=team_ids),
active=True,
permission_type__in=allowed_deploy,
)
check_permissions_ids = Permission.objects.filter(
Q(user=self.user) | Q(team__in=team_ids),
active=True,
permission_type__in=allowed_check,
)
@ -1212,18 +1197,17 @@ class AdHocCommandAccess(BaseAccess):
model = AdHocCommand
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory',
'credential')
if self.user.is_superuser:
return qs
credential_ids = set(self.user.get_queryset(Credential).values_list('id', flat=True))
team_ids = set(Team.objects.filter(active=True, deprecated_users__in=[self.user]).values_list('id', flat=True))
team_ids = set(Team.objects.filter(deprecated_users__in=[self.user]).values_list('id', flat=True))
permission_ids = set(Permission.objects.filter(
Q(user=self.user) | Q(team__in=team_ids),
active=True,
permission_type__in=PERMISSION_TYPES_ALLOWING_INVENTORY_READ,
run_ad_hoc_commands=True,
).values_list('id', flat=True))
@ -1247,7 +1231,7 @@ class AdHocCommandAccess(BaseAccess):
# If a credential is provided, the user should have read access to it.
credential_pk = get_pk_from_dict(data, 'credential')
if credential_pk:
credential = get_object_or_400(Credential, pk=credential_pk, active=True)
credential = get_object_or_400(Credential, pk=credential_pk)
if not check_user_access(self.user, Credential, 'read', credential):
return False
@ -1255,7 +1239,7 @@ class AdHocCommandAccess(BaseAccess):
# given inventory.
inventory_pk = get_pk_from_dict(data, 'inventory')
if inventory_pk:
inventory = get_object_or_400(Inventory, pk=inventory_pk, active=True)
inventory = get_object_or_400(Inventory, pk=inventory_pk)
if not check_user_access(self.user, Inventory, 'run_ad_hoc_commands', inventory):
return False
@ -1375,7 +1359,7 @@ class UnifiedJobTemplateAccess(BaseAccess):
model = UnifiedJobTemplate
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
project_qs = self.user.get_queryset(Project).filter(scm_type__in=[s[0] for s in Project.SCM_TYPE_CHOICES])
inventory_source_qs = self.user.get_queryset(InventorySource).filter(source__in=CLOUD_INVENTORY_SOURCES)
job_template_qs = self.user.get_queryset(JobTemplate)
@ -1405,7 +1389,7 @@ class UnifiedJobAccess(BaseAccess):
model = UnifiedJob
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
project_update_qs = self.user.get_queryset(ProjectUpdate)
inventory_update_qs = self.user.get_queryset(InventoryUpdate).filter(source__in=CLOUD_INVENTORY_SOURCES)
job_qs = self.user.get_queryset(Job)
@ -1442,7 +1426,7 @@ class ScheduleAccess(BaseAccess):
model = Schedule
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by')
qs = qs.prefetch_related('unified_job_template')
if self.user.is_superuser:
@ -1614,7 +1598,7 @@ class CustomInventoryScriptAccess(BaseAccess):
model = CustomInventoryScript
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
if not self.user.is_superuser:
qs = qs.filter(Q(organization__deprecated_admins__in=[self.user]) | Q(organization__deprecated_users__in=[self.user]))
return qs
@ -1622,8 +1606,6 @@ class CustomInventoryScriptAccess(BaseAccess):
def can_read(self, obj):
if self.user.is_superuser:
return True
if not obj.active:
return False
return bool(obj.organization in self.user.organizations.all() or obj.organization in self.user.admin_of_organizations.all())
def can_add(self, data):

View File

@ -73,7 +73,7 @@ def migrate_inventory(apps, schema_editor):
for inventory in Inventory.objects.all():
teams, users = [], []
for perm in Permission.objects.filter(inventory=inventory, active=True):
for perm in Permission.objects.filter(inventory=inventory):
role = None
execrole = None
if perm.permission_type == 'admin':
@ -186,7 +186,7 @@ def migrate_projects(apps, schema_editor):
project.member_role.members.add(user)
migrations[project.name]['users'].add(user)
for perm in Permission.objects.filter(project=project, active=True):
for perm in Permission.objects.filter(project=project):
# All perms at this level just imply a user or team can read
if perm.team:
perm.team.member_role.children.add(project.member_role)
@ -253,7 +253,6 @@ def migrate_job_templates(apps, schema_editor):
permission = Permission.objects.filter(
inventory=jt.inventory,
project=jt.project,
active=True,
permission_type__in=['create', 'check', 'run'] if jt.job_type == 'check' else ['create', 'run'],
)

View File

@ -87,7 +87,7 @@ class AdHocCommand(UnifiedJob):
def clean_inventory(self):
inv = self.inventory
if not inv or not inv.active:
if not inv:
raise ValidationError('Inventory is no longer available.')
return inv
@ -123,7 +123,7 @@ class AdHocCommand(UnifiedJob):
@property
def passwords_needed_to_start(self):
'''Return list of password field names needed to start the job.'''
if self.credential and self.credential.active:
if self.credential:
return self.credential.passwords_needed
else:
return []
@ -164,14 +164,14 @@ class AdHocCommand(UnifiedJob):
def task_impact(self):
# NOTE: We sorta have to assume the host count matches and that forks default to 5
from awx.main.models.inventory import Host
count_hosts = Host.objects.filter(active=True, enabled=True, inventory__ad_hoc_commands__pk=self.pk).count()
count_hosts = Host.objects.filter( enabled=True, inventory__ad_hoc_commands__pk=self.pk).count()
return min(count_hosts, 5 if self.forks == 0 else self.forks) * 10
def generate_dependencies(self, active_tasks):
from awx.main.models import InventoryUpdate
if not self.inventory:
return []
inventory_sources = self.inventory.inventory_sources.filter(active=True, update_on_launch=True)
inventory_sources = self.inventory.inventory_sources.filter( update_on_launch=True)
inventory_sources_found = []
dependencies = []
for obj in active_tasks:

View File

@ -203,15 +203,6 @@ class PasswordFieldsModel(BaseModel):
def _password_field_allows_ask(self, field):
return False # Override in subclasses if needed.
def mark_inactive(self, save=True):
'''
When marking a password model inactive we'll clear sensitive fields
'''
for sensitive_field in self.PASSWORD_FIELDS:
setattr(self, sensitive_field, "")
self.save()
super(PasswordFieldsModel, self).mark_inactive(save=save)
def save(self, *args, **kwargs):
new_instance = not bool(self.pk)
# If update_fields has been specified, add our field names to it,
@ -273,29 +264,9 @@ class PrimordialModel(CreatedModifiedModel):
editable=False,
on_delete=models.SET_NULL,
)
active = models.BooleanField(
default=True,
editable=False,
)
tags = TaggableManager(blank=True)
def mark_inactive(self, save=True, update_fields=None, skip_active_check=False):
'''Use instead of delete to rename and mark inactive.'''
update_fields = update_fields or []
if skip_active_check or self.active:
dtnow = now()
if 'name' in self._meta.get_all_field_names():
self.name = "_deleted_%s_%s" % (dtnow.isoformat(), self.name)
if 'name' not in update_fields:
update_fields.append('name')
self.active = False
if 'active' not in update_fields:
update_fields.append('active')
if save:
self.save(update_fields=update_fields)
return update_fields
def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields', [])
user = get_current_user()

View File

@ -26,7 +26,7 @@ from awx.main.models.jobs import Job
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.mixins import ResourceMixin
from awx.main.models.notifications import Notifier
from awx.main.utils import ignore_inventory_computed_fields, _inventory_updates
from awx.main.utils import _inventory_updates
from awx.main.conf import tower_settings
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'CustomInventoryScript']
@ -120,30 +120,15 @@ class Inventory(CommonModel, ResourceMixin):
def get_absolute_url(self):
return reverse('api:inventory_detail', args=(self.pk,))
def mark_inactive(self, save=True):
'''
When marking inventory inactive, also mark hosts and groups inactive.
'''
with ignore_inventory_computed_fields():
for host in self.hosts.filter(active=True):
host.mark_inactive()
for group in self.groups.filter(active=True):
group.mark_inactive(recompute=False)
for inventory_source in self.inventory_sources.filter(active=True):
inventory_source.mark_inactive()
super(Inventory, self).mark_inactive(save=save)
variables_dict = VarsDictProperty('variables')
def get_group_hosts_map(self, active=None):
def get_group_hosts_map(self):
'''
Return dictionary mapping group_id to set of child host_id's.
'''
# FIXME: Cache this mapping?
group_hosts_kw = dict(group__inventory_id=self.pk, host__inventory_id=self.pk)
if active is not None:
group_hosts_kw['group__active'] = active
group_hosts_kw['host__active'] = active
group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw)
group_hosts_qs = group_hosts_qs.values_list('group_id', 'host_id')
group_hosts_map = {}
@ -152,15 +137,12 @@ class Inventory(CommonModel, ResourceMixin):
group_host_ids.add(host_id)
return group_hosts_map
def get_group_parents_map(self, active=None):
def get_group_parents_map(self):
'''
Return dictionary mapping group_id to set of parent group_id's.
'''
# FIXME: Cache this mapping?
group_parents_kw = dict(from_group__inventory_id=self.pk, to_group__inventory_id=self.pk)
if active is not None:
group_parents_kw['from_group__active'] = active
group_parents_kw['to_group__active'] = active
group_parents_qs = Group.parents.through.objects.filter(**group_parents_kw)
group_parents_qs = group_parents_qs.values_list('from_group_id', 'to_group_id')
group_parents_map = {}
@ -169,15 +151,12 @@ class Inventory(CommonModel, ResourceMixin):
group_parents.add(to_group_id)
return group_parents_map
def get_group_children_map(self, active=None):
def get_group_children_map(self):
'''
Return dictionary mapping group_id to set of child group_id's.
'''
# FIXME: Cache this mapping?
group_parents_kw = dict(from_group__inventory_id=self.pk, to_group__inventory_id=self.pk)
if active is not None:
group_parents_kw['from_group__active'] = active
group_parents_kw['to_group__active'] = active
group_parents_qs = Group.parents.through.objects.filter(**group_parents_kw)
group_parents_qs = group_parents_qs.values_list('from_group_id', 'to_group_id')
group_children_map = {}
@ -188,12 +167,12 @@ class Inventory(CommonModel, ResourceMixin):
def update_host_computed_fields(self):
'''
Update computed fields for all active hosts in this inventory.
Update computed fields for all hosts in this inventory.
'''
hosts_to_update = {}
hosts_qs = self.hosts.filter(active=True)
hosts_qs = self.hosts
# Define queryset of all hosts with active failures.
hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__job__active=True, last_job_host_summary__failed=True).values_list('pk', flat=True)
hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__failed=True).values_list('pk', flat=True)
# Find all hosts that need the has_active_failures flag set.
hosts_to_set = hosts_qs.filter(has_active_failures=False, pk__in=hosts_with_active_failures)
for host_pk in hosts_to_set.values_list('pk', flat=True):
@ -205,7 +184,7 @@ class Inventory(CommonModel, ResourceMixin):
host_updates = hosts_to_update.setdefault(host_pk, {})
host_updates['has_active_failures'] = False
# Define queryset of all hosts with cloud inventory sources.
hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__active=True, inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)
hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)
# Find all hosts that need the has_inventory_sources flag set.
hosts_to_set = hosts_qs.filter(has_inventory_sources=False, pk__in=hosts_with_cloud_inventory)
for host_pk in hosts_to_set.values_list('pk', flat=True):
@ -230,13 +209,13 @@ class Inventory(CommonModel, ResourceMixin):
'''
Update computed fields for all groups in this inventory.
'''
group_children_map = self.get_group_children_map(active=True)
group_hosts_map = self.get_group_hosts_map(active=True)
active_host_pks = set(self.hosts.filter(active=True).values_list('pk', flat=True))
failed_host_pks = set(self.hosts.filter(active=True, last_job_host_summary__job__active=True, last_job_host_summary__failed=True).values_list('pk', flat=True))
# active_group_pks = set(self.groups.filter(active=True).values_list('pk', flat=True))
group_children_map = self.get_group_children_map()
group_hosts_map = self.get_group_hosts_map()
active_host_pks = set(self.hosts.values_list('pk', flat=True))
failed_host_pks = set(self.hosts.filter(last_job_host_summary__failed=True).values_list('pk', flat=True))
# active_group_pks = set(self.groups.values_list('pk', flat=True))
failed_group_pks = set() # Update below as we check each group.
groups_with_cloud_pks = set(self.groups.filter(active=True, inventory_sources__active=True, inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True))
groups_with_cloud_pks = set(self.groups.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True))
groups_to_update = {}
# Build list of group pks to check, starting with the groups at the
@ -308,11 +287,11 @@ class Inventory(CommonModel, ResourceMixin):
self.update_host_computed_fields()
if update_groups:
self.update_group_computed_fields()
active_hosts = self.hosts.filter(active=True)
active_hosts = self.hosts
failed_hosts = active_hosts.filter(has_active_failures=True)
active_groups = self.groups.filter(active=True)
active_groups = self.groups
failed_groups = active_groups.filter(has_active_failures=True)
active_inventory_sources = self.inventory_sources.filter(active=True, source__in=CLOUD_INVENTORY_SOURCES)
active_inventory_sources = self.inventory_sources.filter( source__in=CLOUD_INVENTORY_SOURCES)
failed_inventory_sources = active_inventory_sources.filter(last_job_failed=True)
computed_fields = {
'has_active_failures': bool(failed_hosts.count()),
@ -412,24 +391,13 @@ class Host(CommonModelNameNotUnique, ResourceMixin):
def get_absolute_url(self):
return reverse('api:host_detail', args=(self.pk,))
def mark_inactive(self, save=True, from_inventory_import=False, skip_active_check=False):
'''
When marking hosts inactive, remove all associations to related
inventory sources.
'''
super(Host, self).mark_inactive(save=save, skip_active_check=skip_active_check)
if not from_inventory_import:
self.inventory_sources.clear()
def update_computed_fields(self, update_inventory=True, update_groups=True):
'''
Update model fields that are computed from database relationships.
'''
has_active_failures = bool(self.last_job_host_summary and
self.last_job_host_summary.job.active and
self.last_job_host_summary.failed)
active_inventory_sources = self.inventory_sources.filter(active=True,
source__in=CLOUD_INVENTORY_SOURCES)
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
computed_fields = {
'has_active_failures': has_active_failures,
'has_inventory_sources': bool(active_inventory_sources.count()),
@ -445,7 +413,7 @@ class Host(CommonModelNameNotUnique, ResourceMixin):
# change.
# NOTE: I think this is no longer needed
# if update_groups:
# for group in self.all_groups.filter(active=True):
# for group in self.all_groups:
# group.update_computed_fields()
# if update_inventory:
# self.inventory.update_computed_fields(update_groups=False,
@ -575,11 +543,11 @@ class Group(CommonModelNameNotUnique, ResourceMixin):
return reverse('api:group_detail', args=(self.pk,))
@transaction.atomic
def mark_inactive_recursive(self):
from awx.main.tasks import bulk_inventory_element_delete
def delete_recursive(self):
from awx.main.utils import ignore_inventory_computed_fields
from awx.main.signals import disable_activity_stream
def mark_actual():
all_group_hosts = Group.hosts.through.objects.select_related("host", "group").filter(group__inventory=self.inventory)
group_hosts = {'groups': {}, 'hosts': {}}
@ -629,51 +597,24 @@ class Group(CommonModelNameNotUnique, ResourceMixin):
for direct_child in group_children[group]:
linked_children.append((group, direct_child))
marked_groups.append(group)
Group.objects.filter(id__in=marked_groups).update(active=False)
Host.objects.filter(id__in=marked_hosts).update(active=False)
Group.parents.through.objects.filter(to_group__id__in=marked_groups)
Group.hosts.through.objects.filter(group__id__in=marked_groups)
Group.inventory_sources.through.objects.filter(group__id__in=marked_groups).delete()
bulk_inventory_element_delete.delay(self.inventory.id, groups=marked_groups, hosts=marked_hosts)
Group.objects.filter(id__in=marked_groups).delete()
Host.objects.filter(id__in=marked_hosts).delete()
update_inventory_computed_fields.delay(self.inventory.id)
with ignore_inventory_computed_fields():
with disable_activity_stream():
mark_actual()
def mark_inactive(self, save=True, recompute=True, from_inventory_import=False, skip_active_check=False):
'''
When marking groups inactive, remove all associations to related
groups/hosts/inventory_sources.
'''
def mark_actual():
super(Group, self).mark_inactive(save=save, skip_active_check=skip_active_check)
self.inventory_source.mark_inactive(save=save)
self.inventory_sources.clear()
self.parents.clear()
self.children.clear()
self.hosts.clear()
i = self.inventory
if from_inventory_import:
super(Group, self).mark_inactive(save=save, skip_active_check=skip_active_check)
elif recompute:
with ignore_inventory_computed_fields():
mark_actual()
i.update_computed_fields()
else:
mark_actual()
def update_computed_fields(self):
'''
Update model fields that are computed from database relationships.
'''
active_hosts = self.all_hosts.filter(active=True)
failed_hosts = active_hosts.filter(last_job_host_summary__job__active=True,
last_job_host_summary__failed=True)
active_groups = self.all_children.filter(active=True)
active_hosts = self.all_hosts
failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
active_groups = self.all_children
# FIXME: May not be accurate unless we always update groups depth-first.
failed_groups = active_groups.filter(has_active_failures=True)
active_inventory_sources = self.inventory_sources.filter(active=True,
source__in=CLOUD_INVENTORY_SOURCES)
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
computed_fields = {
'total_hosts': active_hosts.count(),
'has_active_failures': bool(failed_hosts.count()),
@ -1200,7 +1141,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, ResourceMixin)
def _can_update(self):
if self.source == 'custom':
return bool(self.source_script and self.source_script.active)
return bool(self.source_script)
else:
return bool(self.source in CLOUD_INVENTORY_SOURCES)
@ -1217,7 +1158,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, ResourceMixin)
@property
def needs_update_on_launch(self):
if self.active and self.source and self.update_on_launch:
if self.source and self.update_on_launch:
if not self.last_job_run:
return True
if (self.last_job_run + datetime.timedelta(seconds=self.update_cache_timeout)) <= now():
@ -1226,7 +1167,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, ResourceMixin)
@property
def notifiers(self):
base_notifiers = Notifier.objects.filter(active=True)
base_notifiers = Notifier.objects
error_notifiers = list(base_notifiers.filter(organization_notifiers_for_errors=self.inventory.organization))
success_notifiers = list(base_notifiers.filter(organization_notifiers_for_success=self.inventory.organization))
any_notifiers = list(base_notifiers.filter(organization_notifiers_for_any=self.inventory.organization))
@ -1235,7 +1176,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, ResourceMixin)
def clean_source(self):
source = self.source
if source and self.group:
qs = self.group.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES, active=True, group__active=True)
qs = self.group.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
existing_sources = qs.exclude(pk=self.pk)
if existing_sources.count():
s = u', '.join([x.group.name for x in existing_sources])
@ -1279,7 +1220,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions):
def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields', [])
inventory_source = self.inventory_source
if self.active and inventory_source.inventory and self.name == inventory_source.name:
if inventory_source.inventory and self.name == inventory_source.name:
if inventory_source.group:
self.name = '%s (%s)' % (inventory_source.group.name, inventory_source.inventory.name)
else:
@ -1315,7 +1256,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions):
return False
if (self.source not in ('custom', 'ec2') and
not (self.credential and self.credential.active)):
not (self.credential)):
return False
return True

View File

@ -149,7 +149,7 @@ class JobOptions(BaseModel):
@property
def passwords_needed_to_start(self):
'''Return list of password field names needed to start the job.'''
if self.credential and self.credential.active:
if self.credential:
return self.credential.passwords_needed
else:
return []
@ -357,7 +357,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin):
# Return all notifiers defined on the Job Template, on the Project, and on the Organization for each trigger type
# TODO: Currently there is no org fk on project so this will need to be added once that is
# available after the rbac pr
base_notifiers = Notifier.objects.filter(active=True)
base_notifiers = Notifier.objects
error_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_errors__in=[self, self.project]))
success_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_success__in=[self, self.project]))
any_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_any__in=[self, self.project]))
@ -493,7 +493,7 @@ class Job(UnifiedJob, JobOptions):
from awx.main.models import InventoryUpdate, ProjectUpdate
if self.inventory is None or self.project is None:
return []
inventory_sources = self.inventory.inventory_sources.filter(active=True, update_on_launch=True)
inventory_sources = self.inventory.inventory_sources.filter( update_on_launch=True)
project_found = False
inventory_sources_found = []
dependencies = []
@ -592,7 +592,7 @@ class Job(UnifiedJob, JobOptions):
if not super(Job, self).can_start:
return False
if not (self.credential and self.credential.active):
if not (self.credential):
return False
return True

View File

@ -79,11 +79,6 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin):
def __unicode__(self):
return self.name
def mark_inactive(self, save=True):
for script in self.custom_inventory_scripts.all():
script.organization = None
script.save()
super(Organization, self).mark_inactive(save=save)
class Team(CommonModelNameNotUnique, ResourceMixin):
@ -135,14 +130,6 @@ class Team(CommonModelNameNotUnique, ResourceMixin):
def get_absolute_url(self):
return reverse('api:team_detail', args=(self.pk,))
def mark_inactive(self, save=True):
'''
When marking a team inactive we'll wipe out its credentials also
'''
for cred in self.credentials.all():
cred.mark_inactive()
super(Team, self).mark_inactive(save=save)
class Permission(CommonModelNameNotUnique):
'''
@ -351,22 +338,6 @@ class AuthToken(BaseModel):
return self.key
# Add mark_inactive method to User model.
def user_mark_inactive(user, save=True):
'''Use instead of delete to rename and mark users inactive.'''
if user.is_active:
# Set timestamp to datetime.isoformat() but without the time zone
# offset to stay withint the 30 character username limit.
dtnow = tz_now()
deleted_ts = dtnow.strftime('%Y-%m-%dT%H:%M:%S.%f')
user.username = '_d_%s' % deleted_ts
user.is_active = False
if save:
user.save()
User.add_to_class('mark_inactive', user_mark_inactive)
# Add get_absolute_url method to User model if not present.
if not hasattr(User, 'get_absolute_url'):
def user_get_absolute_url(user):

View File

@ -53,7 +53,7 @@ class ProjectOptions(models.Model):
paths = [x.decode('utf-8') for x in os.listdir(settings.PROJECTS_ROOT)
if (os.path.isdir(os.path.join(settings.PROJECTS_ROOT, x)) and
not x.startswith('.') and not x.startswith('_'))]
qs = Project.objects.filter(active=True)
qs = Project.objects
used_paths = qs.values_list('local_path', flat=True)
return [x for x in paths if x not in used_paths]
else:
@ -336,7 +336,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
@property
def needs_update_on_launch(self):
if self.active and self.scm_type and self.scm_update_on_launch:
if self.scm_type and self.scm_update_on_launch:
if not self.last_job_run:
return True
if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) <= now():
@ -345,7 +345,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
@property
def notifiers(self):
base_notifiers = Notifier.objects.filter(active=True)
base_notifiers = Notifier.objects
error_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_errors=self))
success_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_success=self))
any_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_any=self))

View File

@ -27,7 +27,7 @@ __all__ = ['Schedule']
class ScheduleFilterMethods(object):
def enabled(self, enabled=True):
return self.filter(enabled=enabled, active=enabled)
return self.filter(enabled=enabled)
def before(self, dt):
return self.filter(next_run__lt=dt)

View File

@ -210,17 +210,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
self.next_job_run = related_schedules[0].next_run
self.save(update_fields=['next_schedule', 'next_job_run'])
def mark_inactive(self, save=True):
'''
When marking a unified job template inactive, also mark its schedules
inactive.
'''
for schedule in self.schedules.filter(active=True):
schedule.mark_inactive()
schedule.enabled = False
schedule.save()
super(UnifiedJobTemplate, self).mark_inactive(save=save)
def save(self, *args, **kwargs):
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.

View File

@ -8,7 +8,7 @@ import threading
import json
# Django
from django.db.models.signals import pre_save, post_save, pre_delete, post_delete, m2m_changed
from django.db.models.signals import post_save, pre_delete, post_delete, m2m_changed
from django.dispatch import receiver
# Django-CRUM
@ -27,9 +27,8 @@ __all__ = []
logger = logging.getLogger('awx.main.signals')
# Update has_active_failures for inventory/groups when a Host/Group is deleted
# or marked inactive, when a Host-Group or Group-Group relationship is updated,
# or when a Job is deleted or marked inactive.
# Update has_active_failures for inventory/groups when a Host/Group is deleted,
# when a Host-Group or Group-Group relationship is updated, or when a Job is deleted
def emit_job_event_detail(sender, **kwargs):
instance = kwargs['instance']
@ -69,7 +68,7 @@ def emit_update_inventory_computed_fields(sender, **kwargs):
else:
sender_name = unicode(sender._meta.verbose_name)
if kwargs['signal'] == post_save:
if sender == Job and instance.active:
if sender == Job:
return
sender_action = 'saved'
elif kwargs['signal'] == post_delete:
@ -92,7 +91,6 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
return
instance = kwargs['instance']
if ('created' in kwargs and kwargs['created']) or \
(hasattr(instance, '_saved_active_state') and instance._saved_active_state != instance.active) or \
kwargs['signal'] == post_delete:
pass
else:
@ -108,13 +106,6 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id, True)
def store_initial_active_state(sender, **kwargs):
instance = kwargs['instance']
if instance.id is not None:
instance._saved_active_state = sender.objects.get(id=instance.id).active
else:
instance._saved_active_state = True
def rebuild_role_ancestor_list(reverse, model, instance, pk_set, **kwargs):
'When a role parent is added or removed, update our role hierarchy list'
if reverse:
@ -161,20 +152,16 @@ def org_admin_edit_members(instance, action, model, reverse, pk_set, **kwargs):
if action == 'pre_remove':
instance.content_object.admin_role.children.remove(user.admin_role)
pre_save.connect(store_initial_active_state, sender=Host)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
pre_save.connect(store_initial_active_state, sender=Group)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.parents.through)
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
pre_save.connect(store_initial_active_state, sender=InventorySource)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
pre_save.connect(store_initial_active_state, sender=Job)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
post_save.connect(emit_job_event_detail, sender=JobEvent)
@ -184,8 +171,8 @@ m2m_changed.connect(org_admin_edit_members, Role.members.through)
post_save.connect(sync_superuser_status_to_rbac, sender=User)
post_save.connect(create_user_role, sender=User)
# Migrate hosts, groups to parent group(s) whenever a group is deleted or
# marked as inactive.
# Migrate hosts, groups to parent group(s) whenever a group is deleted
@receiver(pre_delete, sender=Group)
def save_related_pks_before_group_delete(sender, **kwargs):
@ -208,80 +195,28 @@ def migrate_children_from_deleted_group_to_parent_groups(sender, **kwargs):
with ignore_inventory_group_removal():
with ignore_inventory_computed_fields():
if parents_pks:
for parent_group in Group.objects.filter(pk__in=parents_pks, active=True):
for child_host in Host.objects.filter(pk__in=hosts_pks, active=True):
for parent_group in Group.objects.filter(pk__in=parents_pks):
for child_host in Host.objects.filter(pk__in=hosts_pks):
logger.debug('adding host %s to parent %s after group deletion',
child_host, parent_group)
parent_group.hosts.add(child_host)
for child_group in Group.objects.filter(pk__in=children_pks, active=True):
for child_group in Group.objects.filter(pk__in=children_pks):
logger.debug('adding group %s to parent %s after group deletion',
child_group, parent_group)
parent_group.children.add(child_group)
inventory_pk = getattr(instance, '_saved_inventory_pk', None)
if inventory_pk:
try:
inventory = Inventory.objects.get(pk=inventory_pk, active=True)
inventory = Inventory.objects.get(pk=inventory_pk)
inventory.update_computed_fields()
except Inventory.DoesNotExist:
pass
@receiver(pre_save, sender=Group)
def save_related_pks_before_group_marked_inactive(sender, **kwargs):
if getattr(_inventory_updates, 'is_removing', False):
return
instance = kwargs['instance']
if not instance.pk or instance.active:
return
instance._saved_inventory_pk = instance.inventory.pk
instance._saved_parents_pks = set(instance.parents.values_list('pk', flat=True))
instance._saved_hosts_pks = set(instance.hosts.values_list('pk', flat=True))
instance._saved_children_pks = set(instance.children.values_list('pk', flat=True))
instance._saved_inventory_source_pk = instance.inventory_source.pk
@receiver(post_save, sender=Group)
def migrate_children_from_inactive_group_to_parent_groups(sender, **kwargs):
if getattr(_inventory_updates, 'is_removing', False):
return
instance = kwargs['instance']
if instance.active:
return
parents_pks = getattr(instance, '_saved_parents_pks', [])
hosts_pks = getattr(instance, '_saved_hosts_pks', [])
children_pks = getattr(instance, '_saved_children_pks', [])
with ignore_inventory_group_removal():
with ignore_inventory_computed_fields():
if parents_pks:
for parent_group in Group.objects.filter(pk__in=parents_pks, active=True):
for child_host in Host.objects.filter(pk__in=hosts_pks, active=True):
logger.debug('moving host %s to parent %s after marking group %s inactive',
child_host, parent_group, instance)
parent_group.hosts.add(child_host)
for child_group in Group.objects.filter(pk__in=children_pks, active=True):
logger.debug('moving group %s to parent %s after marking group %s inactive',
child_group, parent_group, instance)
parent_group.children.add(child_group)
parent_group.children.remove(instance)
inventory_source_pk = getattr(instance, '_saved_inventory_source_pk', None)
if inventory_source_pk:
try:
inventory_source = InventorySource.objects.get(pk=inventory_source_pk, active=True)
inventory_source.mark_inactive()
except InventorySource.DoesNotExist:
pass
inventory_pk = getattr(instance, '_saved_inventory_pk', None)
if not getattr(_inventory_updates, 'is_updating', False):
if inventory_pk:
try:
inventory = Inventory.objects.get(pk=inventory_pk, active=True)
inventory.update_computed_fields()
except Inventory.DoesNotExist:
pass
# Update host pointers to last_job and last_job_host_summary when a job is
# marked inactive or deleted.
# Update host pointers to last_job and last_job_host_summary when a job is deleted
def _update_host_last_jhs(host):
jhs_qs = JobHostSummary.objects.filter(job__active=True, host__pk=host.pk)
jhs_qs = JobHostSummary.objects.filter(host__pk=host.pk)
try:
jhs = jhs_qs.order_by('-job__pk')[0]
except IndexError:
@ -297,19 +232,10 @@ def _update_host_last_jhs(host):
if update_fields:
host.save(update_fields=update_fields)
@receiver(post_save, sender=Job)
def update_host_last_job_when_job_marked_inactive(sender, **kwargs):
instance = kwargs['instance']
if instance.active:
return
hosts_qs = Host.objects.filter(active=True, last_job__pk=instance.pk)
for host in hosts_qs:
_update_host_last_jhs(host)
@receiver(pre_delete, sender=Job)
def save_host_pks_before_job_delete(sender, **kwargs):
instance = kwargs['instance']
hosts_qs = Host.objects.filter(active=True, last_job__pk=instance.pk)
hosts_qs = Host.objects.filter( last_job__pk=instance.pk)
instance._saved_hosts_pks = set(hosts_qs.values_list('pk', flat=True))
@receiver(post_delete, sender=Job)
@ -388,11 +314,6 @@ def activity_stream_update(sender, instance, **kwargs):
except sender.DoesNotExist:
return
# Handle the AWX mark-inactive for delete event
if hasattr(instance, 'active') and not instance.active:
activity_stream_delete(sender, instance, **kwargs)
return
new = instance
changes = model_instance_diff(old, new, model_serializer_mapping)
if changes is None:

View File

@ -13,7 +13,7 @@ class Migration(DataMigration):
# and orm['appname.ModelName'] for models in other applications.
# Refresh has_active_failures for all hosts.
for host in orm.Host.objects.filter(active=True):
for host in orm.Host.objects:
has_active_failures = bool(host.last_job_host_summary and
host.last_job_host_summary.job.active and
host.last_job_host_summary.failed)
@ -30,9 +30,9 @@ class Migration(DataMigration):
for subgroup in group.children.exclude(pk__in=except_group_pks):
qs = qs | get_all_hosts_for_group(subgroup, except_group_pks)
return qs
for group in orm.Group.objects.filter(active=True):
for group in orm.Group.objects:
all_hosts = get_all_hosts_for_group(group)
failed_hosts = all_hosts.filter(active=True,
failed_hosts = all_hosts.filter(
last_job_host_summary__job__active=True,
last_job_host_summary__failed=True)
hosts_with_active_failures = failed_hosts.count()
@ -49,8 +49,8 @@ class Migration(DataMigration):
# Now update has_active_failures and hosts_with_active_failures for all
# inventories.
for inventory in orm.Inventory.objects.filter(active=True):
failed_hosts = inventory.hosts.filter(active=True, has_active_failures=True)
for inventory in orm.Inventory.objects:
failed_hosts = inventory.hosts.filter( has_active_failures=True)
hosts_with_active_failures = failed_hosts.count()
has_active_failures = bool(hosts_with_active_failures)
changed = False

View File

@ -8,7 +8,7 @@ from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for iu in orm.InventoryUpdate.objects.filter(active=True):
for iu in orm.InventoryUpdate.objects:
if iu.inventory_source is None or iu.inventory_source.group is None or iu.inventory_source.inventory is None:
continue
iu.name = "%s (%s)" % (iu.inventory_source.group.name, iu.inventory_source.inventory.name)

View File

@ -12,7 +12,7 @@ from django.conf import settings
class Migration(DataMigration):
def forwards(self, orm):
for j in orm.UnifiedJob.objects.filter(active=True):
for j in orm.UnifiedJob.objects:
cur = connection.cursor()
stdout_filename = os.path.join(settings.JOBOUTPUT_ROOT, "%d-%s.out" % (j.pk, str(uuid.uuid1())))
fd = open(stdout_filename, 'w')

View File

@ -51,7 +51,7 @@ from awx.main.queue import FifoQueue
from awx.main.conf import tower_settings
from awx.main.task_engine import TaskSerializer, TASK_TIMEOUT_INTERVAL
from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url,
ignore_inventory_computed_fields, emit_websocket_notification,
emit_websocket_notification,
check_proot_installed, build_proot_temp_dir, wrap_args_with_proot)
__all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate',
@ -110,17 +110,6 @@ def run_administrative_checks(self):
tower_admin_emails,
fail_silently=True)
@task()
def bulk_inventory_element_delete(inventory, hosts=[], groups=[]):
from awx.main.signals import disable_activity_stream
with ignore_inventory_computed_fields():
with disable_activity_stream():
for group in groups:
Group.objects.get(id=group).mark_inactive(skip_active_check=True)
for host in hosts:
Host.objects.get(id=host).mark_inactive(skip_active_check=True)
update_inventory_computed_fields(inventory)
@task(bind=True)
def tower_periodic_scheduler(self):
def get_last_run():
@ -894,12 +883,12 @@ class RunJob(BaseTask):
'tower_job_id': job.pk,
'tower_job_launch_type': job.launch_type,
}
if job.job_template and job.job_template.active:
if job.job_template:
extra_vars.update({
'tower_job_template_id': job.job_template.pk,
'tower_job_template_name': job.job_template.name,
})
if job.created_by and job.created_by.is_active:
if job.created_by:
extra_vars.update({
'tower_user_id': job.created_by.pk,
'tower_user_name': job.created_by.username,
@ -1392,7 +1381,7 @@ class RunInventoryUpdate(BaseTask):
runpath = tempfile.mkdtemp(prefix='ansible_tower_launch_')
handle, path = tempfile.mkstemp(dir=runpath)
f = os.fdopen(handle, 'w')
if inventory_update.source_script is None or not inventory_update.source_script.active:
if inventory_update.source_script is None:
raise RuntimeError('Inventory Script does not exist')
f.write(inventory_update.source_script.script.encode('utf-8'))
f.close()

View File

@ -229,13 +229,18 @@ class BaseJobTestMixin(BaseTestMixin):
self.team_ops_west.users.add(self.user_iris)
# The south team is no longer active having been folded into the east team
self.team_ops_south = self.org_ops.teams.create(
name='southerners',
created_by=self.user_sue,
active=False,
)
self.team_ops_south.projects.add(self.proj_prod)
self.team_ops_south.users.add(self.user_greg)
# FIXME: This code can be removed (probably)
# - this case has been removed as we've gotten rid of the active flag, keeping
# code around in case this has ramifications on some test failures.. if
# you find this message and all tests are passing, then feel free to remove this
# - anoek 2016-03-10
#self.team_ops_south = self.org_ops.teams.create(
# name='southerners',
# created_by=self.user_sue,
# active=False,
#)
#self.team_ops_south.projects.add(self.proj_prod)
#self.team_ops_south.users.add(self.user_greg)
# The north team is going to be deleted
self.team_ops_north = self.org_ops.teams.create(
@ -337,11 +342,18 @@ class BaseJobTestMixin(BaseTestMixin):
password='Heading270',
created_by = self.user_sue,
)
self.cred_ops_south = self.team_ops_south.credentials.create(
username='south',
password='Heading180',
created_by = self.user_sue,
)
# FIXME: This code can be removed (probably)
# - this case has been removed as we've gotten rid of the active flag, keeping
# code around in case this has ramifications on some test failures.. if
# you find this message and all tests are passing, then feel free to remove this
# - anoek 2016-03-10
#self.cred_ops_south = self.team_ops_south.credentials.create(
# username='south',
# password='Heading180',
# created_by = self.user_sue,
#)
self.cred_ops_north = self.team_ops_north.credentials.create(
username='north',

View File

@ -637,8 +637,8 @@ class AdHocCommandApiTest(BaseAdHocCommandTest):
# Verify that the credential and inventory are null when they have
# been deleted, can delete an ad hoc command without inventory or
# credential.
self.credential.mark_inactive()
self.inventory.mark_inactive()
self.credential.delete()
self.inventory.delete()
with self.current_user('admin'):
response = self.get(url, expect=200)
self.assertEqual(response['credential'], None)
@ -758,7 +758,7 @@ class AdHocCommandApiTest(BaseAdHocCommandTest):
tower_settings.AD_HOC_COMMANDS = ad_hoc_commands
# Try to relaunch after the inventory has been marked inactive.
self.inventory.mark_inactive()
self.inventory.delete()
with self.current_user('admin'):
response = self.get(url, expect=200)
self.assertEqual(response['passwords_needed_to_start'], [])

View File

@ -1,34 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
# AWX
from awx.main.tests.base import BaseTest
from command_base import BaseCommandMixin
__all__ = ['AgeDeletedCommandFunctionalTest']
class AgeDeletedCommandFunctionalTest(BaseCommandMixin, BaseTest):
def setUp(self):
super(AgeDeletedCommandFunctionalTest, self).setUp()
self.create_test_license_file()
self.setup_instances()
self.setup_users()
self.organization = self.make_organization(self.super_django_user)
self.credential = self.make_credential()
self.credential2 = self.make_credential()
self.credential.mark_inactive(True)
self.credential2.mark_inactive(True)
self.credential_active = self.make_credential()
self.super_django_user.mark_inactive(True)
def test_default(self):
result, stdout, stderr = self.run_command('age_deleted')
self.assertEqual(stdout, 'Aged %d items\n' % 3)
def test_type(self):
result, stdout, stderr = self.run_command('age_deleted', type='Credential')
self.assertEqual(stdout, 'Aged %d items\n' % 2)
def test_id_type(self):
result, stdout, stderr = self.run_command('age_deleted', type='Credential', id=self.credential.pk)
self.assertEqual(stdout, 'Aged %d items\n' % 1)

View File

@ -15,7 +15,6 @@ import unittest2 as unittest
# Django
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.management.base import CommandError
from django.utils.timezone import now
@ -232,126 +231,6 @@ class DumpDataTest(BaseCommandMixin, BaseTest):
self.assertEqual(result, None)
json.loads(stdout)
class CleanupDeletedTest(BaseCommandMixin, BaseTest):
'''
Test cases for cleanup_deleted management command.
'''
def setUp(self):
self.start_redis()
super(CleanupDeletedTest, self).setUp()
self.create_test_inventories()
def tearDown(self):
super(CleanupDeletedTest, self).tearDown()
self.stop_redis()
def get_model_counts(self):
def get_models(m):
if not m._meta.abstract:
yield m
for sub in m.__subclasses__():
for subm in get_models(sub):
yield subm
counts = {}
for model in get_models(PrimordialModel):
active = model.objects.filter(active=True).count()
inactive = model.objects.filter(active=False).count()
counts[model] = (active, inactive)
return counts
def test_cleanup_our_models(self):
# Test with nothing to be deleted.
counts_before = self.get_model_counts()
self.assertFalse(sum(x[1] for x in counts_before.values()))
result, stdout, stderr = self.run_command('cleanup_deleted')
self.assertEqual(result, None)
counts_after = self.get_model_counts()
self.assertEqual(counts_before, counts_after)
# "Delete" some hosts.
for host in Host.objects.all():
host.mark_inactive()
# With no parameters, "days" defaults to 90, which won't cleanup any of
# the hosts we just removed.
counts_before = self.get_model_counts()
self.assertTrue(sum(x[1] for x in counts_before.values()))
result, stdout, stderr = self.run_command('cleanup_deleted')
self.assertEqual(result, None)
counts_after = self.get_model_counts()
self.assertEqual(counts_before, counts_after)
# Even with days=1, the hosts will remain.
counts_before = self.get_model_counts()
self.assertTrue(sum(x[1] for x in counts_before.values()))
result, stdout, stderr = self.run_command('cleanup_deleted', days=1)
self.assertEqual(result, None)
counts_after = self.get_model_counts()
self.assertEqual(counts_before, counts_after)
# With days=0, the hosts will be deleted.
counts_before = self.get_model_counts()
self.assertTrue(sum(x[1] for x in counts_before.values()))
result, stdout, stderr = self.run_command('cleanup_deleted', days=0)
self.assertEqual(result, None)
counts_after = self.get_model_counts()
self.assertNotEqual(counts_before, counts_after)
self.assertFalse(sum(x[1] for x in counts_after.values()))
return # Don't test how long it takes (for now).
# Create lots of hosts already marked as deleted.
t = time.time()
dtnow = now()
for x in xrange(1000):
hostname = "_deleted_%s_host-%d" % (dtnow.isoformat(), x)
host = self.inventories[0].hosts.create(name=hostname, active=False)
create_elapsed = time.time() - t
# Time how long it takes to cleanup deleted items, should be no more
# then the time taken to create them.
counts_before = self.get_model_counts()
self.assertTrue(sum(x[1] for x in counts_before.values()))
t = time.time()
result, stdout, stderr = self.run_command('cleanup_deleted', days=0)
cleanup_elapsed = time.time() - t
self.assertEqual(result, None)
counts_after = self.get_model_counts()
self.assertNotEqual(counts_before, counts_after)
self.assertFalse(sum(x[1] for x in counts_after.values()))
self.assertTrue(cleanup_elapsed < create_elapsed,
'create took %0.3fs, cleanup took %0.3fs, expected < %0.3fs' % (create_elapsed, cleanup_elapsed, create_elapsed))
def get_user_counts(self):
active = User.objects.filter(is_active=True).count()
inactive = User.objects.filter(is_active=False).count()
return active, inactive
def test_cleanup_user_model(self):
# Test with nothing to be deleted.
counts_before = self.get_user_counts()
self.assertFalse(counts_before[1])
result, stdout, stderr = self.run_command('cleanup_deleted')
self.assertEqual(result, None)
counts_after = self.get_user_counts()
self.assertEqual(counts_before, counts_after)
# "Delete some users".
for user in User.objects.all():
user.mark_inactive()
self.assertTrue(len(user.username) <= 30,
'len(%r) == %d' % (user.username, len(user.username)))
# With days=1, no users will be deleted.
counts_before = self.get_user_counts()
self.assertTrue(counts_before[1])
result, stdout, stderr = self.run_command('cleanup_deleted', days=1)
self.assertEqual(result, None)
counts_after = self.get_user_counts()
self.assertEqual(counts_before, counts_after)
# With days=0, inactive users will be deleted.
counts_before = self.get_user_counts()
self.assertTrue(counts_before[1])
result, stdout, stderr = self.run_command('cleanup_deleted', days=0)
self.assertEqual(result, None)
counts_after = self.get_user_counts()
self.assertNotEqual(counts_before, counts_after)
self.assertFalse(counts_after[1])
@override_settings(CELERY_ALWAYS_EAGER=True,
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
ANSIBLE_TRANSPORT='local')
@ -641,12 +520,12 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
self.assertEqual(inventory_source.inventory_updates.count(), 1)
inventory_update = inventory_source.inventory_updates.all()[0]
self.assertEqual(inventory_update.status, 'successful')
for host in inventory.hosts.filter(active=True):
for host in inventory.hosts:
if host.pk in (except_host_pks or []):
continue
source_pks = host.inventory_sources.values_list('pk', flat=True)
self.assertTrue(inventory_source.pk in source_pks)
for group in inventory.groups.filter(active=True):
for group in inventory.groups:
if group.pk in (except_group_pks or []):
continue
source_pks = group.inventory_sources.values_list('pk', flat=True)
@ -814,7 +693,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
'lbservers', 'others'])
if overwrite:
expected_group_names.remove('lbservers')
group_names = set(new_inv.groups.filter(active=True).values_list('name', flat=True))
group_names = set(new_inv.groups.values_list('name', flat=True))
self.assertEqual(expected_group_names, group_names)
expected_host_names = set(['web1.example.com', 'web2.example.com',
'web3.example.com', 'db1.example.com',
@ -824,13 +703,13 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
'fe80::1610:9fff:fedd:b654', '::1'])
if overwrite:
expected_host_names.remove('lb.example.com')
host_names = set(new_inv.hosts.filter(active=True).values_list('name', flat=True))
host_names = set(new_inv.hosts.values_list('name', flat=True))
self.assertEqual(expected_host_names, host_names)
expected_inv_vars = {'vara': 'A', 'varc': 'C'}
if overwrite_vars:
expected_inv_vars.pop('varc')
self.assertEqual(new_inv.variables_dict, expected_inv_vars)
for host in new_inv.hosts.filter(active=True):
for host in new_inv.hosts:
if host.name == 'web1.example.com':
self.assertEqual(host.variables_dict,
{'ansible_ssh_host': 'w1.example.net'})
@ -842,35 +721,35 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
self.assertEqual(host.variables_dict, {'lbvar': 'ni!'})
else:
self.assertEqual(host.variables_dict, {})
for group in new_inv.groups.filter(active=True):
for group in new_inv.groups:
if group.name == 'servers':
expected_vars = {'varb': 'B', 'vard': 'D'}
if overwrite_vars:
expected_vars.pop('vard')
self.assertEqual(group.variables_dict, expected_vars)
children = set(group.children.filter(active=True).values_list('name', flat=True))
children = set(group.children.values_list('name', flat=True))
expected_children = set(['dbservers', 'webservers', 'lbservers'])
if overwrite:
expected_children.remove('lbservers')
self.assertEqual(children, expected_children)
self.assertEqual(group.hosts.filter(active=True).count(), 0)
self.assertEqual(group.hosts.count(), 0)
elif group.name == 'dbservers':
self.assertEqual(group.variables_dict, {'dbvar': 'ugh'})
self.assertEqual(group.children.filter(active=True).count(), 0)
hosts = set(group.hosts.filter(active=True).values_list('name', flat=True))
self.assertEqual(group.children.count(), 0)
hosts = set(group.hosts.values_list('name', flat=True))
host_names = set(['db1.example.com','db2.example.com'])
self.assertEqual(hosts, host_names)
elif group.name == 'webservers':
self.assertEqual(group.variables_dict, {'webvar': 'blah'})
self.assertEqual(group.children.filter(active=True).count(), 0)
hosts = set(group.hosts.filter(active=True).values_list('name', flat=True))
self.assertEqual(group.children.count(), 0)
hosts = set(group.hosts.values_list('name', flat=True))
host_names = set(['web1.example.com','web2.example.com',
'web3.example.com'])
self.assertEqual(hosts, host_names)
elif group.name == 'lbservers':
self.assertEqual(group.variables_dict, {})
self.assertEqual(group.children.filter(active=True).count(), 0)
hosts = set(group.hosts.filter(active=True).values_list('name', flat=True))
self.assertEqual(group.children.count(), 0)
hosts = set(group.hosts.values_list('name', flat=True))
host_names = set(['lb.example.com'])
self.assertEqual(hosts, host_names)
if overwrite:
@ -920,7 +799,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
# Check hosts in dotcom group.
group = new_inv.groups.get(name='dotcom')
self.assertEqual(group.hosts.count(), 65)
for host in group.hosts.filter(active=True, name__startswith='web'):
for host in group.hosts.filter( name__startswith='web'):
self.assertEqual(host.variables_dict.get('ansible_ssh_user', ''), 'example')
# Check hosts in dotnet group.
group = new_inv.groups.get(name='dotnet')
@ -928,7 +807,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
# Check hosts in dotorg group.
group = new_inv.groups.get(name='dotorg')
self.assertEqual(group.hosts.count(), 61)
for host in group.hosts.filter(active=True):
for host in group.hosts:
if host.name.startswith('mx.'):
continue
self.assertEqual(host.variables_dict.get('ansible_ssh_user', ''), 'example')
@ -936,7 +815,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
# Check hosts in dotus group.
group = new_inv.groups.get(name='dotus')
self.assertEqual(group.hosts.count(), 10)
for host in group.hosts.filter(active=True):
for host in group.hosts:
if int(host.name[2:4]) % 2 == 0:
self.assertEqual(host.variables_dict.get('even_odd', ''), 'even')
else:
@ -1090,7 +969,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
else:
return 0
def _check_largeinv_import(self, new_inv, nhosts, nhosts_inactive=0):
def _check_largeinv_import(self, new_inv, nhosts):
self._start_time = time.time()
inv_file = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'largeinv.py')
ngroups = self._get_ngroups_for_nhosts(nhosts)
@ -1103,9 +982,8 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
# Check that inventory is populated as expected within a reasonable
# amount of time. Computed fields should also be updated.
new_inv = Inventory.objects.get(pk=new_inv.pk)
self.assertEqual(new_inv.hosts.filter(active=True).count(), nhosts)
self.assertEqual(new_inv.groups.filter(active=True).count(), ngroups)
self.assertEqual(new_inv.hosts.filter(active=False).count(), nhosts_inactive)
self.assertEqual(new_inv.hosts.count(), nhosts)
self.assertEqual(new_inv.groups.count(), ngroups)
self.assertEqual(new_inv.total_hosts, nhosts)
self.assertEqual(new_inv.total_groups, ngroups)
self.assertElapsedLessThan(120)
@ -1119,10 +997,10 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
self.assertEqual(new_inv.groups.count(), 0)
nhosts = 2000
# Test initial import into empty inventory.
self._check_largeinv_import(new_inv, nhosts, 0)
self._check_largeinv_import(new_inv, nhosts)
# Test re-importing and overwriting.
self._check_largeinv_import(new_inv, nhosts, 0)
self._check_largeinv_import(new_inv, nhosts)
# Test re-importing with only half as many hosts.
self._check_largeinv_import(new_inv, nhosts / 2, nhosts / 2)
self._check_largeinv_import(new_inv, nhosts / 2)
# Test re-importing that clears all hosts.
self._check_largeinv_import(new_inv, 0, nhosts)
self._check_largeinv_import(new_inv, 0)

View File

@ -69,7 +69,7 @@ class InventoryTest(BaseTest):
def test_get_inventory_list(self):
url = reverse('api:inventory_list')
qs = Inventory.objects.filter(active=True).distinct()
qs = Inventory.objects.distinct()
# Check list view with invalid authentication.
self.check_invalid_auth(url)
@ -226,6 +226,8 @@ class InventoryTest(BaseTest):
self.inventory_a.groups.create(name='group-a')
self.inventory_b.hosts.create(name='host-b')
self.inventory_b.groups.create(name='group-b')
a_pk = self.inventory_a.pk
b_pk = self.inventory_b.pk
# Check put to detail view with invalid authentication.
self.check_invalid_auth(url_a, methods=('delete',))
@ -248,24 +250,16 @@ class InventoryTest(BaseTest):
self.delete(url_a, expect=204)
self.delete(url_b, expect=403)
# Verify that the inventory is marked inactive, along with all its
# hosts and groups.
self.inventory_a = Inventory.objects.get(pk=self.inventory_a.pk)
self.assertFalse(self.inventory_a.active)
self.assertFalse(self.inventory_a.hosts.filter(active=True).count())
self.assertFalse(self.inventory_a.groups.filter(active=True).count())
# Verify that the inventory was deleted
assert Inventory.objects.filter(pk=a_pk).count() == 0
# a super user can delete inventory records
with self.current_user(self.super_django_user):
self.delete(url_a, expect=404)
self.delete(url_b, expect=204)
# Verify that the inventory is marked inactive, along with all its
# hosts and groups.
self.inventory_b = Inventory.objects.get(pk=self.inventory_b.pk)
self.assertFalse(self.inventory_b.active)
self.assertFalse(self.inventory_b.hosts.filter(active=True).count())
self.assertFalse(self.inventory_b.groups.filter(active=True).count())
# Verify that the inventory was deleted
assert Inventory.objects.filter(pk=b_pk).count() == 0
def test_inventory_access_deleted_permissions(self):
temp_org = self.make_organizations(self.super_django_user, 1)[0]
@ -423,7 +417,7 @@ class InventoryTest(BaseTest):
del_children_url = reverse('api:group_children_list', args=(del_group.pk,))
nondel_url = reverse('api:group_detail',
args=(Group.objects.get(name='nondel').pk,))
del_group.mark_inactive()
del_group.delete()
nondel_detail = self.get(nondel_url, expect=200, auth=self.get_normal_credentials())
self.post(del_children_url, data=nondel_detail, expect=403, auth=self.get_normal_credentials())
@ -747,13 +741,11 @@ class InventoryTest(BaseTest):
# removed group should be automatically marked inactive once it no longer has any parents.
removed_group = Group.objects.get(pk=result['id'])
self.assertTrue(removed_group.parents.count())
self.assertTrue(removed_group.active)
for parent in removed_group.parents.all():
parent_children_url = reverse('api:group_children_list', args=(parent.pk,))
data = {'id': removed_group.pk, 'disassociate': 1}
self.post(parent_children_url, data, expect=204, auth=self.get_super_credentials())
removed_group = Group.objects.get(pk=result['id'])
#self.assertFalse(removed_group.active) # FIXME: Disabled for now because automatically deleting group with no parents is also disabled.
# Removing a group from a hierarchy should migrate its children to the
# parent. The group itself will be deleted (marked inactive), and all
@ -766,7 +758,6 @@ class InventoryTest(BaseTest):
with self.current_user(self.super_django_user):
self.post(url, data, expect=204)
gx3 = Group.objects.get(pk=gx3.pk)
#self.assertFalse(gx3.active) # FIXME: Disabled for now....
self.assertFalse(gx3 in gx2.children.all())
#self.assertTrue(gx4 in gx2.children.all())
@ -944,13 +935,10 @@ class InventoryTest(BaseTest):
# Mark group C inactive. Its child groups and hosts should now also be
# attached to group A. Group D hosts should be unchanged. Group C
# should also no longer have any group or host relationships.
g_c.mark_inactive()
g_c.delete()
self.assertTrue(g_d in g_a.children.all())
self.assertTrue(h_c in g_a.hosts.all())
self.assertFalse(h_d in g_a.hosts.all())
self.assertFalse(g_c.parents.all())
self.assertFalse(g_c.children.all())
self.assertFalse(g_c.hosts.all())
def test_safe_delete_recursion(self):
# First hierarchy
@ -989,11 +977,9 @@ class InventoryTest(BaseTest):
self.assertTrue(other_sub_group in sub_group.children.all())
# Now recursively remove its parent and the reference from subgroup should remain
other_top_group.mark_inactive_recursive()
other_top_group = Group.objects.get(pk=other_top_group.pk)
other_top_group.delete_recursive()
self.assertTrue(s2 in sub_group.all_hosts.all())
self.assertTrue(other_sub_group in sub_group.children.all())
self.assertFalse(other_top_group.active)
def test_group_parents_and_children(self):
# Test for various levels of group parent/child relations, with hosts,
@ -1173,7 +1159,7 @@ class InventoryTest(BaseTest):
# Delete recently added hosts and verify the count drops.
hostnames4 = list('defg')
for host in Host.objects.filter(name__in=hostnames4):
host.mark_inactive()
host.delete()
with self.current_user(self.super_django_user):
response = self.get(url)
for n, d in enumerate(reversed(response['hosts'])):
@ -1270,7 +1256,7 @@ class InventoryUpdatesTest(BaseTransactionTest):
url = reverse('api:inventory_source_hosts_list', args=(inventory_source.pk,))
response = self.get(url, expect=200)
self.assertNotEqual(response['count'], 0)
for host in inventory.hosts.filter(active=True):
for host in inventory.hosts:
source_pks = host.inventory_sources.values_list('pk', flat=True)
self.assertTrue(inventory_source.pk in source_pks)
self.assertTrue(host.has_inventory_sources)
@ -1284,12 +1270,12 @@ class InventoryUpdatesTest(BaseTransactionTest):
url = reverse('api:host_inventory_sources_list', args=(host.pk,))
response = self.get(url, expect=200)
self.assertNotEqual(response['count'], 0)
for group in inventory.groups.filter(active=True):
for group in inventory.groups:
source_pks = group.inventory_sources.values_list('pk', flat=True)
self.assertTrue(inventory_source.pk in source_pks)
self.assertTrue(group.has_inventory_sources)
self.assertTrue(group.children.filter(active=True).exists() or
group.hosts.filter(active=True).exists())
self.assertTrue(group.children.exists() or
group.hosts.exists())
# Make sure EC2 instance ID groups and RDS groups are excluded.
if inventory_source.source == 'ec2' and not instance_id_group_ok:
self.assertFalse(re.match(r'^i-[0-9a-f]{8}$', group.name, re.I),
@ -1307,7 +1293,7 @@ class InventoryUpdatesTest(BaseTransactionTest):
self.assertNotEqual(response['count'], 0)
# Try to set a source on a child group that was imported. Should not
# be allowed.
for group in inventory_source.group.children.filter(active=True):
for group in inventory_source.group.children:
inv_src_2 = group.inventory_source
inv_src_url2 = reverse('api:inventory_source_detail', args=(inv_src_2.pk,))
with self.current_user(self.super_django_user):
@ -1663,7 +1649,7 @@ class InventoryUpdatesTest(BaseTransactionTest):
inventory_source.overwrite = True
inventory_source.save()
self.check_inventory_source(inventory_source, initial=False)
for host in self.inventory.hosts.filter(active=True):
for host in self.inventory.hosts:
self.assertEqual(host.variables_dict['ec2_instance_type'], instance_type)
# Try invalid instance filters that should be ignored:
@ -1797,12 +1783,12 @@ class InventoryUpdatesTest(BaseTransactionTest):
inventory_source.save()
self.check_inventory_source(inventory_source, initial=False)
# Verify that only the desired groups are returned.
child_names = self.group.children.filter(active=True).values_list('name', flat=True)
child_names = self.group.children.values_list('name', flat=True)
self.assertTrue('ec2' in child_names)
self.assertTrue('regions' in child_names)
self.assertTrue(self.group.children.get(name='regions').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='regions').children.count())
self.assertTrue('types' in child_names)
self.assertTrue(self.group.children.get(name='types').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='types').children.count())
self.assertFalse('keys' in child_names)
self.assertFalse('security_groups' in child_names)
self.assertFalse('tags' in child_names)
@ -1819,27 +1805,27 @@ class InventoryUpdatesTest(BaseTransactionTest):
self.check_inventory_source(inventory_source, initial=False, instance_id_group_ok=True)
# Verify that only the desired groups are returned.
# Skip vpcs as selected inventory may or may not have any.
child_names = self.group.children.filter(active=True).values_list('name', flat=True)
child_names = self.group.children.values_list('name', flat=True)
self.assertTrue('ec2' in child_names)
self.assertFalse('tag_none' in child_names)
self.assertTrue('regions' in child_names)
self.assertTrue(self.group.children.get(name='regions').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='regions').children.count())
self.assertTrue('types' in child_names)
self.assertTrue(self.group.children.get(name='types').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='types').children.count())
self.assertTrue('keys' in child_names)
self.assertTrue(self.group.children.get(name='keys').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='keys').children.count())
self.assertTrue('security_groups' in child_names)
self.assertTrue(self.group.children.get(name='security_groups').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='security_groups').children.count())
self.assertTrue('tags' in child_names)
self.assertTrue(self.group.children.get(name='tags').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='tags').children.count())
# Only check for tag_none as a child of tags if there is a tag_none group;
# the test inventory *may* have tags set for all hosts.
if self.inventory.groups.filter(name='tag_none').exists():
self.assertTrue('tag_none' in self.group.children.get(name='tags').children.values_list('name', flat=True))
self.assertTrue('images' in child_names)
self.assertTrue(self.group.children.get(name='images').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='images').children.count())
self.assertTrue('instances' in child_names)
self.assertTrue(self.group.children.get(name='instances').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='instances').children.count())
# Sync again with overwrite set to False after renaming a group that
# was created by the sync. With overwrite false, the renamed group and
# the original group (created again by the sync) will both exist.
@ -1853,7 +1839,7 @@ class InventoryUpdatesTest(BaseTransactionTest):
inventory_source.overwrite = False
inventory_source.save()
self.check_inventory_source(inventory_source, initial=False, instance_id_group_ok=True)
child_names = self.group.children.filter(active=True).values_list('name', flat=True)
child_names = self.group.children.values_list('name', flat=True)
self.assertTrue(region_group_original_name in self.group.children.get(name='regions').children.values_list('name', flat=True))
self.assertTrue(region_group.name in self.group.children.get(name='regions').children.values_list('name', flat=True))
# Replacement text should not be left in inventory source name.

View File

@ -96,7 +96,7 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase):
def test_credential_explicit(self):
# Explicit, credential
with self.current_user(self.user_sue):
self.cred_sue.mark_inactive()
self.cred_sue.delete()
response = self.post(self.launch_url, {'credential': self.cred_doug.pk}, expect=202)
j = Job.objects.get(pk=response['job'])
self.assertEqual(j.status, 'new')
@ -105,7 +105,7 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase):
def test_credential_explicit_via_credential_id(self):
# Explicit, credential
with self.current_user(self.user_sue):
self.cred_sue.mark_inactive()
self.cred_sue.delete()
response = self.post(self.launch_url, {'credential_id': self.cred_doug.pk}, expect=202)
j = Job.objects.get(pk=response['job'])
self.assertEqual(j.status, 'new')
@ -131,15 +131,16 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase):
# Can't launch a job template without a credential defined (or if we
# pass an invalid/inactive credential value).
with self.current_user(self.user_sue):
self.cred_sue.mark_inactive()
self.cred_sue.delete()
self.post(self.launch_url, {}, expect=400)
self.post(self.launch_url, {'credential': 0}, expect=400)
self.post(self.launch_url, {'credential_id': 0}, expect=400)
self.post(self.launch_url, {'credential': 'one'}, expect=400)
self.post(self.launch_url, {'credential_id': 'one'}, expect=400)
self.cred_doug.mark_inactive()
self.post(self.launch_url, {'credential': self.cred_doug.pk}, expect=400)
self.post(self.launch_url, {'credential_id': self.cred_doug.pk}, expect=400)
cred_doug_pk = self.cred_doug.pk
self.cred_doug.delete()
self.post(self.launch_url, {'credential': cred_doug_pk}, expect=400)
self.post(self.launch_url, {'credential_id': cred_doug_pk}, expect=400)
def test_explicit_unowned_cred(self):
# Explicitly specify a credential that we don't have access to
@ -174,7 +175,7 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase):
def test_deleted_credential_fail(self):
# Job Templates with deleted credentials cannot be launched.
self.cred_sue.mark_inactive()
self.cred_sue.delete()
with self.current_user(self.user_sue):
self.post(self.launch_url, {}, expect=400)
@ -202,7 +203,7 @@ class JobTemplateLaunchPasswordsTest(BaseJobTestMixin, django.test.TransactionTe
passwords_required = ['ssh_password', 'become_password', 'ssh_key_unlock']
# Job Templates with deleted credentials cannot be launched.
with self.current_user(self.user_sue):
self.cred_sue_ask.mark_inactive()
self.cred_sue_ask.delete()
response = self.post(self.launch_url, {'credential_id': self.cred_sue_ask_many.pk}, expect=400)
for p in passwords_required:
self.assertIn(p, response['passwords_needed_to_start'])

View File

@ -436,10 +436,8 @@ class OrganizationsTest(BaseTest):
self.delete(urls[0], expect=204, auth=self.get_super_credentials())
# check that when we have deleted an object it comes back 404 via GET
# but that it's still in the database as inactive
self.get(urls[1], expect=404, auth=self.get_normal_credentials())
org1 = Organization.objects.get(pk=urldata1['id'])
self.assertEquals(org1.active, False)
assert Organization.objects.filter(pk=urldata1['id']).count() == 0
# also check that DELETE on the collection doesn't work
self.delete(self.collection(), expect=405, auth=self.get_super_credentials())

View File

@ -162,14 +162,14 @@ class ProjectsTest(BaseTransactionTest):
set(Project.get_local_path_choices()))
# return local paths are only the ones not used by any active project.
qs = Project.objects.filter(active=True)
qs = Project.objects
used_paths = qs.values_list('local_path', flat=True)
self.assertFalse(set(response['project_local_paths']) & set(used_paths))
for project in self.projects:
local_path = project.local_path
response = self.get(url, expect=200, auth=self.get_super_credentials())
self.assertTrue(local_path not in response['project_local_paths'])
project.mark_inactive()
project.delete()
response = self.get(url, expect=200, auth=self.get_super_credentials())
self.assertTrue(local_path in response['project_local_paths'])
@ -402,7 +402,7 @@ class ProjectsTest(BaseTransactionTest):
# =====================================================================
# TEAM PROJECTS
team = Team.objects.filter(active=True, organization__pk=self.organizations[1].pk)[0]
team = Team.objects.filter( organization__pk=self.organizations[1].pk)[0]
team_projects = reverse('api:team_projects_list', args=(team.pk,))
p1 = self.projects[0]
@ -419,7 +419,7 @@ class ProjectsTest(BaseTransactionTest):
# =====================================================================
# TEAMS USER MEMBERSHIP
team = Team.objects.filter(active=True, organization__pk=self.organizations[1].pk)[0]
team = Team.objects.filter( organization__pk=self.organizations[1].pk)[0]
team_users = reverse('api:team_users_list', args=(team.pk,))
for x in team.deprecated_users.all():
team.deprecated_users.remove(x)
@ -1262,7 +1262,7 @@ class ProjectUpdatesTest(BaseTransactionTest):
else:
self.check_project_update(project, should_fail=should_still_fail)
# Test that we can delete project updates.
for pu in project.project_updates.filter(active=True):
for pu in project.project_updates:
pu_url = reverse('api:project_update_detail', args=(pu.pk,))
with self.current_user(self.super_django_user):
self.delete(pu_url, expect=204)

View File

@ -88,7 +88,8 @@ class InventoryScriptTest(BaseScriptTest):
inventory=inventory,
variables=variables)
if x in (3, 7):
host.mark_inactive()
host.delete()
continue
hosts.append(host)
# add localhost just to make sure it's thrown into all (Ansible github bug)
@ -106,7 +107,8 @@ class InventoryScriptTest(BaseScriptTest):
inventory=inventory,
variables=variables)
if x == 2:
group.mark_inactive()
group.delete()
continue
groups.append(group)
group.hosts.add(hosts[x])
group.hosts.add(hosts[x + 5])
@ -144,12 +146,11 @@ class InventoryScriptTest(BaseScriptTest):
def test_list_with_inventory_id_as_argument(self):
inventory = self.inventories[0]
self.assertTrue(inventory.active)
rc, stdout, stderr = self.run_inventory_script(list=True,
inventory=inventory.pk)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = [ x for x in groups.values_list('name', flat=True)]
# it's ok for all to be here because due to an Ansible inventory workaround
@ -165,16 +166,13 @@ class InventoryScriptTest(BaseScriptTest):
self.assertTrue(isinstance(v['children'], (list,tuple)))
self.assertTrue(isinstance(v['hosts'], (list,tuple)))
self.assertTrue(isinstance(v['vars'], (dict)))
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True)
group = inventory.groups.get(name=k)
hosts = group.hosts
hostnames = hosts.values_list('name', flat=True)
self.assertEqual(set(v['hosts']), set(hostnames))
else:
self.assertTrue(v['hosts'] == ['localhost'])
for group in inventory.groups.filter(active=False):
self.assertFalse(group.name in data.keys(),
'deleted group %s should not be in data' % group)
# Command line argument for inventory ID should take precedence over
# environment variable.
inventory_pks = set(map(lambda x: x.pk, self.inventories))
@ -187,12 +185,11 @@ class InventoryScriptTest(BaseScriptTest):
def test_list_with_inventory_id_in_environment(self):
inventory = self.inventories[1]
self.assertTrue(inventory.active)
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(list=True)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = list(groups.values_list('name', flat=True)) + ['all']
self.assertEqual(set(data.keys()), set(groupnames))
# Groups for this inventory should have hosts, variable data, and one
@ -202,14 +199,14 @@ class InventoryScriptTest(BaseScriptTest):
if k == 'all':
self.assertEqual(v.get('vars', {}), inventory.variables_dict)
continue
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True)
group = inventory.groups.get(name=k)
hosts = group.hosts
hostnames = hosts.values_list('name', flat=True)
self.assertEqual(set(v.get('hosts', [])), set(hostnames))
if group.variables:
self.assertEqual(v.get('vars', {}), group.variables_dict)
if k == 'group-3':
children = group.children.filter(active=True)
children = group.children
childnames = children.values_list('name', flat=True)
self.assertEqual(set(v.get('children', [])), set(childnames))
else:
@ -217,13 +214,12 @@ class InventoryScriptTest(BaseScriptTest):
def test_list_with_hostvars_inline(self):
inventory = self.inventories[1]
self.assertTrue(inventory.active)
rc, stdout, stderr = self.run_inventory_script(list=True,
inventory=inventory.pk,
hostvars=True)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = list(groups.values_list('name', flat=True))
groupnames.extend(['all', '_meta'])
self.assertEqual(set(data.keys()), set(groupnames))
@ -237,15 +233,15 @@ class InventoryScriptTest(BaseScriptTest):
continue
if k == '_meta':
continue
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True)
group = inventory.groups.get(name=k)
hosts = group.hosts
hostnames = hosts.values_list('name', flat=True)
all_hostnames.update(hostnames)
self.assertEqual(set(v.get('hosts', [])), set(hostnames))
if group.variables:
self.assertEqual(v.get('vars', {}), group.variables_dict)
if k == 'group-3':
children = group.children.filter(active=True)
children = group.children
childnames = children.values_list('name', flat=True)
self.assertEqual(set(v.get('children', [])), set(childnames))
else:
@ -267,8 +263,7 @@ class InventoryScriptTest(BaseScriptTest):
def test_valid_host(self):
# Host without variable data.
inventory = self.inventories[0]
self.assertTrue(inventory.active)
host = inventory.hosts.filter(active=True)[2]
host = inventory.hosts[2]
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(host=host.name)
self.assertEqual(rc, 0, stderr)
@ -276,8 +271,7 @@ class InventoryScriptTest(BaseScriptTest):
self.assertEqual(data, {})
# Host with variable data.
inventory = self.inventories[1]
self.assertTrue(inventory.active)
host = inventory.hosts.filter(active=True)[4]
host = inventory.hosts[4]
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(host=host.name)
self.assertEqual(rc, 0, stderr)
@ -287,8 +281,7 @@ class InventoryScriptTest(BaseScriptTest):
def test_invalid_host(self):
# Valid host, but not part of the specified inventory.
inventory = self.inventories[0]
self.assertTrue(inventory.active)
host = Host.objects.filter(active=True).exclude(inventory=inventory)[0]
host = Host.objects.exclude(inventory=inventory)[0]
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(host=host.name)
self.assertNotEqual(rc, 0, stderr)
@ -320,16 +313,15 @@ class InventoryScriptTest(BaseScriptTest):
def test_with_deleted_inventory(self):
inventory = self.inventories[0]
inventory.mark_inactive()
self.assertFalse(inventory.active)
os.environ['INVENTORY_ID'] = str(inventory.pk)
pk = inventory.pk
inventory.delete()
os.environ['INVENTORY_ID'] = str(pk)
rc, stdout, stderr = self.run_inventory_script(list=True)
self.assertNotEqual(rc, 0, stderr)
self.assertEqual(json.loads(stdout), {'failed': True})
def test_without_list_or_host_argument(self):
inventory = self.inventories[0]
self.assertTrue(inventory.active)
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script()
self.assertNotEqual(rc, 0, stderr)
@ -337,7 +329,6 @@ class InventoryScriptTest(BaseScriptTest):
def test_with_both_list_and_host_arguments(self):
inventory = self.inventories[0]
self.assertTrue(inventory.active)
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(list=True, host='blah')
self.assertNotEqual(rc, 0, stderr)
@ -345,8 +336,7 @@ class InventoryScriptTest(BaseScriptTest):
def test_with_disabled_hosts(self):
inventory = self.inventories[1]
self.assertTrue(inventory.active)
for host in inventory.hosts.filter(active=True, enabled=True):
for host in inventory.hosts.filter(enabled=True):
host.enabled = False
host.save(update_fields=['enabled'])
os.environ['INVENTORY_ID'] = str(inventory.pk)
@ -354,7 +344,7 @@ class InventoryScriptTest(BaseScriptTest):
rc, stdout, stderr = self.run_inventory_script(list=True)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = list(groups.values_list('name', flat=True)) + ['all']
self.assertEqual(set(data.keys()), set(groupnames))
for k,v in data.items():
@ -362,15 +352,15 @@ class InventoryScriptTest(BaseScriptTest):
if k == 'all':
self.assertEqual(v.get('vars', {}), inventory.variables_dict)
continue
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True, enabled=True)
group = inventory.groups.get(name=k)
hosts = group.hosts.filter(enabled=True)
hostnames = hosts.values_list('name', flat=True)
self.assertEqual(set(v.get('hosts', [])), set(hostnames))
self.assertFalse(hostnames)
if group.variables:
self.assertEqual(v.get('vars', {}), group.variables_dict)
if k == 'group-3':
children = group.children.filter(active=True)
children = group.children
childnames = children.values_list('name', flat=True)
self.assertEqual(set(v.get('children', [])), set(childnames))
else:
@ -379,7 +369,7 @@ class InventoryScriptTest(BaseScriptTest):
rc, stdout, stderr = self.run_inventory_script(list=True, all=True)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = list(groups.values_list('name', flat=True)) + ['all']
self.assertEqual(set(data.keys()), set(groupnames))
for k,v in data.items():
@ -387,15 +377,15 @@ class InventoryScriptTest(BaseScriptTest):
if k == 'all':
self.assertEqual(v.get('vars', {}), inventory.variables_dict)
continue
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True)
group = inventory.groups.get(name=k)
hosts = group.hosts
hostnames = hosts.values_list('name', flat=True)
self.assertEqual(set(v.get('hosts', [])), set(hostnames))
self.assertTrue(hostnames)
if group.variables:
self.assertEqual(v.get('vars', {}), group.variables_dict)
if k == 'group-3':
children = group.children.filter(active=True)
children = group.children
childnames = children.values_list('name', flat=True)
self.assertEqual(set(v.get('children', [])), set(childnames))
else:

View File

@ -592,26 +592,8 @@ class RunJobTest(BaseJobExecutionTest):
new_group.children.remove(self.group)
new_group = Group.objects.get(pk=new_group.pk)
self.assertFalse(new_group.has_active_failures)
# Mark host inactive (should clear flag on parent group and inventory)
self.host.mark_inactive()
self.group = Group.objects.get(pk=self.group.pk)
self.assertFalse(self.group.has_active_failures)
self.inventory = Inventory.objects.get(pk=self.inventory.pk)
self.assertFalse(self.inventory.has_active_failures)
# Un-mark host as inactive (need to force update of flag on group and
# inventory)
host = self.host
host.name = '_'.join(host.name.split('_')[3:]) or 'undeleted host'
host.active = True
host.save()
host.update_computed_fields()
self.group = Group.objects.get(pk=self.group.pk)
self.assertTrue(self.group.has_active_failures)
self.inventory = Inventory.objects.get(pk=self.inventory.pk)
self.assertTrue(self.inventory.has_active_failures)
# Delete host. (should clear flag)
# Delete host (should clear flag on parent group and inventory)
self.host.delete()
self.host = None
self.group = Group.objects.get(pk=self.group.pk)
self.assertFalse(self.group.has_active_failures)
self.inventory = Inventory.objects.get(pk=self.inventory.pk)
@ -619,30 +601,7 @@ class RunJobTest(BaseJobExecutionTest):
def test_update_has_active_failures_when_job_removed(self):
job = self.test_run_job_that_fails()
# Mark job as inactive (should clear flags).
job.mark_inactive()
self.host = Host.objects.get(pk=self.host.pk)
self.assertFalse(self.host.has_active_failures)
self.group = Group.objects.get(pk=self.group.pk)
self.assertFalse(self.group.has_active_failures)
self.inventory = Inventory.objects.get(pk=self.inventory.pk)
self.assertFalse(self.inventory.has_active_failures)
# Un-mark job as inactive (need to force update of flag)
job.active = True
job.save()
# Need to manually update last_job on host...
host = Host.objects.get(pk=self.host.pk)
host.last_job = job
host.last_job_host_summary = JobHostSummary.objects.get(job=job, host=host)
host.save()
self.inventory.update_computed_fields()
self.host = Host.objects.get(pk=self.host.pk)
self.assertTrue(self.host.has_active_failures)
self.group = Group.objects.get(pk=self.group.pk)
self.assertTrue(self.group.has_active_failures)
self.inventory = Inventory.objects.get(pk=self.inventory.pk)
self.assertTrue(self.inventory.has_active_failures)
# Delete job entirely.
# Delete (should clear flags).
job.delete()
self.host = Host.objects.get(pk=self.host.pk)
self.assertFalse(self.host.has_active_failures)
@ -662,8 +621,8 @@ class RunJobTest(BaseJobExecutionTest):
self.host = Host.objects.get(pk=self.host.pk)
self.assertEqual(self.host.last_job, job1)
self.assertEqual(self.host.last_job_host_summary.job, job1)
# Mark job1 inactive (should update host.last_job to None).
job1.mark_inactive()
# Delete job1 (should update host.last_job to None).
job1.delete()
self.host = Host.objects.get(pk=self.host.pk)
self.assertEqual(self.host.last_job, None)
self.assertEqual(self.host.last_job_host_summary, None)

View File

@ -196,7 +196,7 @@ class UsersTest(BaseTest):
self.post(url, expect=201, data=new_user2, auth=self.get_normal_credentials())
self.post(url, expect=400, data=new_user2, auth=self.get_normal_credentials())
# Normal user cannot add users after his org is marked inactive.
self.organizations[0].mark_inactive()
self.organizations[0].delete()
new_user3 = dict(username='blippy3')
self.post(url, expect=403, data=new_user3, auth=self.get_normal_credentials())
@ -316,7 +316,7 @@ class UsersTest(BaseTest):
remote_addr=remote_addr)
# Token auth should be denied if the user is inactive.
self.normal_django_user.mark_inactive()
self.normal_django_user.delete()
response = self.get(user_me_url, expect=401, auth=auth_token2,
remote_addr=remote_addr)
self.assertEqual(response['detail'], 'User inactive or deleted')
@ -422,7 +422,7 @@ class UsersTest(BaseTest):
# Normal user can no longer see all users after the organization he
# admins is marked inactive, nor can he see any other users that were
# in that org, so he only sees himself.
self.organizations[0].mark_inactive()
self.organizations[0].delete()
data3 = self.get(url, expect=200, auth=self.get_normal_credentials())
self.assertEquals(data3['count'], 1)

View File

@ -209,7 +209,6 @@ REST_FRAMEWORK = {
'awx.api.permissions.ModelAccessPermission',
),
'DEFAULT_FILTER_BACKENDS': (
'awx.api.filters.ActiveOnlyBackend',
'awx.api.filters.TypeFilterBackend',
'awx.api.filters.FieldLookupBackend',
'rest_framework.filters.SearchFilter',

View File

@ -90,7 +90,7 @@ def update_user_orgs(backend, details, user=None, *args, **kwargs):
org = Organization.objects.get_or_create(name=org_name)[0]
else:
try:
org = Organization.objects.filter(active=True).order_by('pk')[0]
org = Organization.objects.order_by('pk')[0]
except IndexError:
continue
@ -126,7 +126,7 @@ def update_user_teams(backend, details, user=None, *args, **kwargs):
org = Organization.objects.get_or_create(name=team_opts['organization'])[0]
else:
try:
org = Organization.objects.filter(active=True).order_by('pk')[0]
org = Organization.objects.order_by('pk')[0]
except IndexError:
continue