Merge branch 'devel' of https://github.com/ansible/ansible-tower into can_CRUD

AlanCoding · 2016-09-15 17:19:38 -04:00 · commit 7ff5c40564
68 changed files with 2509 additions and 734 deletions

View File

@@ -15,6 +15,8 @@ COMPOSE_TAG ?= devel
# NOTE: This defaults the container image version to the branch that's active
# COMPOSE_TAG ?= $(GIT_BRANCH)
COMPOSE_HOST ?= $(shell hostname)
VENV_BASE ?= /venv
SCL_PREFIX ?=
CELERY_SCHEDULE_FILE ?= /celerybeat-schedule
@@ -299,7 +301,7 @@ requirements_jenkins:
. $(VENV_BASE)/tower/bin/activate; \
$(VENV_BASE)/tower/bin/pip install -Ir requirements/requirements_jenkins.txt; \
else \
pip install -Ir requirements/requirements_jenkins..txt; \
pip install -Ir requirements/requirements_jenkins.txt; \
fi && \
$(NPM_BIN) install csslint
@@ -328,7 +330,7 @@ init:
if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/tower/bin/activate; \
fi; \
tower-manage register_instance --primary --hostname=127.0.0.1; \
tower-manage register_instance --hostname=$(COMPOSE_HOST); \
# Refresh development environment after pulling new code.
refresh: clean requirements_dev version_file develop migrate
@@ -379,6 +381,12 @@ honcho:
fi; \
honcho start
flower:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/tower/bin/activate; \
fi; \
$(PYTHON) manage.py celery flower --address=0.0.0.0 --port=5555 --broker=amqp://guest:guest@$(RABBITMQ_HOST):5672//
# Run the built-in development webserver (by default on http://localhost:8013).
runserver:
@if [ "$(VENV_BASE)" ]; then \
@@ -391,7 +399,8 @@ celeryd:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/tower/bin/activate; \
fi; \
$(PYTHON) manage.py celeryd -l DEBUG -B --autoscale=20,2 -Ofair --schedule=$(CELERY_SCHEDULE_FILE)
$(PYTHON) manage.py celeryd -l DEBUG -B --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default
#$(PYTHON) manage.py celery multi show projects jobs default -l DEBUG -Q:projects projects -Q:jobs jobs -Q:default default -c:projects 1 -c:jobs 3 -c:default 3 -Ofair -B --schedule=$(CELERY_SCHEDULE_FILE)
# Run to start the zeromq callback receiver
receiver:
@@ -404,7 +413,11 @@ taskmanager:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/tower/bin/activate; \
fi; \
$(PYTHON) manage.py run_task_system
if [ "$(COMPOSE_HOST)" == "tower_1" ] || [ "$(COMPOSE_HOST)" == "tower" ]; then \
$(PYTHON) manage.py run_task_system; \
else \
while true; do sleep 2; done; \
fi
socketservice:
@if [ "$(VENV_BASE)" ]; then \
@@ -472,7 +485,7 @@ test_jenkins : test_coverage
# --------------------------------------
ui-deps-built: awx/ui/package.json
$(NPM_BIN) --prefix awx/ui install awx/ui
$(NPM_BIN) --unsafe-perm --prefix awx/ui install awx/ui
touch awx/ui/.deps_built
ui-docker-machine: ui-deps-built
@@ -749,6 +762,9 @@ docker-auth:
docker-compose: docker-auth
TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose.yml up --no-recreate
docker-compose-cluster: docker-auth
TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose-cluster.yml up
docker-compose-test: docker-auth
cd tools && TAG=$(COMPOSE_TAG) docker-compose run --rm --service-ports tower /bin/bash
@@ -761,8 +777,7 @@ MACHINE?=default
docker-clean:
rm -f awx/lib/.deps_built
eval $$(docker-machine env $(MACHINE))
docker stop $$(docker ps -a -q)
-docker rm $$(docker ps -f name=tools_tower -a -q)
$(foreach container_id,$(shell docker ps -f name=tools_tower -aq),docker stop $(container_id); docker rm -f $(container_id);)
-docker images | grep "tower_devel" | awk '{print $3}' | xargs docker rmi
docker-refresh: docker-clean docker-compose

View File

@@ -4,3 +4,4 @@ taskmanager: make taskmanager
receiver: make receiver
socketservice: make socketservice
factcacher: make factcacher
flower: make flower

View File

@@ -286,7 +286,8 @@ class BaseSerializer(serializers.ModelSerializer):
# because it results in additional queries.
if fk == 'job' and isinstance(obj, UnifiedJob):
continue
if fk == 'project' and isinstance(obj, InventorySource):
if fk == 'project' and (isinstance(obj, InventorySource) or
isinstance(obj, Project)):
continue
fkval = getattr(obj, fk, None)
@@ -327,7 +328,7 @@ class BaseSerializer(serializers.ModelSerializer):
roles[field.name] = {
'id': role.id,
'name': role.name,
'description': role.description,
'description': role.get_description(reference_content_object=obj),
}
if len(roles) > 0:
summary_fields['object_roles'] = roles
@@ -527,7 +528,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
def get_types(self):
if type(self) is UnifiedJobTemplateSerializer:
return ['project', 'inventory_source', 'job_template', 'system_job_template']
return ['project', 'inventory_source', 'job_template', 'system_job_template', 'workflow_job_template',]
else:
return super(UnifiedJobTemplateSerializer, self).get_types()
@@ -542,6 +543,8 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
serializer_class = JobTemplateSerializer
elif isinstance(obj, SystemJobTemplate):
serializer_class = SystemJobTemplateSerializer
elif isinstance(obj, WorkflowJobTemplate):
serializer_class = WorkflowJobTemplateSerializer
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
return serializer.to_representation(obj)
@@ -573,7 +576,7 @@ class UnifiedJobSerializer(BaseSerializer):
def get_types(self):
if type(self) is UnifiedJobSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job',]
else:
return super(UnifiedJobSerializer, self).get_types()
@@ -606,6 +609,8 @@ class UnifiedJobSerializer(BaseSerializer):
serializer_class = AdHocCommandSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobSerializer
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
ret = serializer.to_representation(obj)
@@ -653,6 +658,8 @@ class UnifiedJobListSerializer(UnifiedJobSerializer):
serializer_class = AdHocCommandListSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobListSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobSerializer
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
ret = serializer.to_representation(obj)
@@ -2193,6 +2200,123 @@ class SystemJobCancelSerializer(SystemJobSerializer):
class Meta:
fields = ('can_cancel',)
class WorkflowJobTemplateSerializer(UnifiedJobTemplateSerializer):
class Meta:
model = WorkflowJobTemplate
fields = ('*',)
def get_related(self, obj):
res = super(WorkflowJobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = reverse('api:workflow_job_template_jobs_list', args=(obj.pk,)),
#schedules = reverse('api:workflow_job_template_schedules_list', args=(obj.pk,)),
launch = reverse('api:workflow_job_template_launch', args=(obj.pk,)),
workflow_nodes = reverse('api:workflow_job_template_workflow_nodes_list', args=(obj.pk,)),
# TODO: Implement notifications
#notification_templates_any = reverse('api:system_job_template_notification_templates_any_list', args=(obj.pk,)),
#notification_templates_success = reverse('api:system_job_template_notification_templates_success_list', args=(obj.pk,)),
#notification_templates_error = reverse('api:system_job_template_notification_templates_error_list', args=(obj.pk,)),
))
return res
# TODO:
class WorkflowJobTemplateListSerializer(WorkflowJobTemplateSerializer):
pass
# TODO:
class WorkflowJobSerializer(UnifiedJobSerializer):
class Meta:
model = WorkflowJob
fields = ('*', 'workflow_job_template', 'extra_vars')
def get_related(self, obj):
res = super(WorkflowJobSerializer, self).get_related(obj)
if obj.workflow_job_template:
res['workflow_job_template'] = reverse('api:workflow_job_template_detail',
args=(obj.workflow_job_template.pk,))
# TODO:
#res['notifications'] = reverse('api:system_job_notifications_list', args=(obj.pk,))
res['workflow_nodes'] = reverse('api:workflow_job_workflow_nodes_list', args=(obj.pk,))
# TODO: Cancel job
'''
if obj.can_cancel or True:
res['cancel'] = reverse('api:workflow_job_cancel', args=(obj.pk,))
'''
return res
# TODO:
class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
pass
class WorkflowNodeBaseSerializer(BaseSerializer):
class Meta:
# TODO: workflow_job and job read-only
fields = ('id', 'url', 'related', 'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
def get_related(self, obj):
res = super(WorkflowNodeBaseSerializer, self).get_related(obj)
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url()
return res
class WorkflowJobTemplateNodeSerializer(WorkflowNodeBaseSerializer):
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'workflow_job_template',)
def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
res['success_nodes'] = reverse('api:workflow_job_template_node_success_nodes_list', args=(obj.pk,))
res['failure_nodes'] = reverse('api:workflow_job_template_node_failure_nodes_list', args=(obj.pk,))
res['always_nodes'] = reverse('api:workflow_job_template_node_always_nodes_list', args=(obj.pk,))
if obj.workflow_job_template:
res['workflow_job_template'] = reverse('api:workflow_job_template_detail', args=(obj.workflow_job_template.pk,))
return res
class WorkflowJobNodeSerializer(WorkflowNodeBaseSerializer):
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'job', 'workflow_job',)
def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
res['success_nodes'] = reverse('api:workflow_job_node_success_nodes_list', args=(obj.pk,))
res['failure_nodes'] = reverse('api:workflow_job_node_failure_nodes_list', args=(obj.pk,))
res['always_nodes'] = reverse('api:workflow_job_node_always_nodes_list', args=(obj.pk,))
if obj.job:
res['job'] = reverse('api:job_detail', args=(obj.job.pk,))
if obj.workflow_job:
res['workflow_job'] = reverse('api:workflow_job_detail', args=(obj.workflow_job.pk,))
return res
class WorkflowJobNodeListSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobNodeDetailSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer):
'''
Influence the api browser sample data to not include workflow_job_template
when editing a WorkflowNode.
Note: I was not able to accomplish this through the use of extra_kwargs.
Maybe something to do with workflow_job_template being a relational field?
'''
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeDetailSerializer, self).build_relational_field(field_name, relation_info)
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
class WorkflowJobTemplateNodeListSerializer(WorkflowJobTemplateNodeSerializer):
pass
class JobListSerializer(JobSerializer, UnifiedJobListSerializer):
pass
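
The new workflow serializers above differ mainly in the related links they expose. Below is a plain-Python illustration of the link shapes that WorkflowJobTemplateNodeSerializer.get_related() produces, with hard-coded paths standing in for Django's reverse(); the paths mirror the workflow URL patterns added later in this commit, and the pk values are invented.

# Illustrative only: mirrors the 'related' dict built by
# WorkflowJobTemplateNodeSerializer.get_related() above, using string
# formatting in place of reverse(). The pk values are made up.
def workflow_job_template_node_related(pk, workflow_job_template_pk=None):
    related = {
        'success_nodes': '/api/v1/workflow_job_template_nodes/%d/success_nodes/' % pk,
        'failure_nodes': '/api/v1/workflow_job_template_nodes/%d/failure_nodes/' % pk,
        'always_nodes': '/api/v1/workflow_job_template_nodes/%d/always_nodes/' % pk,
    }
    if workflow_job_template_pk is not None:
        related['workflow_job_template'] = (
            '/api/v1/workflow_job_templates/%d/' % workflow_job_template_pk)
    return related

print(workflow_job_template_node_related(3, workflow_job_template_pk=1))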

View File

@@ -257,6 +257,24 @@ system_job_urls = patterns('awx.api.views',
url(r'^(?P<pk>[0-9]+)/notifications/$', 'system_job_notifications_list'),
)
workflow_job_template_urls = patterns('awx.api.views',
url(r'^$', 'workflow_job_template_list'),
url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_template_detail'),
url(r'^(?P<pk>[0-9]+)/jobs/$', 'workflow_job_template_jobs_list'),
url(r'^(?P<pk>[0-9]+)/launch/$', 'workflow_job_template_launch'),
url(r'^(?P<pk>[0-9]+)/workflow_nodes/$', 'workflow_job_template_workflow_nodes_list'),
# url(r'^(?P<pk>[0-9]+)/cancel/$', 'workflow_job_template_cancel'),
)
workflow_job_urls = patterns('awx.api.views',
url(r'^$', 'workflow_job_list'),
url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_detail'),
url(r'^(?P<pk>[0-9]+)/workflow_nodes/$', 'workflow_job_workflow_nodes_list'),
# url(r'^(?P<pk>[0-9]+)/cancel/$', 'workflow_job_cancel'),
#url(r'^(?P<pk>[0-9]+)/notifications/$', 'workflow_job_notifications_list'),
)
notification_template_urls = patterns('awx.api.views',
url(r'^$', 'notification_template_list'),
url(r'^(?P<pk>[0-9]+)/$', 'notification_template_detail'),
@@ -274,6 +292,22 @@ label_urls = patterns('awx.api.views',
url(r'^(?P<pk>[0-9]+)/$', 'label_detail'),
)
workflow_job_template_node_urls = patterns('awx.api.views',
url(r'^$', 'workflow_job_template_node_list'),
url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_template_node_detail'),
url(r'^(?P<pk>[0-9]+)/success_nodes/$', 'workflow_job_template_node_success_nodes_list'),
url(r'^(?P<pk>[0-9]+)/failure_nodes/$', 'workflow_job_template_node_failure_nodes_list'),
url(r'^(?P<pk>[0-9]+)/always_nodes/$', 'workflow_job_template_node_always_nodes_list'),
)
workflow_job_node_urls = patterns('awx.api.views',
url(r'^$', 'workflow_job_node_list'),
url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_node_detail'),
url(r'^(?P<pk>[0-9]+)/success_nodes/$', 'workflow_job_node_success_nodes_list'),
url(r'^(?P<pk>[0-9]+)/failure_nodes/$', 'workflow_job_node_failure_nodes_list'),
url(r'^(?P<pk>[0-9]+)/always_nodes/$', 'workflow_job_node_always_nodes_list'),
)
schedule_urls = patterns('awx.api.views',
url(r'^$', 'schedule_list'),
url(r'^(?P<pk>[0-9]+)/$', 'schedule_detail'),
@@ -323,7 +357,11 @@ v1_urls = patterns('awx.api.views',
url(r'^system_jobs/', include(system_job_urls)),
url(r'^notification_templates/', include(notification_template_urls)),
url(r'^notifications/', include(notification_urls)),
url(r'^workflow_job_templates/',include(workflow_job_template_urls)),
url(r'^workflow_jobs/' ,include(workflow_job_urls)),
url(r'^labels/', include(label_urls)),
url(r'^workflow_job_template_nodes/', include(workflow_job_template_node_urls)),
url(r'^workflow_job_nodes/', include(workflow_job_node_urls)),
url(r'^unified_job_templates/$','unified_job_template_list'),
url(r'^unified_jobs/$', 'unified_job_list'),
url(r'^activity_stream/', include(activity_stream_urls)),

View File

@@ -148,6 +148,8 @@ class ApiV1RootView(APIView):
data['unified_job_templates'] = reverse('api:unified_job_template_list')
data['unified_jobs'] = reverse('api:unified_job_list')
data['activity_stream'] = reverse('api:activity_stream_list')
data['workflow_job_templates'] = reverse('api:workflow_job_template_list')
data['workflow_jobs'] = reverse('api:workflow_job_list')
return Response(data)
@@ -169,28 +171,13 @@ class ApiV1PingView(APIView):
# Most of this response is canned; just build the dictionary.
response = {
'ha': is_ha_environment(),
'role': Instance.objects.my_role(),
'version': get_awx_version(),
}
# If this is an HA environment, we also include the IP address of
# all of the instances.
#
# Set up a default structure.
response['instances'] = {
'primary': None,
'secondaries': [],
}
# Add all of the instances into the structure.
response['instances'] = []
for instance in Instance.objects.all():
if instance.primary:
response['instances']['primary'] = instance.hostname
else:
response['instances']['secondaries'].append(instance.hostname)
response['instances']['secondaries'].sort()
# Done; return the response.
response['instances'].append(instance.hostname)
response['instances'].sort()
return Response(response)
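
The ping view change above drops the primary/secondary split: 'instances' becomes a flat, sorted list of every registered hostname. A tiny standalone illustration of the new shape (hostnames are invented):

# Before: {'primary': 'tower-a', 'secondaries': ['tower-b']}
# After:  a plain sorted list of hostnames.
response = {'instances': []}
for hostname in ['tower-b', 'tower-a']:   # stand-in for Instance.objects.all()
    response['instances'].append(hostname)
response['instances'].sort()
print(response)   # {'instances': ['tower-a', 'tower-b']}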
@@ -1760,16 +1747,24 @@ class GroupList(ListCreateAPIView):
serializer_class = GroupSerializer
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc', 'inventory.update']
class GroupChildrenList(SubListCreateAttachDetachAPIView):
'''
Useful when you have a self-referring ManyToManyRelationship.
* Tower uses a shallow (2-deep only) url pattern. For example:
model = Group
serializer_class = GroupSerializer
parent_model = Group
relationship = 'children'
When an object hangs off of a parent object you would have the url of the
form /api/v1/parent_model/34/child_model. If you then wanted a child of the
child model you would NOT do /api/v1/parent_model/34/child_model/87/child_child_model
Instead, you would access the child_child_model via /api/v1/child_child_model/87/
and you would create child_child_model's off of /api/v1/child_model/87/child_child_model_set
Now, when creating child_child_model related to child_model you still want to
link child_child_model to parent_model. That's what this class is for
'''
class EnforceParentRelationshipMixin(object):
enforce_parent_relationship = ''
def update_raw_data(self, data):
data.pop('inventory', None)
return super(GroupChildrenList, self).update_raw_data(data)
data.pop(self.enforce_parent_relationship, None)
return super(EnforceParentRelationshipMixin, self).update_raw_data(data)
def create(self, request, *args, **kwargs):
# Inject parent group inventory ID into new group data.
@@ -1777,16 +1772,16 @@ class GroupChildrenList(SubListCreateAttachDetachAPIView):
# HACK: Make request data mutable.
if getattr(data, '_mutable', None) is False:
data._mutable = True
data['inventory'] = self.get_parent_object().inventory_id
return super(GroupChildrenList, self).create(request, *args, **kwargs)
data[self.enforce_parent_relationship] = getattr(self.get_parent_object(), '%s_id' % self.enforce_parent_relationship)
return super(EnforceParentRelationshipMixin, self).create(request, *args, **kwargs)
def unattach(self, request, *args, **kwargs):
sub_id = request.data.get('id', None)
if sub_id is not None:
return super(GroupChildrenList, self).unattach(request, *args, **kwargs)
parent = self.get_parent_object()
parent.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class GroupChildrenList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
model = Group
serializer_class = GroupSerializer
parent_model = Group
relationship = 'children'
enforce_parent_relationship = 'inventory'
class GroupPotentialChildrenList(SubListAPIView):
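
The docstring above describes the shallow URL layout that EnforceParentRelationshipMixin generalizes: when a child object is created through /api/v1/parent_model/<pk>/child_model/, the parent's own foreign key is injected into the request data so the new child stays linked to the same top-level resource. A framework-free sketch of that injection idea (the classes and attribute names below are illustrative, not Tower's):

# Minimal sketch, assuming the parent exposes a Django-style '<field>_id'
# attribute. Not Tower's implementation; it only shows the injection step.
class EnforceParentRelationshipSketch(object):
    enforce_parent_relationship = 'inventory'

    def __init__(self, parent):
        self.parent = parent

    def create(self, data):
        data = dict(data)
        data[self.enforce_parent_relationship] = getattr(
            self.parent, '%s_id' % self.enforce_parent_relationship)
        return data

class FakeGroup(object):
    inventory_id = 42

print(EnforceParentRelationshipSketch(FakeGroup()).create({'name': 'webservers'}))
# -> {'name': 'webservers', 'inventory': 42}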
@@ -2626,6 +2621,182 @@ class JobTemplateObjectRolesList(SubListAPIView):
content_type = ContentType.objects.get_for_model(self.parent_model)
return Role.objects.filter(content_type=content_type, object_id=po.pk)
# TODO:
class WorkflowJobNodeList(ListCreateAPIView):
model = WorkflowJobNode
serializer_class = WorkflowJobNodeListSerializer
new_in_310 = True
# TODO:
class WorkflowJobNodeDetail(RetrieveUpdateDestroyAPIView):
model = WorkflowJobNode
serializer_class = WorkflowJobNodeDetailSerializer
new_in_310 = True
# TODO:
class WorkflowJobTemplateNodeList(ListCreateAPIView):
model = WorkflowJobTemplateNode
serializer_class = WorkflowJobTemplateNodeListSerializer
new_in_310 = True
# TODO:
class WorkflowJobTemplateNodeDetail(RetrieveUpdateDestroyAPIView):
model = WorkflowJobTemplateNode
serializer_class = WorkflowJobTemplateNodeDetailSerializer
new_in_310 = True
class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
model = WorkflowJobTemplateNode
serializer_class = WorkflowJobTemplateNodeListSerializer
always_allow_superuser = True # TODO: RBAC
parent_model = WorkflowJobTemplateNode
relationship = ''
enforce_parent_relationship = 'workflow_job_template'
new_in_310 = True
'''
Limit the set of WorkflowJobTemplateNodes to the related nodes specified by
'relationship'
'''
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).all()
class WorkflowJobTemplateNodeSuccessNodesList(WorkflowJobTemplateNodeChildrenBaseList):
relationship = 'success_nodes'
class WorkflowJobTemplateNodeFailureNodesList(WorkflowJobTemplateNodeChildrenBaseList):
relationship = 'failure_nodes'
class WorkflowJobTemplateNodeAlwaysNodesList(WorkflowJobTemplateNodeChildrenBaseList):
relationship = 'always_nodes'
class WorkflowJobNodeChildrenBaseList(SubListAPIView):
model = WorkflowJobNode
serializer_class = WorkflowJobNodeListSerializer
always_allow_superuser = True # TODO: RBAC
parent_model = Job
relationship = ''
'''
enforce_parent_relationship = 'workflow_job_template'
new_in_310 = True
'''
#
#Limit the set of WorkflowJobNodes to the related nodes specified by
#'relationship'
#
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).all()
class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
relationship = 'success_nodes'
class WorkflowJobNodeFailureNodesList(WorkflowJobNodeChildrenBaseList):
relationship = 'failure_nodes'
class WorkflowJobNodeAlwaysNodesList(WorkflowJobNodeChildrenBaseList):
relationship = 'always_nodes'
# TODO:
class WorkflowJobTemplateList(ListCreateAPIView):
model = WorkflowJobTemplate
serializer_class = WorkflowJobTemplateListSerializer
always_allow_superuser = False
# TODO: RBAC
'''
def post(self, request, *args, **kwargs):
ret = super(WorkflowJobTemplateList, self).post(request, *args, **kwargs)
if ret.status_code == 201:
workflow_job_template = WorkflowJobTemplate.objects.get(id=ret.data['id'])
workflow_job_template.admin_role.members.add(request.user)
return ret
'''
# TODO:
class WorkflowJobTemplateDetail(RetrieveUpdateDestroyAPIView):
model = WorkflowJobTemplate
serializer_class = WorkflowJobTemplateSerializer
always_allow_superuser = False
# TODO:
class WorkflowJobTemplateLaunch(GenericAPIView):
model = WorkflowJobTemplate
serializer_class = EmptySerializer
def get(self, request, *args, **kwargs):
return Response({})
def post(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(self.model, 'start', obj):
raise PermissionDenied()
new_job = obj.create_unified_job(**request.data)
new_job.signal_start(**request.data)
data = dict(workflow_job=new_job.id)
return Response(data, status=status.HTTP_201_CREATED)
# TODO:
class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
model = WorkflowJobTemplateNode
serializer_class = WorkflowJobTemplateNodeListSerializer
always_allow_superuser = True # TODO: RBAC
parent_model = WorkflowJobTemplate
relationship = 'workflow_job_template_nodes'
parent_key = 'workflow_job_template'
# TODO:
class WorkflowJobTemplateJobsList(SubListAPIView):
model = WorkflowJob
serializer_class = WorkflowJobListSerializer
parent_model = WorkflowJobTemplate
relationship = 'jobs'
parent_key = 'workflow_job_template'
# TODO:
class WorkflowJobList(ListCreateAPIView):
model = WorkflowJob
serializer_class = WorkflowJobListSerializer
def get(self, request, *args, **kwargs):
if not request.user.is_superuser and not request.user.is_system_auditor:
raise PermissionDenied("Superuser privileges needed.")
return super(WorkflowJobList, self).get(request, *args, **kwargs)
# TODO:
class WorkflowJobDetail(RetrieveDestroyAPIView):
model = WorkflowJob
serializer_class = WorkflowJobSerializer
class WorkflowJobWorkflowNodesList(SubListAPIView):
model = WorkflowJobNode
serializer_class = WorkflowJobNodeListSerializer
always_allow_superuser = True # TODO: RBAC
parent_model = WorkflowJob
relationship = 'workflow_job_nodes'
parent_key = 'workflow_job'
class SystemJobTemplateList(ListAPIView):
model = SystemJobTemplate
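
WorkflowJobTemplateLaunch.post() above creates a WorkflowJob through create_unified_job(), signals it to start, and answers 201 with the new job's id under 'workflow_job'. A hypothetical client-side call against the new endpoint; the host, credentials and template id below are placeholders, and TLS verification is disabled only because the host is fictional.

# Hypothetical usage of POST /api/v1/workflow_job_templates/<pk>/launch/
import requests

resp = requests.post(
    'https://tower.example.com/api/v1/workflow_job_templates/7/launch/',
    auth=('admin', 'password'),   # placeholder credentials
    json={},                      # request.data is forwarded to create_unified_job()/signal_start()
    verify=False,
)
resp.raise_for_status()
print(resp.json()['workflow_job'])   # id of the spawned WorkflowJob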

View File

@@ -1228,6 +1228,173 @@ class SystemJobAccess(BaseAccess):
def can_start(self, obj):
return False # no relaunching of system jobs
# TODO:
class WorkflowJobTemplateNodeAccess(BaseAccess):
'''
I can see/use a WorkflowJobTemplateNode if I have permission to the associated Workflow Job Template
'''
model = WorkflowJobTemplateNode
def get_queryset(self):
if self.user.is_superuser or self.user.is_system_auditor:
return self.model.objects.all()
@check_superuser
def can_read(self, obj):
return True
@check_superuser
def can_add(self, data):
if not data: # So the browseable API will work
return True
return True
@check_superuser
def can_change(self, obj, data):
if self.can_add(data) is False:
return False
return True
def can_delete(self, obj):
return self.can_change(obj, None)
# TODO:
class WorkflowJobNodeAccess(BaseAccess):
'''
I can see/use a WorkflowJobNode if I have permission to the associated Workflow Job
'''
model = WorkflowJobNode
def get_queryset(self):
if self.user.is_superuser or self.user.is_system_auditor:
return self.model.objects.all()
@check_superuser
def can_read(self, obj):
return True
@check_superuser
def can_add(self, data):
if not data: # So the browseable API will work
return True
return True
@check_superuser
def can_change(self, obj, data):
if self.can_add(data) is False:
return False
return True
def can_delete(self, obj):
return self.can_change(obj, None)
# TODO:
class WorkflowJobTemplateAccess(BaseAccess):
'''
I can only see/manage Workflow Job Templates if I'm a super user
'''
model = WorkflowJobTemplate
def get_queryset(self):
if self.user.is_superuser or self.user.is_system_auditor:
qs = self.model.objects.all()
else:
qs = self.model.accessible_objects(self.user, 'read_role')
return qs.select_related('created_by', 'modified_by', 'next_schedule').all()
@check_superuser
def can_read(self, obj):
return self.user in obj.read_role
def can_add(self, data):
'''
a user can create a job template if they are a superuser, an org admin
of any org that the project is a member, or if they have user or team
based permissions tying the project to the inventory source for the
given action as well as the 'create' deploy permission.
Users who are able to create deploy jobs can also run normal and check (dry run) jobs.
'''
if not data: # So the browseable API will work
return True
# if reference_obj is provided, determine if it can be copied
reference_obj = data.pop('reference_obj', None)
if 'survey_enabled' in data and data['survey_enabled']:
self.check_license(feature='surveys')
if self.user.is_superuser:
return True
def get_value(Class, field):
if reference_obj:
return getattr(reference_obj, field, None)
else:
pk = get_pk_from_dict(data, field)
if pk:
return get_object_or_400(Class, pk=pk)
else:
return None
return False
def can_start(self, obj, validate_license=True):
# TODO: Are workflows allowed for all licenses ??
# Check license.
'''
if validate_license:
self.check_license()
if obj.job_type == PERM_INVENTORY_SCAN:
self.check_license(feature='system_tracking')
if obj.survey_enabled:
self.check_license(feature='surveys')
'''
# Super users can start any job
if self.user.is_superuser:
return True
return self.can_read(obj)
# TODO: We should use execute role rather than read role
#return self.user in obj.execute_role
def can_change(self, obj, data):
data_for_change = data
if self.user not in obj.admin_role and not self.user.is_superuser:
return False
if data is not None:
data = dict(data)
if 'survey_enabled' in data and obj.survey_enabled != data['survey_enabled'] and data['survey_enabled']:
self.check_license(feature='surveys')
return True
return self.can_read(obj) and self.can_add(data_for_change)
def can_delete(self, obj):
is_delete_allowed = self.user.is_superuser or self.user in obj.admin_role
if not is_delete_allowed:
return False
active_jobs = [dict(type="job", id=o.id)
for o in obj.jobs.filter(status__in=ACTIVE_STATES)]
if len(active_jobs) > 0:
raise StateConflict({"conflict": "Resource is being used by running jobs",
"active_jobs": active_jobs})
return True
class WorkflowJobAccess(BaseAccess):
'''
I can only see Workflow Jobs if I'm a super user
'''
model = WorkflowJob
class AdHocCommandAccess(BaseAccess):
'''
I can only see/run ad hoc commands when:
@@ -1391,10 +1558,12 @@ class UnifiedJobTemplateAccess(BaseAccess):
inventory_source_qs = self.user.get_queryset(InventorySource).filter(source__in=CLOUD_INVENTORY_SOURCES)
job_template_qs = self.user.get_queryset(JobTemplate)
system_job_template_qs = self.user.get_queryset(SystemJobTemplate)
workflow_job_template_qs = self.user.get_queryset(WorkflowJobTemplate)
qs = qs.filter(Q(Project___in=project_qs) |
Q(InventorySource___in=inventory_source_qs) |
Q(JobTemplate___in=job_template_qs) |
Q(systemjobtemplate__in=system_job_template_qs))
Q(systemjobtemplate__in=system_job_template_qs) |
Q(workflowjobtemplate__in=workflow_job_template_qs))
qs = qs.select_related(
'created_by',
'modified_by',
@@ -1430,11 +1599,13 @@ class UnifiedJobAccess(BaseAccess):
job_qs = self.user.get_queryset(Job)
ad_hoc_command_qs = self.user.get_queryset(AdHocCommand)
system_job_qs = self.user.get_queryset(SystemJob)
workflow_job_qs = self.user.get_queryset(WorkflowJob)
qs = qs.filter(Q(ProjectUpdate___in=project_update_qs) |
Q(InventoryUpdate___in=inventory_update_qs) |
Q(Job___in=job_qs) |
Q(AdHocCommand___in=ad_hoc_command_qs) |
Q(SystemJob___in=system_job_qs))
Q(SystemJob___in=system_job_qs) |
Q(WorkflowJob___in=workflow_job_qs))
qs = qs.select_related(
'created_by',
'modified_by',
@@ -1825,3 +1996,7 @@ register_access(Role, RoleAccess)
register_access(NotificationTemplate, NotificationTemplateAccess)
register_access(Notification, NotificationAccess)
register_access(Label, LabelAccess)
register_access(WorkflowJobTemplateNode, WorkflowJobTemplateNodeAccess)
register_access(WorkflowJobNode, WorkflowJobNodeAccess)
register_access(WorkflowJobTemplate, WorkflowJobTemplateAccess)
register_access(WorkflowJob, WorkflowJobAccess)
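
Several of the new access classes above lean on the existing @check_superuser decorator so that can_* checks short-circuit for superusers. A standalone sketch of that pattern; this is an illustration of the idea, not the decorator's actual source.

import functools

def check_superuser(func):
    # Allow superusers outright; otherwise defer to the wrapped can_* method.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.user.is_superuser:
            return True
        return func(self, *args, **kwargs)
    return wrapper

class FakeUser(object):
    def __init__(self, is_superuser):
        self.is_superuser = is_superuser

class WorkflowNodeAccessSketch(object):
    def __init__(self, user):
        self.user = user

    @check_superuser
    def can_read(self, obj):
        return False   # simplified: non-superusers are denied in this sketch

print(WorkflowNodeAccessSketch(FakeUser(True)).can_read(None))    # True
print(WorkflowNodeAccessSketch(FakeUser(False)).can_read(None))   # False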

View File

@@ -1,13 +1,12 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import socket
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from awx.main.models import Project
class OptionEnforceError(Exception):
def __init__(self, value):
@@ -21,13 +20,9 @@ class BaseCommandInstance(BaseCommand):
def __init__(self):
super(BaseCommandInstance, self).__init__()
self.enforce_primary_role = False
self.enforce_roles = False
self.enforce_hostname_set = False
self.enforce_unique_find = False
self.option_primary = False
self.option_secondary = False
self.option_hostname = None
self.option_uuid = None
@@ -38,48 +33,24 @@ class BaseCommandInstance(BaseCommand):
def generate_option_hostname():
return make_option('--hostname',
dest='hostname',
default='',
default=socket.gethostname(),
help='Find instance by specified hostname.')
@staticmethod
def generate_option_hostname_set():
return make_option('--hostname',
dest='hostname',
default='',
default=socket.gethostname(),
help='Hostname to assign to the new instance.')
@staticmethod
def generate_option_primary():
return make_option('--primary',
action='store_true',
default=False,
dest='primary',
help='Register instance as primary.')
@staticmethod
def generate_option_secondary():
return make_option('--secondary',
action='store_true',
default=False,
dest='secondary',
help='Register instance as secondary.')
@staticmethod
def generate_option_uuid():
#TODO: Likely deprecated, maybe uuid becomes the cluster ident?
return make_option('--uuid',
dest='uuid',
default='',
help='Find instance by specified uuid.')
def include_option_primary_role(self):
BaseCommand.option_list += ( BaseCommandInstance.generate_option_primary(), )
self.enforce_primary_role = True
def include_options_roles(self):
self.include_option_primary_role()
BaseCommand.option_list += ( BaseCommandInstance.generate_option_secondary(), )
self.enforce_roles = True
def include_option_hostname_set(self):
BaseCommand.option_list += ( BaseCommandInstance.generate_option_hostname_set(), )
self.enforce_hostname_set = True
@@ -94,12 +65,6 @@ class BaseCommandInstance(BaseCommand):
def get_option_uuid(self):
return self.option_uuid
def is_option_primary(self):
return self.option_primary
def is_option_secondary(self):
return self.option_secondary
def get_UUID(self):
return self.UUID
@@ -109,31 +74,13 @@ class BaseCommandInstance(BaseCommand):
@property
def usage_error(self):
if self.enforce_roles and self.enforce_hostname_set:
return CommandError('--hostname and one of --primary or --secondary is required.')
elif self.enforce_hostname_set:
if self.enforce_hostname_set:
return CommandError('--hostname is required.')
elif self.enforce_primary_role:
return CommandError('--primary is required.')
elif self.enforce_roles:
return CommandError('One of --primary or --secondary is required.')
def handle(self, *args, **options):
if self.enforce_hostname_set and self.enforce_unique_find:
raise OptionEnforceError('Can not enforce --hostname as a setter and --hostname as a getter')
if self.enforce_roles:
self.option_primary = options['primary']
self.option_secondary = options['secondary']
if self.is_option_primary() and self.is_option_secondary() or not (self.is_option_primary() or self.is_option_secondary()):
raise self.usage_error
elif self.enforce_primary_role:
if options['primary']:
self.option_primary = options['primary']
else:
raise self.usage_error
if self.enforce_hostname_set:
if options['hostname']:
self.option_hostname = options['hostname']
@@ -162,11 +109,4 @@ class BaseCommandInstance(BaseCommand):
@staticmethod
def instance_str(instance):
return BaseCommandInstance.__instance_str(instance, ('uuid', 'hostname', 'role'))
def update_projects(self, instance):
"""Update all projects, ensuring the job runs against this instance,
which is the primary instance.
"""
for project in Project.objects.all():
project.update()
return BaseCommandInstance.__instance_str(instance, ('uuid', 'hostname'))

View File

@@ -1,30 +1,20 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
from django.core.management.base import CommandError
from awx.main.management.commands._base_instance import BaseCommandInstance
from awx.main.models import Instance
instance_str = BaseCommandInstance.instance_str
class Command(BaseCommandInstance):
"""Internal tower command.
"""
Internal tower command.
Register this instance with the database for HA tracking.
This command is idempotent.
This command will error out in the following conditions:
* Attempting to register a secondary machine with no primary machines.
* Attempting to register a primary instance when a different primary
instance exists.
* Attempting to re-register an instance with changed values.
"""
def __init__(self):
super(Command, self).__init__()
self.include_options_roles()
self.include_option_hostname_set()
def handle(self, *args, **options):
@@ -32,32 +22,10 @@ class Command(BaseCommandInstance):
uuid = self.get_UUID()
# Is there an existing record for this machine? If so, retrieve that record and look for issues.
try:
instance = Instance.objects.get(uuid=uuid)
if instance.hostname != self.get_option_hostname():
raise CommandError('Instance already registered with a different hostname %s.' % instance_str(instance))
print("Instance already registered %s" % instance_str(instance))
except Instance.DoesNotExist:
# Get a status on primary machines (excluding this one, regardless of its status).
other_instances = Instance.objects.exclude(uuid=uuid)
primaries = other_instances.filter(primary=True).count()
# If this instance is being set to primary and a *different* primary machine already exists, error out.
if self.is_option_primary() and primaries:
raise CommandError('Another instance is already registered as primary.')
# Lastly, if there are no primary machines at all, then don't allow this to be registered as a secondary machine.
if self.is_option_secondary() and not primaries:
raise CommandError('Unable to register a secondary machine until another primary machine has been registered.')
# Okay, we've checked for appropriate errata; perform the registration.
instance = Instance(uuid=uuid, primary=self.is_option_primary(), hostname=self.get_option_hostname())
instance.save()
# If this is a primary instance, update projects.
if instance.primary:
self.update_projects(instance)
# Done!
print('Successfully registered instance %s.' % instance_str(instance))
instance = Instance.objects.filter(hostname=self.get_option_hostname())
if instance.exists():
print("Instance already registered %s" % instance_str(instance[0]))
return
instance = Instance(uuid=uuid, hostname=self.get_option_hostname())
instance.save()
print('Successfully registered instance %s.' % instance_str(instance))

View File

@@ -2,179 +2,68 @@
# All Rights Reserved.
# Python
import os
import sys
import datetime
import logging
import signal
import time
from multiprocessing import Process, Queue
from Queue import Empty as QueueEmpty
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin
# Django
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.db import transaction, DatabaseError
from django.core.cache import cache
from django.db import DatabaseError
from django.utils.dateparse import parse_datetime
from django.utils.timezone import FixedOffset
from django.db import connection
# AWX
from awx.main.models import * # noqa
from awx.main.socket import Socket
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
WORKERS = 4
class CallbackBrokerWorker(ConsumerMixin):
class CallbackReceiver(object):
def __init__(self):
self.parent_mappings = {}
def __init__(self, connection):
self.connection = connection
def run_subscriber(self, use_workers=True):
def shutdown_handler(active_workers):
def _handler(signum, frame):
try:
for active_worker in active_workers:
active_worker.terminate()
signal.signal(signum, signal.SIG_DFL)
os.kill(os.getpid(), signum) # Rethrow signal, this time without catching it
except Exception:
# TODO: LOG
pass
return _handler
def get_consumers(self, Consumer, channel):
return [Consumer(queues=[Queue(settings.CALLBACK_QUEUE,
Exchange(settings.CALLBACK_QUEUE, type='direct'),
routing_key=settings.CALLBACK_QUEUE)],
accept=['json'],
callbacks=[self.process_task])]
def check_pre_handle(data):
event = data.get('event', '')
if event == 'playbook_on_play_start':
return True
return False
worker_queues = []
if use_workers:
connection.close()
for idx in range(WORKERS):
queue_actual = Queue(settings.JOB_EVENT_MAX_QUEUE_SIZE)
w = Process(target=self.callback_worker, args=(queue_actual, idx,))
w.start()
if settings.DEBUG:
logger.info('Started worker %s' % str(idx))
worker_queues.append([0, queue_actual, w])
elif settings.DEBUG:
logger.warn('Started callback receiver (no workers)')
main_process = Process(
target=self.callback_handler,
args=(use_workers, worker_queues,)
)
main_process.daemon = True
main_process.start()
signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in worker_queues] + [main_process]))
signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in worker_queues] + [main_process]))
while True:
workers_changed = False
idx = 0
for queue_worker in worker_queues:
if not queue_worker[2].is_alive():
logger.warn("Worker %s was not alive, restarting" % str(queue_worker))
workers_changed = True
queue_worker[2].join()
w = Process(target=self.callback_worker, args=(queue_worker[1], idx,))
w.daemon = True
w.start()
signal.signal(signal.SIGINT, shutdown_handler([w]))
signal.signal(signal.SIGTERM, shutdown_handler([w]))
queue_worker[2] = w
idx += 1
if workers_changed:
signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in worker_queues] + [main_process]))
signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in worker_queues] + [main_process]))
if not main_process.is_alive():
logger.error("Main process is not alive")
for queue_worker in worker_queues:
queue_worker[2].terminate()
break
time.sleep(0.1)
def write_queue_worker(self, preferred_queue, worker_queues, message):
queue_order = sorted(range(WORKERS), cmp=lambda x, y: -1 if x==preferred_queue else 0)
for queue_actual in queue_order:
try:
worker_actual = worker_queues[queue_actual]
worker_actual[1].put(message, block=True, timeout=2)
worker_actual[0] += 1
return queue_actual
except Exception:
logger.warn("Could not write to queue %s" % preferred_queue)
continue
return None
def callback_handler(self, use_workers, worker_queues):
total_messages = 0
last_parent_events = {}
with Socket('callbacks', 'r') as callbacks:
for message in callbacks.listen():
total_messages += 1
if 'ad_hoc_command_id' in message:
self.process_ad_hoc_event(message)
elif not use_workers:
self.process_job_event(message)
else:
job_parent_events = last_parent_events.get(message['job_id'], {})
if message['event'] in ('playbook_on_play_start', 'playbook_on_stats', 'playbook_on_vars_prompt'):
parent = job_parent_events.get('playbook_on_start', None)
elif message['event'] in ('playbook_on_notify',
'playbook_on_setup',
'playbook_on_task_start',
'playbook_on_no_hosts_matched',
'playbook_on_no_hosts_remaining',
'playbook_on_include',
'playbook_on_import_for_host',
'playbook_on_not_import_for_host'):
parent = job_parent_events.get('playbook_on_play_start', None)
elif message['event'].startswith('runner_on_') or message['event'].startswith('runner_item_on_'):
list_parents = []
list_parents.append(job_parent_events.get('playbook_on_setup', None))
list_parents.append(job_parent_events.get('playbook_on_task_start', None))
list_parents = sorted(filter(lambda x: x is not None, list_parents), cmp=lambda x, y: y.id - x.id)
parent = list_parents[0] if len(list_parents) > 0 else None
else:
parent = None
if parent is not None:
message['parent'] = parent.id
if 'created' in message:
del(message['created'])
if message['event'] in ('playbook_on_start', 'playbook_on_play_start',
'playbook_on_setup', 'playbook_on_task_start'):
job_parent_events[message['event']] = self.process_job_event(message)
else:
if message['event'] == 'playbook_on_stats':
job_parent_events = {}
actual_queue = self.write_queue_worker(total_messages % WORKERS, worker_queues, message)
# NOTE: It might be better to recycle the entire callback receiver process if one or more of the queues are too full
# the drawback is that if we under extremely high load we may be legitimately taking a while to process messages
if actual_queue is None:
logger.error("All queues full!")
sys.exit(1)
last_parent_events[message['job_id']] = job_parent_events
@transaction.atomic
def process_job_event(self, data):
# Sanity check: Do we need to do anything at all?
event = data.get('event', '')
parent_id = data.get('parent', None)
if not event or 'job_id' not in data:
return
def process_task(self, body, message):
try:
if "event" not in body:
raise Exception("Payload does not have an event")
if "job_id" not in body:
raise Exception("Payload does not have a job_id")
if settings.DEBUG:
logger.info("Body: {}".format(body))
logger.info("Message: {}".format(message))
self.process_job_event(body)
except Exception as exc:
import traceback
traceback.print_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
message.ack()
def process_job_event(self, payload):
# Get the correct "verbose" value from the job.
# If for any reason there's a problem, just use 0.
if 'ad_hoc_command_id' in payload:
event_type_key = 'ad_hoc_command_id'
event_object_type = AdHocCommand
else:
event_type_key = 'job_id'
event_object_type = Job
try:
verbose = Job.objects.get(id=data['job_id']).verbosity
verbose = event_object_type.objects.get(id=payload[event_type_key]).verbosity
except Exception as e:
verbose = 0
verbose=0
# TODO: cache
# Convert the datetime for the job event's creation appropriately,
# and include a time zone for it.
@@ -182,120 +71,58 @@ class CallbackReceiver(object):
# In the event of any issue, throw it out, and Django will just save
# the current time.
try:
if not isinstance(data['created'], datetime.datetime):
data['created'] = parse_datetime(data['created'])
if not data['created'].tzinfo:
data['created'] = data['created'].replace(tzinfo=FixedOffset(0))
if not isinstance(payload['created'], datetime.datetime):
payload['created'] = parse_datetime(payload['created'])
if not payload['created'].tzinfo:
payload['created'] = payload['created'].replace(tzinfo=FixedOffset(0))
except (KeyError, ValueError):
data.pop('created', None)
payload.pop('created', None)
# Print the data to stdout if we're in DEBUG mode.
if settings.DEBUG:
print(data)
event_uuid = payload.get("uuid", '')
parent_event_uuid = payload.get("parent_uuid", '')
# Sanity check: Don't honor keys that we don't recognize.
for key in data.keys():
if key not in ('job_id', 'event', 'event_data',
'created', 'counter'):
data.pop(key)
for key in payload.keys():
if key not in (event_type_key, 'event', 'event_data',
'created', 'counter', 'uuid'):
payload.pop(key)
# Save any modifications to the job event to the database.
# If we get a database error of some kind, bail out.
try:
# If we're not in verbose mode, wipe out any module
# arguments.
res = data['event_data'].get('res', {})
res = payload['event_data'].get('res', {})
if isinstance(res, dict):
i = res.get('invocation', {})
if verbose == 0 and 'module_args' in i:
i['module_args'] = ''
# Create a new JobEvent object.
job_event = JobEvent(**data)
if parent_id is not None:
job_event.parent = JobEvent.objects.get(id=parent_id)
job_event.save(post_process=True)
# Retrun the job event object.
return job_event
if 'ad_hoc_command_id' in payload:
AdHocCommandEvent.objects.create(**data)
return
j = JobEvent(**payload)
if payload['event'] == 'playbook_on_start':
j.save()
cache.set("{}_{}".format(payload['job_id'], event_uuid), j.id, 300)
return
else:
if parent_event_uuid:
parent_id = cache.get("{}_{}".format(payload['job_id'], parent_event_uuid), None)
if parent_id is None:
parent_id_obj = JobEvent.objects.filter(uuid=parent_event_uuid, job_id=payload['job_id'])
if parent_id_obj.exists(): # Problematic if not there, means the parent hasn't been written yet... TODO
j.parent_id = parent_id_obj[0].id
print("Settings cache: {}_{} with value {}".format(payload['job_id'], parent_event_uuid, j.parent_id))
cache.set("{}_{}".format(payload['job_id'], parent_event_uuid), j.parent_id, 300)
else:
print("Cache hit")
j.parent_id = parent_id
j.save()
if event_uuid:
cache.set("{}_{}".format(payload['job_id'], event_uuid), j.id, 300)
except DatabaseError as e:
# Log the error and bail out.
logger.error('Database error saving job event: %s', e)
return None
logger.error("Database Error Saving Job Event: {}".format(e))
@transaction.atomic
def process_ad_hoc_event(self, data):
# Sanity check: Do we need to do anything at all?
event = data.get('event', '')
if not event or 'ad_hoc_command_id' not in data:
return
# Get the correct "verbose" value from the job.
# If for any reason there's a problem, just use 0.
try:
verbose = AdHocCommand.objects.get(id=data['ad_hoc_command_id']).verbosity
except Exception as e:
verbose = 0
# Convert the datetime for the job event's creation appropriately,
# and include a time zone for it.
#
# In the event of any issue, throw it out, and Django will just save
# the current time.
try:
if not isinstance(data['created'], datetime.datetime):
data['created'] = parse_datetime(data['created'])
if not data['created'].tzinfo:
data['created'] = data['created'].replace(tzinfo=FixedOffset(0))
except (KeyError, ValueError):
data.pop('created', None)
# Print the data to stdout if we're in DEBUG mode.
if settings.DEBUG:
print(data)
# Sanity check: Don't honor keys that we don't recognize.
for key in data.keys():
if key not in ('ad_hoc_command_id', 'event', 'event_data',
'created', 'counter'):
data.pop(key)
# Save any modifications to the ad hoc command event to the database.
# If we get a database error of some kind, bail out.
try:
# If we're not in verbose mode, wipe out any module
# arguments. FIXME: Needed for adhoc?
res = data['event_data'].get('res', {})
if isinstance(res, dict):
i = res.get('invocation', {})
if verbose == 0 and 'module_args' in i:
i['module_args'] = ''
# Create a new AdHocCommandEvent object.
ad_hoc_command_event = AdHocCommandEvent.objects.create(**data)
# Retrun the ad hoc comamnd event object.
return ad_hoc_command_event
except DatabaseError as e:
# Log the error and bail out.
logger.error('Database error saving ad hoc command event: %s', e)
return None
def callback_worker(self, queue_actual, idx):
messages_processed = 0
while True:
try:
message = queue_actual.get(block=True, timeout=1)
except QueueEmpty:
continue
except Exception as e:
logger.error("Exception on listen socket, restarting: " + str(e))
break
self.process_job_event(message)
messages_processed += 1
if messages_processed >= settings.JOB_EVENT_RECYCLE_THRESHOLD:
logger.info("Shutting down message receiver")
break
class Command(NoArgsCommand):
'''
@@ -306,9 +133,10 @@ class Command(NoArgsCommand):
help = 'Launch the job callback receiver'
def handle_noargs(self, **options):
cr = CallbackReceiver()
try:
cr.run_subscriber()
except KeyboardInterrupt:
pass
with Connection(settings.BROKER_URL) as conn:
try:
worker = CallbackBrokerWorker(conn)
worker.run()
except KeyboardInterrupt:
print('Terminating Callback Receiver')

View File

@@ -14,7 +14,7 @@ from django.utils import timezone
# AWX
from awx.main.models.fact import Fact
from awx.main.models.inventory import Host
from awx.main.socket import Socket
from awx.main.socket_queue import Socket
logger = logging.getLogger('awx.main.commands.run_fact_cache_receiver')

View File

@@ -16,7 +16,7 @@ from django.core.management.base import NoArgsCommand
# AWX
import awx
from awx.main.models import * # noqa
from awx.main.socket import Socket
from awx.main.socket_queue import Socket
# socketio
from socketio import socketio_manage

View File

@@ -54,6 +54,8 @@ class SimpleDAG(object):
type_str = "Inventory"
elif type(obj) == ProjectUpdate:
type_str = "Project"
elif type(obj) == WorkflowJob:
type_str = "Workflow"
else:
type_str = "Unknown"
type_str += "%s" % str(obj.id)
@@ -68,10 +70,11 @@ class SimpleDAG(object):
short_string_obj(n['node_object']),
"red" if n['node_object'].status == 'running' else "black",
)
for from_node, to_node in self.edges:
doc += "%s -> %s;\n" % (
for from_node, to_node, label in self.edges:
doc += "%s -> %s [ label=\"%s\" ];\n" % (
short_string_obj(self.nodes[from_node]['node_object']),
short_string_obj(self.nodes[to_node]['node_object']),
label,
)
doc += "}\n"
gv_file = open('/tmp/graph.gv', 'w')
@@ -82,16 +85,16 @@ class SimpleDAG(object):
if self.find_ord(obj) is None:
self.nodes.append(dict(node_object=obj, metadata=metadata))
def add_edge(self, from_obj, to_obj):
def add_edge(self, from_obj, to_obj, label=None):
from_obj_ord = self.find_ord(from_obj)
to_obj_ord = self.find_ord(to_obj)
if from_obj_ord is None or to_obj_ord is None:
raise LookupError("Object not found")
self.edges.append((from_obj_ord, to_obj_ord))
self.edges.append((from_obj_ord, to_obj_ord, label))
def add_edges(self, edgelist):
for edge_pair in edgelist:
self.add_edge(edge_pair[0], edge_pair[1])
self.add_edge(edge_pair[0], edge_pair[1], edge_pair[2])
def find_ord(self, obj):
for idx in range(len(self.nodes)):
@@ -110,22 +113,32 @@ class SimpleDAG(object):
return "project_update"
elif type(obj) == SystemJob:
return "system_job"
elif type(obj) == WorkflowJob:
return "workflow_job"
return "unknown"
def get_dependencies(self, obj):
def get_dependencies(self, obj, label=None):
antecedents = []
this_ord = self.find_ord(obj)
for node, dep in self.edges:
if node == this_ord:
antecedents.append(self.nodes[dep])
for node, dep, lbl in self.edges:
if label:
if node == this_ord and lbl == label:
antecedents.append(self.nodes[dep])
else:
if node == this_ord:
antecedents.append(self.nodes[dep])
return antecedents
def get_dependents(self, obj):
def get_dependents(self, obj, label=None):
decendents = []
this_ord = self.find_ord(obj)
for node, dep in self.edges:
if dep == this_ord:
decendents.append(self.nodes[node])
for node, dep, lbl in self.edges:
if label:
if dep == this_ord and lbl == label:
decendents.append(self.nodes[node])
else:
if dep == this_ord:
decendents.append(self.nodes[node])
return decendents
def get_leaf_nodes(self):
@@ -135,6 +148,85 @@ class SimpleDAG(object):
leafs.append(n)
return leafs
def get_root_nodes(self):
roots = []
for n in self.nodes:
if len(self.get_dependents(n['node_object'])) < 1:
roots.append(n)
return roots
class WorkflowDAG(SimpleDAG):
def __init__(self, workflow_job=None):
super(WorkflowDAG, self).__init__()
if workflow_job:
self._init_graph(workflow_job)
def _init_graph(self, workflow_job):
workflow_nodes = workflow_job.workflow_job_nodes.all()
for workflow_node in workflow_nodes:
self.add_node(workflow_node)
for node_type in ['success_nodes', 'failure_nodes', 'always_nodes']:
for workflow_node in workflow_nodes:
related_nodes = getattr(workflow_node, node_type).all()
for related_node in related_nodes:
self.add_edge(workflow_node, related_node, node_type)
def bfs_nodes_to_run(self):
root_nodes = self.get_root_nodes()
nodes = root_nodes
nodes_found = []
for index, n in enumerate(nodes):
obj = n['node_object']
job = obj.job
if not job:
nodes_found.append(n)
# Job is about to run or is running. Hold our horses and wait for
# the job to finish. We can't proceed down the graph path until we
# have the job result.
elif job.status not in ['failed', 'error', 'successful']:
continue
elif job.status in ['failed', 'error']:
children_failed = self.get_dependencies(obj, 'failure_nodes')
children_always = self.get_dependencies(obj, 'always_nodes')
children_all = children_failed + children_always
nodes.extend(children_all)
elif job.status in ['successful']:
children_success = self.get_dependencies(obj, 'success_nodes')
nodes.extend(children_success)
else:
logger.warn("Incorrect graph structure")
return [n['node_object'] for n in nodes_found]
def is_workflow_done(self):
root_nodes = self.get_root_nodes()
nodes = root_nodes
for index, n in enumerate(nodes):
obj = n['node_object']
job = obj.job
if not job:
return False
# Job is about to run or is running. Hold our horses and wait for
# the job to finish. We can't proceed down the graph path until we
# have the job result.
elif job.status not in ['failed', 'error', 'successful']:
return False
elif job.status in ['failed', 'error']:
children_failed = self.get_dependencies(obj, 'failure_nodes')
children_always = self.get_dependencies(obj, 'always_nodes')
children_all = children_failed + children_always
nodes.extend(children_all)
elif job.status in ['successful']:
children_success = self.get_dependencies(obj, 'success_nodes')
nodes.extend(children_success)
else:
logger.warn("Incorrect graph structure")
return True
def get_tasks():
"""Fetch all Tower tasks that are relevant to the task management
system.
@@ -149,11 +241,42 @@ def get_tasks():
ProjectUpdate.objects.filter(status__in=RELEVANT_JOBS)]
graph_system_jobs = [sj for sj in
SystemJob.objects.filter(status__in=RELEVANT_JOBS)]
graph_workflow_jobs = [wf for wf in
WorkflowJob.objects.filter(status__in=RELEVANT_JOBS)]
all_actions = sorted(graph_jobs + graph_ad_hoc_commands + graph_inventory_updates +
graph_project_updates + graph_system_jobs,
graph_project_updates + graph_system_jobs +
graph_workflow_jobs,
key=lambda task: task.created)
return all_actions
def get_running_workflow_jobs():
graph_workflow_jobs = [wf for wf in
WorkflowJob.objects.filter(status='running')]
return graph_workflow_jobs
def do_spawn_workflow_jobs():
workflow_jobs = get_running_workflow_jobs()
for workflow_job in workflow_jobs:
dag = WorkflowDAG(workflow_job)
spawn_nodes = dag.bfs_nodes_to_run()
for spawn_node in spawn_nodes:
# TODO: Inject job template template params as kwargs.
# Make sure to take into account extra_vars merge logic
kv = {}
job = spawn_node.unified_job_template.create_unified_job(**kv)
spawn_node.job = job
spawn_node.save()
can_start = job.signal_start(**kv)
if not can_start:
job.status = 'failed'
job.job_explanation = "Workflow job could not start because it was not in the right state or required manual credentials"
job.save(update_fields=['status', 'job_explanation'])
job.socketio_emit_status("failed")
# TODO: should we emit a status on the socket here similar to tasks.py tower_periodic_scheduler() ?
#emit_websocket_notification('/socket.io/jobs', '', dict(id=))
def rebuild_graph(message):
"""Regenerate the task graph by refreshing known tasks from Tower, purging
orphaned running tasks, and creating dependencies for new tasks before
@@ -170,6 +293,8 @@ def rebuild_graph(message):
logger.warn("Ignoring celery task inspector")
active_task_queues = None
do_spawn_workflow_jobs()
all_sorted_tasks = get_tasks()
if not len(all_sorted_tasks):
return None
@@ -184,6 +309,7 @@ def rebuild_graph(message):
# as a whole that celery appears to be down.
if not hasattr(settings, 'CELERY_UNIT_TEST'):
return None
running_tasks = filter(lambda t: t.status == 'running', all_sorted_tasks)
waiting_tasks = filter(lambda t: t.status != 'running', all_sorted_tasks)
new_tasks = filter(lambda t: t.status == 'pending', all_sorted_tasks)

View File

@@ -2,8 +2,8 @@
# All Rights Reserved.
import sys
import socket
from django.conf import settings
from django.db import models
@@ -28,31 +28,12 @@ class InstanceManager(models.Manager):
# If we are running unit tests, return a stub record.
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
return self.model(id=1, primary=True,
hostname='localhost',
uuid='00000000-0000-0000-0000-000000000000')
# Return the appropriate record from the database.
return self.get(uuid=settings.SYSTEM_UUID)
return self.get(hostname=socket.gethostname())
def my_role(self):
"""Return the role of the currently active instance, as a string
('primary' or 'secondary').
"""
# If we are running unit tests, we are primary, because reasons.
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
return 'primary'
# Check if this instance is primary; if so, return "primary", otherwise
# "secondary".
if self.me().primary:
return 'primary'
return 'secondary'
def primary(self):
"""Return the primary instance."""
# If we are running unit tests, return a stub record.
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
return self.model(id=1, primary=True,
uuid='00000000-0000-0000-0000-000000000000')
# Return the appropriate record from the database.
return self.get(primary=True)
# NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
return "tower"

View File

@ -8,12 +8,9 @@ import uuid
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.db import IntegrityError
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.functional import curry
from awx import __version__ as version
from awx.main.models import ActivityStream, Instance
from awx.main.models import ActivityStream
from awx.main.conf import tower_settings
from awx.api.authentication import TokenAuthentication
@ -71,41 +68,6 @@ class ActivityStreamMiddleware(threading.local):
if instance.id not in self.instance_ids:
self.instance_ids.append(instance.id)
class HAMiddleware(object):
"""A middleware class that checks to see whether the request is being
served on a secondary instance, and redirects the request back to the
primary instance if so.
"""
def process_request(self, request):
"""Process the request, and redirect if this is a request on a
secondary node.
"""
# Is this the primary node? If so, we can just return None and be done;
# we just want normal behavior in this case.
if Instance.objects.my_role() == 'primary':
return None
# Always allow the /ping/ endpoint.
if request.path.startswith('/api/v1/ping'):
return None
# Get the primary instance.
primary = Instance.objects.primary()
# If this is a request to /, then we return a special landing page that
# informs the user that they are on the secondary instance and will
# be redirected.
if request.path == '/':
return TemplateResponse(request, 'ha/redirect.html', {
'primary': primary,
'redirect_seconds': 30,
'version': version,
})
# Redirect to the base page of the primary instance.
return HttpResponseRedirect('http://%s%s' % (primary.hostname, request.path))
class AuthTokenTimeoutMiddleware(object):
"""Presume that when the user includes the auth header, they go through the
authentication mechanism. Further, that mechanism is presumed to extend

View File

@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import awx.main.models.notifications
import django.db.models.deletion
import awx.main.models.workflow
import awx.main.fields
class Migration(migrations.Migration):
dependencies = [
('main', '0032_v302_credential_permissions_update'),
]
operations = [
migrations.CreateModel(
name='WorkflowJob',
fields=[
('unifiedjob_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='main.UnifiedJob')),
('extra_vars', models.TextField(default=b'', blank=True)),
],
options={
'ordering': ('id',),
},
bases=('main.unifiedjob', models.Model, awx.main.models.notifications.JobNotificationMixin, awx.main.models.workflow.WorkflowJobInheritNodesMixin),
),
migrations.CreateModel(
name='WorkflowJobNode',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('always_nodes', models.ManyToManyField(related_name='workflowjobnodes_always', to='main.WorkflowJobNode', blank=True)),
('failure_nodes', models.ManyToManyField(related_name='workflowjobnodes_failure', to='main.WorkflowJobNode', blank=True)),
('job', models.ForeignKey(related_name='unified_job_nodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.UnifiedJob', null=True)),
('success_nodes', models.ManyToManyField(related_name='workflowjobnodes_success', to='main.WorkflowJobNode', blank=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='WorkflowJobTemplate',
fields=[
('unifiedjobtemplate_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='main.UnifiedJobTemplate')),
('extra_vars', models.TextField(default=b'', blank=True)),
('admin_role', awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'singleton:system_administrator', to='main.Role', null=b'True')),
],
bases=('main.unifiedjobtemplate', models.Model),
),
migrations.CreateModel(
name='WorkflowJobTemplateNode',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('always_nodes', models.ManyToManyField(related_name='workflowjobtemplatenodes_always', to='main.WorkflowJobTemplateNode', blank=True)),
('failure_nodes', models.ManyToManyField(related_name='workflowjobtemplatenodes_failure', to='main.WorkflowJobTemplateNode', blank=True)),
('success_nodes', models.ManyToManyField(related_name='workflowjobtemplatenodes_success', to='main.WorkflowJobTemplateNode', blank=True)),
('unified_job_template', models.ForeignKey(related_name='workflowjobtemplatenodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.UnifiedJobTemplate', null=True)),
('workflow_job_template', models.ForeignKey(related_name='workflow_job_template_nodes', default=None, blank=True, to='main.WorkflowJobTemplate', null=True)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='workflowjobnode',
name='unified_job_template',
field=models.ForeignKey(related_name='workflowjobnodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.UnifiedJobTemplate', null=True),
),
migrations.AddField(
model_name='workflowjobnode',
name='workflow_job',
field=models.ForeignKey(related_name='workflow_job_nodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.WorkflowJob', null=True),
),
migrations.AddField(
model_name='workflowjob',
name='workflow_job_template',
field=models.ForeignKey(related_name='jobs', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.WorkflowJobTemplate', null=True),
),
migrations.AddField(
model_name='activitystream',
name='workflow_job',
field=models.ManyToManyField(to='main.WorkflowJob', blank=True),
),
migrations.AddField(
model_name='activitystream',
name='workflow_job_node',
field=models.ManyToManyField(to='main.WorkflowJobNode', blank=True),
),
migrations.AddField(
model_name='activitystream',
name='workflow_job_template',
field=models.ManyToManyField(to='main.WorkflowJobTemplate', blank=True),
),
migrations.AddField(
model_name='activitystream',
name='workflow_job_template_node',
field=models.ManyToManyField(to='main.WorkflowJobTemplateNode', blank=True),
),
]

View File

@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0033_v310_add_workflows'),
]
operations = [
migrations.RemoveField(
model_name='instance',
name='primary',
),
migrations.AlterField(
model_name='instance',
name='uuid',
field=models.CharField(max_length=40),
),
]

View File

@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0034_v310_modify_ha_instance'),
]
operations = [
migrations.AddField(
model_name='jobevent',
name='uuid',
field=models.CharField(default=b'', max_length=1024, editable=False),
),
]

View File

@ -22,6 +22,7 @@ from awx.main.models.mixins import * # noqa
from awx.main.models.notifications import * # noqa
from awx.main.models.fact import * # noqa
from awx.main.models.label import * # noqa
from awx.main.models.workflow import * # noqa
# Monkeypatch Django serializer to ignore django-taggit fields (which break
# the dumpdata command; see https://github.com/alex/django-taggit/issues/155).

View File

@ -49,6 +49,10 @@ class ActivityStream(models.Model):
permission = models.ManyToManyField("Permission", blank=True)
job_template = models.ManyToManyField("JobTemplate", blank=True)
job = models.ManyToManyField("Job", blank=True)
workflow_job_template_node = models.ManyToManyField("WorkflowJobTemplateNode", blank=True)
workflow_job_node = models.ManyToManyField("WorkflowJobNode", blank=True)
workflow_job_template = models.ManyToManyField("WorkflowJobTemplate", blank=True)
workflow_job = models.ManyToManyField("WorkflowJob", blank=True)
unified_job_template = models.ManyToManyField("UnifiedJobTemplate", blank=True, related_name='activity_stream_as_unified_job_template+')
unified_job = models.ManyToManyField("UnifiedJob", blank=True, related_name='activity_stream_as_unified_job+')
ad_hoc_command = models.ManyToManyField("AdHocCommand", blank=True)

View File

@ -1,8 +1,6 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import functools
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
@ -11,7 +9,7 @@ from awx.main.managers import InstanceManager
from awx.main.models.inventory import InventoryUpdate
from awx.main.models.jobs import Job
from awx.main.models.projects import ProjectUpdate
from awx.main.models.unified_jobs import UnifiedJob, CAN_CANCEL
from awx.main.models.unified_jobs import UnifiedJob
__all__ = ('Instance', 'JobOrigin')
@ -22,9 +20,8 @@ class Instance(models.Model):
"""
objects = InstanceManager()
uuid = models.CharField(max_length=40, unique=True)
uuid = models.CharField(max_length=40)
hostname = models.CharField(max_length=250, unique=True)
primary = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
@ -33,29 +30,8 @@ class Instance(models.Model):
@property
def role(self):
"""Return the role of this instance, as a string."""
if self.primary:
return 'primary'
return 'secondary'
@functools.wraps(models.Model.save)
def save(self, *args, **kwargs):
"""Save the instance. If this is a secondary instance, then ensure
that any currently-running jobs that this instance started are
canceled.
"""
# Perform the normal save.
result = super(Instance, self).save(*args, **kwargs)
# If this is not a primary instance, then kill any jobs that this
# instance was responsible for starting.
if not self.primary:
for job in UnifiedJob.objects.filter(job_origin__instance=self,
status__in=CAN_CANCEL):
job.cancel()
# Return back the original result.
return result
# NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
return "tower"
class JobOrigin(models.Model):

View File

@ -964,6 +964,11 @@ class JobEvent(CreatedModifiedModel):
default=False,
editable=False,
)
uuid = models.CharField(
max_length=1024,
default='',
editable=False,
)
host = models.ForeignKey(
'Host',
related_name='job_events_as_primary_host',

View File

@ -164,17 +164,22 @@ class Role(models.Model):
global role_names
return role_names[self.role_field]
@property
def description(self):
def get_description(self, reference_content_object=None):
global role_descriptions
description = role_descriptions[self.role_field]
if '%s' in description and self.content_type:
model = self.content_type.model_class()
if reference_content_object:
content_type = ContentType.objects.get_for_model(reference_content_object)
else:
content_type = self.content_type
if '%s' in description and content_type:
model = content_type.model_class()
model_name = re.sub(r'([a-z])([A-Z])', r'\1 \2', model.__name__).lower()
description = description % model_name
return description
description = property(get_description)
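A minimal sketch of the reworked description lookup; the objects fetched here are assumptions, and the exact description strings depend on role_descriptions, which is defined elsewhere:
from awx.main.models import JobTemplate, WorkflowJobTemplate

jt = JobTemplate.objects.first()               # assumed to exist
wfjt = WorkflowJobTemplate.objects.first()     # assumed to exist
jt.admin_role.description                      # '%s' filled in from the role's own content_type
jt.admin_role.get_description(reference_content_object=wfjt)   # '%s' filled in from the passed object's type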
@staticmethod
def rebuild_role_ancestor_list(additions, removals):
'''

awx/main/models/workflow.py (new file, 242 lines)
View File

@ -0,0 +1,242 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Python
#import urlparse
# Django
from django.db import models
from django.core.urlresolvers import reverse
#from django import settings as tower_settings
# AWX
from awx.main.models import UnifiedJobTemplate, UnifiedJob
from awx.main.models.notifications import JobNotificationMixin
from awx.main.models.base import BaseModel, CreatedModifiedModel, VarsDictProperty
from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
)
from awx.main.fields import ImplicitRoleField
__all__ = ['WorkflowJobTemplate', 'WorkflowJob', 'WorkflowJobOptions', 'WorkflowJobNode', 'WorkflowJobTemplateNode',]
class WorkflowNodeBase(CreatedModifiedModel):
class Meta:
abstract = True
app_label = 'main'
# TODO: RBAC
'''
admin_role = ImplicitRoleField(
parent_role='workflow_job_template.admin_role',
)
'''
success_nodes = models.ManyToManyField(
'self',
blank=True,
symmetrical=False,
related_name='%(class)ss_success',
)
failure_nodes = models.ManyToManyField(
'self',
blank=True,
symmetrical=False,
related_name='%(class)ss_failure',
)
always_nodes = models.ManyToManyField(
'self',
blank=True,
symmetrical=False,
related_name='%(class)ss_always',
)
unified_job_template = models.ForeignKey(
'UnifiedJobTemplate',
related_name='%(class)ss',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
)
class WorkflowJobTemplateNode(WorkflowNodeBase):
# TODO: Ensure the API forces workflow_job_template to be set
workflow_job_template = models.ForeignKey(
'WorkflowJobTemplate',
related_name='workflow_job_template_nodes',
blank=True,
null=True,
default=None,
on_delete=models.CASCADE,
)
def get_absolute_url(self):
return reverse('api:workflow_job_template_node_detail', args=(self.pk,))
class WorkflowJobNode(WorkflowNodeBase):
job = models.ForeignKey(
'UnifiedJob',
related_name='unified_job_nodes',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
)
workflow_job = models.ForeignKey(
'WorkflowJob',
related_name='workflow_job_nodes',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
)
def get_absolute_url(self):
return reverse('api:workflow_job_node_detail', args=(self.pk,))
class WorkflowJobOptions(BaseModel):
class Meta:
abstract = True
extra_vars = models.TextField(
blank=True,
default='',
)
class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions):
class Meta:
app_label = 'main'
admin_role = ImplicitRoleField(
parent_role='singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
)
@classmethod
def _get_unified_job_class(cls):
return WorkflowJob
@classmethod
def _get_unified_job_field_names(cls):
# TODO: ADD LABELS
return ['name', 'description', 'extra_vars',]
def get_absolute_url(self):
return reverse('api:workflow_job_template_detail', args=(self.pk,))
@property
def cache_timeout_blocked(self):
# TODO: don't allow running this workflow job template if a job from the same workflow template is already running
return False
# TODO: Notifications
# TODO: Surveys
#def create_job(self, **kwargs):
# '''
# Create a new job based on this template.
# '''
# return self.create_unified_job(**kwargs)
# TODO: Delete create_unified_job here and explicitly call create_workflow_job(); figure out where the call site is
def create_unified_job(self, **kwargs):
#def create_workflow_job(self, **kwargs):
#workflow_job = self.create_unified_job(**kwargs)
workflow_job = super(WorkflowJobTemplate, self).create_unified_job(**kwargs)
workflow_job.inherit_job_template_workflow_nodes()
return workflow_job
class WorkflowJobInheritNodesMixin(object):
def _inherit_relationship(self, old_node, new_node, node_ids_map, node_type):
old_related_nodes = self._get_all_by_type(old_node, node_type)
new_node_type_mgr = getattr(new_node, node_type)
for old_related_node in old_related_nodes:
new_related_node = self._get_workflow_job_node_by_id(node_ids_map[old_related_node.id])
new_node_type_mgr.add(new_related_node)
'''
Create a WorkflowJobNode for each WorkflowJobTemplateNode
'''
def _create_workflow_job_nodes(self, old_nodes):
return [WorkflowJobNode.objects.create(workflow_job=self, unified_job_template=old_node.unified_job_template) for old_node in old_nodes]
def _map_workflow_job_nodes(self, old_nodes, new_nodes):
node_ids_map = {}
for i, old_node in enumerate(old_nodes):
node_ids_map[old_node.id] = new_nodes[i].id
return node_ids_map
def _get_workflow_job_template_nodes(self):
return self.workflow_job_template.workflow_job_template_nodes.all()
def _get_workflow_job_node_by_id(self, id):
return WorkflowJobNode.objects.get(id=id)
def _get_all_by_type(self, node, node_type):
return getattr(node, node_type).all()
def inherit_job_template_workflow_nodes(self):
old_nodes = self._get_workflow_job_template_nodes()
new_nodes = self._create_workflow_job_nodes(old_nodes)
node_ids_map = self._map_workflow_job_nodes(old_nodes, new_nodes)
for index, old_node in enumerate(old_nodes):
new_node = new_nodes[index]
for node_type in ['success_nodes', 'failure_nodes', 'always_nodes']:
self._inherit_relationship(old_node, new_node, node_ids_map, node_type)
class WorkflowJob(UnifiedJob, WorkflowJobOptions, JobNotificationMixin, WorkflowJobInheritNodesMixin):
class Meta:
app_label = 'main'
ordering = ('id',)
workflow_job_template = models.ForeignKey(
'WorkflowJobTemplate',
related_name='jobs',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
)
extra_vars_dict = VarsDictProperty('extra_vars', True)
@classmethod
def _get_parent_field_name(cls):
return 'workflow_job_template'
@classmethod
def _get_task_class(cls):
from awx.main.tasks import RunWorkflowJob
return RunWorkflowJob
def socketio_emit_data(self):
return {}
def get_absolute_url(self):
return reverse('api:workflow_job_detail', args=(self.pk,))
# TODO: Ask UI if this is needed ?
#def get_ui_url(self):
# return urlparse.urljoin(tower_settings.TOWER_URL_BASE, "/#/workflow_jobs/{}".format(self.pk))
def is_blocked_by(self, obj):
return True
@property
def task_impact(self):
return 0
# TODO: workflow job notifications
def get_notification_templates(self):
return []
# TODO: workflow job notifications
def get_notification_friendly_name(self):
return "Workflow Job"

View File

@ -55,8 +55,10 @@ from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field,
check_proot_installed, build_proot_temp_dir, wrap_args_with_proot)
__all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate',
'RunAdHocCommand', 'handle_work_error', 'handle_work_success',
'update_inventory_computed_fields', 'send_notifications', 'run_administrative_checks']
'RunAdHocCommand', 'RunWorkflowJob', 'handle_work_error',
'handle_work_success', 'update_inventory_computed_fields',
'send_notifications', 'run_administrative_checks',
'run_workflow_job']
HIDDEN_PASSWORD = '**********'
@ -80,7 +82,7 @@ def celery_startup(conf=None, **kwargs):
except Exception as e:
logger.error("Failed to rebuild schedule {}: {}".format(sch, e))
@task()
@task(queue='default')
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
@ -101,7 +103,7 @@ def send_notifications(notification_list, job_id=None):
if job_id is not None:
job_actual.notifications.add(notification)
@task(bind=True)
@task(bind=True, queue='default')
def run_administrative_checks(self):
if not tower_settings.TOWER_ADMIN_ALERTS:
return
@ -122,11 +124,11 @@ def run_administrative_checks(self):
tower_admin_emails,
fail_silently=True)
@task(bind=True)
@task(bind=True, queue='default')
def cleanup_authtokens(self):
AuthToken.objects.filter(expires__lt=now()).delete()
@task(bind=True)
@task(bind=True, queue='default')
def tower_periodic_scheduler(self):
def get_last_run():
if not os.path.exists(settings.SCHEDULE_METADATA_LOCATION):
@ -177,7 +179,7 @@ def tower_periodic_scheduler(self):
new_unified_job.socketio_emit_status("failed")
emit_websocket_notification('/socket.io/schedules', 'schedule_changed', dict(id=schedule.id))
@task()
@task(queue='default')
def notify_task_runner(metadata_dict):
"""Add the given task into the Tower task manager's queue, to be consumed
by the task system.
@ -185,11 +187,9 @@ def notify_task_runner(metadata_dict):
queue = FifoQueue('tower_task_manager')
queue.push(metadata_dict)
def _send_notification_templates(instance, status_str):
if status_str not in ['succeeded', 'failed']:
raise ValueError("status_str must be either succeeded or failed")
print("Instance has some shit in it %s" % instance)
notification_templates = instance.get_notification_templates()
if notification_templates:
all_notification_templates = set(notification_templates.get('success', []) + notification_templates.get('any', []))
@ -202,7 +202,7 @@ def _send_notification_templates(instance, status_str):
for n in all_notification_templates],
job_id=instance.id)
@task(bind=True)
@task(bind=True, queue='default')
def handle_work_success(self, result, task_actual):
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
if not instance:
@ -210,7 +210,7 @@ def handle_work_success(self, result, task_actual):
_send_notification_templates(instance, 'succeeded')
@task(bind=True)
@task(bind=True, queue='default')
def handle_work_error(self, task_id, subtasks=None):
print('Executing error task id %s, subtasks: %s' %
(str(self.request.id), str(subtasks)))
@ -237,11 +237,9 @@ def handle_work_error(self, task_id, subtasks=None):
instance.socketio_emit_status("failed")
if first_instance:
print("Instance type is %s" % first_instance_type)
print("Instance passing along %s" % first_instance.name)
_send_notification_templates(first_instance, 'failed')
@task()
@task(queue='default')
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
'''
Signal handler and wrapper around inventory.update_computed_fields to
@ -741,7 +739,8 @@ class RunJob(BaseTask):
env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_path
env['REST_API_URL'] = settings.INTERNAL_API_URL
env['REST_API_TOKEN'] = job.task_auth_token or ''
env['CALLBACK_CONSUMER_PORT'] = str(settings.CALLBACK_CONSUMER_PORT)
env['CALLBACK_QUEUE'] = settings.CALLBACK_QUEUE
env['CALLBACK_CONNECTION'] = settings.BROKER_URL
if getattr(settings, 'JOB_CALLBACK_DEBUG', False):
env['JOB_CALLBACK_DEBUG'] = '2'
elif settings.DEBUG:
@ -1663,3 +1662,28 @@ class RunSystemJob(BaseTask):
def build_cwd(self, instance, **kwargs):
return settings.BASE_DIR
class RunWorkflowJob(BaseTask):
name = 'awx.main.tasks.run_workflow_job'
model = WorkflowJob
def run(self, pk, **kwargs):
'''
Run the workflow job, monitoring its nodes until the DAG is complete.
'''
from awx.main.management.commands.run_task_system import WorkflowDAG
instance = self.update_model(pk, status='running', celery_task_id=self.request.id)
instance.socketio_emit_status("running")
# FIXME: Detect workflow run completion
while True:
dag = WorkflowDAG(instance)
if dag.is_workflow_done():
# TODO: update with an accurate finish status (e.g. canceled, error, etc.)
instance = self.update_model(instance.pk, status='successful')
break
time.sleep(1)
instance.socketio_emit_status(instance.status)
# TODO: Handle cancel

View File

@ -7,6 +7,7 @@ from awx.main.tests.factories import (
create_job_template,
create_notification_template,
create_survey_spec,
create_workflow_job_template,
)
@pytest.fixture
@ -40,6 +41,10 @@ def job_template_with_survey_passwords_factory(job_template_factory):
def job_with_secret_key_unit(job_with_secret_key_factory):
return job_with_secret_key_factory(persisted=False)
@pytest.fixture
def workflow_job_template_factory():
return create_workflow_job_template
@pytest.fixture
def get_ssh_version(mocker):
return mocker.patch('awx.main.tasks.get_ssh_version', return_value='OpenSSH_6.9p1, LibreSSL 2.1.8')

View File

@ -3,6 +3,7 @@ from .tower import (
create_job_template,
create_notification_template,
create_survey_spec,
create_workflow_job_template,
)
from .exc import (
@ -14,5 +15,6 @@ __all__ = [
'create_job_template',
'create_notification_template',
'create_survey_spec',
'create_workflow_job_template',
'NotUnique',
]

View File

@ -13,6 +13,10 @@ from awx.main.models import (
Credential,
Inventory,
Label,
WorkflowJobTemplate,
WorkflowJob,
WorkflowJobNode,
WorkflowJobTemplateNode,
)
# mk methods should create only a single object of a single type.
@ -152,3 +156,60 @@ def mk_job_template(name, job_type='run',
if persisted:
jt.save()
return jt
def mk_workflow_job(status='new', workflow_job_template=None, extra_vars={},
persisted=True):
job = WorkflowJob(status=status, extra_vars=json.dumps(extra_vars))
job.workflow_job_template = workflow_job_template
if persisted:
job.save()
return job
def mk_workflow_job_template(name, extra_vars='', spec=None, persisted=True):
if extra_vars:
extra_vars = json.dumps(extra_vars)
wfjt = WorkflowJobTemplate(name=name, extra_vars=extra_vars)
wfjt.survey_spec = spec
if wfjt.survey_spec is not None:
wfjt.survey_enabled = True
if persisted:
wfjt.save()
return wfjt
def mk_workflow_job_template_node(workflow_job_template=None,
unified_job_template=None,
success_nodes=None,
failure_nodes=None,
always_nodes=None,
persisted=True):
workflow_node = WorkflowJobTemplateNode(workflow_job_template=workflow_job_template,
unified_job_template=unified_job_template,
success_nodes=success_nodes,
failure_nodes=failure_nodes,
always_nodes=always_nodes)
if persisted:
workflow_node.save()
return workflow_node
def mk_workflow_job_node(unified_job_template=None,
success_nodes=None,
failure_nodes=None,
always_nodes=None,
workflow_job=None,
job=None,
persisted=True):
workflow_node = WorkflowJobNode(unified_job_template=unified_job_template,
success_nodes=success_nodes,
failure_nodes=failure_nodes,
always_nodes=always_nodes,
workflow_job=workflow_job,
job=job)
if persisted:
workflow_node.save()
return workflow_node

View File

@ -9,6 +9,7 @@ from awx.main.models import (
Inventory,
Job,
Label,
WorkflowJobTemplateNode,
)
from .objects import (
@ -28,6 +29,7 @@ from .fixtures import (
mk_project,
mk_label,
mk_notification_template,
mk_workflow_job_template,
)
@ -343,3 +345,66 @@ def create_notification_template(name, roles=None, persisted=True, **kwargs):
users=_Mapped(users),
superusers=_Mapped(superusers),
teams=teams)
def generate_workflow_job_template_nodes(workflow_job_template,
persisted,
**kwargs):
workflow_job_template_nodes = kwargs.get('workflow_job_template_nodes', [])
if len(workflow_job_template_nodes) > 0 and not persisted:
raise RuntimeError('workflow job template nodes can not be used when persisted=False')
new_nodes = []
for i, node in enumerate(workflow_job_template_nodes):
new_node = WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template,
unified_job_template=node['unified_job_template'])
new_nodes.append(new_node)
node_types = ['success_nodes', 'failure_nodes', 'always_nodes']
for node_type in node_types:
for i, new_node in enumerate(new_nodes):
for related_index in workflow_job_template_nodes[i][node_type]:
getattr(new_node, node_type).add(new_nodes[related_index])
return new_nodes
# TODO: Implement survey and jobs
def create_workflow_job_template(name, persisted=True, **kwargs):
Objects = generate_objects(["workflow_job_template",
"workflow_job_template_nodes",
"survey",], kwargs)
spec = None
#jobs = None
extra_vars = kwargs.get('extra_vars', '')
if 'survey' in kwargs:
spec = create_survey_spec(kwargs['survey'])
wfjt = mk_workflow_job_template(name,
spec=spec,
extra_vars=extra_vars,
persisted=persisted)
workflow_jt_nodes = generate_workflow_job_template_nodes(wfjt,
persisted,
workflow_job_template_nodes=kwargs.get('workflow_job_template_nodes', []))
'''
if 'jobs' in kwargs:
for i in kwargs['jobs']:
if type(i) is Job:
jobs[i.pk] = i
else:
# TODO: Create the job
raise RuntimeError("Currently, only already created jobs are supported")
'''
return Objects(workflow_job_template=wfjt,
#jobs=jobs,
workflow_job_template_nodes=workflow_jt_nodes,
survey=spec,)
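A hedged usage sketch for the factory above; each node dictionary carries the unified_job_template plus index lists for the three relation types, exactly as generate_workflow_job_template_nodes reads them, and jt_a/jt_b are assumed pre-existing JobTemplates:
objects = create_workflow_job_template(
    'example-wfjt',
    persisted=True,
    workflow_job_template_nodes=[
        {'unified_job_template': jt_a, 'success_nodes': [1], 'failure_nodes': [], 'always_nodes': []},
        {'unified_job_template': jt_b, 'success_nodes': [], 'failure_nodes': [], 'always_nodes': []},
    ])
objects.workflow_job_template           # the persisted WorkflowJobTemplate
objects.workflow_job_template_nodes     # the created nodes; node 0's success_nodes points at node 1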

View File

@ -319,18 +319,18 @@ def test_cant_change_organization(patch, credential, organization, org_admin):
credential.organization = organization
credential.save()
response = patch(reverse('api:credential_detail', args=(organization.id,)), {
response = patch(reverse('api:credential_detail', args=(credential.id,)), {
'name': 'Some new name',
}, org_admin)
assert response.status_code == 200
response = patch(reverse('api:credential_detail', args=(organization.id,)), {
response = patch(reverse('api:credential_detail', args=(credential.id,)), {
'name': 'Some new name2',
'organization': organization.id, # fine for it to be the same
}, org_admin)
assert response.status_code == 200
response = patch(reverse('api:credential_detail', args=(organization.id,)), {
response = patch(reverse('api:credential_detail', args=(credential.id,)), {
'name': 'Some new name3',
'organization': None
}, org_admin)
@ -339,7 +339,7 @@ def test_cant_change_organization(patch, credential, organization, org_admin):
@pytest.mark.django_db
def test_cant_add_organization(patch, credential, organization, org_admin):
assert credential.organization is None
response = patch(reverse('api:credential_detail', args=(organization.id,)), {
response = patch(reverse('api:credential_detail', args=(credential.id,)), {
'name': 'Some new name',
'organization': organization.id
}, org_admin)

View File

@ -0,0 +1,34 @@
# Python
import pytest
# AWX
from awx.main.models.workflow import WorkflowJob, WorkflowJobTemplateNode
class TestWorkflowJob:
@pytest.fixture
def workflow_job(self, workflow_job_template_factory):
wfjt = workflow_job_template_factory('blah').workflow_job_template
wfj = WorkflowJob.objects.create(workflow_job_template=wfjt)
nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt) for i in range(0, 5)]
nodes[0].success_nodes.add(nodes[1])
nodes[1].success_nodes.add(nodes[2])
nodes[0].failure_nodes.add(nodes[3])
nodes[3].failure_nodes.add(nodes[4])
return wfj
@pytest.mark.django_db
def test_inherit_job_template_workflow_nodes(self, mocker, workflow_job):
workflow_job.inherit_job_template_workflow_nodes()
nodes = WorkflowJob.objects.get(id=workflow_job.id).workflow_job_nodes.all().order_by('created')
assert nodes[0].success_nodes.filter(id=nodes[1].id).exists()
assert nodes[1].success_nodes.filter(id=nodes[2].id).exists()
assert nodes[0].failure_nodes.filter(id=nodes[3].id).exists()
assert nodes[3].failure_nodes.filter(id=nodes[4].id).exists()

View File

@ -0,0 +1,40 @@
# AWX
from awx.main.models import (
WorkflowJobTemplateNode,
WorkflowJobTemplate,
)
from awx.main.models.jobs import JobTemplate
def do_init_workflow(job_template_success, job_template_fail, job_template_never):
wfjt, created = WorkflowJobTemplate.objects.get_or_create(name="linear workflow")
wfjt.delete()
wfjt, created = WorkflowJobTemplate.objects.get_or_create(name="linear workflow")
print(wfjt.id)
WorkflowJobTemplateNode.objects.all().delete()
if created:
nodes_success = []
nodes_fail = []
nodes_never = []
for i in range(0, 2):
nodes_success.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_success))
nodes_fail.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_fail))
nodes_never.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_never))
nodes_never.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_never))
nodes_fail[1].delete()
nodes_success[0].success_nodes.add(nodes_fail[0])
nodes_success[0].failure_nodes.add(nodes_never[0])
nodes_fail[0].failure_nodes.add(nodes_success[1])
nodes_fail[0].success_nodes.add(nodes_never[1])
nodes_success[1].failure_nodes.add(nodes_never[2])
def do_init():
jt_success = JobTemplate.objects.get(id=5)
jt_fail = JobTemplate.objects.get(id=6)
jt_never = JobTemplate.objects.get(id=7)
do_init_workflow(jt_success, jt_fail, jt_never)
if __name__ == "__main__":
do_init()

File diff suppressed because one or more lines are too long

New image file added (8.2 KiB).

View File

@ -0,0 +1,45 @@
# AWX
from awx.main.models import (
WorkflowJobTemplateNode,
WorkflowJobTemplate,
)
from awx.main.models.jobs import JobTemplate
def do_init_workflow(job_template_success, job_template_fail, job_template_never, jts_parallel):
wfjt, created = WorkflowJobTemplate.objects.get_or_create(name="parallel workflow")
wfjt.delete()
wfjt, created = WorkflowJobTemplate.objects.get_or_create(name="parallel workflow")
print(wfjt.id)
WorkflowJobTemplateNode.objects.all().delete()
if created:
node_success = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_success)
nodes_never = []
for x in range(0, 3):
nodes_never.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_never))
nodes_parallel = []
for jt in jts_parallel:
nodes_parallel.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=jt))
node_success.success_nodes.add(nodes_parallel[0])
node_success.success_nodes.add(nodes_parallel[1])
node_success.success_nodes.add(nodes_parallel[2])
# Add a failure node for each parallel node
for i, n in enumerate(nodes_parallel):
n.failure_nodes.add(nodes_never[i])
def do_init():
jt_success = JobTemplate.objects.get(id=5)
jt_fail = JobTemplate.objects.get(id=6)
jt_never = JobTemplate.objects.get(id=7)
jt_parallel = []
jt_parallel.append(JobTemplate.objects.get(id=16))
jt_parallel.append(JobTemplate.objects.get(id=17))
jt_parallel.append(JobTemplate.objects.get(id=18))
do_init_workflow(jt_success, jt_fail, jt_never, jt_parallel)
if __name__ == "__main__":
do_init()

File diff suppressed because one or more lines are too long

New image file added (8.6 KiB).

View File

@ -0,0 +1,46 @@
import pytest
@pytest.fixture
def get_related_assert():
def fn(model_obj, related, resource_name, related_resource_name):
assert related_resource_name in related
assert related[related_resource_name] == '/api/v1/%s/%d/%s/' % (resource_name, model_obj.pk, related_resource_name)
return fn
@pytest.fixture
def get_related_mock_and_run():
def fn(serializer_class, model_obj):
serializer = serializer_class()
related = serializer.get_related(model_obj)
return related
return fn
@pytest.fixture
def test_get_related(get_related_assert, get_related_mock_and_run):
def fn(serializer_class, model_obj, resource_name, related_resource_name):
related = get_related_mock_and_run(serializer_class, model_obj)
get_related_assert(model_obj, related, resource_name, related_resource_name)
return related
return fn
@pytest.fixture
def get_summary_fields_assert():
def fn(summary, summary_field_name):
assert summary_field_name in summary
return fn
@pytest.fixture
def get_summary_fields_mock_and_run():
def fn(serializer_class, model_obj):
serializer = serializer_class()
return serializer.get_summary_fields(model_obj)
return fn
@pytest.fixture
def test_get_summary_fields(get_summary_fields_mock_and_run, get_summary_fields_assert):
def fn(serializer_class, model_obj, summary_field_name):
summary = get_summary_fields_mock_and_run(serializer_class, model_obj)
get_summary_fields_assert(summary, summary_field_name)
return summary
return fn

View File

@ -0,0 +1,47 @@
# Python
import pytest
import mock
from mock import PropertyMock
# AWX
from awx.api.serializers import (
CustomInventoryScriptSerializer,
)
from awx.main.models import (
CustomInventoryScript,
User,
)
#DRF
from rest_framework.request import Request
from rest_framework.test import (
APIRequestFactory,
force_authenticate,
)
class TestCustomInventoryScriptSerializer(object):
@pytest.mark.parametrize("superuser,sysaudit,admin_role,value",
((True, False, False, '#!/python'),
(False, True, False, '#!/python'),
(False, False, True, '#!/python'),
(False, False, False, None)))
def test_to_representation_orphan(self, superuser, sysaudit, admin_role, value):
with mock.patch.object(CustomInventoryScriptSerializer, 'get_summary_fields', return_value={}):
User.add_to_class('is_system_auditor', sysaudit)
user = User(username="root", is_superuser=superuser)
roles = [user] if admin_role else []
with mock.patch('awx.main.models.CustomInventoryScript.admin_role', new_callable=PropertyMock, return_value=roles):
cis = CustomInventoryScript(pk=1, script='#!/python')
serializer = CustomInventoryScriptSerializer()
factory = APIRequestFactory()
wsgi_request = factory.post("/inventory_script/1", {'id':1}, format="json")
force_authenticate(wsgi_request, user)
request = Request(wsgi_request)
serializer.context['request'] = request
representation = serializer.to_representation(cis)
assert representation['script'] == value

View File

@ -0,0 +1,91 @@
# Python
import pytest
import mock
import json
# AWX
from awx.api.serializers import (
JobSerializer,
JobOptionsSerializer,
)
from awx.main.models import (
Label,
Job,
)
def mock_JT_resource_data():
return ({}, [])
@pytest.fixture
def job_template(mocker):
mock_jt = mocker.MagicMock(pk=5)
mock_jt.resource_validation_data = mock_JT_resource_data
return mock_jt
@pytest.fixture
def job(mocker, job_template):
return mocker.MagicMock(pk=5, job_template=job_template)
@pytest.fixture
def labels(mocker):
return [Label(id=x, name='label-%d' % x) for x in xrange(0, 25)]
@pytest.fixture
def jobs(mocker):
return [Job(id=x, name='job-%d' % x) for x in xrange(0, 25)]
@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x,y: {})
@mock.patch('awx.api.serializers.JobOptionsSerializer.get_related', lambda x,y: {})
class TestJobSerializerGetRelated():
@pytest.mark.parametrize("related_resource_name", [
'job_events',
'job_plays',
'job_tasks',
'relaunch',
'labels',
])
def test_get_related(self, test_get_related, job, related_resource_name):
test_get_related(JobSerializer, job, 'jobs', related_resource_name)
def test_job_template_absent(self, job):
job.job_template = None
serializer = JobSerializer()
related = serializer.get_related(job)
assert 'job_template' not in related
def test_job_template_present(self, get_related_mock_and_run, job):
related = get_related_mock_and_run(JobSerializer, job)
assert 'job_template' in related
assert related['job_template'] == '/api/v1/%s/%d/' % ('job_templates', job.job_template.pk)
@mock.patch('awx.api.serializers.BaseSerializer.to_representation', lambda self,obj: {
'extra_vars': obj.extra_vars})
class TestJobSerializerSubstitution():
def test_survey_password_hide(self, mocker):
job = mocker.MagicMock(**{
'display_extra_vars.return_value': '{\"secret_key\": \"$encrypted$\"}',
'extra_vars.return_value': '{\"secret_key\": \"my_password\"}'})
serializer = JobSerializer(job)
rep = serializer.to_representation(job)
extra_vars = json.loads(rep['extra_vars'])
assert extra_vars['secret_key'] == '$encrypted$'
job.display_extra_vars.assert_called_once_with()
assert 'my_password' not in extra_vars
@mock.patch('awx.api.serializers.BaseSerializer.get_summary_fields', lambda x,y: {})
class TestJobOptionsSerializerGetSummaryFields():
def test__summary_field_labels_10_max(self, mocker, job_template, labels):
job_template.labels.all = mocker.MagicMock(**{'order_by.return_value': labels})
job_template.labels.all.return_value = job_template.labels.all
serializer = JobOptionsSerializer()
summary_labels = serializer._summary_field_labels(job_template)
job_template.labels.all.order_by.assert_called_with('name')
assert len(summary_labels['results']) == 10
assert summary_labels['results'] == [{'id': x.id, 'name': x.name} for x in labels[:10]]
def test_labels_exists(self, test_get_summary_fields, job_template):
test_get_summary_fields(JobOptionsSerializer, job_template, 'labels')

View File

@ -0,0 +1,124 @@
# Python
import pytest
import mock
# AWX
from awx.api.serializers import (
JobTemplateSerializer,
)
from awx.api.views import JobTemplateDetail
from awx.main.models import (
Role,
User,
Job,
)
from rest_framework.test import APIRequestFactory
#DRF
from rest_framework import serializers
def mock_JT_resource_data():
return ({}, [])
@pytest.fixture
def job_template(mocker):
mock_jt = mocker.MagicMock(pk=5)
mock_jt.resource_validation_data = mock_JT_resource_data
return mock_jt
@pytest.fixture
def job(mocker, job_template):
return mocker.MagicMock(pk=5, job_template=job_template)
@pytest.fixture
def jobs(mocker):
return [Job(id=x, name='job-%d' % x) for x in xrange(0, 25)]
@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x,y: {})
@mock.patch('awx.api.serializers.JobOptionsSerializer.get_related', lambda x,y: {})
class TestJobTemplateSerializerGetRelated():
@pytest.mark.parametrize("related_resource_name", [
'jobs',
'schedules',
'activity_stream',
'launch',
'notification_templates_any',
'notification_templates_success',
'notification_templates_error',
'survey_spec',
'labels',
'callback',
])
def test_get_related(self, test_get_related, job_template, related_resource_name):
test_get_related(JobTemplateSerializer, job_template, 'job_templates', related_resource_name)
def test_callback_absent(self, get_related_mock_and_run, job_template):
job_template.host_config_key = None
related = get_related_mock_and_run(JobTemplateSerializer, job_template)
assert 'callback' not in related
class TestJobTemplateSerializerGetSummaryFields():
def test__recent_jobs(self, mocker, job_template, jobs):
job_template.jobs.all = mocker.MagicMock(**{'order_by.return_value': jobs})
job_template.jobs.all.return_value = job_template.jobs.all
serializer = JobTemplateSerializer()
recent_jobs = serializer._recent_jobs(job_template)
job_template.jobs.all.assert_called_once_with()
job_template.jobs.all.order_by.assert_called_once_with('-created')
assert len(recent_jobs) == 10
assert recent_jobs == [{'id': x.id, 'status': x.status, 'finished': x.finished} for x in jobs[:10]]
def test_survey_spec_exists(self, test_get_summary_fields, mocker, job_template):
job_template.survey_spec = {'name': 'blah', 'description': 'blah blah'}
test_get_summary_fields(JobTemplateSerializer, job_template, 'survey')
def test_survey_spec_absent(self, get_summary_fields_mock_and_run, job_template):
job_template.survey_spec = None
summary = get_summary_fields_mock_and_run(JobTemplateSerializer, job_template)
assert 'survey' not in summary
def test_copy_edit_standard(self, mocker, job_template_factory):
"""Verify that the exact output of the access.py methods
are put into the serializer user_capabilities"""
jt_obj = job_template_factory('testJT', project='proj1', persisted=False).job_template
jt_obj.id = 5
jt_obj.admin_role = Role(id=9, role_field='admin_role')
jt_obj.execute_role = Role(id=8, role_field='execute_role')
jt_obj.read_role = Role(id=7, role_field='execute_role')
user = User(username="auser")
serializer = JobTemplateSerializer(job_template)
serializer.show_capabilities = ['copy', 'edit']
serializer._summary_field_labels = lambda self: []
serializer._recent_jobs = lambda self: []
request = APIRequestFactory().get('/api/v1/job_templates/42/')
request.user = user
view = JobTemplateDetail()
view.request = request
serializer.context['view'] = view
with mocker.patch("awx.main.models.rbac.Role.get_description", return_value='Can eat pie'):
with mocker.patch("awx.main.access.JobTemplateAccess.can_change", return_value='foobar'):
with mocker.patch("awx.main.access.JobTemplateAccess.can_add", return_value='foo'):
response = serializer.get_summary_fields(jt_obj)
assert response['user_capabilities']['copy'] == 'foo'
assert response['user_capabilities']['edit'] == 'foobar'
class TestJobTemplateSerializerValidation(object):
good_extra_vars = ["{\"test\": \"keys\"}", "---\ntest: key"]
bad_extra_vars = ["{\"test\": \"keys\"", "---\ntest: [2"]
def test_validate_extra_vars(self):
serializer = JobTemplateSerializer()
for ev in self.good_extra_vars:
serializer.validate_extra_vars(ev)
for ev in self.bad_extra_vars:
with pytest.raises(serializers.ValidationError):
serializer.validate_extra_vars(ev)

View File

@ -0,0 +1,154 @@
# Python
import pytest
import mock
# AWX
from awx.api.serializers import (
WorkflowJobTemplateSerializer,
WorkflowNodeBaseSerializer,
WorkflowJobTemplateNodeSerializer,
WorkflowJobNodeSerializer,
)
from awx.main.models import (
Job,
WorkflowJobTemplateNode,
WorkflowJob,
WorkflowJobNode,
)
@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x,y: {})
class TestWorkflowJobTemplateSerializerGetRelated():
@pytest.fixture
def workflow_job_template(self, workflow_job_template_factory):
wfjt = workflow_job_template_factory('hello world', persisted=False).workflow_job_template
wfjt.pk = 3
return wfjt
@pytest.mark.parametrize("related_resource_name", [
'jobs',
'launch',
'workflow_nodes',
])
def test_get_related(self, mocker, test_get_related, workflow_job_template, related_resource_name):
test_get_related(WorkflowJobTemplateSerializer,
workflow_job_template,
'workflow_job_templates',
related_resource_name)
@mock.patch('awx.api.serializers.BaseSerializer.get_related', lambda x,y: {})
class TestWorkflowNodeBaseSerializerGetRelated():
@pytest.fixture
def job_template(self, job_template_factory):
jt = job_template_factory(name="blah", persisted=False).job_template
jt.pk = 1
return jt
@pytest.fixture
def workflow_job_template_node_related(self, job_template):
return WorkflowJobTemplateNode(pk=1, unified_job_template=job_template)
@pytest.fixture
def workflow_job_template_node(self):
return WorkflowJobTemplateNode(pk=1)
def test_workflow_unified_job_template_present(self, get_related_mock_and_run, workflow_job_template_node_related):
related = get_related_mock_and_run(WorkflowNodeBaseSerializer, workflow_job_template_node_related)
assert 'unified_job_template' in related
assert related['unified_job_template'] == '/api/v1/%s/%d/' % ('job_templates', workflow_job_template_node_related.unified_job_template.pk)
def test_workflow_unified_job_template_absent(self, workflow_job_template_node):
related = WorkflowJobTemplateNodeSerializer().get_related(workflow_job_template_node)
assert 'unified_job_template' not in related
@mock.patch('awx.api.serializers.WorkflowNodeBaseSerializer.get_related', lambda x,y: {})
class TestWorkflowJobTemplateNodeSerializerGetRelated():
@pytest.fixture
def workflow_job_template_node(self):
return WorkflowJobTemplateNode(pk=1)
@pytest.fixture
def workflow_job_template(self, workflow_job_template_factory):
wfjt = workflow_job_template_factory("bliggity", persisted=False).workflow_job_template
wfjt.pk = 1
return wfjt
@pytest.fixture
def job_template(self, job_template_factory):
jt = job_template_factory(name="blah", persisted=False).job_template
jt.pk = 1
return jt
@pytest.fixture
def workflow_job_template_node_related(self, workflow_job_template_node, workflow_job_template):
workflow_job_template_node.workflow_job_template = workflow_job_template
return workflow_job_template_node
@pytest.mark.parametrize("related_resource_name", [
'success_nodes',
'failure_nodes',
'always_nodes',
])
def test_get_related(self, test_get_related, workflow_job_template_node, related_resource_name):
test_get_related(WorkflowJobTemplateNodeSerializer,
workflow_job_template_node,
'workflow_job_template_nodes',
related_resource_name)
def test_workflow_job_template_present(self, get_related_mock_and_run, workflow_job_template_node_related):
related = get_related_mock_and_run(WorkflowJobTemplateNodeSerializer, workflow_job_template_node_related)
assert 'workflow_job_template' in related
assert related['workflow_job_template'] == '/api/v1/%s/%d/' % ('workflow_job_templates', workflow_job_template_node_related.workflow_job_template.pk)
def test_workflow_job_template_absent(self, workflow_job_template_node):
related = WorkflowJobTemplateNodeSerializer().get_related(workflow_job_template_node)
assert 'workflow_job_template' not in related
@mock.patch('awx.api.serializers.WorkflowNodeBaseSerializer.get_related', lambda x,y: {})
class TestWorkflowJobNodeSerializerGetRelated():
@pytest.fixture
def workflow_job_node(self):
return WorkflowJobNode(pk=1)
@pytest.fixture
def workflow_job(self):
return WorkflowJob(pk=1)
@pytest.fixture
def job(self):
return Job(name="blah", pk=1)
@pytest.fixture
def workflow_job_node_related(self, workflow_job_node, workflow_job, job):
workflow_job_node.workflow_job = workflow_job
workflow_job_node.job = job
return workflow_job_node
@pytest.mark.parametrize("related_resource_name", [
'success_nodes',
'failure_nodes',
'always_nodes',
])
def test_get_related(self, test_get_related, workflow_job_node, related_resource_name):
test_get_related(WorkflowJobNodeSerializer,
workflow_job_node,
'workflow_job_nodes',
related_resource_name)
def test_workflow_job_present(self, get_related_mock_and_run, workflow_job_node_related):
related = get_related_mock_and_run(WorkflowJobNodeSerializer, workflow_job_node_related)
assert 'workflow_job' in related
assert related['workflow_job'] == '/api/v1/%s/%d/' % ('workflow_jobs', workflow_job_node_related.workflow_job.pk)
def test_workflow_job_absent(self, workflow_job_node):
related = WorkflowJobNodeSerializer().get_related(workflow_job_node)
assert 'workflow_job' not in related
def test_job_present(self, get_related_mock_and_run, workflow_job_node_related):
related = get_related_mock_and_run(WorkflowJobNodeSerializer, workflow_job_node_related)
assert 'job' in related
assert related['job'] == '/api/v1/%s/%d/' % ('jobs', workflow_job_node_related.job.pk)
def test_job_absent(self, workflow_job_node):
related = WorkflowJobNodeSerializer().get_related(workflow_job_node)
assert 'job' not in related

View File

@ -43,6 +43,8 @@ class TestApiV1RootView:
'unified_job_templates',
'unified_jobs',
'activity_stream',
'workflow_job_templates',
'workflow_jobs',
]
view = ApiV1RootView()
ret = view.get(mocker.MagicMock())

View File

@ -0,0 +1,167 @@
from awx.main.management.commands.run_task_system import (
SimpleDAG,
WorkflowDAG,
)
from awx.main.models import Job
from awx.main.models.workflow import WorkflowJobNode
import pytest
@pytest.fixture
def dag_root():
dag = SimpleDAG()
data = [
{1: 1},
{2: 2},
{3: 3},
{4: 4},
{5: 5},
{6: 6},
]
# Add all the nodes to the DAG
[dag.add_node(d) for d in data]
dag.add_edge(data[0], data[1])
dag.add_edge(data[2], data[3])
dag.add_edge(data[4], data[5])
return dag
@pytest.fixture
def dag_simple_edge_labels():
dag = SimpleDAG()
data = [
{1: 1},
{2: 2},
{3: 3},
{4: 4},
{5: 5},
{6: 6},
]
# Add all the nodes to the DAG
[dag.add_node(d) for d in data]
dag.add_edge(data[0], data[1], 'one')
dag.add_edge(data[2], data[3], 'two')
dag.add_edge(data[4], data[5], 'three')
return dag
'''
class TestSimpleDAG(object):
def test_get_root_nodes(self, dag_root):
leafs = dag_root.get_leaf_nodes()
roots = dag_root.get_root_nodes()
def test_get_labeled_edges(self, dag_simple_edge_labels):
dag = dag_simple_edge_labels
nodes = dag.get_dependencies(dag.nodes[0]['node_object'], 'one')
nodes = dag.get_dependencies(dag.nodes[0]['node_object'], 'two')
'''
@pytest.fixture
def factory_node():
def fn(id, status):
wfn = WorkflowJobNode(id=id)
if status:
j = Job(status=status)
wfn.job = j
return wfn
return fn
@pytest.fixture
def workflow_dag_level_2(factory_node):
dag = WorkflowDAG()
data = [
factory_node(0, 'successful'),
factory_node(1, 'successful'),
factory_node(2, 'successful'),
factory_node(3, None),
factory_node(4, None),
factory_node(5, None),
]
[dag.add_node(d) for d in data]
dag.add_edge(data[0], data[3], 'success_nodes')
dag.add_edge(data[1], data[4], 'success_nodes')
dag.add_edge(data[2], data[5], 'success_nodes')
return (dag, data[3:6], False)
@pytest.fixture
def workflow_dag_multiple_roots(factory_node):
dag = WorkflowDAG()
data = [
factory_node(1, None),
factory_node(2, None),
factory_node(3, None),
factory_node(4, None),
factory_node(5, None),
factory_node(6, None),
]
[dag.add_node(d) for d in data]
dag.add_edge(data[0], data[3], 'success_nodes')
dag.add_edge(data[1], data[4], 'success_nodes')
dag.add_edge(data[2], data[5], 'success_nodes')
expected = data[0:3]
return (dag, expected, False)
@pytest.fixture
def workflow_dag_multiple_edges_labeled(factory_node):
dag = WorkflowDAG()
data = [
factory_node(0, 'failed'),
factory_node(1, None),
factory_node(2, 'failed'),
factory_node(3, None),
factory_node(4, 'failed'),
factory_node(5, None),
]
[dag.add_node(d) for d in data]
dag.add_edge(data[0], data[1], 'success_nodes')
dag.add_edge(data[0], data[2], 'failure_nodes')
dag.add_edge(data[2], data[3], 'success_nodes')
dag.add_edge(data[2], data[4], 'failure_nodes')
dag.add_edge(data[4], data[5], 'failure_nodes')
expected = data[5:6]
return (dag, expected, False)
@pytest.fixture
def workflow_dag_finished(factory_node):
dag = WorkflowDAG()
data = [
factory_node(0, 'failed'),
factory_node(1, None),
factory_node(2, 'failed'),
factory_node(3, None),
factory_node(4, 'failed'),
factory_node(5, 'successful'),
]
[dag.add_node(d) for d in data]
dag.add_edge(data[0], data[1], 'success_nodes')
dag.add_edge(data[0], data[2], 'failure_nodes')
dag.add_edge(data[2], data[3], 'success_nodes')
dag.add_edge(data[2], data[4], 'failure_nodes')
dag.add_edge(data[4], data[5], 'failure_nodes')
expected = []
return (dag, expected, True)
@pytest.fixture(params=['workflow_dag_multiple_roots', 'workflow_dag_level_2', 'workflow_dag_multiple_edges_labeled', 'workflow_dag_finished'])
def workflow_dag(request):
return request.getfuncargvalue(request.param)
class TestWorkflowDAG():
def test_bfs_nodes_to_run(self, workflow_dag):
dag, expected, is_done = workflow_dag
assert dag.bfs_nodes_to_run() == expected
def test_is_workflow_done(self, workflow_dag):
dag, expected, is_done = workflow_dag
assert dag.is_workflow_done() == is_done

View File

@ -0,0 +1,81 @@
import pytest
from awx.main.models.jobs import JobTemplate
from awx.main.models.workflow import WorkflowJobTemplateNode, WorkflowJobInheritNodesMixin, WorkflowJobNode
class TestWorkflowJobInheritNodesMixin():
class TestCreateWorkflowJobNodes():
@pytest.fixture
def job_templates(self):
return [JobTemplate() for i in range(0, 10)]
@pytest.fixture
def job_template_nodes(self, job_templates):
return [WorkflowJobTemplateNode(unified_job_template=job_templates[i]) for i in range(0, 10)]
def test__create_workflow_job_nodes(self, mocker, job_template_nodes):
workflow_job_node_create = mocker.patch('awx.main.models.WorkflowJobNode.objects.create')
mixin = WorkflowJobInheritNodesMixin()
mixin._create_workflow_job_nodes(job_template_nodes)
for job_template_node in job_template_nodes:
workflow_job_node_create.assert_any_call(workflow_job=mixin,
unified_job_template=job_template_node.unified_job_template)
class TestMapWorkflowJobNodes():
@pytest.fixture
def job_template_nodes(self):
return [WorkflowJobTemplateNode(id=i) for i in range(0, 20)]
@pytest.fixture
def job_nodes(self):
return [WorkflowJobNode(id=i) for i in range(100, 120)]
def test__map_workflow_job_nodes(self, job_template_nodes, job_nodes):
mixin = WorkflowJobInheritNodesMixin()
node_ids_map = mixin._map_workflow_job_nodes(job_template_nodes, job_nodes)
assert len(node_ids_map) == len(job_template_nodes)
for i, job_template_node in enumerate(job_template_nodes):
assert node_ids_map[job_template_node.id] == job_nodes[i].id
class TestInheritRelationship():
@pytest.fixture
def job_template_nodes(self, mocker):
nodes = [mocker.MagicMock(id=i) for i in range(0, 10)]
for i in range(0, 9):
nodes[i].success_nodes = [mocker.MagicMock(id=i + 1)]
return nodes
@pytest.fixture
def job_nodes(self, mocker):
nodes = [mocker.MagicMock(id=i) for i in range(100, 110)]
return nodes
@pytest.fixture
def job_nodes_dict(self, job_nodes):
_map = {}
for n in job_nodes:
_map[n.id] = n
return _map
def test__inherit_relationship(self, mocker, job_template_nodes, job_nodes, job_nodes_dict):
mixin = WorkflowJobInheritNodesMixin()
mixin._get_workflow_job_node_by_id = lambda x: job_nodes_dict[x]
mixin._get_all_by_type = lambda x,node_type: x.success_nodes
node_ids_map = mixin._map_workflow_job_nodes(job_template_nodes, job_nodes)
for i, job_template_node in enumerate(job_template_nodes):
mixin._inherit_relationship(job_template_node, job_nodes[i], node_ids_map, 'success_nodes')
for i in range(0, 9):
job_nodes[i].success_nodes.add.assert_any_call(job_nodes[i + 1])

View File

@ -491,7 +491,7 @@ def get_system_task_capacity():
def emit_websocket_notification(endpoint, event, payload, token_key=None):
from awx.main.socket import Socket
from awx.main.socket_queue import Socket
try:
with Socket('websocket', 'w', nowait=True, logger=logger) as websocket:

View File

@ -39,37 +39,16 @@ import pwd
import urlparse
import re
from copy import deepcopy
from uuid import uuid4
# Kombu
from kombu import Connection, Exchange, Producer
# Requests
import requests
# ZeroMQ
import zmq
import psutil
# Only use statsd if there's a statsd host in the environment
# otherwise just do a noop.
# NOTE: I've disabled this for the time being until we sort through the venv dependency around this
# if os.environ.get('GRAPHITE_PORT_8125_UDP_ADDR'):
# from statsd import StatsClient
# statsd = StatsClient(host=os.environ['GRAPHITE_PORT_8125_UDP_ADDR'],
# port=8125,
# prefix='tower.job.event_callback',
# maxudpsize=512)
# else:
# from statsd import StatsClient
# class NoStatsClient(StatsClient):
# def __init__(self, *args, **kwargs):
# pass
# def _prepare(self, stat, value, rate):
# pass
# def _send_stat(self, stat, value, rate):
# pass
# def _send(self, *args, **kwargs):
# pass
# statsd = NoStatsClient()
CENSOR_FIELD_WHITELIST = [
'msg',
'failed',
@ -124,6 +103,7 @@ class TokenAuth(requests.auth.AuthBase):
return request
# TODO: non-v2_ events are deprecated and should be purged or refactored out
class BaseCallbackModule(object):
'''
Callback module for logging ansible-playbook job events via the REST API.
@ -132,12 +112,16 @@ class BaseCallbackModule(object):
def __init__(self):
self.base_url = os.getenv('REST_API_URL', '')
self.auth_token = os.getenv('REST_API_TOKEN', '')
self.callback_consumer_port = os.getenv('CALLBACK_CONSUMER_PORT', '')
self.context = None
self.socket = None
self.callback_connection = os.getenv('CALLBACK_CONNECTION', None)
self.connection_queue = os.getenv('CALLBACK_QUEUE', '')
self.connection = None
self.exchange = None
self._init_logging()
self._init_connection()
self.counter = 0
self.active_playbook = None
self.active_play = None
self.active_task = None
def _init_logging(self):
try:
@ -158,15 +142,11 @@ class BaseCallbackModule(object):
self.logger.propagate = False
def _init_connection(self):
self.context = None
self.socket = None
self.connection = None
def _start_connection(self):
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REQ)
self.socket.setsockopt(zmq.RCVTIMEO, 4000)
self.socket.setsockopt(zmq.LINGER, 2000)
self.socket.connect(self.callback_consumer_port)
self.connection = Connection(self.callback_connection)
self.exchange = Exchange(self.connection_queue, type='direct')
def _post_job_event_queue_msg(self, event, event_data):
self.counter += 1
@ -176,6 +156,29 @@ class BaseCallbackModule(object):
'counter': self.counter,
'created': datetime.datetime.utcnow().isoformat(),
}
if event in ('playbook_on_play_start',
'playbook_on_stats',
'playbook_on_vars_prompt'):
msg['parent_uuid'] = str(self.active_playbook)
elif event in ('playbook_on_notify',
'playbook_on_setup',
'playbook_on_task_start',
'playbook_on_no_hosts_matched',
'playbook_on_no_hosts_remaining',
'playbook_on_include',
'playbook_on_import_for_host',
'playbook_on_not_import_for_host'):
msg['parent_uuid'] = str(self.active_play)
elif event.startswith('runner_on_') or event.startswith('runner_item_on_'):
msg['parent_uuid'] = str(self.active_task)
else:
msg['parent_uuid'] = ''
if "uuid" in event_data:
msg['uuid'] = str(event_data['uuid'])
else:
msg['uuid'] = ''
if getattr(self, 'job_id', None):
msg['job_id'] = self.job_id
if getattr(self, 'ad_hoc_command_id', None):
@ -192,11 +195,16 @@ class BaseCallbackModule(object):
self.connection_pid = active_pid
if self.connection_pid != active_pid:
self._init_connection()
if self.context is None:
if self.connection is None:
self._start_connection()
self.socket.send_json(msg)
self.socket.recv()
producer = Producer(self.connection)
producer.publish(msg,
serializer='json',
compression='bzip2',
exchange=self.exchange,
declare=[self.exchange],
routing_key=self.connection_queue)
return
except Exception, e:
self.logger.info('Publish Job Event Exception: %r, retry=%d', e,
@ -230,7 +238,7 @@ class BaseCallbackModule(object):
if 'res' in event_data:
event_data['res'] = censor(deepcopy(event_data['res']))
if self.callback_consumer_port:
if self.callback_connection:
self._post_job_event_queue_msg(event, event_data)
else:
self._post_rest_api_event(event, event_data)
@ -416,7 +424,9 @@ class JobCallbackModule(BaseCallbackModule):
def v2_playbook_on_start(self, playbook):
# NOTE: the playbook parameter was added late in Ansible 2.0 development,
# so we don't currently utilize it, but we could later.
self.playbook_on_start()
# NOTE: Ansible doesn't generate a UUID for playbook_on_start, so we generate one ourselves
self.active_playbook = str(uuid4())
self._log_event('playbook_on_start', uuid=self.active_playbook)
def playbook_on_notify(self, host, handler):
self._log_event('playbook_on_notify', host=host, handler=handler)
@ -446,14 +456,16 @@ class JobCallbackModule(BaseCallbackModule):
is_conditional=is_conditional)
def v2_playbook_on_task_start(self, task, is_conditional):
self._log_event('playbook_on_task_start', task=task,
self.active_task = task._uuid
self._log_event('playbook_on_task_start', task=task, uuid=str(task._uuid),
name=task.get_name(), is_conditional=is_conditional)
def v2_playbook_on_cleanup_task_start(self, task):
# re-using the playbook_on_task_start event here for this v2-specific
# event, though we may later make whatever changes are needed to
# distinguish it from a normal task
self._log_event('playbook_on_task_start', task=task,
self.active_task = task._uuid
self._log_event('playbook_on_task_start', task=task, uuid=str(task._uuid),
name=task.get_name())
def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
@ -507,7 +519,8 @@ class JobCallbackModule(BaseCallbackModule):
play.name = ','.join(play.hosts)
else:
play.name = play.hosts
self._log_event('playbook_on_play_start', name=play.name,
self.active_play = play._uuid
self._log_event('playbook_on_play_start', name=play.name, uuid=str(play._uuid),
pattern=play.hosts)
def playbook_on_stats(self, stats):
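The hunks above swap the ZeroMQ REQ socket for a kombu Producer that publishes JSON, bzip2-compressed job events (with uuid/parent_uuid lineage) to a direct exchange named by CALLBACK_QUEUE. A hedged sketch of what the matching consumer side could look like; the broker URL is illustrative, and 'callback_tasks' is the CALLBACK_QUEUE default shown later in this commit:
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin

class CallbackEventConsumer(ConsumerMixin):
    # Bound to the same direct exchange the callback plugin publishes to;
    # kombu transparently decompresses the bzip2 payloads.
    def __init__(self, connection, queue_name):
        self.connection = connection
        exchange = Exchange(queue_name, type='direct')
        self.queue = Queue(queue_name, exchange, routing_key=queue_name)

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=[self.queue], accept=['json'],
                         callbacks=[self.on_event])]

    def on_event(self, body, message):
        print('%s uuid=%s parent=%s' % (body.get('event'), body.get('uuid'),
                                        body.get('parent_uuid')))
        message.ack()

if __name__ == '__main__':
    with Connection('amqp://guest:guest@localhost:5672//') as conn:
        CallbackEventConsumer(conn, 'callback_tasks').run()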

View File

@ -8,6 +8,9 @@ import ldap
import djcelery
from datetime import timedelta
from kombu import Queue, Exchange
from kombu.common import Broadcast
# Update this module's local settings from the global settings module.
from django.conf import global_settings
this_module = sys.modules[__name__]
@ -152,7 +155,6 @@ MIDDLEWARE_CLASSES = ( # NOQA
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'awx.main.middleware.HAMiddleware',
'awx.main.middleware.ActivityStreamMiddleware',
'awx.sso.middleware.SocialAuthMiddleware',
'crum.CurrentRequestUserMiddleware',
@ -327,6 +329,7 @@ os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199')
djcelery.setup_loader()
BROKER_URL = 'redis://localhost/'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
@ -336,6 +339,22 @@ CELERYD_TASK_SOFT_TIME_LIMIT = None
CELERYBEAT_SCHEDULER = 'celery.beat.PersistentScheduler'
CELERYBEAT_MAX_LOOP_INTERVAL = 60
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_QUEUES = (
Queue('default', Exchange('default'), routing_key='default'),
Queue('jobs', Exchange('jobs'), routing_key='jobs'),
# Projects use a fanout (broadcast) queue; this isn't especially well supported
Broadcast('projects'),
)
CELERY_ROUTES = ({'awx.main.tasks.run_job': {'queue': 'jobs',
'routing_key': 'jobs'},
'awx.main.tasks.run_project_update': {'queue': 'projects'},
'awx.main.tasks.run_inventory_update': {'queue': 'jobs',
'routing_key': 'jobs'},
'awx.main.tasks.run_ad_hoc_command': {'queue': 'jobs',
'routing_key': 'jobs'},
'awx.main.tasks.run_system_job': {'queue': 'jobs',
'routing_key': 'jobs'}})
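# Illustrative dispatch sketch (hedged; not part of these settings): with the
# routes above, Celery resolves the destination queue from the task name, so
# callers don't pass queue/routing_key explicitly. Job-style tasks land on the
# direct 'jobs' queue of a single worker, while project updates hit the
# 'projects' Broadcast queue and reach every worker consuming it. The
# apply_async arguments below are placeholders.
from awx.main.tasks import run_job, run_project_update

run_job.apply_async(args=[42])            # -> 'jobs' queue, handled by one worker
run_project_update.apply_async(args=[7])  # -> 'projects' broadcast, every worker gets a copy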
CELERYBEAT_SCHEDULE = {
'tower_scheduler': {
'task': 'awx.main.tasks.tower_periodic_scheduler',
@ -973,15 +992,6 @@ LOGGING = {
'backupCount': 5,
'formatter':'simple',
},
'fact_receiver': {
'level': 'WARNING',
'class':'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(LOG_ROOT, 'fact_receiver.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'system_tracking_migrations': {
'level': 'WARNING',
'class':'logging.handlers.RotatingFileHandler',
@ -1015,11 +1025,6 @@ LOGGING = {
'level': 'WARNING',
'propagate': False,
},
'qpid.messaging': {
'handlers': ['console', 'file', 'tower_warnings'],
'propagate': False,
'level': 'WARNING',
},
'py.warnings': {
'handlers': ['console'],
},

View File

@ -41,6 +41,8 @@ if 'celeryd' in sys.argv:
CALLBACK_CONSUMER_PORT = "tcp://127.0.0.1:5557"
CALLBACK_QUEUE_PORT = "ipc:///tmp/callback_receiver_dev.ipc"
CALLBACK_QUEUE = "callback_tasks"
# Enable PROOT for tower-qa integration tests
AWX_PROOT_ENABLED = True

View File

@ -11,6 +11,36 @@
###############################################################################
# MISC PROJECT SETTINGS
###############################################################################
import os
def patch_broken_pipe_error():
"""Monkey Patch BaseServer.handle_error to not write
a stacktrace to stderr on broken pipe.
http://stackoverflow.com/a/22618740/362702"""
import sys
from SocketServer import BaseServer
from wsgiref import handlers
handle_error = BaseServer.handle_error
log_exception = handlers.BaseHandler.log_exception
def is_broken_pipe_error():
type, err, tb = sys.exc_info()
return "Connection reset by peer" in repr(err)
def my_handle_error(self, request, client_address):
if not is_broken_pipe_error():
handle_error(self, request, client_address)
def my_log_exception(self, exc_info):
if not is_broken_pipe_error():
log_exception(self, exc_info)
BaseServer.handle_error = my_handle_error
handlers.BaseHandler.log_exception = my_log_exception
patch_broken_pipe_error()
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
@ -49,7 +79,10 @@ if is_testing(sys.argv):
MONGO_DB = 'system_tracking_test'
# Celery AMQP configuration.
BROKER_URL = 'amqp://guest:guest@rabbitmq//'
BROKER_URL = "amqp://{}:{}@{}/{}".format(os.environ.get("RABBITMQ_USER"),
os.environ.get("RABBITMQ_PASS"),
os.environ.get("RABBITMQ_HOST"),
os.environ.get("RABBITMQ_VHOST"))
# Mongo host configuration
MONGO_HOST = NotImplemented
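The BROKER_URL above is now assembled from RABBITMQ_* environment variables. A standalone, hedged sanity check of that construction; the defaults below mirror the values set in the development docker-compose files later in this commit:
import os

# Illustrative defaults only; in the containers these come from the compose
# file's environment section, not from this snippet.
for key, value in (("RABBITMQ_USER", "guest"), ("RABBITMQ_PASS", "guest"),
                   ("RABBITMQ_HOST", "rabbitmq"), ("RABBITMQ_VHOST", "/")):
    os.environ.setdefault(key, value)

broker_url = "amqp://{}:{}@{}/{}".format(os.environ["RABBITMQ_USER"],
                                         os.environ["RABBITMQ_PASS"],
                                         os.environ["RABBITMQ_HOST"],
                                         os.environ["RABBITMQ_VHOST"])
print(broker_url)  # -> amqp://guest:guest@rabbitmq//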

View File

@ -8,7 +8,7 @@ import threading
xmlsec_init_lock = threading.Lock()
xmlsec_initialized = False
import dm.xmlsec.binding
import dm.xmlsec.binding # noqa
original_xmlsec_initialize = dm.xmlsec.binding.initialize
def xmlsec_initialize(*args, **kwargs):

View File

@ -6,7 +6,7 @@ azure==2.0.0rc2
Babel==2.2.0
billiard==3.3.0.16
boto==2.40.0
celery==3.1.10
celery==3.1.23
cliff==1.15.0
cmd2==0.6.8
d2to1==0.2.11 # TODO: Still needed?
@ -50,7 +50,7 @@ jsonpatch==1.12
jsonpointer==1.10
jsonschema==2.5.1
keyring==4.1
kombu==3.0.30
kombu==3.0.35
apache-libcloud==0.20.1
lxml==3.4.4
Markdown==2.4.1

View File

@ -1,7 +1,7 @@
anyjson==0.3.3
apache-libcloud==0.20.1
appdirs==1.4.0
azure==2.0.0rc2
azure==2.0.0rc5
Babel==2.2.0
boto==2.40.0
cliff==1.15.0
@ -25,6 +25,7 @@ jsonpatch==1.12
jsonpointer==1.10
jsonschema==2.5.1
keyring==4.1
kombu==3.0.35
lxml==3.4.4
mock==1.0.1
monotonic==0.6
@ -69,7 +70,7 @@ rackspace-auth-openstack==1.3
rackspace-novaclient==1.5
rax-default-network-flags-python-novaclient-ext==0.3.2
rax-scheduled-images-python-novaclient-ext==0.3.1
requests==2.5.1
requests==2.11.0
requestsexceptions==1.1.1
shade==1.4.0
simplejson==3.8.1

View File

@ -4,9 +4,10 @@ ipython
unittest2
pep8
flake8
pyflakes==1.0.0 # Pinned until PR merges https://gitlab.com/pycqa/flake8/merge_requests/56
pyflakes
pytest==2.9.2
pytest-cov
pytest-django
pytest-pythonpath
pytest-mock
flower

View File

@ -1,6 +1,6 @@
ansible==1.9.4
coverage
pyflakes==1.0.0 # Pinned until PR merges https://gitlab.com/pycqa/flake8/merge_requests/56
pyflakes
pep8
pylint
flake8

View File

@ -17,5 +17,5 @@ ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E302,E303,E501,W291,W391,W29
exclude=.tox,venv,awx/lib/site-packages,awx/plugins/inventory/ec2.py,awx/plugins/inventory/gce.py,awx/plugins/inventory/vmware.py,awx/plugins/inventory/windows_azure.py,awx/plugins/inventory/openstack.py,awx/ui,awx/api/urls.py,awx/main/migrations,awx/main/south_migrations,awx/main/tests/data
[flake8]
ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E302,E303,E501,W291,W391,W293,E731
ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E302,E303,E501,W291,W391,W293,E731,F405
exclude=.tox,venv,awx/lib/site-packages,awx/plugins/inventory,awx/ui,awx/api/urls.py,awx/main/migrations,awx/main/south_migrations,awx/main/tests/data,node_modules/,awx/projects/,tools/docker,awx/settings/local_settings.py

View File

@ -0,0 +1,65 @@
version: '2'
services:
haproxy:
build:
context: ./docker-compose
dockerfile: Dockerfile-haproxy
depends_on:
- "tower_1"
- "tower_2"
- "tower_3"
ports:
- "8013:8013"
- "1936:1936"
- "5555:5555"
- "15672:15672"
tower_1:
image: gcr.io/ansible-tower-engineering/tower_devel:${TAG}
hostname: tower_1
environment:
RABBITMQ_HOST: rabbitmq_1
RABBITMQ_USER: guest
RABBITMQ_PASS: guest
RABBITMQ_VHOST: /
volumes:
- "../:/tower_devel"
tower_2:
image: gcr.io/ansible-tower-engineering/tower_devel:${TAG}
hostname: tower_2
environment:
RABBITMQ_HOST: rabbitmq_2
RABBITMQ_USER: guest
RABBITMQ_PASS: guest
RABBITMQ_VHOST: /
volumes:
- "../:/tower_devel"
tower_3:
image: gcr.io/ansible-tower-engineering/tower_devel:${TAG}
hostname: tower_3
environment:
RABBITMQ_HOST: rabbitmq_3
RABBITMQ_USER: guest
RABBITMQ_PASS: guest
RABBITMQ_VHOST: /
volumes:
- "../:/tower_devel"
rabbitmq_1:
image: gcr.io/ansible-tower-engineering/rabbit_cluster_node:latest
hostname: rabbitmq_1
rabbitmq_2:
image: gcr.io/ansible-tower-engineering/rabbit_cluster_node:latest
hostname: rabbitmq_2
environment:
- CLUSTERED=true
- CLUSTER_WITH=rabbitmq_1
rabbitmq_3:
image: gcr.io/ansible-tower-engineering/rabbit_cluster_node:latest
hostname: rabbitmq_3
environment:
- CLUSTERED=true
- CLUSTER_WITH=rabbitmq_1
postgres:
image: postgres:9.4.1
memcached:
image: memcached:alpine

View File

@ -3,9 +3,16 @@ services:
# Primary Tower Development Container
tower:
image: gcr.io/ansible-tower-engineering/tower_devel:${TAG}
hostname: tower
environment:
RABBITMQ_HOST: rabbitmq
RABBITMQ_USER: guest
RABBITMQ_PASS: guest
RABBITMQ_VHOST: /
ports:
- "8080:8080"
- "8013:8013"
- "5555:5555"
links:
- postgres
- memcached
@ -15,16 +22,17 @@ services:
# - sync
volumes:
- "../:/tower_devel"
privileged: true
# Postgres Database Container
postgres:
image: postgres:9.4.1
memcached:
image: memcached:alpine
rabbitmq:
image: rabbitmq:3-management
ports:
- "15672:15672"
# Source Code Synchronization Container
# sync:
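With the rabbitmq service added above, a hedged, standalone connectivity check from inside the tower container might look like the following; kombu is already in the requirements, and the host and credentials mirror the environment block above:
from kombu import Connection

# Mirrors the RABBITMQ_* values in the compose environment above; adjust if
# your setup differs.
with Connection('amqp://guest:guest@rabbitmq:5672//', connect_timeout=5) as conn:
    conn.connect()
    print('broker reachable: %s' % conn.connected)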

View File

@ -0,0 +1,2 @@
FROM haproxy:1.6-alpine
COPY haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg

View File

@ -0,0 +1,70 @@
global
debug
stats socket /tmp/admin.sock
stats timeout 30s
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
frontend localnodes
bind *:8013
mode http
default_backend nodes
frontend flower
bind *:5555
mode http
default_backend flower_nodes
frontend rabbitctl
bind *:15672
mode http
default_backend rabbitctl_nodes
backend nodes
mode http
balance roundrobin
option forwardfor
option http-pretend-keepalive
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk HEAD / HTTP/1.1\r\nHost:localhost
server tower_1 tower_1:8013 check
server tower_2 tower_2:8013 check
server tower_3 tower_3:8013 check
backend flower_nodes
mode http
balance roundrobin
option forwardfor
option http-pretend-keepalive
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
#option httpchk HEAD / HTTP/1.1\r\nHost:localhost
server tower_1 tower_1:5555
server tower_2 tower_2:5555
server tower_3 tower_3:5555
backend rabbitctl_nodes
mode http
balance roundrobin
option forwardfor
option http-pretend-keepalive
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
#option httpchk HEAD / HTTP/1.1\r\nHost:localhost
server rabbitmq_1 rabbitmq_1:15672
server rabbitmq_2 rabbitmq_2:15672
server rabbitmq_3 rabbitmq_3:15672
listen stats
bind *:1936
stats enable
stats uri /
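A hedged smoke test for this proxy layout, assuming the cluster compose file above is running and the ports are published on localhost; it only checks that each frontend answers, nothing Tower-specific:
import requests

# Frontend ports published by the haproxy service in docker-compose-cluster.yml.
endpoints = {
    'tower (round-robin)': 'http://localhost:8013/',
    'flower': 'http://localhost:5555/',
    'rabbitmq management': 'http://localhost:15672/',
    'haproxy stats': 'http://localhost:1936/',
}
for name, url in endpoints.items():
    try:
        resp = requests.get(url, timeout=5)
        print('%-22s %s' % (name, resp.status_code))
    except requests.RequestException as exc:
        print('%-22s unreachable (%s)' % (name, exc))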

View File

@ -4,7 +4,7 @@ set +x
# Wait for the databases to come up
ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=postgres port=5432" all
ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=memcached port=11211" all
ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=rabbitmq port=5672" all
ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=${RABBITMQ_HOST} port=5672" all
# In case Tower in the container wants to connect to itself, use "docker exec" to attach to the container otherwise
# TODO: FIX

View File

@ -1,2 +1,2 @@
#!/bin/bash
ansible-playbook -i "127.0.0.1," tools/git_hooks/pre_commit.yml
#ansible-playbook -i "127.0.0.1," tools/git_hooks/pre_commit.yml

View File

@ -1,16 +0,0 @@
#!/bin/sh
case $1 in
config)
cat <<'EOM'
graph_title Callback Receiver Processes
graph_vlabel num processes
graph_category tower
callbackr.label Callback Receiver Processes
EOM
exit 0;;
esac
printf "callbackr.value "
ps ax | grep run_callback_receiver | grep -v grep | wc -l
printf "\n"

View File

@ -1,16 +0,0 @@
#!/bin/sh
case $1 in
config)
cat <<'EOM'
graph_title Celery Processes
graph_vlabel num processes
graph_category tower
celeryd.label Celery Processes
EOM
exit 0;;
esac
printf "celeryd.value "
ps ax | grep celeryd | grep -v grep | wc -l
printf "\n"

View File

@ -1,16 +0,0 @@
#!/bin/sh
case $1 in
config)
cat <<'EOM'
graph_title Postmaster Processes
graph_vlabel num processes
graph_category tower
postmaster.label Postmaster Processes
EOM
exit 0;;
esac
printf "postmaster.value "
ps ax | grep postmaster | grep -v grep | wc -l
printf "\n"

View File

@ -1,16 +0,0 @@
#!/bin/sh
case $1 in
config)
cat <<'EOM'
graph_title Redis Processes
graph_vlabel num processes
graph_category tower
redis.label Redis Processes
EOM
exit 0;;
esac
printf "redis.value "
ps ax | grep redis | grep -v grep | wc -l
printf "\n"

View File

@ -1,16 +0,0 @@
#!/bin/sh
case $1 in
config)
cat <<'EOM'
graph_title SocketIO Service Processes
graph_vlabel num processes
graph_category tower
socketio.label SocketIO Service Processes
EOM
exit 0;;
esac
printf "socketio.value "
ps ax | grep run_socketio_service | grep -v grep | wc -l
printf "\n"

View File

@ -1,16 +0,0 @@
#!/bin/sh
case $1 in
config)
cat <<'EOM'
graph_title Task Manager Processes
graph_vlabel num processes
graph_category tower
taskm.label Task Manager Processes
EOM
exit 0;;
esac
printf "taskm.value "
ps ax | grep run_task_system | grep -v grep | wc -l
printf "\n"

View File

@ -1,27 +0,0 @@
#!/bin/sh
case $1 in
config)
cat <<'EOM'
multigraph tower_jobs
graph_title Running Jobs breakdown
graph_vlabel job count
graph_category tower
running.label Running jobs
waiting.label Waiting jobs
pending.label Pending jobs
EOM
exit 0;;
esac
printf "running.value "
awx-manage stats --stat jobs_running
printf "\n"
printf "waiting.value "
awx-manage stats --stat jobs_waiting
printf "\n"
printf "pending.value "
awx-manage stats --stat jobs_pending
printf "\n"