Mirror of https://github.com/ansible/awx.git (synced 2026-01-14 03:10:42 -03:30)

commit 576db5120f

    resolve conflict

Makefile (22 changes)

@@ -185,7 +185,8 @@ UI_RELEASE_FLAG_FILE = awx/ui/.release_built
 	virtualbox-ovf virtualbox-centos-7 virtualbox-centos-6 \
 	clean-bundle setup_bundle_tarball \
 	ui-docker-machine ui-docker ui-release \
-	ui-test ui-test-ci ui-test-saucelabs
+	ui-test ui-deps ui-test-ci ui-test-saucelabs jlaska


 # Remove setup build files
 clean-tar:
@@ -391,11 +392,17 @@ flower:
 	fi; \
 	$(PYTHON) manage.py celery flower --address=0.0.0.0 --port=5555 --broker=amqp://guest:guest@$(RABBITMQ_HOST):5672//

-uwsgi:
+collectstatic:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/tower/bin/activate; \
 	fi; \
-	uwsgi --socket :8050 --module=awx.wsgi:application --home=/venv/tower --chdir=/tower_devel/ --vacuum --processes=5 --harakiri=60 --static-map /static=/tower_devel/awx/ui/static
+	mkdir -p awx/public/static && $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
+
+uwsgi: collectstatic
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	uwsgi -b 32768 --socket :8050 --module=awx.wsgi:application --home=/venv/tower --chdir=/tower_devel/ --vacuum --processes=5 --harakiri=60 --py-autoreload 1

 daphne:
 	@if [ "$(VENV_BASE)" ]; then \

@@ -421,7 +428,7 @@ celeryd:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/tower/bin/activate; \
 	fi; \
-	$(PYTHON) manage.py celeryd -l DEBUG -B --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default,scheduler,$(COMPOSE_HOST)
+	$(PYTHON) manage.py celeryd -l DEBUG -B --autoreload --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default,scheduler,$(COMPOSE_HOST)
 	#$(PYTHON) manage.py celery multi show projects jobs default -l DEBUG -Q:projects projects -Q:jobs jobs -Q:default default -c:projects 1 -c:jobs 3 -c:default 3 -Ofair -B --schedule=$(CELERY_SCHEDULE_FILE)

 # Run to start the zeromq callback receiver

@@ -443,6 +450,9 @@ factcacher:
 	fi; \
 	$(PYTHON) manage.py run_fact_cache_receiver

+nginx:
+	nginx -g "daemon off;"
+
 reports:
 	mkdir -p $@

@@ -515,6 +525,8 @@ languages:
 # UI TASKS
 # --------------------------------------

+ui-deps: $(UI_DEPS_FLAG_FILE)
+
 $(UI_DEPS_FLAG_FILE): awx/ui/package.json
 	$(NPM_BIN) --unsafe-perm --prefix awx/ui install awx/ui
 	touch $(UI_DEPS_FLAG_FILE)

@@ -794,7 +806,7 @@ docker-auth:

 # Docker Compose Development environment
 docker-compose: docker-auth
-	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose.yml up --no-recreate nginx tower
+	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose.yml up --no-recreate tower

 docker-compose-cluster: docker-auth
 	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose-cluster.yml up
Procfile (1 change)

@@ -1,3 +1,4 @@
+nginx: make nginx
 runworker: make runworker
 daphne: make daphne
 celeryd: make celeryd
awx/api/serializers.py

@@ -562,7 +562,7 @@ class UnifiedJobSerializer(BaseSerializer):
         fields = ('*', 'unified_job_template', 'launch_type', 'status',
                   'failed', 'started', 'finished', 'elapsed', 'job_args',
                   'job_cwd', 'job_env', 'job_explanation', 'result_stdout',
-                  'result_traceback')
+                  'execution_node', 'result_traceback')
         extra_kwargs = {
             'unified_job_template': {
                 'source': 'unified_job_template_id',

@@ -914,7 +914,7 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
     class Meta:
         model = Project
         fields = ('*', 'organization', 'scm_delete_on_next_update', 'scm_update_on_launch',
-                  'scm_update_cache_timeout') + \
+                  'scm_update_cache_timeout', 'scm_revision', 'timeout',) + \
                  ('last_update_failed', 'last_updated')  # Backwards compatibility
         read_only_fields = ('scm_delete_on_next_update',)

@@ -961,12 +961,15 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):

 class ProjectPlaybooksSerializer(ProjectSerializer):

-    playbooks = serializers.ReadOnlyField(help_text=_('Array of playbooks available within this project.'))
+    playbooks = serializers.SerializerMethodField(help_text=_('Array of playbooks available within this project.'))

     class Meta:
         model = Project
         fields = ('playbooks',)

+    def get_playbooks(self, obj):
+        return obj.playbook_files
+
     @property
     def data(self):
         ret = super(ProjectPlaybooksSerializer, self).data

@@ -986,7 +989,7 @@ class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):

     class Meta:
         model = ProjectUpdate
-        fields = ('*', 'project')
+        fields = ('*', 'project', 'job_type')

     def get_related(self, obj):
         res = super(ProjectUpdateSerializer, self).get_related(obj)

@@ -1329,7 +1332,8 @@ class InventorySourceOptionsSerializer(BaseSerializer):

     class Meta:
         fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
-                  'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars')
+                  'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
+                  'timeout')

     def get_related(self, obj):
         res = super(InventorySourceOptionsSerializer, self).get_related(obj)

@@ -1783,13 +1787,23 @@ class OrganizationCredentialSerializerCreate(CredentialSerializerCreate):
         fields = ('*', '-user', '-team')


-class JobOptionsSerializer(BaseSerializer):
+class LabelsListMixin(object):
+
+    def _summary_field_labels(self, obj):
+        return {'count': obj.labels.count(), 'results': [{'id': x.id, 'name': x.name} for x in obj.labels.all().order_by('name')[:10]]}
+
+    def get_summary_fields(self, obj):
+        res = super(LabelsListMixin, self).get_summary_fields(obj)
+        res['labels'] = self._summary_field_labels(obj)
+        return res
+
+
+class JobOptionsSerializer(LabelsListMixin, BaseSerializer):

     class Meta:
         fields = ('*', 'job_type', 'inventory', 'project', 'playbook',
                   'credential', 'cloud_credential', 'network_credential', 'forks', 'limit',
                   'verbosity', 'extra_vars', 'job_tags', 'force_handlers',
-                  'skip_tags', 'start_at_task',)
+                  'skip_tags', 'start_at_task', 'timeout')

     def get_related(self, obj):
         res = super(JobOptionsSerializer, self).get_related(obj)
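A note on the mixin above: because `LabelsListMixin` is listed first in the bases of `JobOptionsSerializer` (and, further down, of `WorkflowJobTemplateSerializer` and `WorkflowJobSerializer`), the call `super(LabelsListMixin, self).get_summary_fields(obj)` continues along the MRO into whichever serializer base follows the mixin, so each class gets the labels summary layered on top of its own summary fields. A minimal sketch of that cooperative-super pattern, with toy classes rather than the real serializers:

```python
class BaseSerializer(object):
    def get_summary_fields(self, obj):
        return {'name': obj['name']}

class LabelsMixin(object):
    def get_summary_fields(self, obj):
        # super() continues along the MRO to BaseSerializer here
        res = super(LabelsMixin, self).get_summary_fields(obj)
        res['labels'] = obj.get('labels', [])
        return res

class DemoSerializer(LabelsMixin, BaseSerializer):
    pass

print(DemoSerializer().get_summary_fields({'name': 'demo', 'labels': ['x']}))
# -> {'name': 'demo', 'labels': ['x']}
```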
@@ -1808,14 +1822,6 @@ class JobOptionsSerializer(BaseSerializer):
                                                    args=(obj.network_credential.pk,))
         return res

-    def _summary_field_labels(self, obj):
-        return {'count': obj.labels.count(), 'results': [{'id': x.id, 'name': x.name} for x in obj.labels.all().order_by('name')[:10]]}
-
-    def get_summary_fields(self, obj):
-        res = super(JobOptionsSerializer, self).get_summary_fields(obj)
-        res['labels'] = self._summary_field_labels(obj)
-        return res
-
     def to_representation(self, obj):
         ret = super(JobOptionsSerializer, self).to_representation(obj)
         if obj is None:

@@ -1927,7 +1933,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
         fields = ('*', 'job_template', 'passwords_needed_to_start', 'ask_variables_on_launch',
                   'ask_limit_on_launch', 'ask_tags_on_launch', 'ask_skip_tags_on_launch',
                   'ask_job_type_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch',
-                  'allow_simultaneous', 'artifacts',)
+                  'allow_simultaneous', 'artifacts', 'scm_revision',)

     def get_related(self, obj):
         res = super(JobSerializer, self).get_related(obj)

@@ -2178,7 +2184,7 @@ class SystemJobCancelSerializer(SystemJobSerializer):
     class Meta:
         fields = ('can_cancel',)

-class WorkflowJobTemplateSerializer(UnifiedJobTemplateSerializer):
+class WorkflowJobTemplateSerializer(LabelsListMixin, UnifiedJobTemplateSerializer):
     show_capabilities = ['start', 'edit', 'delete']

     class Meta:

@@ -2192,6 +2198,7 @@ class WorkflowJobTemplateSerializer(UnifiedJobTemplateSerializer):
             #schedules = reverse('api:workflow_job_template_schedules_list', args=(obj.pk,)),
             launch = reverse('api:workflow_job_template_launch', args=(obj.pk,)),
             workflow_nodes = reverse('api:workflow_job_template_workflow_nodes_list', args=(obj.pk,)),
+            labels = reverse('api:workflow_job_template_label_list', args=(obj.pk,)),
             # TODO: Implement notifications
             #notification_templates_any = reverse('api:system_job_template_notification_templates_any_list', args=(obj.pk,)),
             #notification_templates_success = reverse('api:system_job_template_notification_templates_success_list', args=(obj.pk,)),

@@ -2208,7 +2215,7 @@ class WorkflowJobTemplateListSerializer(WorkflowJobTemplateSerializer):
     pass

 # TODO:
-class WorkflowJobSerializer(UnifiedJobSerializer):
+class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):

     class Meta:
         model = WorkflowJob

@@ -2222,6 +2229,7 @@ class WorkflowJobSerializer(UnifiedJobSerializer):
         # TODO:
         #res['notifications'] = reverse('api:system_job_notifications_list', args=(obj.pk,))
         res['workflow_nodes'] = reverse('api:workflow_job_workflow_nodes_list', args=(obj.pk,))
+        res['labels'] = reverse('api:workflow_job_label_list', args=(obj.pk,))
         # TODO: Cancel job
         '''
         if obj.can_cancel or True:
awx/api/urls.py

@@ -263,6 +263,7 @@ workflow_job_template_urls = patterns('awx.api.views',
     url(r'^(?P<pk>[0-9]+)/jobs/$', 'workflow_job_template_jobs_list'),
     url(r'^(?P<pk>[0-9]+)/launch/$', 'workflow_job_template_launch'),
     url(r'^(?P<pk>[0-9]+)/workflow_nodes/$', 'workflow_job_template_workflow_nodes_list'),
+    url(r'^(?P<pk>[0-9]+)/labels/$', 'workflow_job_template_label_list'),
     # url(r'^(?P<pk>[0-9]+)/cancel/$', 'workflow_job_template_cancel'),
 )

@@ -270,6 +271,7 @@ workflow_job_urls = patterns('awx.api.views',
     url(r'^$', 'workflow_job_list'),
     url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_detail'),
     url(r'^(?P<pk>[0-9]+)/workflow_nodes/$', 'workflow_job_workflow_nodes_list'),
+    url(r'^(?P<pk>[0-9]+)/labels/$', 'workflow_job_label_list'),
     # url(r'^(?P<pk>[0-9]+)/cancel/$', 'workflow_job_cancel'),
     #url(r'^(?P<pk>[0-9]+)/notifications/$', 'workflow_job_notifications_list'),
 )
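These `labels/` routes pair with the `labels` entries added to `get_related` in the serializers above, so `reverse('api:workflow_job_template_label_list', args=(obj.pk,))` should resolve to one of these patterns. A small sketch of just the capture group, using plain `re` outside Django:

```python
import re

# The same capture group used by the url() patterns above, in isolation.
label_list_pattern = re.compile(r'^(?P<pk>[0-9]+)/labels/$')

match = label_list_pattern.match('42/labels/')
assert match is not None and match.group('pk') == '42'
assert label_list_pattern.match('42/labels') is None  # trailing slash required
```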
awx/api/views.py

@@ -955,15 +955,6 @@ class ProjectList(ListCreateAPIView):
             )
         return projects_qs

-    def get(self, request, *args, **kwargs):
-        # Not optimal, but make sure the project status and last_updated fields
-        # are up to date here...
-        projects_qs = Project.objects
-        projects_qs = projects_qs.select_related('current_job', 'last_job')
-        for project in projects_qs:
-            project._set_status_and_last_job_run()
-        return super(ProjectList, self).get(request, *args, **kwargs)
-
 class ProjectDetail(RetrieveUpdateDestroyAPIView):

     model = Project

@@ -2678,13 +2669,9 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):

     model = WorkflowJobNode
     serializer_class = WorkflowJobNodeListSerializer
     always_allow_superuser = True  # TODO: RBAC
-    parent_model = Job
+    parent_model = WorkflowJobNode
     relationship = ''
-    '''
-    enforce_parent_relationship = 'workflow_job_template'
-    new_in_310 = True
-    '''

     #
     #Limit the set of WorkflowJobeNodes to the related nodes of specified by

@@ -2729,6 +2716,11 @@ class WorkflowJobTemplateDetail(RetrieveUpdateDestroyAPIView):
     serializer_class = WorkflowJobTemplateSerializer
     always_allow_superuser = False


+class WorkflowJobTemplateLabelList(JobTemplateLabelList):
+    parent_model = WorkflowJobTemplate
+
+
 # TODO:
 class WorkflowJobTemplateLaunch(GenericAPIView):

@@ -2886,6 +2878,9 @@ class JobLabelList(SubListAPIView):
     relationship = 'labels'
     parent_key = 'job'

+class WorkflowJobLabelList(JobLabelList):
+    parent_model = WorkflowJob
+
 class JobActivityStreamList(SubListAPIView):

     model = ActivityStream
awx/main/access.py

@@ -1045,6 +1045,8 @@ class JobTemplateAccess(BaseAccess):
             self.check_license(feature='system_tracking')
         if obj.survey_enabled:
             self.check_license(feature='surveys')
+        if Instance.objects.active_count() > 1:
+            self.check_license(feature='ha')

         # Super users can start any job
         if self.user.is_superuser:

@@ -1170,6 +1172,29 @@ class JobAccess(BaseAccess):
                 Q(inventory__organization__in=org_access_qs) |
                 Q(project__organization__in=org_access_qs)).distinct()

+    def related_orgs(self, obj):
+        orgs = []
+        if obj.inventory and obj.inventory.organization:
+            orgs.append(obj.inventory.organization)
+        if obj.project and obj.project.organization and obj.project.organization not in orgs:
+            orgs.append(obj.project.organization)
+        return orgs
+
+    def org_access(self, obj, role_types=['admin_role']):
+        orgs = self.related_orgs(obj)
+        for org in orgs:
+            for role_type in role_types:
+                role = getattr(org, role_type)
+                if self.user in role:
+                    return True
+        return False
+
+    @check_superuser
+    def can_read(self, obj):
+        if obj.job_template and self.user in obj.job_template.read_role:
+            return True
+        return self.org_access(obj, role_types=['auditor_role', 'admin_role'])
+
     def can_add(self, data):
         if not data:  # So the browseable API will work
             return True

@@ -1198,12 +1223,7 @@ class JobAccess(BaseAccess):

     @check_superuser
     def can_delete(self, obj):
-        if obj.inventory is not None and self.user in obj.inventory.organization.admin_role:
-            return True
-        if (obj.project is not None and obj.project.organization is not None and
-                self.user in obj.project.organization.admin_role):
-            return True
-        return False
+        return self.org_access(obj)

     def can_start(self, obj, validate_license=True):
         if validate_license:
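The `related_orgs`/`org_access` pair above replaces the older inventory/project special-casing with a single membership walk: gather the organizations a job touches, then test the user against each requested role. A compact, self-contained sketch of that check (toy classes, illustrative names):

```python
class Role(object):
    def __init__(self, members):
        self.members = set(members)
    def __contains__(self, user):
        return user in self.members

class Org(object):
    def __init__(self, admins, auditors=()):
        self.admin_role = Role(admins)
        self.auditor_role = Role(auditors)

def org_access(user, orgs, role_types=('admin_role',)):
    # True if the user holds any requested role in any related org.
    return any(user in getattr(org, rt) for org in orgs for rt in role_types)

orgs = [Org(admins=['alice'], auditors=['bob'])]
assert org_access('alice', orgs)                # admin may delete
assert not org_access('bob', orgs)              # auditor may not
assert org_access('bob', orgs, role_types=('auditor_role', 'admin_role'))  # but may read
```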
@@ -1481,6 +1501,8 @@ class WorkflowJobTemplateAccess(BaseAccess):

         if self.user.is_superuser:
             return True
+        if data is None:
+            return self.user in obj.admin_role

         org_pk = get_pk_from_dict(data, 'organization')
         if ('organization' not in data or
awx/main/managers.py

@@ -31,13 +31,14 @@ class InstanceManager(models.Manager):
                 hostname='localhost',
                 uuid='00000000-0000-0000-0000-000000000000')

-        # If we can determine the instance we are on then return
-        # that, otherwise None which would be the standalone
-        # case
-        # TODO: Replace, this doesn't work if the hostname
-        # is different from the Instance.name
-        # node = self.filter(hostname=socket.gethostname())
-        return self.all()[0]
+        node = self.filter(hostname=settings.CLUSTER_HOST_ID)
+        if node.exists():
+            return node[0]
+        raise RuntimeError("No instance found with the current cluster host id")

     def active_count(self):
         """Return count of active Tower nodes for licensing."""
         return self.all().count()

     def my_role(self):
         # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
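With the lookup above, node identity comes from the `CLUSTER_HOST_ID` setting rather than the arbitrary `self.all()[0]`, and a missing registration is a hard error instead of a silently wrong answer. A toy sketch of that contract against a fake registry (names are illustrative):

```python
def find_me(registry, cluster_host_id):
    # Resolve the current node strictly by its configured cluster host id;
    # failing loudly beats returning an arbitrary first row.
    matches = [node for node in registry if node == cluster_host_id]
    if matches:
        return matches[0]
    raise RuntimeError("No instance found with the current cluster host id")

assert find_me(['tower-1', 'tower-2'], 'tower-2') == 'tower-2'
```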
awx/main/migrations/0041_v310_job_timeout.py (new file, 44 lines)

@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0040_v310_artifacts'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventorysource',
            name='timeout',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='inventoryupdate',
            name='timeout',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='job',
            name='timeout',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='timeout',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='project',
            name='timeout',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='projectupdate',
            name='timeout',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
    ]
awx/main/migrations/0042_v310_executionnode.py (new file, 19 lines)

@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0041_v310_job_timeout'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='execution_node',
            field=models.TextField(default=b'', editable=False, blank=True),
        ),
    ]
awx/main/migrations/0043_v310_scm_revision.py (new file, 30 lines)

@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0042_v310_executionnode'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='scm_revision',
            field=models.CharField(default=b'', editable=False, max_length=1024, blank=True, help_text='The last revision fetched by a project update', verbose_name='SCM Revision'),
        ),
        migrations.AddField(
            model_name='projectupdate',
            name='job_type',
            field=models.CharField(default=b'check', max_length=64, choices=[(b'run', 'Run'), (b'check', 'Check')]),
        ),
        migrations.AddField(
            model_name='job',
            name='scm_revision',
            field=models.CharField(default=b'', editable=False, max_length=1024, blank=True, help_text='The SCM Revision from the Project used for this job, if available', verbose_name='SCM Revision'),
        ),

    ]
awx/main/migrations/0044_v310_project_playbook_files.py (new file, 20 lines)

@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import jsonfield.fields


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0043_v310_scm_revision'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='playbook_files',
            field=jsonfield.fields.JSONField(default=[], help_text='List of playbooks found in the project', verbose_name='Playbook Files', editable=False, blank=True),
        ),
    ]
awx/main/models/base.py

@@ -29,7 +29,8 @@ __all__ = ['VarsDictProperty', 'BaseModel', 'CreatedModifiedModel',
            'PERM_INVENTORY_ADMIN', 'PERM_INVENTORY_READ',
            'PERM_INVENTORY_WRITE', 'PERM_INVENTORY_DEPLOY', 'PERM_INVENTORY_SCAN',
            'PERM_INVENTORY_CHECK', 'PERM_JOBTEMPLATE_CREATE', 'JOB_TYPE_CHOICES',
-           'AD_HOC_JOB_TYPE_CHOICES', 'PERMISSION_TYPE_CHOICES', 'CLOUD_INVENTORY_SOURCES',
+           'AD_HOC_JOB_TYPE_CHOICES', 'PROJECT_UPDATE_JOB_TYPE_CHOICES',
+           'PERMISSION_TYPE_CHOICES', 'CLOUD_INVENTORY_SOURCES',
            'VERBOSITY_CHOICES']

 PERM_INVENTORY_ADMIN = 'admin'

@@ -51,6 +52,11 @@ AD_HOC_JOB_TYPE_CHOICES = [
     (PERM_INVENTORY_CHECK, _('Check')),
 ]

+PROJECT_UPDATE_JOB_TYPE_CHOICES = [
+    (PERM_INVENTORY_DEPLOY, _('Run')),
+    (PERM_INVENTORY_CHECK, _('Check')),
+]
+
 PERMISSION_TYPE_CHOICES = [
     (PERM_INVENTORY_READ, _('Read Inventory')),
     (PERM_INVENTORY_WRITE, _('Edit Inventory')),
awx/main/models/inventory.py

@@ -860,6 +860,10 @@ class InventorySourceOptions(BaseModel):
         default=False,
         help_text=_('Overwrite local variables from remote inventory source.'),
     )
+    timeout = models.PositiveIntegerField(
+        blank=True,
+        default=0,
+    )

     @classmethod
     def get_ec2_region_choices(cls):

@@ -1084,7 +1088,8 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
     @classmethod
     def _get_unified_job_field_names(cls):
         return ['name', 'description', 'source', 'source_path', 'source_script', 'source_vars', 'schedule',
-                'credential', 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars']
+                'credential', 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
+                'timeout']

     def save(self, *args, **kwargs):
         # If update_fields has been specified, add our field names to it,
awx/main/models/jobs.py

@@ -143,6 +143,10 @@ class JobOptions(BaseModel):
     allow_simultaneous = models.BooleanField(
         default=False,
     )
+    timeout = models.PositiveIntegerField(
+        blank=True,
+        default=0,
+    )

     extra_vars_dict = VarsDictProperty('extra_vars', True)

@@ -253,7 +257,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin):
                 'playbook', 'credential', 'cloud_credential', 'network_credential', 'forks', 'schedule',
                 'limit', 'verbosity', 'job_tags', 'extra_vars', 'launch_type',
                 'force_handlers', 'skip_tags', 'start_at_task', 'become_enabled',
-                'labels', 'survey_passwords', 'allow_simultaneous',]
+                'labels', 'survey_passwords', 'allow_simultaneous', 'timeout']

     def resource_validation_data(self):
         '''

@@ -555,6 +559,15 @@ class Job(UnifiedJob, JobOptions, JobNotificationMixin):
         default={},
         editable=False,
     )
+    scm_revision = models.CharField(
+        max_length=1024,
+        blank=True,
+        default='',
+        editable=False,
+        verbose_name=_('SCM Revision'),
+        help_text=_('The SCM Revision from the Project used for this job, if available'),
+    )

     @classmethod
     def _get_parent_field_name(cls):

@@ -1328,6 +1341,7 @@ class SystemJobOptions(BaseModel):
         default='',
     )


 class SystemJobTemplate(UnifiedJobTemplate, SystemJobOptions):

     class Meta:
awx/main/models/projects.py

@@ -7,6 +7,9 @@ import os
 import re
 import urlparse

+# JSONField
+from jsonfield import JSONField
+
 # Django
 from django.conf import settings
 from django.db import models

@@ -106,6 +109,10 @@ class ProjectOptions(models.Model):
         default=None,
         on_delete=models.SET_NULL,
     )
+    timeout = models.PositiveIntegerField(
+        blank=True,
+        default=0,
+    )

     def clean_scm_type(self):
         return self.scm_type or ''

@@ -223,6 +230,23 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
         blank=True,
     )

+    scm_revision = models.CharField(
+        max_length=1024,
+        blank=True,
+        default='',
+        editable=False,
+        verbose_name=_('SCM Revision'),
+        help_text=_('The last revision fetched by a project update'),
+    )
+
+    playbook_files = JSONField(
+        blank=True,
+        default=[],
+        editable=False,
+        verbose_name=_('Playbook Files'),
+        help_text=_('List of playbooks found in the project'),
+    )
+
     admin_role = ImplicitRoleField(parent_role=[
         'organization.admin_role',
         'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,

@@ -251,7 +275,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
     def _get_unified_job_field_names(cls):
         return ['name', 'description', 'local_path', 'scm_type', 'scm_url',
                 'scm_branch', 'scm_clean', 'scm_delete_on_update',
-                'credential', 'schedule']
+                'credential', 'schedule', 'timeout']

     def save(self, *args, **kwargs):
         new_instance = not bool(self.pk)

@@ -294,10 +318,6 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
         # inherit the child job status on failure
         elif self.last_job_failed:
             return self.last_job.status
-        # Even on a successful child run, a missing project path overides
-        # the successful status
-        elif not self.get_project_path():
-            return 'missing'
         # Return the successful status
         else:
             return self.last_job.status

@@ -389,6 +409,12 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin):
         editable=False,
     )

+    job_type = models.CharField(
+        max_length=64,
+        choices=PROJECT_UPDATE_JOB_TYPE_CHOICES,
+        default='check',
+    )
+
     @classmethod
     def _get_parent_field_name(cls):
         return 'project'
awx/main/models/unified_jobs.py

@@ -438,6 +438,11 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
         editable=False,
         related_name='%(class)s_blocked_jobs+',
     )
+    execution_node = models.TextField(
+        blank=True,
+        default='',
+        editable=False,
+    )
     notifications = models.ManyToManyField(
         'Notification',
         editable=False,

@@ -801,7 +806,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique

     def pre_start(self, **kwargs):
         if not self.can_start:
-            self.job_explanation = u'%s is not in a startable status: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting')))
+            self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting')))
             self.save(update_fields=['job_explanation'])
             return (False, None)
awx/main/models/workflow.py

@@ -275,8 +275,7 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, ResourceMixin)

     @classmethod
     def _get_unified_job_field_names(cls):
-        # TODO: ADD LABELS
-        return ['name', 'description', 'extra_vars',]
+        return ['name', 'description', 'extra_vars', 'labels',]

     def get_absolute_url(self):
         return reverse('api:workflow_job_template_detail', args=(self.pk,))
awx/main/tasks.py

@@ -53,10 +53,9 @@ from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field,
 from awx.main.consumers import emit_channel_notification

 __all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate',
-           'RunAdHocCommand', 'RunWorkflowJob', 'handle_work_error',
+           'RunAdHocCommand', 'handle_work_error',
            'handle_work_success', 'update_inventory_computed_fields',
-           'send_notifications', 'run_administrative_checks',
-           'RunJobLaunch']
+           'send_notifications', 'run_administrative_checks']

 HIDDEN_PASSWORD = '**********'

@@ -161,12 +160,6 @@ def tower_periodic_scheduler(self):
     logger.debug("Last run was: %s", last_run)
     write_last_run(run_now)

-    # Sanity check: If this is a secondary machine, there is nothing
-    # on the schedule.
-    # TODO: Fix for clustering/ha
-    if Instance.objects.my_role() == 'secondary':
-        return
-
     old_schedules = Schedule.objects.enabled().before(last_run)
     for schedule in old_schedules:
         schedule.save()

@@ -234,8 +227,9 @@ def handle_work_error(self, task_id, subtasks=None):
             if instance.celery_task_id != task_id:
                 instance.status = 'failed'
                 instance.failed = True
-                instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
-                                           (first_instance_type, first_instance.name, first_instance.id)
+                if not instance.job_explanation:
+                    instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
+                                               (first_instance_type, first_instance.name, first_instance.id)
                 instance.save()
                 instance.websocket_emit_status("failed")

@@ -501,7 +495,7 @@ class BaseTask(Task):
         return OrderedDict()

     def run_pexpect(self, instance, args, cwd, env, passwords, stdout_handle,
-                    output_replacements=None):
+                    output_replacements=None, extra_update_fields=None):
         '''
         Run the given command using pexpect to capture output and provide
         passwords when requested.

@@ -517,9 +511,17 @@ class BaseTask(Task):
         if pexpect_sleep is not None:
             logger.info("Suspending Job Execution for QA Work")
             time.sleep(pexpect_sleep)
+        global_timeout = getattr(settings, 'DEFAULT_JOB_TIMEOUTS', {})
+        cls_name = instance.__class__.__name__
+        if cls_name in global_timeout:
+            local_timeout = getattr(instance, 'timeout', 0)
+            job_timeout = global_timeout[cls_name] if local_timeout == 0 else local_timeout
+        else:
+            job_timeout = 0
         child = pexpect.spawnu(args[0], args[1:], cwd=cwd, env=env)
         child.logfile_read = logfile
         canceled = False
+        timed_out = False
         last_stdout_update = time.time()
         idle_timeout = self.get_idle_timeout()
         expect_list = []
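The timeout resolution added above gives a non-zero per-instance `timeout` precedence over the class-wide entry in `settings.DEFAULT_JOB_TIMEOUTS`, while classes absent from that map get no timeout at all. A standalone sketch of the same precedence (the helper name is hypothetical):

```python
def resolve_job_timeout(cls_name, local_timeout, global_timeouts):
    # Mirrors the logic in run_pexpect: a non-zero per-instance timeout wins,
    # otherwise the global per-class default applies, and classes missing
    # from the map get no timeout (0) at all.
    if cls_name not in global_timeouts:
        return 0
    return local_timeout if local_timeout != 0 else global_timeouts[cls_name]

assert resolve_job_timeout('Job', 0, {'Job': 10}) == 10       # global default applies
assert resolve_job_timeout('Job', 25, {'Job': 10}) == 25      # local override wins
assert resolve_job_timeout('AdHocCommand', 5, {'Job': 10}) == 0  # unlisted class: no timeout
```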
@@ -530,7 +532,9 @@ class BaseTask(Task):
             expect_passwords[n] = passwords.get(item[1], '') or ''
         expect_list.extend([pexpect.TIMEOUT, pexpect.EOF])
         instance = self.update_model(instance.pk, status='running',
+                                     execution_node=settings.CLUSTER_HOST_ID,
                                      output_replacements=output_replacements)
+        job_start = time.time()
         while child.isalive():
             result_id = child.expect(expect_list, timeout=pexpect_timeout)
             if result_id in expect_passwords:

@@ -541,45 +545,65 @@ class BaseTask(Task):
             # Refresh model instance from the database (to check cancel flag).
             instance = self.update_model(instance.pk)
             if instance.cancel_flag:
-                try:
-                    if settings.AWX_PROOT_ENABLED and self.should_use_proot(instance):
-                        # NOTE: Refactor this once we get a newer psutil across the board
-                        if not psutil:
-                            os.kill(child.pid, signal.SIGKILL)
-                        else:
-                            try:
-                                main_proc = psutil.Process(pid=child.pid)
-                                if hasattr(main_proc, "children"):
-                                    child_procs = main_proc.children(recursive=True)
-                                else:
-                                    child_procs = main_proc.get_children(recursive=True)
-                                for child_proc in child_procs:
-                                    os.kill(child_proc.pid, signal.SIGKILL)
-                                os.kill(main_proc.pid, signal.SIGKILL)
-                            except TypeError:
-                                os.kill(child.pid, signal.SIGKILL)
-                    else:
-                        os.kill(child.pid, signal.SIGTERM)
-                    time.sleep(3)
-                    canceled = True
-                except OSError:
-                    logger.warn("Attempted to cancel already finished job, ignoring")
+                canceled = True
+            elif job_timeout != 0 and (time.time() - job_start) > job_timeout:
+                timed_out = True
+                if isinstance(extra_update_fields, dict):
+                    extra_update_fields['job_explanation'] = "Job terminated due to timeout"
+            if canceled or timed_out:
+                self._handle_termination(instance, child, is_cancel=canceled)
             if idle_timeout and (time.time() - last_stdout_update) > idle_timeout:
                 child.close(True)
                 canceled = True
         if canceled:
             return 'canceled', child.exitstatus
-        elif child.exitstatus == 0:
+        elif child.exitstatus == 0 and not timed_out:
             return 'successful', child.exitstatus
         else:
             return 'failed', child.exitstatus

+    def _handle_termination(self, instance, job, is_cancel=True):
+        '''Helper function to properly terminate specified job.
+
+        Args:
+            instance: The corresponding model instance of this task.
+            job: The pexpect subprocess running the job.
+            is_cancel: Flag showing whether this termination is caused by
+                instance's cancel_flag.
+
+        Return:
+            None.
+        '''
+        try:
+            if settings.AWX_PROOT_ENABLED and self.should_use_proot(instance):
+                # NOTE: Refactor this once we get a newer psutil across the board
+                if not psutil:
+                    os.kill(job.pid, signal.SIGKILL)
+                else:
+                    try:
+                        main_proc = psutil.Process(pid=job.pid)
+                        if hasattr(main_proc, "children"):
+                            child_procs = main_proc.children(recursive=True)
+                        else:
+                            child_procs = main_proc.get_children(recursive=True)
+                        for child_proc in child_procs:
+                            os.kill(child_proc.pid, signal.SIGKILL)
+                        os.kill(main_proc.pid, signal.SIGKILL)
+                    except TypeError:
+                        os.kill(job.pid, signal.SIGKILL)
+            else:
+                os.kill(job.pid, signal.SIGTERM)
+            time.sleep(3)
+        except OSError:
+            keyword = 'cancel' if is_cancel else 'timeout'
+            logger.warn("Attempted to %s already finished job, ignoring" % keyword)
+
     def pre_run_hook(self, instance, **kwargs):
         '''
         Hook for any steps to run before the job/task starts
         '''

-    def post_run_hook(self, instance, **kwargs):
+    def post_run_hook(self, instance, status, **kwargs):
         '''
         Hook for any steps to run after job/task is complete.
         '''
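`_handle_termination` factors the kill logic out of the cancel path so the timeout path can reuse it; under proot the whole process tree must die, and the psutil call differs across versions (`children()` on newer releases, `get_children()` on older ones), hence the `hasattr` probe. A standalone sketch of just that version shim (assuming psutil is importable; the function name is illustrative):

```python
import os
import signal

import psutil  # assumed available; the code above also tolerates its absence

def kill_process_tree(pid):
    # Enumerate descendants first, then kill children before the parent,
    # handling the psutil API rename (get_children -> children).
    main_proc = psutil.Process(pid=pid)
    if hasattr(main_proc, "children"):
        child_procs = main_proc.children(recursive=True)
    else:
        child_procs = main_proc.get_children(recursive=True)
    for child_proc in child_procs:
        os.kill(child_proc.pid, signal.SIGKILL)
    os.kill(main_proc.pid, signal.SIGKILL)
```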
@@ -588,11 +612,12 @@ class BaseTask(Task):
         '''
         Run the job/task and capture its output.
         '''
-        instance = self.update_model(pk, status='running', celery_task_id=self.request.id)
+        instance = self.update_model(pk, status='running', celery_task_id='' if self.request.id is None else self.request.id)

         instance.websocket_emit_status("running")
         status, rc, tb = 'error', None, ''
         output_replacements = []
+        extra_update_fields = {}
         try:
             self.pre_run_hook(instance, **kwargs)
             if instance.cancel_flag:

@@ -636,7 +661,8 @@ class BaseTask(Task):
                 safe_args = self.wrap_args_with_ssh_agent(safe_args, ssh_key_path, ssh_auth_sock)
             instance = self.update_model(pk, job_args=json.dumps(safe_args),
                                          job_cwd=cwd, job_env=safe_env, result_stdout_file=stdout_filename)
-            status, rc = self.run_pexpect(instance, args, cwd, env, kwargs['passwords'], stdout_handle)
+            status, rc = self.run_pexpect(instance, args, cwd, env, kwargs['passwords'], stdout_handle,
+                                          extra_update_fields=extra_update_fields)
         except Exception:
             if status != 'canceled':
                 tb = traceback.format_exc()

@@ -657,8 +683,9 @@ class BaseTask(Task):
             except Exception:
                 pass
         instance = self.update_model(pk, status=status, result_traceback=tb,
-                                     output_replacements=output_replacements)
-        self.post_run_hook(instance, **kwargs)
+                                     output_replacements=output_replacements,
+                                     **extra_update_fields)
+        self.post_run_hook(instance, status, **kwargs)
         instance.websocket_emit_status(status)
         if status != 'successful' and not hasattr(settings, 'CELERY_UNIT_TEST'):
             # Raising an exception will mark the job as 'failed' in celery

@@ -749,6 +776,8 @@ class RunJob(BaseTask):
         # callbacks to work.
         env['JOB_ID'] = str(job.pk)
         env['INVENTORY_ID'] = str(job.inventory.pk)
+        if job.project:
+            env['PROJECT_REVISION'] = job.project.scm_revision
         env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_path
         env['REST_API_URL'] = settings.INTERNAL_API_URL
         env['REST_API_TOKEN'] = job.task_auth_token or ''

@@ -882,6 +911,10 @@ class RunJob(BaseTask):
             'tower_job_id': job.pk,
             'tower_job_launch_type': job.launch_type,
         }
+        if job.project:
+            extra_vars.update({
+                'tower_project_revision': job.project.scm_revision,
+            })
         if job.job_template:
             extra_vars.update({
                 'tower_job_template_id': job.job_template.pk,

@@ -958,11 +991,28 @@ class RunJob(BaseTask):
         '''
         return getattr(settings, 'AWX_PROOT_ENABLED', False)

-    def post_run_hook(self, job, **kwargs):
+    def pre_run_hook(self, job, **kwargs):
+        if job.project and job.project.scm_type:
+            local_project_sync = job.project.create_project_update()
+            local_project_sync.job_type = 'run'
+            local_project_sync.save()
+            project_update_task = local_project_sync._get_task_class()
+            try:
+                project_update_task().run(local_project_sync.id)
+                job.scm_revision = job.project.scm_revision
+                job.save()
+            except Exception:
+                job.status = 'failed'
+                job.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
+                                      ('project_update', local_project_sync.name, local_project_sync.id)
+                job.save()
+                raise
+
+    def post_run_hook(self, job, status, **kwargs):
         '''
         Hook for actions to run after job/task has completed.
         '''
-        super(RunJob, self).post_run_hook(job, **kwargs)
+        super(RunJob, self).post_run_hook(job, status, **kwargs)
         try:
             inventory = job.inventory
         except Inventory.DoesNotExist:

@@ -1063,7 +1113,10 @@ class RunProjectUpdate(BaseTask):
             args.append('-v')
         scm_url, extra_vars = self._build_scm_url_extra_vars(project_update,
                                                              **kwargs)
-        scm_branch = project_update.scm_branch or {'hg': 'tip'}.get(project_update.scm_type, 'HEAD')
+        if project_update.project.scm_revision and project_update.job_type == 'run':
+            scm_branch = project_update.project.scm_revision
+        else:
+            scm_branch = project_update.scm_branch or {'hg': 'tip'}.get(project_update.scm_type, 'HEAD')
         extra_vars.update({
             'project_path': project_update.get_project_path(check_if_exists=False),
             'scm_type': project_update.scm_type,

@@ -1071,6 +1124,8 @@ class RunProjectUpdate(BaseTask):
             'scm_branch': scm_branch,
             'scm_clean': project_update.scm_clean,
             'scm_delete_on_update': project_update.scm_delete_on_update,
+            'scm_full_checkout': True if project_update.job_type == 'run' else False,
+            'scm_revision_output': '/tmp/_{}_syncrev'.format(project_update.id)  # TODO: TempFile
         })
         args.extend(['-e', json.dumps(extra_vars)])
         args.append('project_update.yml')

@@ -1144,6 +1199,18 @@ class RunProjectUpdate(BaseTask):
         '''
         return kwargs.get('private_data_files', {}).get('scm_credential', '')

+    def post_run_hook(self, instance, status, **kwargs):
+        if instance.job_type == 'check' and status not in ('failed', 'canceled',):
+            p = instance.project
+            fd = open('/tmp/_{}_syncrev'.format(instance.id), 'r')
+            lines = fd.readlines()
+            if lines:
+                p.scm_revision = lines[0].strip()
+                p.playbook_files = p.playbooks
+                p.save()
+            else:
+                logger.error("Could not find scm revision in check")
+

 class RunInventoryUpdate(BaseTask):

     name = 'awx.main.tasks.run_inventory_update'
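This `post_run_hook` closes the loop opened in `project_update.yml`: the playbook writes the fetched revision to the `scm_revision_output` path, and a successful `check` run reads it back into `Project.scm_revision` and the cached `playbook_files`. A hedged sketch of the read-back step in isolation (the function name and use of a context manager are illustrative; the code above opens the file directly):

```python
def read_scm_revision(path):
    # project_update.yml writes the fetched SCM revision to this file;
    # the first line is the revision hash, or None if the playbook never
    # produced one.
    with open(path, 'r') as fd:
        lines = fd.readlines()
    return lines[0].strip() if lines else None

# e.g. read_scm_revision('/tmp/_42_syncrev') -> 'a1b2c3...' for project update 42
```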
@@ -1638,7 +1705,7 @@ class RunAdHocCommand(BaseTask):
         '''
         return getattr(settings, 'AWX_PROOT_ENABLED', False)

-    def post_run_hook(self, ad_hoc_command, **kwargs):
+    def post_run_hook(self, ad_hoc_command, status, **kwargs):
         '''
         Hook for actions to run after ad hoc command has completed.
         '''

@@ -1675,38 +1742,3 @@ class RunSystemJob(BaseTask):

     def build_cwd(self, instance, **kwargs):
         return settings.BASE_DIR
-
-'''
-class RunWorkflowJob(BaseTask):
-
-    name = 'awx.main.tasks.run_workflow_job'
-    model = WorkflowJob
-
-    def run(self, pk, **kwargs):
-        #Run the job/task and capture its output.
-        instance = self.update_model(pk, status='running', celery_task_id=self.request.id)
-        instance.websocket_emit_status("running")
-
-        # FIXME: Currently, the workflow job busy waits until the graph run is
-        # complete. Instead, the workflow job should return or never even run,
-        # because all of the "launch logic" can be done schedule().
-
-        # However, other aspects of our system depend on a 1-1 relationship
-        # between a Job and a Celery Task.
-        #
-        # * If we let the workflow job task (RunWorkflowJob.run()) complete
-        #   then how do we trigger the handle_work_error and
-        #   handle_work_success subtasks?
-        #
-        # * How do we handle the recovery process? (i.e. there is an entry in
-        #   the database but not in celery).
-        while True:
-            dag = WorkflowDAG(instance)
-            if dag.is_workflow_done():
-                # TODO: update with accurate finish status (i.e. canceled, error, etc.)
-                instance = self.update_model(instance.pk, status='successful')
-                break
-            time.sleep(1)
-        instance.websocket_emit_status(instance.status)
-        # TODO: Handle cancel
-'''
awx/main/tests/unit/test_access.py

@@ -10,7 +10,8 @@ from awx.main.access import (
     JobTemplateAccess,
     WorkflowJobTemplateAccess,
 )
-from awx.main.models import Credential, Inventory, Project, Role, Organization
+from awx.conf.license import LicenseForbids
+from awx.main.models import Credential, Inventory, Project, Role, Organization, Instance


 @pytest.fixture

@@ -106,6 +107,18 @@ def test_jt_add_scan_job_check(job_template_with_ids, user_unit):
             'job_type': 'scan'
         })

+def mock_raise_license_forbids(self, add_host=False, feature=None, check_expiration=True):
+    raise LicenseForbids("Feature not enabled")
+
+def mock_raise_none(self, add_host=False, feature=None, check_expiration=True):
+    return None
+
+def test_jt_can_start_ha(job_template_with_ids):
+    with mock.patch.object(Instance.objects, 'active_count', return_value=2):
+        with mock.patch('awx.main.access.BaseAccess.check_license', new=mock_raise_license_forbids):
+            with pytest.raises(LicenseForbids):
+                JobTemplateAccess(user_unit).can_start(job_template_with_ids)
+
 def test_jt_can_add_bad_data(user_unit):
     "Assure that no server errors are returned if we call JT can_add with bad data"
     access = JobTemplateAccess(user_unit)
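`test_jt_can_start_ha` combines two stock mock idioms: `mock.patch.object` to pin `Instance.objects.active_count` at 2 so the new `active_count() > 1` branch in `can_start` fires, and `mock.patch(..., new=...)` to swap `check_license` for a stub that raises. A self-contained sketch of the same pattern with toy stand-ins for the real models:

```python
import mock  # the Python 2 'mock' package; unittest.mock on Python 3
import pytest

class LicenseForbids(Exception):
    pass

class FakeManager(object):
    def active_count(self):
        return 1

def can_start(manager, check_license):
    if manager.active_count() > 1:
        check_license(feature='ha')
    return True

def test_ha_license_enforced():
    manager = FakeManager()
    def raise_forbids(feature=None):
        raise LicenseForbids("Feature not enabled")
    # Pin active_count at 2 so the HA branch fires, then assert that the
    # license stub propagates out of can_start.
    with mock.patch.object(FakeManager, 'active_count', return_value=2):
        with pytest.raises(LicenseForbids):
            can_start(manager, raise_forbids)
```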
awx/playbooks/project_update.yml

@@ -17,28 +17,93 @@
   tasks:

     - name: delete project directory before update
-      file: path={{project_path|quote}} state=absent
+      file:
+        path: "{{project_path|quote}}"
+        state: absent
       when: scm_delete_on_update|default('')

     - name: update project using git and accept hostkey
-      git: dest={{project_path|quote}} repo={{scm_url|quote}} version={{scm_branch|quote}} force={{scm_clean}} accept_hostkey={{scm_accept_hostkey}}
+      git:
+        dest: "{{project_path|quote}}"
+        repo: "{{scm_url|quote}}"
+        version: "{{scm_branch|quote}}"
+        force: "{{scm_clean}}"
+        accept_hostkey: "{{scm_accept_hostkey}}"
+        #clone: "{{ scm_full_checkout }}"
+        #update: "{{ scm_full_checkout }}"
       when: scm_type == 'git' and scm_accept_hostkey is defined
       register: scm_result

+    - name: Set the git repository version
+      set_fact:
+        scm_version: "{{ scm_result['after'] }}"
+      when: "'after' in scm_result"
+
     - name: update project using git
-      git: dest={{project_path|quote}} repo={{scm_url|quote}} version={{scm_branch|quote}} force={{scm_clean}}
+      git:
+        dest: "{{project_path|quote}}"
+        repo: "{{scm_url|quote}}"
+        version: "{{scm_branch|quote}}"
+        force: "{{scm_clean}}"
+        #clone: "{{ scm_full_checkout }}"
+        #update: "{{ scm_full_checkout }}"
       when: scm_type == 'git' and scm_accept_hostkey is not defined
       register: scm_result

+    - name: Set the git repository version
+      set_fact:
+        scm_version: "{{ scm_result['after'] }}"
+      when: "'after' in scm_result"
+
     - name: update project using hg
-      hg: dest={{project_path|quote}} repo={{scm_url|quote}} revision={{scm_branch|quote}} force={{scm_clean}}
+      hg:
+        dest: "{{project_path|quote}}"
+        repo: "{{scm_url|quote}}"
+        revision: "{{scm_branch|quote}}"
+        force: "{{scm_clean}}"
+        #clone: "{{ scm_full_checkout }}"
+        #update: "{{ scm_full_checkout }}"
       when: scm_type == 'hg'
       register: scm_result

+    - name: Set the hg repository version
+      set_fact:
+        scm_version: "{{ scm_result['after'] }}"
+      when: "'after' in scm_result"
+
     - name: update project using svn
-      subversion: dest={{project_path|quote}} repo={{scm_url|quote}} revision={{scm_branch|quote}} force={{scm_clean}}
+      subversion:
+        dest: "{{project_path|quote}}"
+        repo: "{{scm_url|quote}}"
+        revision: "{{scm_branch|quote}}"
+        force: "{{scm_clean}}"
+        #checkout: "{{ scm_full_checkout }}"
+        #update: "{{ scm_full_checkout }}"
       when: scm_type == 'svn' and not scm_username|default('')
       register: scm_result

+    - name: Set the svn repository version
+      set_fact:
+        scm_version: "{{ scm_result['after'] }}"
+      when: "'after' in scm_result"
+
     - name: update project using svn with auth
-      subversion: dest={{project_path|quote}} repo={{scm_url|quote}} revision={{scm_branch|quote}} force={{scm_clean}} username={{scm_username|quote}} password={{scm_password|quote}}
+      subversion:
+        dest: "{{project_path|quote}}"
+        repo: "{{scm_url|quote}}"
+        revision: "{{scm_branch|quote}}"
+        force: "{{scm_clean}}"
+        username: "{{scm_username|quote}}"
+        password: "{{scm_password|quote}}"
+        #checkout: "{{ scm_full_checkout }}"
+        #update: "{{ scm_full_checkout }}"
       when: scm_type == 'svn' and scm_username|default('')
       register: scm_result

+    - name: Set the svn repository version
+      set_fact:
+        scm_version: "{{ scm_result['after'] }}"
+      when: "'after' in scm_result"
+
     - name: detect requirements.yml
       stat: path={{project_path|quote}}/roles/requirements.yml

@@ -48,4 +113,14 @@
       command: ansible-galaxy install -r requirements.yml -p {{project_path|quote}}/roles/ --force
       args:
         chdir: "{{project_path|quote}}/roles"
-      when: doesRequirementsExist.stat.exists
+      when: doesRequirementsExist.stat.exists and scm_full_checkout|bool
+
+    - name: Repository Version
+      debug: msg="Repository Version {{ scm_version }}"
+      when: scm_version is defined
+
+    - name: Write Repository Version
+      copy:
+        dest: "{{ scm_revision_output }}"
+        content: "{{ scm_version }}"
+      when: scm_version is defined and scm_revision_output is defined
awx/settings/defaults.py

@@ -9,7 +9,6 @@ import djcelery
 from datetime import timedelta

 from kombu import Queue, Exchange
-from kombu.common import Broadcast

 # Update this module's local settings from the global settings module.
 from django.conf import global_settings

@@ -367,11 +366,11 @@ CELERY_QUEUES = (
     Queue('jobs', Exchange('jobs'), routing_key='jobs'),
     Queue('scheduler', Exchange('scheduler', type='topic'), routing_key='scheduler.job.#', durable=False),
     # Projects use a fanout queue, this isn't super well supported
-    Broadcast('projects'),
 )
 CELERY_ROUTES = {'awx.main.tasks.run_job': {'queue': 'jobs',
                                             'routing_key': 'jobs'},
-                 'awx.main.tasks.run_project_update': {'queue': 'projects'},
+                 'awx.main.tasks.run_project_update': {'queue': 'jobs',
+                                                       'routing_key': 'jobs'},
                  'awx.main.tasks.run_inventory_update': {'queue': 'jobs',
                                                          'routing_key': 'jobs'},
                  'awx.main.tasks.run_ad_hoc_command': {'queue': 'jobs',

@@ -383,7 +382,7 @@ CELERY_ROUTES = {'awx.main.tasks.run_job': {'queue': 'jobs',
                  'awx.main.scheduler.tasks.run_job_complete': {'queue': 'scheduler',
                                                                'routing_key': 'scheduler.job.complete'},
                  'awx.main.tasks.cluster_node_heartbeat': {'queue': 'default',
-                                                           'routing_key': 'cluster.heartbeat'},}
+                                                           'routing_key': 'cluster.heartbeat'}}

 CELERYBEAT_SCHEDULE = {
     'tower_scheduler': {
awx/settings/local_settings.py.example

@@ -276,3 +276,10 @@ TEST_OPENSTACK_PROJECT = ''
 # Azure credentials.
 TEST_AZURE_USERNAME = ''
 TEST_AZURE_KEY_DATA = ''
+
+# Exemplary global job timeout settings
+# DEFAULT_JOB_TIMEOUTS = {
+#     'Job': 10,
+#     'InventoryUpdate': 15,
+#     'ProjectUpdate': 20,
+# }
@@ -48,7 +48,7 @@ http {
         server_name _;
         keepalive_timeout 70;

-        ssl_certificate /etc/tower/tower.crt;
+        ssl_certificate /etc/tower/tower.cert;
         ssl_certificate_key /etc/tower/tower.key;
         ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
         ssl_ciphers HIGH:!aNULL:!MD5;
docs/clustering.md

@@ -21,6 +21,7 @@ It's important to point out a few existing things:
   by its needs. Thus we are pretty inflexible to customization beyond what our setup playbook allows. Each Tower node has a
   deployment of RabbitMQ that will cluster with the other nodes' RabbitMQ instances.
 * Existing old-style HA deployments will be transitioned automatically to the new HA system during the upgrade process.
+* Manual projects will need to be synced to all nodes by the customer

 ## Important Changes

@@ -168,6 +169,7 @@ When verifying acceptance we should ensure the following statements are true
   can communicate with the database.
 * Crucially when network partitioning is resolved all nodes should recover into a consistent state
 * Upgrade Testing, verify behavior before and after are the same for the end user.
+* Project Updates should be thoroughly tested for all scm types (git, svn, hg) and for manual projects.

 ## Performance Testing
requirements/requirements.txt

@@ -133,6 +133,6 @@ wrapt==1.10.6
 wsgiref==0.1.2
 xmltodict==0.9.2
 channels==0.17.2
-asgi_amqp==0.3
+asgi_amqp==0.3.1
 uwsgi==2.0.14
 daphne==0.15.0

requirements/requirements_ansible.txt

@@ -33,7 +33,7 @@ msgpack-python==0.4.7
 munch==2.0.4
 netaddr==0.7.18
 netifaces==0.10.4
-os-client-config==1.14.0
+os-client-config==1.22.0
 os-diskconfig-python-novaclient-ext==0.1.3
 os-networksv2-python-novaclient-ext==0.25
 os-virtual-interfacesv2-python-novaclient-ext==0.19

@@ -72,7 +72,7 @@ rax-default-network-flags-python-novaclient-ext==0.3.2
 rax-scheduled-images-python-novaclient-ext==0.3.1
 requests==2.11.0
 requestsexceptions==1.1.1
-shade==1.4.0
+shade==1.12.1
 simplejson==3.8.1
 six==1.9.0
 stevedore==1.10.0
tools/docker-compose.yml

@@ -12,8 +12,8 @@ services:
     ports:
       - "8080:8080"
       - "5555:5555"
-      - "8050:8050"
-      - "8051:8051"
+      - "8013:8013"
+      - "8043:8043"
     links:
       - postgres
       - memcached

@@ -35,14 +35,6 @@ services:
     ports:
       - "15672:15672"

-  nginx:
-    image: gcr.io/ansible-tower-engineering/tower_nginx:${TAG}
-    ports:
-      - "8043:443"
-      - "8013:80"
-    links:
-      - tower
-
   # Source Code Synchronization Container
   # sync:
   #   build:
tools/docker-compose/Dockerfile

@@ -11,7 +11,7 @@ RUN yum -y update && yum -y install curl epel-release
 RUN curl --silent --location https://rpm.nodesource.com/setup_6.x | bash -
 RUN yum -y localinstall http://download.postgresql.org/pub/repos/yum/9.4/redhat/rhel-6-x86_64/pgdg-centos94-9.4-3.noarch.rpm
 ADD tools/docker-compose/proot.repo /etc/yum.repos.d/proot.repo
-RUN yum -y update && yum -y install openssh-server ansible mg vim tmux git mercurial subversion python-devel python-psycopg2 make postgresql postgresql-devel nodejs python-psutil libxml2-devel libxslt-devel libstdc++.so.6 gcc cyrus-sasl-devel cyrus-sasl openldap-devel libffi-devel zeromq-devel proot python-pip xmlsec1-devel swig krb5-devel xmlsec1-openssl xmlsec1 xmlsec1-openssl-devel libtool-ltdl-devel rabbitmq-server
+RUN yum -y update && yum -y install openssh-server ansible mg vim tmux git mercurial subversion python-devel python-psycopg2 make postgresql postgresql-devel nginx nodejs python-psutil libxml2-devel libxslt-devel libstdc++.so.6 gcc cyrus-sasl-devel cyrus-sasl openldap-devel libffi-devel zeromq-devel proot python-pip xmlsec1-devel swig krb5-devel xmlsec1-openssl xmlsec1 xmlsec1-openssl-devel libtool-ltdl-devel rabbitmq-server
 RUN pip install flake8 pytest==2.9.2 pytest-pythonpath pytest-django pytest-cov pytest-mock dateutils django-debug-toolbar==1.4 pyflakes==1.0.0 virtualenv
 RUN /usr/bin/ssh-keygen -q -t rsa -N "" -f /root/.ssh/id_rsa
 RUN mkdir -p /etc/tower

@@ -23,10 +23,14 @@ ADD tools/docker-compose/ansible-tower.egg-link /tmp/ansible-tower.egg-link
 ADD tools/docker-compose/tower-manage /usr/local/bin/tower-manage
 ADD tools/docker-compose/awx-manage /usr/local/bin/awx-manage
 ADD tools/docker-compose/ansible_tower.egg-info /tmp/ansible_tower.egg-info
+RUN ln -Ffs /tower_devel/tools/docker-compose/nginx.conf /etc/nginx/nginx.conf
+RUN ln -Ffs /tower_devel/tools/docker-compose/nginx.vh.default.conf /etc/nginx/conf.d/nginx.vh.default.conf
 RUN ln -s /tower_devel/tools/docker-compose/start_development.sh /start_development.sh
+RUN openssl req -nodes -newkey rsa:2048 -keyout /etc/nginx/nginx.key -out /etc/nginx/nginx.csr -subj "/C=US/ST=North Carolina/L=Durham/O=Ansible/OU=Tower Development/CN=tower.localhost"
+RUN openssl x509 -req -days 365 -in /etc/nginx/nginx.csr -signkey /etc/nginx/nginx.key -out /etc/nginx/nginx.crt
 WORKDIR /tmp
 RUN SWIG_FEATURES="-cpperraswarn -includeall -D__`uname -m`__ -I/usr/include/openssl" VENV_BASE="/venv" make requirements_dev
 WORKDIR /
-EXPOSE 8050 8051 8080 22
+EXPOSE 8043 8013 8080 22
 ENTRYPOINT ["/usr/bin/dumb-init"]
 CMD /start_development.sh
tools/docker-compose/nginx.conf (new file, 37 lines)

@@ -0,0 +1,37 @@
user nginx;
worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    map $http_upgrade $connection_upgrade {
        default upgrade;
        '' close;
    }

    sendfile on;
    #tcp_nopush on;

    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;

    keepalive_timeout 65;

    #gzip on;

    include /etc/nginx/conf.d/*.conf;
}
tools/docker-compose/nginx.vh.default.conf (new file, 55 lines)

@@ -0,0 +1,55 @@
upstream uwsgi {
    server localhost:8050;
}

upstream daphne {
    server localhost:8051;
}

server {
    listen 8013 default_server;
    listen 8043 default_server ssl;

    # If you have a domain name, this is where to add it
    server_name _;
    keepalive_timeout 70;

    ssl_certificate /etc/nginx/nginx.crt;
    ssl_certificate_key /etc/nginx/nginx.key;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers HIGH:!aNULL:!MD5;

    location /static/ {
        root /tower_devel;
        try_files /awx/ui/$uri /awx/$uri /awx/public/$uri =404;
        access_log off;
        sendfile off;
    }

    location /websocket {
        # Pass request to the upstream alias
        proxy_pass http://daphne;
        # Require http version 1.1 to allow for upgrade requests
        proxy_http_version 1.1;
        # We want proxy_buffering off for proxying to websockets.
        proxy_buffering off;
        # http://en.wikipedia.org/wiki/X-Forwarded-For
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # enable this if you use HTTPS:
        proxy_set_header X-Forwarded-Proto https;
        # pass the Host: header from the client for the sake of redirects
        proxy_set_header Host $http_host;
        # We've set the Host header, so we don't need Nginx to muddle
        # about with redirects
        proxy_redirect off;
        # Depending on the request value, set the Upgrade and
        # connection headers
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
    }

    location / {
        include /etc/nginx/uwsgi_params;
        uwsgi_pass uwsgi;
    }
}
@@ -37,7 +37,12 @@ RUN yum install -y nodejs
 WORKDIR "/ansible-tower"

 # Copy requirements files
-COPY requirements/*.txt requirements/
+# NOTE: '*' is not used as it invalidates docker caching
+COPY requirements/requirements.txt requirements/
+COPY requirements/requirements_ansible.txt requirements/
+COPY requirements/requirements_dev.txt requirements/
+COPY requirements/requirements_jenkins.txt requirements/


 # Copy __init__.py so the Makefile can retrieve `awx.__version__`
 COPY awx/__init__.py awx/

@@ -58,7 +63,7 @@ COPY awx/ui/package.json awx/ui/

 RUN npm set progress=false

-RUN make ui-deps-built
+RUN make ui-deps

 ENTRYPOINT ["/bin/bash", "-c"]
 CMD ["bash"]
tools/docker-compose/unit-tests/docker-compose.yml

@@ -5,6 +5,7 @@ services:
     build:
       context: ../../../
       dockerfile: tools/docker-compose/unit-tests/Dockerfile
+    image: gcr.io/ansible-tower-engineering/unit-test-runner:latest
     environment:
       SWIG_FEATURES: "-cpperraswarn -includeall -I/usr/include/openssl"
       TEST_DIRS: "awx/main/tests/unit"
@@ -1,14 +1,5 @@
 #!/bin/bash

-# Enable needed Software Collections, if installed
-for scl in python27 httpd24; do
-    if [ -f /etc/scl/prefixes/$scl ]; then
-        if [ -f `cat /etc/scl/prefixes/$scl`/$scl/enable ]; then
-            . `cat /etc/scl/prefixes/$scl`/$scl/enable
-        fi
-    fi
-done
-
 # Enable Tower virtualenv
 if [ -f /var/lib/awx/venv/tower/bin/activate ]; then
     . /var/lib/awx/venv/tower/bin/activate