Mirror of https://github.com/ansible/awx.git

Commit 0933a91174: Merge branch 'devel' into can_CRUD

Makefile
@@ -170,7 +170,7 @@ endif

.DEFAULT_GOAL := build

-.PHONY: clean clean-tmp rebase push requirements requirements_dev \
+.PHONY: clean clean-tmp clean-venv rebase push requirements requirements_dev \
    requirements_jenkins \
    develop refresh adduser migrate dbchange dbshell runserver celeryd \
    receiver test test_unit test_coverage coverage_html test_jenkins dev_build \

@@ -216,6 +216,9 @@ clean-ui:
clean-tmp:
    rm -rf tmp/

+clean-venv:
+    rm -rf venv/
+
# Remove temporary build files, compiled Python files.
clean: clean-rpm clean-deb clean-ui clean-tar clean-packer clean-bundle
    rm -rf awx/lib/site-packages

@@ -19,7 +19,7 @@ from awx.main.utils import get_object_or_400
logger = logging.getLogger('awx.api.permissions')

__all__ = ['ModelAccessPermission', 'JobTemplateCallbackPermission',
-           'TaskPermission', 'ProjectUpdatePermission']
+           'TaskPermission', 'ProjectUpdatePermission', 'UserPermission']

class ModelAccessPermission(permissions.BasePermission):
    '''

@@ -205,3 +205,10 @@ class ProjectUpdatePermission(ModelAccessPermission):
    def check_post_permissions(self, request, view, obj=None):
        project = get_object_or_400(view.model, pk=view.kwargs['pk'])
        return check_user_access(request.user, view.model, 'start', project)
+
+
+class UserPermission(ModelAccessPermission):
+    def check_post_permissions(self, request, view, obj=None):
+        if request.user.is_superuser:
+            return True
+        raise PermissionDenied()
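
The practical effect of UserPermission: a POST (user creation) on any view that adopts it succeeds only for superusers, while everyone else gets an explicit PermissionDenied (HTTP 403). A minimal sketch of that rule, using illustrative stand-in request/user objects rather than anything from this commit:

from rest_framework.exceptions import PermissionDenied

class FakeUser(object):
    is_superuser = False  # flip to True to model a superuser

class FakeRequest(object):
    user = FakeUser()

def can_create_user(request):
    # Same rule as UserPermission.check_post_permissions above.
    if request.user.is_superuser:
        return True
    raise PermissionDenied()

try:
    can_create_user(FakeRequest())
except PermissionDenied:
    print("non-superuser POST to the user list is rejected with HTTP 403")
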
@@ -60,6 +60,7 @@ project_urls = patterns('awx.api.views',
)

project_update_urls = patterns('awx.api.views',
    url(r'^$', 'project_update_list'),
    url(r'^(?P<pk>[0-9]+)/$', 'project_update_detail'),
    url(r'^(?P<pk>[0-9]+)/cancel/$', 'project_update_cancel'),
+    url(r'^(?P<pk>[0-9]+)/stdout/$', 'project_update_stdout'),

@@ -145,6 +146,7 @@ inventory_source_urls = patterns('awx.api.views',
)

inventory_update_urls = patterns('awx.api.views',
    url(r'^$', 'inventory_update_list'),
    url(r'^(?P<pk>[0-9]+)/$', 'inventory_update_detail'),
    url(r'^(?P<pk>[0-9]+)/cancel/$', 'inventory_update_cancel'),
+    url(r'^(?P<pk>[0-9]+)/stdout/$', 'inventory_update_stdout'),

@@ -1,3 +1,4 @@

# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

@@ -124,11 +125,13 @@ class ApiV1RootView(APIView):
        data['organizations'] = reverse('api:organization_list')
        data['users'] = reverse('api:user_list')
        data['projects'] = reverse('api:project_list')
+        data['project_updates'] = reverse('api:project_update_list')
        data['teams'] = reverse('api:team_list')
        data['credentials'] = reverse('api:credential_list')
        data['inventory'] = reverse('api:inventory_list')
        data['inventory_scripts'] = reverse('api:inventory_script_list')
        data['inventory_sources'] = reverse('api:inventory_source_list')
+        data['inventory_updates'] = reverse('api:inventory_update_list')
        data['groups'] = reverse('api:group_list')
        data['hosts'] = reverse('api:host_list')
        data['job_templates'] = reverse('api:job_template_list')

@@ -1107,6 +1110,11 @@ class ProjectUpdateView(RetrieveAPIView):
        else:
            return self.http_method_not_allowed(request, *args, **kwargs)

+class ProjectUpdateList(ListAPIView):
+
+    model = ProjectUpdate
+    serializer_class = ProjectUpdateListSerializer
+
class ProjectUpdateDetail(RetrieveDestroyAPIView):

    model = ProjectUpdate

@@ -1158,6 +1166,7 @@ class UserList(ListCreateAPIView):
    model = User
    serializer_class = UserSerializer
    capabilities_prefetch = ['admin']
+    permission_classes = (UserPermission,)

    def post(self, request, *args, **kwargs):
        ret = super(UserList, self).post(request, *args, **kwargs)

@@ -1323,7 +1332,7 @@ class UserDetail(RetrieveUpdateDestroyAPIView):
        can_admin = request.user.can_access(User, 'admin', obj, request.data)

        su_only_edit_fields = ('is_superuser', 'is_system_auditor')
-        admin_only_edit_fields = ('last_name', 'first_name', 'username', 'is_active')
+        admin_only_edit_fields = ('username', 'is_active')

        fields_to_check = ()
        if not request.user.is_superuser:

@@ -2172,6 +2181,11 @@ class InventorySourceUpdateView(RetrieveAPIView):
        else:
            return self.http_method_not_allowed(request, *args, **kwargs)

+class InventoryUpdateList(ListAPIView):
+
+    model = InventoryUpdate
+    serializer_class = InventoryUpdateListSerializer
+
class InventoryUpdateDetail(RetrieveDestroyAPIView):

    model = InventoryUpdate

@@ -3010,7 +3024,7 @@ class JobJobTasksList(BaseJobEventsList):
        # need stats on grandchildren, sorted by child.
        queryset = (JobEvent.objects.filter(parent__parent=parent_task,
                    parent__event__in=STARTING_EVENTS)
-                    .values('parent__id', 'event', 'changed', 'failed')
+                    .values('parent__id', 'event', 'changed')
                    .annotate(num=Count('event'))
                    .order_by('parent__id'))

@@ -3071,13 +3085,10 @@ class JobJobTasksList(BaseJobEventsList):
        # make appropriate changes to the task data.
        for child_data in data.get(task_start_event.id, []):
            if child_data['event'] == 'runner_on_failed':
+                task_data['failed'] = True
                task_data['host_count'] += child_data['num']
                task_data['reported_hosts'] += child_data['num']
-                if child_data['failed']:
-                    task_data['failed'] = True
-                    task_data['failed_count'] += child_data['num']
-                else:
-                    task_data['skipped_count'] += child_data['num']
+                task_data['failed_count'] += child_data['num']
            elif child_data['event'] == 'runner_on_ok':
                task_data['host_count'] += child_data['num']
                task_data['reported_hosts'] += child_data['num']
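
With project_updates and inventory_updates now exposed at the API root, update jobs are directly listable. A hedged sketch of a client hitting the new endpoint; the host and credentials are placeholders, and the /api/v1/ prefix is assumed from the surrounding routes:

import requests

TOWER_HOST = "https://tower.example.com"  # placeholder
resp = requests.get(TOWER_HOST + "/api/v1/project_updates/",
                    auth=("admin", "password"), verify=False)
resp.raise_for_status()
# DRF list views return paginated results under "results".
for update in resp.json()["results"]:
    print(update["id"], update["status"])
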

@@ -54,10 +54,6 @@ class AutoOneToOneField(models.OneToOneField):
                AutoSingleRelatedObjectDescriptor(related))


def resolve_role_field(obj, field):
    ret = []

@@ -71,8 +67,8 @@ def resolve_role_field(obj, field):
        return []

    if len(field_components) == 1:
-        Role_ = get_current_apps().get_model('main', 'Role')
-        if type(obj) is not Role_:
+        role_cls = str(get_current_apps().get_model('main', 'Role'))
+        if not str(type(obj)) == role_cls:
            raise Exception(smart_text('{} refers to a {}, not a Role'.format(field, type(obj))))
        ret.append(obj.id)
    else:

@@ -22,6 +22,7 @@ import yaml
from django.conf import settings
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connection, transaction
+from django.utils.encoding import smart_text

# AWX
from awx.main.models import * # noqa

@@ -606,7 +607,7 @@ class Command(NoArgsCommand):
                break
            instance_id = from_dict.get(key, default)
            from_dict = instance_id
-        return instance_id
+        return smart_text(instance_id)

    def _get_enabled(self, from_dict, default=None):
        '''
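
The smart_text coercion matters because _get_instance_id can end its walk on a non-string value (an integer host ID pulled from inventory variables, for example) while callers treat instance IDs as text. A small check of that assumption, using Django's helper directly:

from django.utils.encoding import smart_text

# An inventory value like 42 now comes back as text, not int.
print(repr(smart_text(42)))  # u'42' on Python 2
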

@@ -25,5 +25,6 @@ class Migration(migrations.Migration):
            name='use_role',
            field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', null=b'True'),
        ),
+        migrations.RunPython(rbac.infer_credential_org_from_team),
        migrations.RunPython(rbac.rebuild_role_hierarchy),
    ]

@@ -2,7 +2,9 @@ import logging
from time import time

from django.utils.encoding import smart_text
+from django.db import transaction
from django.db.models import Q
+from django.db.utils import IntegrityError

from collections import defaultdict
from awx.main.utils import getattrd

@@ -490,3 +492,11 @@ def rebuild_role_hierarchy(apps, schema_editor):
    logger.info('Done.')


+def infer_credential_org_from_team(apps, schema_editor):
+    Credential = apps.get_model('main', "Credential")
+    for cred in Credential.objects.exclude(deprecated_team__isnull=True):
+        try:
+            with transaction.atomic():
+                _update_credential_parents(cred.deprecated_team.organization, cred)
+        except IntegrityError:
+            logger.info("Organization<{}> credential for old Team<{}> credential already created".format(cred.deprecated_team.organization.pk, cred.pk))

@@ -23,13 +23,14 @@ from awx.main.models.base import * # noqa
from awx.main.models.unified_jobs import * # noqa
from awx.main.utils import decrypt_field
from awx.main.conf import tower_settings
+from awx.main.models.notifications import JobNotificationMixin

logger = logging.getLogger('awx.main.models.ad_hoc_commands')

__all__ = ['AdHocCommand', 'AdHocCommandEvent']


-class AdHocCommand(UnifiedJob):
+class AdHocCommand(UnifiedJob, JobNotificationMixin):

    class Meta(object):
        app_label = 'main'

@@ -237,6 +238,14 @@ class AdHocCommand(UnifiedJob):
                update_fields.append('name')
        super(AdHocCommand, self).save(*args, **kwargs)

+    '''
+    JobNotificationMixin
+    '''
+    def get_notification_templates(self):
+        return self.notification_templates
+
+    def get_notification_friendly_name(self):
+        return "AdHoc Command"

class AdHocCommandEvent(CreatedModifiedModel):
    '''

@@ -25,7 +25,10 @@ from awx.main.models.base import * # noqa
from awx.main.models.jobs import Job
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.mixins import ResourceMixin
-from awx.main.models.notifications import NotificationTemplate
+from awx.main.models.notifications import (
+    NotificationTemplate,
+    JobNotificationMixin,
+)
from awx.main.utils import _inventory_updates
from awx.main.conf import tower_settings

@@ -1192,7 +1195,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
        return source


-class InventoryUpdate(UnifiedJob, InventorySourceOptions):
+class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin):
    '''
    Internal job for tracking inventory updates from external sources.
    '''

@@ -1268,6 +1271,15 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions):
            return False
        return True

+    '''
+    JobNotificationMixin
+    '''
+    def get_notification_templates(self):
+        return self.inventory_source.notification_templates
+
+    def get_notification_friendly_name(self):
+        return "Inventory Update"
+

class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin):

@@ -24,7 +24,10 @@ from jsonfield import JSONField
from awx.main.constants import CLOUD_PROVIDERS
from awx.main.models.base import * # noqa
from awx.main.models.unified_jobs import * # noqa
-from awx.main.models.notifications import NotificationTemplate
+from awx.main.models.notifications import (
+    NotificationTemplate,
+    JobNotificationMixin,
+)
from awx.main.utils import decrypt_field, ignore_inventory_computed_fields
from awx.main.utils import emit_websocket_notification
from awx.main.redact import PlainTextCleaner

@@ -499,7 +502,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin):
        any_notification_templates = set(any_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_any=self.project.organization)))
        return dict(error=list(error_notification_templates), success=list(success_notification_templates), any=list(any_notification_templates))

-class Job(UnifiedJob, JobOptions):
+class Job(UnifiedJob, JobOptions, JobNotificationMixin):
    '''
    A job applies a project (with playbook) to an inventory source with a given
    credential. It represents a single invocation of ansible-playbook with the

@@ -792,6 +795,15 @@ class Job(UnifiedJob, JobOptions):

        return True

+    '''
+    JobNotificationMixin
+    '''
+    def get_notification_templates(self):
+        return self.job_template.notification_templates
+
+    def get_notification_friendly_name(self):
+        return "Job"

class JobHostSummary(CreatedModifiedModel):
    '''
    Per-host statistics for each job.

@@ -1315,7 +1327,7 @@ class SystemJobTemplate(UnifiedJobTemplate, SystemJobOptions):
                    any=list(any_notification_templates))


-class SystemJob(UnifiedJob, SystemJobOptions):
+class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):

    class Meta:
        app_label = 'main'

@@ -1378,3 +1390,13 @@ class SystemJob(UnifiedJob, SystemJobOptions):
    @property
    def task_impact(self):
        return 150
+
+    '''
+    JobNotificationMixin
+    '''
+    def get_notification_templates(self):
+        return self.system_job_template.notification_templates
+
+    def get_notification_friendly_name(self):
+        return "System Job"

@@ -171,3 +171,27 @@ class Notification(CreatedModifiedModel):

    def get_absolute_url(self):
        return reverse('api:notification_detail', args=(self.pk,))
+
+
+class JobNotificationMixin(object):
+    def get_notification_templates(self):
+        raise RuntimeError("Define me")
+
+    def get_notification_friendly_name(self):
+        raise RuntimeError("Define me")
+
+    def _build_notification_message(self, status_str):
+        notification_body = self.notification_data()
+        notification_subject = "{} #{} '{}' {} on Ansible Tower: {}".format(self.get_notification_friendly_name(),
+                                                                            self.id,
+                                                                            self.name,
+                                                                            status_str,
+                                                                            notification_body['url'])
+        notification_body['friendly_name'] = self.get_notification_friendly_name()
+        return (notification_subject, notification_body)
+
+    def build_notification_succeeded_message(self):
+        return self._build_notification_message('succeeded')
+
+    def build_notification_failed_message(self):
+        return self._build_notification_message('failed')
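
Concretely, a host class only has to supply the two hooks plus a notification_data() payload; everything else comes from the JobNotificationMixin defined above. DemoJob here is an illustrative stand-in, not part of the commit:

class DemoJob(JobNotificationMixin):
    # id/name mimic model fields; notification_data() is provided by the
    # real job models, not by this hunk.
    id = 42
    name = "demo"

    def notification_data(self):
        return {'url': 'https://tower.example.com/#/jobs/42'}

    def get_notification_templates(self):
        return {'success': [], 'error': [], 'any': []}

    def get_notification_friendly_name(self):
        return "Demo Job"

subject, body = DemoJob().build_notification_succeeded_message()
print(subject)  # Demo Job #42 'demo' succeeded on Ansible Tower: https://tower.example.com/#/jobs/42
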

@@ -20,7 +20,10 @@ from django.utils.timezone import now, make_aware, get_default_timezone
from awx.lib.compat import slugify
from awx.main.models.base import * # noqa
from awx.main.models.jobs import Job
-from awx.main.models.notifications import NotificationTemplate
+from awx.main.models.notifications import (
+    NotificationTemplate,
+    JobNotificationMixin,
+)
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.mixins import ResourceMixin
from awx.main.utils import update_scm_url

@@ -372,8 +375,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
    def get_absolute_url(self):
        return reverse('api:project_detail', args=(self.pk,))

-
-class ProjectUpdate(UnifiedJob, ProjectOptions):
+class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin):
    '''
    Internal job for tracking project updates from SCM.
    '''

@@ -443,3 +445,12 @@ class ProjectUpdate(UnifiedJob, ProjectOptions):
            if 'scm_delete_on_next_update' not in update_fields:
                update_fields.append('scm_delete_on_next_update')
            parent_instance.save(update_fields=update_fields)
+
+    '''
+    JobNotificationMixin
+    '''
+    def get_notification_templates(self):
+        return self.project.notification_templates
+
+    def get_notification_friendly_name(self):
+        return "Project Update"

@@ -18,6 +18,7 @@ from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.utils.encoding import smart_text
+from django.apps import apps

# Django-JSONField
from jsonfield import JSONField

@@ -360,8 +361,30 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
                dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
        return unified_job

+class UnifiedJobTypeStringMixin(object):
+    @classmethod
+    def _underscore_to_camel(cls, word):
+        return ''.join(x.capitalize() or '_' for x in word.split('_'))
+
+    @classmethod
+    def _model_type(cls, job_type):
+        # Django >= 1.9
+        #app = apps.get_app_config('main')
+        model_str = cls._underscore_to_camel(job_type)
+        try:
+            return apps.get_model('main', model_str)
+        except LookupError:
+            print("Lookup model error")
+            return None
+
+    @classmethod
+    def get_instance_by_type(cls, job_type, job_id):
+        model = cls._model_type(job_type)
+        if not model:
+            return None
+        return model.objects.get(id=job_id)
+
-class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique):
+class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique, UnifiedJobTypeStringMixin):
    '''
    Concrete base class for unified job run by the task engine.
    '''
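
The mixin's contract is purely a naming convention: a task type like 'project_update' camel-cases to the model class name registered in the 'main' app. Quick checks against the helper defined above:

print(UnifiedJobTypeStringMixin._underscore_to_camel('project_update'))  # ProjectUpdate
print(UnifiedJobTypeStringMixin._underscore_to_camel('ad_hoc_command'))  # AdHocCommand
print(UnifiedJobTypeStringMixin._underscore_to_camel('system_job'))      # SystemJob
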

@@ -185,114 +185,61 @@ def notify_task_runner(metadata_dict):
    queue = FifoQueue('tower_task_manager')
    queue.push(metadata_dict)


+def _send_notification_templates(instance, status_str):
+    if status_str not in ['succeeded', 'failed']:
+        raise ValueError("status_str must be either succeeded or failed")
+    print("Instance has some shit in it %s" % instance)
+    notification_templates = instance.get_notification_templates()
+    if notification_templates:
+        all_notification_templates = set(notification_templates.get('success', []) + notification_templates.get('any', []))
+        if len(all_notification_templates):
+            try:
+                (notification_subject, notification_body) = getattr(instance, 'build_notification_%s_message' % status_str)()
+            except AttributeError:
+                raise NotImplementedError("build_notification_%s_message() does not exist" % status_str)
+            send_notifications.delay([n.generate_notification(notification_subject, notification_body).id
+                                      for n in all_notification_templates],
+                                     job_id=instance.id)

@task(bind=True)
def handle_work_success(self, result, task_actual):
-    if task_actual['type'] == 'project_update':
-        instance = ProjectUpdate.objects.get(id=task_actual['id'])
-        instance_name = instance.name
-        notification_templates = instance.project.notification_templates
-        friendly_name = "Project Update"
-    elif task_actual['type'] == 'inventory_update':
-        instance = InventoryUpdate.objects.get(id=task_actual['id'])
-        instance_name = instance.name
-        notification_templates = instance.inventory_source.notification_templates
-        friendly_name = "Inventory Update"
-    elif task_actual['type'] == 'job':
-        instance = Job.objects.get(id=task_actual['id'])
-        instance_name = instance.job_template.name
-        notification_templates = instance.job_template.notification_templates
-        friendly_name = "Job"
-    elif task_actual['type'] == 'ad_hoc_command':
-        instance = AdHocCommand.objects.get(id=task_actual['id'])
-        instance_name = instance.module_name
-        notification_templates = instance.notification_templates
-        friendly_name = "AdHoc Command"
-    elif task_actual['type'] == 'system_job':
-        instance = SystemJob.objects.get(id=task_actual['id'])
-        instance_name = instance.system_job_template.name
-        notification_templates = instance.system_job_template.notification_templates
-        friendly_name = "System Job"
-    else:
+    instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
+    if not instance:
        return

-    all_notification_templates = set(notification_templates.get('success', []) + notification_templates.get('any', []))
-    if len(all_notification_templates):
-        notification_body = instance.notification_data()
-        notification_subject = "{} #{} '{}' succeeded on Ansible Tower: {}".format(friendly_name,
-                                                                                   task_actual['id'],
-                                                                                   smart_str(instance_name),
-                                                                                   notification_body['url'])
-        notification_body['friendly_name'] = friendly_name
-        send_notifications.delay([n.generate_notification(notification_subject, notification_body).id
-                                  for n in all_notification_templates],
-                                 job_id=task_actual['id'])
+    _send_notification_templates(instance, 'succeeded')

@task(bind=True)
def handle_work_error(self, task_id, subtasks=None):
    print('Executing error task id %s, subtasks: %s' %
          (str(self.request.id), str(subtasks)))
-    first_task = None
-    first_task_id = None
-    first_task_type = ''
-    first_task_name = ''
+    first_instance = None
+    first_instance_type = ''
    if subtasks is not None:
        for each_task in subtasks:
-            instance_name = ''
-            if each_task['type'] == 'project_update':
-                instance = ProjectUpdate.objects.get(id=each_task['id'])
-                instance_name = instance.name
-                notification_templates = instance.project.notification_templates
-                friendly_name = "Project Update"
-            elif each_task['type'] == 'inventory_update':
-                instance = InventoryUpdate.objects.get(id=each_task['id'])
-                instance_name = instance.name
-                notification_templates = instance.inventory_source.notification_templates
-                friendly_name = "Inventory Update"
-            elif each_task['type'] == 'job':
-                instance = Job.objects.get(id=each_task['id'])
-                instance_name = instance.job_template.name
-                notification_templates = instance.job_template.notification_templates
-                friendly_name = "Job"
-            elif each_task['type'] == 'ad_hoc_command':
-                instance = AdHocCommand.objects.get(id=each_task['id'])
-                instance_name = instance.module_name
-                notification_templates = instance.notification_templates
-                friendly_name = "AdHoc Command"
-            elif each_task['type'] == 'system_job':
-                instance = SystemJob.objects.get(id=each_task['id'])
-                instance_name = instance.system_job_template.name
-                notification_templates = instance.system_job_template.notification_templates
-                friendly_name = "System Job"
-            else:
+            instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
+            if not instance:
                # Unknown task type
                logger.warn("Unknown task type: {}".format(each_task['type']))
                continue
-            if first_task is None:
-                first_task = instance
-                first_task_id = instance.id
-                first_task_type = each_task['type']
-                first_task_name = instance_name
-                first_task_friendly_name = friendly_name

+            if first_instance is None:
+                first_instance = instance
+                first_instance_type = each_task['type']

            if instance.celery_task_id != task_id:
                instance.status = 'failed'
                instance.failed = True
                instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
-                    (first_task_type, first_task_name, first_task_id)
+                    (first_instance_type, first_instance.name, first_instance.id)
                instance.save()
                instance.socketio_emit_status("failed")

-    all_notification_templates = set(notification_templates.get('error', []) + notification_templates.get('any', []))
-    if len(all_notification_templates):
-        notification_body = first_task.notification_data()
-        notification_subject = "{} #{} '{}' failed on Ansible Tower: {}".format(first_task_friendly_name,
-                                                                                first_task_id,
-                                                                                smart_str(first_task_name),
-                                                                                notification_body['url'])
-        notification_body['friendly_name'] = first_task_friendly_name
-        send_notifications.delay([n.generate_notification(notification_subject, notification_body).id
-                                  for n in all_notification_templates],
-                                 job_id=first_task_id)
+    if first_instance:
+        print("Instance type is %s" % first_instance_type)
+        print("Instance passing along %s" % first_instance.name)
+        _send_notification_templates(first_instance, 'failed')

@task()
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):

@@ -1306,9 +1253,14 @@ class RunInventoryUpdate(BaseTask):

            credential = inventory_update.credential
            if credential:
-                cp.set(section, 'hostname', credential.host)
+                cp.set(section, 'url', credential.host)
                cp.set(section, 'username', credential.username)
                cp.set(section, 'password', decrypt_field(credential, 'password'))
+                cp.set(section, 'ssl_verify', "false")
+
+            section = 'cache'
+            cp.add_section(section)
+            cp.set(section, 'max_age', "0")

        elif inventory_update.source == 'azure_rm':
            section = 'azure'

@@ -1710,3 +1662,4 @@ class RunSystemJob(BaseTask):

    def build_cwd(self, instance, **kwargs):
        return settings.BASE_DIR
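
The net effect of these hunks: both handlers lose their per-type ladders, recover the model through UnifiedJob.get_instance_by_type, and let the instance's JobNotificationMixin hooks build the subject and body. Roughly, for a finished playbook run (illustrative only; the id is a placeholder):

instance = UnifiedJob.get_instance_by_type('job', 123)   # -> Job.objects.get(id=123)
if instance:
    _send_notification_templates(instance, 'succeeded')  # subject/body built via the mixin
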

@@ -71,7 +71,6 @@ def test_create_user_credential_via_user_credentials_list_xfail(post, alice, bob):
def test_create_team_credential(post, get, team, organization, org_admin, team_member):
    response = post(reverse('api:credential_list'), {
        'team': team.id,
        'organization': organization.id,
        'name': 'Some name',
        'username': 'someusername'
    }, org_admin)

@@ -81,6 +80,9 @@ def test_create_team_credential(post, get, team, organization, org_admin, team_member):
    assert response.status_code == 200
    assert response.data['count'] == 1

+    # Assure that credential's organization is implicitly set to team's org
+    assert response.data['results'][0]['summary_fields']['organization']['id'] == team.organization.id
+
@pytest.mark.django_db
def test_create_team_credential_via_team_credentials_list(post, get, team, org_admin, team_member):
    response = post(reverse('api:team_credentials_list', args=(team.pk,)), {

@@ -54,21 +54,40 @@ def test_credential_migration_team_member(credential, team, user, permissions):

    rbac.migrate_credential(apps, None)

-    # Admin permissions post migration
+    # User permissions post migration
    assert u in credential.use_role
+    assert u not in credential.admin_role

@pytest.mark.django_db
def test_credential_migration_team_admin(credential, team, user, permissions):
    u = user('user', False)
    team.member_role.members.add(u)
+    team.admin_role.members.add(u)
    credential.deprecated_team = team
    credential.save()

    assert u not in credential.use_role

-    # Usage permissions post migration
+    # Admin permissions post migration
    rbac.migrate_credential(apps, None)
    assert u in credential.use_role
+    assert u in credential.admin_role

+@pytest.mark.django_db
+def test_credential_migration_org_auditor(credential, team, org_auditor):
+    # Team's organization is the org_auditor's org
+    credential.deprecated_team = team
+    credential.save()
+
+    # No permissions pre-migration (this happens automatically so we patch this)
+    team.admin_role.children.remove(credential.admin_role)
+    team.member_role.children.remove(credential.use_role)
+    assert org_auditor not in credential.read_role
+
+    rbac.migrate_credential(apps, None)
+    rbac.infer_credential_org_from_team(apps, None)
+
+    # Read permissions post migration
+    assert org_auditor not in credential.use_role
+    assert org_auditor in credential.read_role

def test_credential_access_superuser():
    u = User(username='admin', is_superuser=True)

@@ -192,8 +192,12 @@ class UsersTest(BaseTest):
        self.post(url, expect=403, data=new_user, auth=self.get_other_credentials())
        self.post(url, expect=201, data=new_user, auth=self.get_super_credentials())
        self.post(url, expect=400, data=new_user, auth=self.get_super_credentials())
-        self.post(url, expect=201, data=new_user2, auth=self.get_normal_credentials())
-        self.post(url, expect=400, data=new_user2, auth=self.get_normal_credentials())
+        # org admin cannot create orphaned users
+        self.post(url, expect=403, data=new_user2, auth=self.get_normal_credentials())
+        # org admin can create org users
+        org_url = reverse('api:organization_users_list', args=(self.organizations[0].pk,))
+        self.post(org_url, expect=201, data=new_user2, auth=self.get_normal_credentials())
+        self.post(org_url, expect=400, data=new_user2, auth=self.get_normal_credentials())
        # Normal user cannot add users after his org is marked inactive.
        self.organizations[0].delete()
        new_user3 = dict(username='blippy3')

@@ -325,9 +329,9 @@ class UsersTest(BaseTest):
        detail_url = reverse('api:user_detail', args=(self.other_django_user.pk,))
        data = self.get(detail_url, expect=200, auth=self.get_other_credentials())

-        # can't change first_name, last_name, etc
+        # can change first_name, last_name, etc
        data['last_name'] = "NewLastName"
-        self.put(detail_url, data, expect=403, auth=self.get_other_credentials())
+        self.put(detail_url, data, expect=200, auth=self.get_other_credentials())

        # can't change username
        data['username'] = 'newUsername'

@@ -367,23 +371,20 @@ class UsersTest(BaseTest):
        url = reverse('api:user_list')
        data = dict(username='username', password='password')
        data2 = dict(username='username2', password='password2')
-        data = self.post(url, expect=201, data=data, auth=self.get_normal_credentials())

+        # but a regular user cannot create users
+        self.post(url, expect=403, data=data2, auth=self.get_other_credentials())
+        # org admins cannot create orphaned users
+        self.post(url, expect=403, data=data2, auth=self.get_normal_credentials())

+        # a super user can create new users
+        self.post(url, expect=201, data=data, auth=self.get_super_credentials())
+        # verify that the login works...
+        self.get(url, expect=200, auth=('username', 'password'))

-        # but a regular user cannot
-        data = self.post(url, expect=403, data=data2, auth=self.get_other_credentials())
-
-        # a super user can also create new users
-        data = self.post(url, expect=201, data=data2, auth=self.get_super_credentials())
-
-        # verify that the login works
-        self.get(url, expect=200, auth=('username2', 'password2'))

        # verify that if you post a user with a pk, you do not alter that user's password info
        mod = dict(id=self.super_django_user.pk, username='change', password='change')
-        data = self.post(url, expect=201, data=mod, auth=self.get_super_credentials())
+        self.post(url, expect=201, data=mod, auth=self.get_super_credentials())
        orig = User.objects.get(pk=self.super_django_user.pk)
        self.assertTrue(orig.username != 'change')

@@ -19,7 +19,6 @@ from awx.main.models import (
    Role,
)

-@pytest.mark.skip(reason="Seeing pk error, suspect weirdness in mocking requests")
@pytest.mark.parametrize("pk, err", [
    (111, "not change the membership"),
    (1, "may not perform"),

@@ -38,18 +37,17 @@ def test_user_roles_list_user_admin_role(pk, err):
    factory = APIRequestFactory()
    view = UserRolesList.as_view()

-    user = User(username="root", is_superuser=True)
+    user = User(username="root", is_superuser=True, pk=1, id=1)

    request = factory.post("/user/1/roles", {'id':pk}, format="json")
    force_authenticate(request, user)

-    response = view(request)
+    response = view(request, pk=user.pk)
    response.render()

    assert response.status_code == 403
    assert err in response.content

+@pytest.mark.skip(reason="db access or mocking needed for new tests in role assignment code")
@pytest.mark.parametrize("admin_role, err", [
    (True, "may not perform"),
    (False, "not change the membership"),

@@ -70,10 +68,13 @@ def test_role_users_list_other_user_admin_role(admin_role, err):
    view = RoleUsersList.as_view()

    user = User(username="root", is_superuser=True, pk=1, id=1)
+    queried_user = User(username="maynard")

    request = factory.post("/role/1/users", {'id':1}, format="json")
    force_authenticate(request, user)

-    response = view(request)
+    with mock.patch('awx.api.views.get_object_or_400', return_value=queried_user):
+        response = view(request)
    response.render()

    assert response.status_code == 403

@@ -1,144 +1,462 @@
#!/usr/bin/python
# vim: set fileencoding=utf-8 :
#
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>
#
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with it. If not, see <http://www.gnu.org/licenses/>.
#
# This is loosely based on the foreman inventory script
# -- Josh Preston <jpreston@redhat.com>
#

'''
CloudForms external inventory script
==================================================
Generates inventory that Ansible can understand by making API request to CloudForms.
Modeled after https://raw.githubusercontent.com/ansible/ansible/stable-1.9/plugins/inventory/ec2.py
jlabocki <at> redhat.com or @jameslabocki on twitter
'''

import os
from __future__ import print_function
import argparse
import ConfigParser
import os
import re
from time import time
import requests
import json
from requests.auth import HTTPBasicAuth
import warnings

# This disables warnings and is not a good idea, but hey, this is a demo
# http://urllib3.readthedocs.org/en/latest/security.html#disabling-warnings
requests.packages.urllib3.disable_warnings()
try:
    import json
except ImportError:
    import simplejson as json

class CloudFormsInventory(object):

    def _empty_inventory(self):
        return {"_meta": {"hostvars": {}}}

    def __init__(self):
        ''' Main execution path '''
        """
        Main execution path
        """
        self.inventory = dict()  # A list of groups and the hosts in that group
        self.hosts = dict()      # Details about hosts in the inventory

        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = self._empty_inventory()

        # Index of hostname (address) to instance ID
        self.index = {}

        # Read CLI arguments
        self.read_settings()
        # Parse CLI arguments
        self.parse_cli_args()

        # Get Hosts
        if self.args.list:
            self.get_hosts()
        # Read settings
        self.read_settings()

        # This doesn't exist yet and needs to be added
        # Cache
        if self.args.refresh_cache or not self.is_cache_valid():
            self.update_cache()
        else:
            self.load_inventory_from_cache()
            self.load_hosts_from_cache()

        data_to_print = ""

        # Data to print
        if self.args.host:
            data2 = {}
            print json.dumps(data2, indent=2)
            if self.args.debug:
                print("Fetching host [%s]" % self.args.host)
            data_to_print += self.get_host_info(self.args.host)
        else:
            self.inventory['_meta'] = {'hostvars': {}}
            for hostname in self.hosts:
                self.inventory['_meta']['hostvars'][hostname] = {
                    'cloudforms': self.hosts[hostname],
                }
                # include the ansible_ssh_host in the top level
                if 'ansible_ssh_host' in self.hosts[hostname]:
                    self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host']

    def parse_cli_args(self):
        ''' Command line argument processing '''
            data_to_print += self.json_format_dict(self.inventory, self.args.pretty)

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms')
        parser.add_argument('--list', action='store_true', default=False,
                            help='List instances (default: False)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        self.args = parser.parse_args()
        print(data_to_print)

    def is_cache_valid(self):
        """
        Determines if the cache files have expired, or if it is still valid
        """
        if self.args.debug:
            print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age))

        if os.path.isfile(self.cache_path_hosts):
            mod_time = os.path.getmtime(self.cache_path_hosts)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_inventory):
                    if self.args.debug:
                        print("Cache is still valid!")
                    return True

        if self.args.debug:
            print("Cache is stale or does not exist.")

        return False

    def read_settings(self):
        ''' Reads the settings from the cloudforms.ini file '''

        """
        Reads the settings from the cloudforms.ini file
        """
        config = ConfigParser.SafeConfigParser()
        config_paths = [
            os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cloudforms.ini'),
            "/opt/rh/cloudforms.ini",
            os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini',
            "/etc/ansible/cloudforms.ini",
        ]

        env_value = os.environ.get('CLOUDFORMS_INI_PATH')
        if env_value is not None:
            config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))

        if self.args.debug:
            for config_path in config_paths:
                print("Reading from configuration file [%s]" % config_path)

        config.read(config_paths)

        # Version
        if config.has_option('cloudforms', 'version'):
            self.cloudforms_version = config.get('cloudforms', 'version')
        # CloudForms API related
        if config.has_option('cloudforms', 'url'):
            self.cloudforms_url = config.get('cloudforms', 'url')
        else:
            self.cloudforms_version = "none"
            self.cloudforms_url = None

        # CloudForms Endpoint
        if config.has_option('cloudforms', 'hostname'):
            self.cloudforms_hostname = config.get('cloudforms', 'hostname')
        else:
            self.cloudforms_hostname = None
        if not self.cloudforms_url:
            warnings.warn("No url specified, expected something like 'https://cfme.example.com'")

        # CloudForms Username
        if config.has_option('cloudforms', 'username'):
            self.cloudforms_username = config.get('cloudforms', 'username')
        else:
            self.cloudforms_username = "none"
            self.cloudforms_username = None

        if not self.cloudforms_username:
            warnings.warn("No username specified, you need to specify a CloudForms username.")

        # CloudForms Password
        if config.has_option('cloudforms', 'password'):
            self.cloudforms_password = config.get('cloudforms', 'password')
            self.cloudforms_pw = config.get('cloudforms', 'password')
        else:
            self.cloudforms_password = "none"
            self.cloudforms_pw = None

    def get_hosts(self):
        ''' Gets host from CloudForms '''
        r = requests.get("https://{0}/api/vms?expand=resources&attributes=all".format(self.cloudforms_hostname),
                         auth=(self.cloudforms_username, self.cloudforms_password), verify=False)
        obj = r.json()
        if not self.cloudforms_pw:
            warnings.warn("No password specified, you need to specify a password for the CloudForms user.")

        # Create groups+hosts based on host data
        for resource in obj.get('resources', []):
        if config.has_option('cloudforms', 'ssl_verify'):
            self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify')
        else:
            self.cloudforms_ssl_verify = True

            # Maintain backwards compat by creating `Dynamic_CloudForms` group
            if 'Dynamic_CloudForms' not in self.inventory:
                self.inventory['Dynamic_CloudForms'] = []
            self.inventory['Dynamic_CloudForms'].append(resource['name'])
        if config.has_option('cloudforms', 'version'):
            self.cloudforms_version = config.get('cloudforms', 'version')
        else:
            self.cloudforms_version = None

            # Add host to desired groups
            for key in ('vendor', 'type', 'location'):
                if key in resource:
                    # Create top-level group
                    if key not in self.inventory:
                        self.inventory[key] = dict(children=[], vars={}, hosts=[])
                    # if resource['name'] not in self.inventory[key]['hosts']:
                    #     self.inventory[key]['hosts'].append(resource['name'])
        if config.has_option('cloudforms', 'limit'):
            self.cloudforms_limit = config.getint('cloudforms', 'limit')
        else:
            self.cloudforms_limit = 100

                    # Create sub-group
                    if resource[key] not in self.inventory:
                        self.inventory[resource[key]] = dict(children=[], vars={}, hosts=[])
                        # self.inventory[resource[key]]['hosts'].append(resource['name'])
        if config.has_option('cloudforms', 'purge_actions'):
            self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions')
        else:
            self.cloudforms_purge_actions = True

                    # Add sub-group, as a child of top-level
                    if resource[key] not in self.inventory[key]['children']:
                        self.inventory[key]['children'].append(resource[key])
        if config.has_option('cloudforms', 'clean_group_keys'):
            self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys')
        else:
            self.cloudforms_clean_group_keys = True

        if config.has_option('cloudforms', 'nest_tags'):
            self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags')
        else:
            self.cloudforms_nest_tags = False

        # Ansible related
        try:
            group_patterns = config.get('ansible', 'group_patterns')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            group_patterns = "[]"

        self.group_patterns = eval(group_patterns)

        # Cache related
        try:
            cache_path = os.path.expanduser(config.get('cache', 'path'))
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            cache_path = '.'
        (script, ext) = os.path.splitext(os.path.basename(__file__))
        self.cache_path_hosts = cache_path + "/%s.hosts" % script
        self.cache_path_inventory = cache_path + "/%s.inventory" % script
        self.cache_max_age = config.getint('cache', 'max_age')

        if self.args.debug:
            print("CloudForms settings:")
            print("cloudforms_url = %s" % self.cloudforms_url)
            print("cloudforms_username = %s" % self.cloudforms_username)
            print("cloudforms_pw = %s" % self.cloudforms_pw)
            print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify)
            print("cloudforms_version = %s" % self.cloudforms_version)
            print("cloudforms_limit = %s" % self.cloudforms_limit)
            print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions)
            print("Cache settings:")
            print("cache_max_age = %s" % self.cache_max_age)
            print("cache_path_hosts = %s" % self.cache_path_hosts)
            print("cache_path_inventory = %s" % self.cache_path_inventory)

    def parse_cli_args(self):
        """
        Command line argument processing
        """
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs')
        parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
        parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
        parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)')
        parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)')
        self.args = parser.parse_args()

    def _get_json(self, url):
        """
        Make a request and return the JSON
        """
        results = []

        ret = requests.get(url,
                           auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw),
                           verify=self.cloudforms_ssl_verify)

        ret.raise_for_status()

        try:
            results = json.loads(ret.text)
        except ValueError:
            warnings.warn("Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason))
            results = {}

        if self.args.debug:
            print("=======================================================================")
            print("=======================================================================")
            print("=======================================================================")
            print(ret.text)
            print("=======================================================================")
            print("=======================================================================")
            print("=======================================================================")

        return results

    def _get_hosts(self):
        """
        Get all hosts by paging through the results
        """
        limit = self.cloudforms_limit

        page = 0
        last_page = False

        results = []

        while not last_page:
            offset = page * limit
            ret = self._get_json("%s/api/vms?offset=%s&limit=%s&expand=resources,tags,hosts,&attributes=ipaddresses" % (self.cloudforms_url, offset, limit))
            results += ret['resources']
            if ret['subcount'] < limit:
                last_page = True
            page += 1

        return results

    def update_cache(self):
        """
        Make calls to cloudforms and save the output in a cache
        """
        self.groups = dict()
        self.hosts = dict()

        if self.args.debug:
            print("Updating cache...")

        for host in self._get_hosts():
            # Ignore VMs that are not powered on
            if host['power_state'] != 'on':
                if self.args.debug:
                    print("Skipping %s because power_state = %s" % (host['name'], host['power_state']))
                continue

            # purge actions
            if self.cloudforms_purge_actions and 'actions' in host:
                del host['actions']

            # Create ansible groups for tags
            if 'tags' in host:

                # Create top-level group
                if 'tags' not in self.inventory:
                    self.inventory['tags'] = dict(children=[], vars={}, hosts=[])

                if not self.cloudforms_nest_tags:
                    # don't expand tags, just use them in a safe way
                    for group in host['tags']:
                        # Add sub-group, as a child of top-level
                        safe_key = self.to_safe(group['name'])
                        if safe_key:
                            if self.args.debug:
                                print("Adding sub-group '%s' to parent 'tags'" % safe_key)

                            if safe_key not in self.inventory['tags']['children']:
                                self.push(self.inventory['tags'], 'children', safe_key)

                            self.push(self.inventory, safe_key, host['name'])

                            if self.args.debug:
                                print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key))
                else:
                    # expand the tags into nested groups / sub-groups
                    # Create nested groups for tags
                    safe_parent_tag_name = 'tags'
                    for tag in host['tags']:
                        tag_hierarchy = tag['name'][1:].split('/')

                        if self.args.debug:
                            print("Working on list %s" % tag_hierarchy)

                        for tag_name in tag_hierarchy:
                            if self.args.debug:
                                print("Working on tag_name = %s" % tag_name)

                            safe_tag_name = self.to_safe(tag_name)
                            if self.args.debug:
                                print("Using sanitized name %s" % safe_tag_name)

                            # Create sub-group
                            if safe_tag_name not in self.inventory:
                                self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[])

                            # Add sub-group, as a child of top-level
                            if safe_parent_tag_name:
                                if self.args.debug:
                                    print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name))

                                if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']:
                                    self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name)

                            # Make sure the next one uses this one as it's parent
                            safe_parent_tag_name = safe_tag_name

                        # Add the host to the last tag
                        self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name'])

            # Set ansible_ssh_host to the first available ip address
            if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list):
                host['ansible_ssh_host'] = host['ipaddresses'][0]

            # Create additional groups
            for key in ('location', 'type', 'vendor'):
                safe_key = self.to_safe(host[key])

                # Create top-level group
                if key not in self.inventory:
                    self.inventory[key] = dict(children=[], vars={}, hosts=[])

                # Create sub-group
                if safe_key not in self.inventory:
                    self.inventory[safe_key] = dict(children=[], vars={}, hosts=[])

                # Add sub-group, as a child of top-level
                if safe_key not in self.inventory[key]['children']:
                    self.push(self.inventory[key], 'children', safe_key)

                if key in host:
                    # Add host to sub-group
                    if resource['name'] not in self.inventory[resource[key]]:
                        self.inventory[resource[key]]['hosts'].append(resource['name'])
                    self.push(self.inventory[safe_key], 'hosts', host['name'])

            # Delete 'actions' key
            del resource['actions']
            self.hosts[host['name']] = host
            self.push(self.inventory, 'all', host['name'])

            # Add _meta hostvars
            self.inventory['_meta']['hostvars'][resource['name']] = resource
        if self.args.debug:
            print("Saving cached data")

        print json.dumps(self.inventory, indent=2)
        self.write_to_cache(self.hosts, self.cache_path_hosts)
        self.write_to_cache(self.inventory, self.cache_path_inventory)

    def get_host_info(self, host):
        """
        Get variables about a specific host
        """
        if not self.hosts or len(self.hosts) == 0:
            # Need to load cache from cache
            self.load_hosts_from_cache()

        if host not in self.hosts:
            if self.args.debug:
                print("[%s] not found in cache." % host)

            # try updating the cache
            self.update_cache()

            if host not in self.hosts:
                if self.args.debug:
                    print("[%s] does not exist after cache update." % host)
                # host might not exist anymore
                return self.json_format_dict({}, self.args.pretty)

        return self.json_format_dict(self.hosts[host], self.args.pretty)

    def push(self, d, k, v):
        """
        Safely puts a new entry onto an array.
        """
        if k in d:
            d[k].append(v)
        else:
            d[k] = [v]

    def load_inventory_from_cache(self):
        """
        Reads the inventory from the cache file sets self.inventory
        """
        cache = open(self.cache_path_inventory, 'r')
        json_inventory = cache.read()
        self.inventory = json.loads(json_inventory)

    def load_hosts_from_cache(self):
        """
        Reads the cache from the cache file sets self.hosts
        """
        cache = open(self.cache_path_hosts, 'r')
        json_cache = cache.read()
        self.hosts = json.loads(json_cache)

    def write_to_cache(self, data, filename):
        """
        Writes data in JSON format to a file
        """
        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        """
        Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
        """
        if self.cloudforms_clean_group_keys:
            regex = "[^A-Za-z0-9\_]"
            return re.sub(regex, "_", word.replace(" ", ""))
        else:
            return word

    def json_format_dict(self, data, pretty=False):
        """
        Converts a dict to a JSON object and dumps it as a formatted string
        """
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)

# Run the script
CloudFormsInventory()

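For orientation, the --list output produced by json_format_dict has roughly the shape below. Host, tag, and vendor names are invented; only the keys follow from the code above. Note that tag groups created via push() directly on self.inventory are plain host lists, while vendor/type/location groups carry the children/hosts/vars structure:

{
  "_meta": {
    "hostvars": {
      "vm1": {
        "ansible_ssh_host": "10.0.0.5",
        "cloudforms": {"power_state": "on", "...": "raw VM attributes from the API"}
      }
    }
  },
  "all": ["vm1"],
  "tags": {"children": ["environment_prod"], "hosts": [], "vars": {}},
  "environment_prod": ["vm1"],
  "vendor": {"children": ["vmware"], "hosts": [], "vars": {}},
  "vmware": {"children": [], "hosts": ["vm1"], "vars": {}}
}
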
@@ -1,8 +1,6 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
-# NOTE FOR TOWER: change foreman_ to sattelite_ for the group prefix
-#
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>
#
# This script is free software: you can redistribute it and/or modify

@@ -41,6 +39,7 @@ class ForemanInventory(object):
        self.inventory = dict()  # A list of groups and the hosts in that group
        self.cache = dict()      # Details about hosts in the inventory
        self.params = dict()     # Params of each host
+        self.facts = dict()      # Facts of each host
        self.hostgroups = dict()  # host groups

        # Read settings and parse CLI arguments

@@ -55,6 +54,7 @@ class ForemanInventory(object):
        else:
            self.load_inventory_from_cache()
            self.load_params_from_cache()
+            self.load_facts_from_cache()
            self.load_cache_from_cache()

        data_to_print = ""

@@ -69,6 +69,9 @@ class ForemanInventory(object):
                    'foreman': self.cache[hostname],
                    'foreman_params': self.params[hostname],
                }
+                if self.want_facts:
+                    self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname]

            data_to_print += self.json_format_dict(self.inventory, True)

        print(data_to_print)

@@ -81,7 +84,8 @@ class ForemanInventory(object):
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if (os.path.isfile(self.cache_path_inventory) and
-                        os.path.isfile(self.cache_path_params)):
+                        os.path.isfile(self.cache_path_params) and
+                        os.path.isfile(self.cache_path_facts)):
                    return True
        return False

@ -114,6 +118,16 @@ class ForemanInventory(object):

        self.group_patterns = eval(group_patterns)

        try:
            self.group_prefix = config.get('ansible', 'group_prefix')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.group_prefix = "foreman_"

        try:
            self.want_facts = config.getboolean('ansible', 'want_facts')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.want_facts = True

        # Cache related
        try:
            cache_path = os.path.expanduser(config.get('cache', 'path'))
@ -123,6 +137,7 @@ class ForemanInventory(object):
        self.cache_path_cache = cache_path + "/%s.cache" % script
        self.cache_path_inventory = cache_path + "/%s.index" % script
        self.cache_path_params = cache_path + "/%s.params" % script
        self.cache_path_facts = cache_path + "/%s.facts" % script
        self.cache_max_age = config.getint('cache', 'max_age')

    def parse_cli_args(self):
@ -135,7 +150,7 @@ class ForemanInventory(object):
                            help='Force refresh of cache by making API requests to foreman (default: False - use cache files)')
        self.args = parser.parse_args()

    def _get_json(self, url):
    def _get_json(self, url, ignore_errors=None):
        page = 1
        results = []
        while True:
@ -143,10 +158,14 @@ class ForemanInventory(object):
                               auth=HTTPBasicAuth(self.foreman_user, self.foreman_pw),
                               verify=self.foreman_ssl_verify,
                               params={'page': page, 'per_page': 250})
            if ignore_errors and ret.status_code in ignore_errors:
                break
            ret.raise_for_status()
            json = ret.json()
            if not json.has_key('results'):
                return json
            if type(json['results']) == type({}):
                return json['results']
            results = results + json['results']
            if len(results) >= json['total']:
                break
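The pagination loop in _get_json, including the new ignore_errors escape hatch, can be read in isolation; here is a standalone sketch of the same pattern (the function is lifted out of the class, and the endpoint URL in the usage line is hypothetical):

import requests
from requests.auth import HTTPBasicAuth

def get_json(url, user, pw, ignore_errors=None):
    """Fetch every page of a Foreman-style paginated endpoint."""
    page, results = 1, []
    while True:
        ret = requests.get(url, auth=HTTPBasicAuth(user, pw),
                           params={'page': page, 'per_page': 250})
        if ignore_errors and ret.status_code in ignore_errors:
            break  # the caller chose to treat e.g. a 404 as "no data"
        ret.raise_for_status()
        data = ret.json()
        if 'results' not in data:
            return data              # non-paginated endpoint
        if isinstance(data['results'], dict):
            return data['results']   # keyed results are never paginated
        results += data['results']
        if len(results) >= data['total']:
            break
        page += 1
    return results

# e.g. get_json('https://foreman.example.com/api/v2/hosts', 'admin', 'secret', ignore_errors=[404])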
@ -162,38 +181,44 @@ class ForemanInventory(object):
            self.hostgroups[hid] = self._get_json(url)
        return self.hostgroups[hid]

    def _get_params_by_id(self, hid):
        url = "%s/api/v2/hosts/%s/parameters" % (self.foreman_url, hid)
    def _get_all_params_by_id(self, hid):
        url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
        ret = self._get_json(url, [404])
        if ret == []: ret = {}
        return ret.get('all_parameters', {})

    def _get_facts_by_id(self, hid):
        url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
        return self._get_json(url)

    def _resolve_params(self, host):
        """
        Resolve all host group params of the host using the top level
        hostgroup and the ancestry.
        Fetch host params and convert to dict
        """
        hostgroup_id = host['hostgroup_id']
        paramgroups = []
        params = {}

        if hostgroup_id:
            hostgroup = self._get_hostgroup_by_id(hostgroup_id)
            ancestry_path = hostgroup.get('ancestry', '')
            ancestry = ancestry_path.split('/') if ancestry_path is not None else []

            # Append top level hostgroup last to overwrite lower levels
            # values
            ancestry.append(hostgroup_id)
            paramgroups = [self._get_hostgroup_by_id(hostgroup_id)['parameters']
                           for hostgroup_id in ancestry]

        paramgroups += [self._get_params_by_id(host['id'])]
        for paramgroup in paramgroups:
            for param in paramgroup:
                name = param['name']
                params[name] = param['value']
        for param in self._get_all_params_by_id(host['id']):
            name = param['name']
            params[name] = param['value']

        return params
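The loop above encodes a simple precedence rule: hostgroup parameter groups are applied in ancestry order, and host-level parameters are applied last, so values closer to the host win. A toy illustration with hypothetical values:

# Hypothetical parameter groups, ordered parent hostgroup -> host.
paramgroups = [
    [{'name': 'ntp_server', 'value': 'pool.ntp.org'}],     # inherited from the hostgroup
    [{'name': 'ntp_server', 'value': 'ntp.example.com'}],  # set on the host itself
]
params = {}
for paramgroup in paramgroups:
    for param in paramgroup:
        params[param['name']] = param['value']
print(params)  # {'ntp_server': 'ntp.example.com'}: the host-level value wins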
    def _get_facts(self, host):
        """
        Fetch all host facts of the host
        """
        if not self.want_facts:
            return {}

        ret = self._get_facts_by_id(host['id'])
        if len(ret.values()) == 0:
            facts = {}
        elif len(ret.values()) == 1:
            facts = ret.values()[0]
        else:
            raise ValueError("More than one set of facts returned for '%s'" % host)
        return facts

    def update_cache(self):
        """Make calls to foreman and save the output in a cache"""
@ -203,11 +228,17 @@ class ForemanInventory(object):
        for host in self._get_hosts():
            dns_name = host['name']

            # Create ansible groups for hostgroup, location and organization
            for group in ['hostgroup', 'location', 'organization']:
            # Create ansible groups for hostgroup, environment, location and organization
            for group in ['hostgroup', 'environment', 'location', 'organization']:
                val = host.get('%s_name' % group)
                if val:
                    safe_key = self.to_safe('satellite_%s_%s' % (group, val.lower()))
                    safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower()))
                    self.push(self.inventory, safe_key, dns_name)

            for group in ['lifecycle_environment', 'content_view']:
                val = host.get('content_facet_attributes', {}).get('%s_name' % group)
                if val:
                    safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower()))
                    self.push(self.inventory, safe_key, dns_name)

            params = self._resolve_params(host)
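To make the new configurable prefix concrete, here is what the group-key construction above yields for a hypothetical host (a sketch; to_safe is reduced to its regex branch):

import re

def to_safe(word):
    # Simplified stand-in for the script's to_safe() sanitizer.
    return re.sub("[^A-Za-z0-9_]", "_", word)

group_prefix = 'foreman_'  # the default set in get_config()
host = {'hostgroup_name': 'Web Servers', 'environment_name': 'Production'}
for group in ['hostgroup', 'environment', 'location', 'organization']:
    val = host.get('%s_name' % group)
    if val:
        print(to_safe('%s%s_%s' % (group_prefix, group, val.lower())))
# foreman_hostgroup_web_servers
# foreman_environment_production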
@ -231,11 +262,13 @@ class ForemanInventory(object):

            self.cache[dns_name] = host
            self.params[dns_name] = params
            self.facts[dns_name] = self._get_facts(host)
            self.push(self.inventory, 'all', dns_name)

        self.write_to_cache(self.cache, self.cache_path_cache)
        self.write_to_cache(self.inventory, self.cache_path_inventory)
        self.write_to_cache(self.params, self.cache_path_params)
        self.write_to_cache(self.facts, self.cache_path_facts)

    def get_host_info(self):
        """ Get variables about a specific host """
@ -274,6 +307,14 @@ class ForemanInventory(object):
        json_params = cache.read()
        self.params = json.loads(json_params)

    def load_facts_from_cache(self):
        """ Reads the facts from the cache file and sets self.facts """
        if not self.want_facts:
            return
        cache = open(self.cache_path_facts, 'r')
        json_facts = cache.read()
        self.facts = json.loads(json_facts)

    def load_cache_from_cache(self):
        """ Reads the cache from the cache file and sets self.cache """
@ -301,4 +342,7 @@ class ForemanInventory(object):
        else:
            return json.dumps(data)

ForemanInventory()
if __name__ == '__main__':
    ForemanInventory()
@ -351,7 +351,7 @@ CELERYBEAT_SCHEDULE = {
    },
}

# Use Redis as cache backend (except when testing).
# Django Caching Configuration
if is_testing():
    CACHES = {
        'default': {
@ -361,8 +361,8 @@ if is_testing():
else:
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': BROKER_URL,
            'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
            'LOCATION': 'memcached:11211',
        },
    }
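With the cache backend now pointing at memcached instead of Redis, a quick sanity check from a Django shell might look like this sketch (the key name is hypothetical):

# Run inside `manage.py shell`; exercises the CACHES['default'] defined above.
from django.core.cache import cache

cache.set('awx_cache_smoke_test', 1, 30)
assert cache.get('awx_cache_smoke_test') == 1  # fails if memcached:11211 is unreachable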
@ -7,18 +7,31 @@
import sys
import traceback

# CentOS 7 doesn't include the svg mime type
# /usr/lib64/python/mimetypes.py
import mimetypes

# Django Split Settings
from split_settings.tools import optional, include

# Load default settings.
from defaults import * # NOQA

mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)

MONGO_HOST = '127.0.0.1'
MONGO_PORT = 27017
MONGO_USERNAME = None
MONGO_PASSWORD = None
MONGO_DB = 'system_tracking_dev'

# Override django.template.loaders.cached.Loader in defaults.py
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

# Disable capturing all SQL queries when running celeryd in development.
if 'celeryd' in sys.argv:
    SQL_DEBUG = False
@ -71,9 +84,9 @@ include(optional('/etc/tower/settings.py'), scope=locals())
include(optional('/etc/tower/conf.d/*.py'), scope=locals())

ANSIBLE_USE_VENV = True
ANSIBLE_VENV_PATH = "/tower_devel/venv/ansible"
ANSIBLE_VENV_PATH = "/venv/ansible"
TOWER_USE_VENV = True
TOWER_VENV_PATH = "/tower_devel/venv/tower"
TOWER_VENV_PATH = "/venv/tower"

# If any local_*.py files are present in awx/settings/, use them to override
# default settings for development. If not present, we can still run using
@ -48,23 +48,8 @@ if is_testing(sys.argv):

MONGO_DB = 'system_tracking_test'

# Django Caching Configuration
if is_testing():
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    }
else:
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
            'LOCATION': 'memcached:11211',
        },
    }

# Celery AMQP configuration.
BROKER_URL = 'qpid://qpid:5672'
BROKER_URL = 'amqp://guest:guest@rabbitmq//'

# Mongo host configuration
MONGO_HOST = NotImplemented
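The broker change above moves Celery from Qpid to RabbitMQ; the new URL follows the standard AMQP scheme amqp://user:password@host//, where the trailing // selects the default vhost '/'. A connectivity check with kombu (the transport library underneath Celery) could look like this sketch:

# Hypothetical smoke test for the new broker URL; assumes the rabbitmq
# container from the docker-compose file is running.
from kombu import Connection

with Connection('amqp://guest:guest@rabbitmq//') as conn:
    conn.ensure_connection(max_retries=3)  # raises if RabbitMQ is unreachable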
@ -1,2 +1,21 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

# Python
import threading

# Monkeypatch xmlsec.initialize() to only run once (https://github.com/ansible/ansible-tower/issues/3241).
xmlsec_init_lock = threading.Lock()
xmlsec_initialized = False

import dm.xmlsec.binding
original_xmlsec_initialize = dm.xmlsec.binding.initialize

def xmlsec_initialize(*args, **kwargs):
    global xmlsec_init_lock, xmlsec_initialized, original_xmlsec_initialize
    with xmlsec_init_lock:
        if not xmlsec_initialized:
            original_xmlsec_initialize(*args, **kwargs)
            xmlsec_initialized = True

dm.xmlsec.binding.initialize = xmlsec_initialize
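The lock-and-flag guard used here is a general recipe for making a non-idempotent C-library initializer safe to call more than once, including from several threads. A generic sketch of the same idea as a reusable wrapper (the name once is hypothetical):

import threading

def once(fn):
    """Wrap fn so that all callers combined execute it at most one time."""
    lock = threading.Lock()
    done = [False]  # a list so the closure can mutate it under Python 2

    def wrapper(*args, **kwargs):
        with lock:
            if not done[0]:
                fn(*args, **kwargs)
                done[0] = True
    return wrapper

# e.g.: dm.xmlsec.binding.initialize = once(dm.xmlsec.binding.initialize)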
@ -14,7 +14,6 @@ import 'jquery.resize';
import 'codemirror';
import 'js-yaml';
import 'select2';
import 'rrule';

// Configuration dependencies
global.$AnsibleConfig = null;
@ -38,9 +38,6 @@
<label class="Form-inputLabel">
    <span class="red-text">*</span>
    Start Date
    <span class="fmt-help">
        (mm/dd/yyyy)
    </span>
</label>
<div class="input-group Form-inputGroup SchedulerForm-inputGroup--date">
    <scheduler-date-picker date="schedulerStartDt">
@ -487,9 +484,6 @@
<label class="Form-inputLabel">
    <span class="red-text">*</span>
    End Date
    <span class="fmt-help">
        (mm/dd/yyyy)
    </span>
</label>
<div class="input-group Form-inputGroup SchedulerForm-inputGroup--date">
    <scheduler-date-picker date="$parent.schedulerEndDt">

@ -38,9 +38,6 @@
<label class="Form-inputLabel">
    <span class="red-text">*</span>
    Start Date
    <span class="fmt-help">
        (mm/dd/yyyy)
    </span>
</label>
<div class="input-group Form-inputGroup SchedulerForm-inputGroup--date">
    <scheduler-date-picker date="schedulerStartDt">
@ -469,9 +466,6 @@
<label class="Form-inputLabel">
    <span class="red-text">*</span>
    End Date
    <span class="fmt-help">
        (mm/dd/yyyy)
    </span>
</label>
<div class="input-group Form-inputGroup SchedulerForm-inputGroup--date">
    <scheduler-date-picker date="$parent.schedulerEndDt">
@ -5,7 +5,10 @@ module.exports = {
    http: {
        bsFiles: {
            src: [
                'static/**/*'
                'static/**/*',
                '!static/tower.vendor.js',
                '!static/tower.vendor.map.js',
                '!static/tower.js.map'
            ]
        },
        options: {
@ -74,7 +74,6 @@ module.exports = function(config) {
            test: /\.angular.js$/,
            loader: 'expose?angular'
        },

        {
            test: /\.js$/,
            loader: 'babel-loader',
@ -85,11 +84,12 @@ module.exports = function(config) {
            }
        }, {
            test: /\.js$/,
            loader: 'babel-istanbul',
            loader: 'babel-loader',
            include: [path.resolve() + '/client/src/'],
            exclude: '/(node_modules)/',
            query: {
                presets: ['es2015']
                presets: ['es2015'],
                plugins: ['istanbul']
            }
        }
    ]

@ -58,11 +58,12 @@ module.exports = function(config) {
            }
        }, {
            test: /\.js$/,
            loader: 'babel-istanbul',
            loader: 'babel-loader',
            include: [path.resolve() + '/client/src/'],
            exclude: '/(node_modules)/',
            query: {
                presets: ['es2015']
                presets: ['es2015'],
                plugins: ['istanbul']
            }
        }
    ]
844
awx/ui/npm-shrinkwrap.json
generated
File diff suppressed because it is too large
@ -29,8 +29,8 @@
    "angular-mocks": "^1.5.8",
    "babel-core": "^6.11.4",
    "babel-istanbul": "^0.11.0",
    "babel-istanbul-loader": "^0.1.0",
    "babel-loader": "^6.2.4",
    "babel-plugin-istanbul": "^2.0.0",
    "babel-preset-es2015": "^6.9.0",
    "browser-sync": "^2.14.0",
    "expose-loader": "^0.7.1",
@ -54,6 +54,7 @@
    "karma-chrome-launcher": "^1.0.1",
    "karma-coverage": "^1.1.1",
    "karma-firefox-launcher": "^1.0.0",
    "karma-html2js-preprocessor": "^1.0.0",
    "karma-jasmine": "^1.0.2",
    "karma-sauce-launcher": "^1.0.0",
    "karma-sourcemap-loader": "^0.3.7",
@ -61,7 +62,6 @@
    "less-plugin-autoprefix": "^1.4.2",
    "load-grunt-configs": "^1.0.0",
    "load-grunt-tasks": "^3.5.0",
    "stats-webpack-plugin": "^0.4.0",
    "time-grunt": "^1.4.0",
    "webpack": "^1.13.1",
    "webpack-dev-server": "^1.14.1"
@ -76,7 +76,7 @@
    "angular-moment": "^0.10.1",
    "angular-resource": "^1.4.3",
    "angular-sanitize": "^1.4.3",
    "angular-scheduler": "chouseknecht/angular-scheduler#0.0.20",
    "angular-scheduler": "chouseknecht/angular-scheduler#0.1.0",
    "angular-tz-extensions": "chouseknecht/angular-tz-extensions#0.3.11",
    "angular-ui-router": "^0.2.15",
    "bootstrap": "^3.1.1",
@ -95,7 +95,6 @@
    "moment": "^2.10.2",
    "ng-toast": "leigh-johnson/ngToast#2.0.1",
    "nvd3": "leigh-johnson/nvd3#1.7.1",
    "rrule": "jkbrzt/rrule#4ff63b2f8524fd6d5ba6e80db770953b5cd08a0c",
    "select2": "^4.0.2",
    "socket.io-client": "^0.9.17"
}
@ -1,6 +1,5 @@
var path = require('path'),
    webpack = require('webpack'),
    StatsPlugin = require('stats-webpack-plugin');
    webpack = require('webpack');

var vendorPkgs = [
    'angular',
@ -51,13 +50,9 @@ var dev = {
            'CodeMirror': 'codemirror',
            'jsyaml': 'js-yaml',
            'jsonlint': 'codemirror.jsonlint',
            'RRule': 'rrule'
        }),
        // (chunkName, outfileName)
        new webpack.optimize.CommonsChunkPlugin('vendor', 'tower.vendor.js'),
        new StatsPlugin('stats.json', {
            chunkModules: true
        })
    ],
    module: {
        preLoaders: [{
@ -70,15 +65,6 @@ var dev = {
            }
        }],
        loaders: [
            { // expose RRule global for nlp module, whose AMD/CJS loading methods are broken
                test: /\.rrule.js$/,
                loader: 'expose?RRule'
            },
            {
                test: /\.nlp.js$/,
                // disable CommonJS & AMD loading (broken in this lib)
                loader: 'imports?require=>false&define=>false'
            },
            {
                // disable AMD loading (broken in this lib) and default to CommonJS (not broken)
                test: /\.angular-tz-extensions.js$/,
@ -91,9 +77,6 @@ var dev = {
            query: {
                presets: ['es2015']
            }
        }, {
            test: /\.nlp.js$/,
            loader: 'imports?RRule=rrule'
        }]
    },
    resolve: {
@ -134,15 +117,7 @@ var release = {
        })
    ],
    module: {
        loaders: [{
            test: /\.rrule.js$/,
            loader: 'expose?RRule'
        },
        {
            test: /\.nlp.js$/,
            // disable CommonJS (broken in this lib)
            loader: 'imports?require=>false'
        },
        loaders: [
        {
            // disable AMD loading (broken in this lib) and default to CommonJS (not broken)
            test: /\.angular-tz-extensions.js$/,
11232
npm-shrinkwrap.json
generated
File diff suppressed because it is too large
@ -1,7 +1,7 @@
[pytest]
DJANGO_SETTINGS_MODULE = awx.settings.development
python_paths = venv/tower/lib/python2.7/site-packages
site_dirs = venv/tower/lib/python2.7/site-packages
python_paths = /venv/tower/lib/python2.7/site-packages
site_dirs = /venv/tower/lib/python2.7/site-packages
python_files = *.py
addopts = --reuse-db --nomigrations --tb=native
markers =
@ -21,8 +21,6 @@ django-extensions==1.5.9
git+https://github.com/chrismeyersfsu/django-jsonbfield@fix-sqlite_serialization#egg=jsonbfield
django-polymorphic==0.7.2
django-radius==1.0.0
# NOTE: Remove when we transition packaging
django-redis-cache==1.6.5
djangorestframework==3.3.2
djangorestframework-yaml==1.0.2
django-split-settings==0.1.1
@ -110,13 +108,10 @@ python-troveclient==1.4.0
pytz==2015.7
PyYAML==3.11
pyzmq==14.5.0
qpid-python==0.32.1
rackspace-auth-openstack==1.3
rackspace-novaclient==1.5
rax-default-network-flags-python-novaclient-ext==0.3.2
rax-scheduled-images-python-novaclient-ext==0.3.1
# NOTE: Remove this when we transition packaging
redis==2.10.3
requests-oauthlib==0.5.0
requests==2.9.1
requestsexceptions==1.1.1
@ -10,4 +10,3 @@ pytest-cov
pytest-django
pytest-pythonpath
pytest-mock
qpid-tools
@ -9,7 +9,7 @@ services:
    links:
      - postgres
      - memcached
      - qpid
      - rabbitmq
    # - sync
    # volumes_from:
    #   - sync
@ -23,9 +23,8 @@ services:
  memcached:
    image: memcached:alpine

  qpid:
    image: fedora/qpid:latest
    entrypoint: qpidd --auth=no
  rabbitmq:
    image: rabbitmq:3-management

  # Source Code Synchronization Container
  # sync:
@ -4,7 +4,7 @@ set +x
# Wait for the databases to come up
ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=postgres port=5432" all
ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=memcached port=11211" all
ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=qpid port=5672" all
ansible -i "127.0.0.1," -c local -v -m wait_for -a "host=rabbitmq port=5672" all

# In case Tower in the container wants to connect to itself, use "docker exec" to attach to the container otherwise
# TODO: FIX
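For environments where the ansible CLI is not available, the same readiness loop can be written in plain Python; a sketch (the timeout value is arbitrary):

# Pure-Python equivalent of the wait_for calls above.
import socket
import time

def wait_for(host, port, timeout=300):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            socket.create_connection((host, port), timeout=5).close()
            return
        except socket.error:
            time.sleep(1)
    raise RuntimeError('%s:%s never came up' % (host, port))

for service, port in [('postgres', 5432), ('memcached', 11211), ('rabbitmq', 5672)]:
    wait_for(service, port)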