Mirror of https://github.com/ansible/awx.git, commit 501c91f035

CHANGELOG.md (19 changed lines)
@@ -2,6 +2,25 @@

This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

## 15.0.0 (September 30, 2020)
- AWX now utilizes a version of certifi that auto-discovers certificates in the system certificate store - https://github.com/ansible/awx/pull/8242
- Added support for arbitrary custom inventory plugin configuration: https://github.com/ansible/awx/issues/5150
- Added improved support for fetching Ansible collections from private Galaxy content sources (such as https://github.com/ansible/galaxy_ng) - https://github.com/ansible/awx/issues/7813
- Added an optional setting to disable the auto-creation of organizations and teams on successful SAML login. - https://github.com/ansible/awx/pull/8069
- Added a number of optimizations to AWX's callback receiver to improve the speed of stdout processing for simultaneous playbook runs - https://github.com/ansible/awx/pull/8193 https://github.com/ansible/awx/pull/8191
- Added the ability to use `!include` and `!import` constructors when constructing YAML for use with the AWX CLI (see the constructor sketch after this changelog excerpt) - https://github.com/ansible/awx/issues/8135
- Fixed a bug that prevented certain users from being able to edit approval nodes in Workflows - https://github.com/ansible/awx/pull/8253
- Fixed a bug that broke password prompting for credentials in certain cases - https://github.com/ansible/awx/issues/8202
- Fixed a bug which can cause PostgreSQL deadlocks when running many parallel playbooks against large shared inventories - https://github.com/ansible/awx/issues/8145
- Fixed a bug which can cause delays in AWX's task manager when large numbers of simultaneous jobs are scheduled - https://github.com/ansible/awx/issues/7655
- Fixed a bug which can cause certain scheduled jobs - those that run every X minute(s) or hour(s) - to fail to run at the proper time - https://github.com/ansible/awx/issues/8071
- Fixed a performance issue for playbooks that store large amounts of data using the `set_stats` module - https://github.com/ansible/awx/issues/8006
- Fixed a bug related to AWX's handling of the auth_path argument for the HashiVault KeyValue credential plugin - https://github.com/ansible/awx/pull/7991
- Fixed a bug that broke support for Remote Archive SCM Type project syncs on platforms that utilize Python2 - https://github.com/ansible/awx/pull/8057
- Updated to the latest version of Django Rest Framework to address CVE-2020-25626
- Updated to the latest version of Django to address CVE-2020-24583 and CVE-2020-24584
- Updated to the latest version of channels_redis to address a bug that slowly causes Daphne processes to leak memory over time - https://github.com/django/channels_redis/issues/212

## 14.1.0 (Aug 25, 2020)
- AWX images can now be built on ARM64 - https://github.com/ansible/awx/pull/7607
- Added the Remote Archive SCM Type to support using immutable artifacts and releases (such as tarballs and zip files) as projects - https://github.com/ansible/awx/issues/7954
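The `!include`/`!import` entry above refers to custom YAML constructors in the AWX CLI. As a rough illustration of the mechanism only (the CLI's real implementation lives in awxkit and may differ), a PyYAML constructor can be registered like this:

```python
import yaml

# Hypothetical sketch of a custom `!include` constructor; not AWX's
# actual code, just the general PyYAML technique it relies on.
def include_constructor(loader, node):
    path = loader.construct_scalar(node)   # the filename after `!include`
    with open(path) as f:
        return yaml.safe_load(f)           # splice the included document in

yaml.SafeLoader.add_constructor('!include', include_constructor)

# data = yaml.safe_load('extra_vars: !include vars.yml')
```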
@@ -80,7 +80,7 @@ For Linux platforms, refer to the following from Docker:

If you're not using Docker for Mac, or Docker for Windows, you may need, or choose to, install the Docker compose Python module separately, in which case you'll need to run the following:

```bash
-(host)$ pip install docker-compose
+(host)$ pip3 install docker-compose
```

#### Frontend Development
@@ -16,6 +16,7 @@ register(
    help_text=_('Number of seconds that a user is inactive before they will need to login again.'),
    category=_('Authentication'),
    category_slug='authentication',
+    unit=_('seconds'),
)
register(
    'SESSIONS_PER_USER',
@@ -49,6 +50,7 @@ register(
                'in the number of seconds.'),
    category=_('Authentication'),
    category_slug='authentication',
+    unit=_('seconds'),
)
register(
    'ALLOW_OAUTH2_FOR_EXTERNAL_USERS',

@@ -39,7 +39,7 @@ class Metadata(metadata.SimpleMetadata):
            'min_length', 'max_length',
            'min_value', 'max_value',
            'category', 'category_slug',
-            'defined_in_file'
+            'defined_in_file', 'unit',
        ]

        for attr in text_attrs:
@@ -1269,6 +1269,7 @@ class OrganizationSerializer(BaseSerializer):
            object_roles = self.reverse('api:organization_object_roles_list', kwargs={'pk': obj.pk}),
            access_list = self.reverse('api:organization_access_list', kwargs={'pk': obj.pk}),
            instance_groups = self.reverse('api:organization_instance_groups_list', kwargs={'pk': obj.pk}),
+            galaxy_credentials = self.reverse('api:organization_galaxy_credentials_list', kwargs={'pk': obj.pk}),
        ))
        return res

@@ -2536,10 +2537,11 @@ class CredentialTypeSerializer(BaseSerializer):
class CredentialSerializer(BaseSerializer):
    show_capabilities = ['edit', 'delete', 'copy', 'use']
    capabilities_prefetch = ['admin', 'use']
+    managed_by_tower = serializers.ReadOnlyField()

    class Meta:
        model = Credential
-        fields = ('*', 'organization', 'credential_type', 'inputs', 'kind', 'cloud', 'kubernetes')
+        fields = ('*', 'organization', 'credential_type', 'managed_by_tower', 'inputs', 'kind', 'cloud', 'kubernetes')
        extra_kwargs = {
            'credential_type': {
                'label': _('Credential Type'),
@@ -2603,6 +2605,13 @@ class CredentialSerializer(BaseSerializer):

        return summary_dict

+    def validate(self, attrs):
+        if self.instance and self.instance.managed_by_tower:
+            raise PermissionDenied(
+                detail=_("Modifications not allowed for managed credentials")
+            )
+        return super(CredentialSerializer, self).validate(attrs)

    def get_validation_exclusions(self, obj=None):
        ret = super(CredentialSerializer, self).get_validation_exclusions(obj)
        for field in ('credential_type', 'inputs'):
@@ -2610,6 +2619,17 @@ class CredentialSerializer(BaseSerializer):
                ret.remove(field)
        return ret

+    def validate_organization(self, org):
+        if (
+            self.instance and
+            self.instance.credential_type.kind == 'galaxy' and
+            org is None
+        ):
+            raise serializers.ValidationError(_(
+                "Galaxy credentials must be owned by an Organization."
+            ))
+        return org

    def validate_credential_type(self, credential_type):
        if self.instance and credential_type.pk != self.instance.credential_type.pk:
            for related_objects in (
@@ -2674,6 +2694,15 @@ class CredentialSerializerCreate(CredentialSerializer):
        if attrs.get('team'):
            attrs['organization'] = attrs['team'].organization

+        if (
+            'credential_type' in attrs and
+            attrs['credential_type'].kind == 'galaxy' and
+            list(owner_fields) != ['organization']
+        ):
+            raise serializers.ValidationError({"organization": _(
+                "Galaxy credentials must be owned by an Organization."
+            )})

        return super(CredentialSerializerCreate, self).validate(attrs)

    def create(self, validated_data):
@@ -4128,7 +4157,10 @@ class JobLaunchSerializer(BaseSerializer):
        # verify that credentials (either provided or existing) don't
        # require launch-time passwords that have not been provided
        if 'credentials' in accepted:
-            launch_credentials = accepted['credentials']
+            launch_credentials = Credential.unique_dict(
+                list(template_credentials.all()) +
+                list(accepted['credentials'])
+            ).values()
        else:
            launch_credentials = template_credentials
        passwords = attrs.get('credential_passwords', {})  # get from original attrs
@@ -21,6 +21,7 @@ from awx.api.views import (
    OrganizationNotificationTemplatesSuccessList,
    OrganizationNotificationTemplatesApprovalList,
    OrganizationInstanceGroupsList,
+    OrganizationGalaxyCredentialsList,
    OrganizationObjectRolesList,
    OrganizationAccessList,
    OrganizationApplicationList,
@@ -49,6 +50,7 @@ urls = [
    url(r'^(?P<pk>[0-9]+)/notification_templates_approvals/$', OrganizationNotificationTemplatesApprovalList.as_view(),
        name='organization_notification_templates_approvals_list'),
    url(r'^(?P<pk>[0-9]+)/instance_groups/$', OrganizationInstanceGroupsList.as_view(), name='organization_instance_groups_list'),
+    url(r'^(?P<pk>[0-9]+)/galaxy_credentials/$', OrganizationGalaxyCredentialsList.as_view(), name='organization_galaxy_credentials_list'),
    url(r'^(?P<pk>[0-9]+)/object_roles/$', OrganizationObjectRolesList.as_view(), name='organization_object_roles_list'),
    url(r'^(?P<pk>[0-9]+)/access_list/$', OrganizationAccessList.as_view(), name='organization_access_list'),
    url(r'^(?P<pk>[0-9]+)/applications/$', OrganizationApplicationList.as_view(), name='organization_applications_list'),

@@ -124,6 +124,7 @@ from awx.api.views.organization import (  # noqa
    OrganizationNotificationTemplatesSuccessList,
    OrganizationNotificationTemplatesApprovalList,
    OrganizationInstanceGroupsList,
+    OrganizationGalaxyCredentialsList,
    OrganizationAccessList,
    OrganizationObjectRolesList,
)
@@ -1355,6 +1356,13 @@ class CredentialDetail(RetrieveUpdateDestroyAPIView):
    model = models.Credential
    serializer_class = serializers.CredentialSerializer

+    def destroy(self, request, *args, **kwargs):
+        instance = self.get_object()
+        if instance.managed_by_tower:
+            raise PermissionDenied(detail=_("Deletion not allowed for managed credentials"))
+        return super(CredentialDetail, self).destroy(request, *args, **kwargs)


class CredentialActivityStreamList(SubListAPIView):
@@ -22,7 +22,7 @@ from awx.api.generics import (
)


-logger = logging.getLogger('awx.main.analytics')
+logger = logging.getLogger('awx.analytics')


class MetricsView(APIView):
@@ -7,6 +7,7 @@ import logging
# Django
from django.db.models import Count
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _

# AWX
from awx.main.models import (
@@ -20,7 +21,8 @@ from awx.main.models import (
    Role,
    User,
    Team,
-    InstanceGroup
+    InstanceGroup,
+    Credential
)
from awx.api.generics import (
    ListCreateAPIView,
@@ -42,7 +44,8 @@ from awx.api.serializers import (
    RoleSerializer,
    NotificationTemplateSerializer,
    InstanceGroupSerializer,
-    ProjectSerializer, JobTemplateSerializer, WorkflowJobTemplateSerializer
+    ProjectSerializer, JobTemplateSerializer, WorkflowJobTemplateSerializer,
+    CredentialSerializer
)
from awx.api.views.mixin import (
    RelatedJobsPreventDeleteMixin,
@@ -214,6 +217,20 @@ class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
    relationship = 'instance_groups'


+class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
+
+    model = Credential
+    serializer_class = CredentialSerializer
+    parent_model = Organization
+    relationship = 'galaxy_credentials'
+
+    def is_valid_relation(self, parent, sub, created=False):
+        if sub.kind != 'galaxy_api_token':
+            return {'msg': _(
+                f"Credential must be a Galaxy credential, not {sub.credential_type.name}."
+            )}


class OrganizationAccessList(ResourceAccessList):

    model = User  # needs to be User for AccessLists's
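A hypothetical client call against the galaxy_credentials endpoint registered above; the host, auth, and credential ID are placeholders:

```python
import requests

# Associate an existing Galaxy token credential (hypothetical id 42)
# with organization 1; non-Galaxy credentials are rejected by the
# is_valid_relation() check above.
resp = requests.post(
    'https://awx.example.com/api/v2/organizations/1/galaxy_credentials/',
    auth=('admin', 'password'),
    json={'id': 42},
)
resp.raise_for_status()
```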
@@ -21,6 +21,7 @@ import requests

from awx.api.generics import APIView
from awx.conf.registry import settings_registry
+from awx.main.analytics import all_collectors
from awx.main.ha import is_ha_environment
from awx.main.utils import (
    get_awx_version,
@@ -252,6 +253,7 @@ class ApiV2ConfigView(APIView):
            ansible_version=get_ansible_version(),
            eula=render_to_string("eula.md") if license_data.get('license_type', 'UNLICENSED') != 'open' else '',
            analytics_status=pendo_state,
+            analytics_collectors=all_collectors(),
            become_methods=PRIVILEGE_ESCALATION_METHODS,
        )
@@ -129,12 +129,14 @@ class SettingsRegistry(object):
        placeholder = field_kwargs.pop('placeholder', empty)
        encrypted = bool(field_kwargs.pop('encrypted', False))
        defined_in_file = bool(field_kwargs.pop('defined_in_file', False))
+        unit = field_kwargs.pop('unit', None)
        if getattr(field_kwargs.get('child', None), 'source', None) is not None:
            field_kwargs['child'].source = None
        field_instance = field_class(**field_kwargs)
        field_instance.category_slug = category_slug
        field_instance.category = category
        field_instance.depends_on = depends_on
+        field_instance.unit = unit
        if placeholder is not empty:
            field_instance.placeholder = placeholder
        field_instance.defined_in_file = defined_in_file
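A hypothetical registration using the new `unit` kwarg plumbed through `SettingsRegistry` above; the kwarg is popped off before the field class is constructed and attached to the field instance so it can be surfaced in API metadata (the setting name here is invented):

```python
from django.utils.translation import ugettext_lazy as _
from awx.conf import register, fields

register(
    'EXAMPLE_TASK_TIMEOUT',          # hypothetical setting name
    field_class=fields.IntegerField,
    default=30,
    label=_('Example task timeout'),
    help_text=_('Maximum number of seconds to wait before giving up.'),
    category=_('Jobs'),
    category_slug='jobs',
    unit=_('seconds'),               # exposed via OPTIONS ('unit' is in text_attrs)
)
```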
@@ -17,6 +17,8 @@ from django.utils.functional import cached_property
# Django REST Framework
from rest_framework.fields import empty, SkipField

+import cachetools

# Tower
from awx.main.utils import encrypt_field, decrypt_field
from awx.conf import settings_registry
@@ -28,6 +30,8 @@ from awx.conf.migrations._reencrypt import decrypt_field as old_decrypt_field

logger = logging.getLogger('awx.conf.settings')

+SETTING_MEMORY_TTL = 5 if 'callback_receiver' in ' '.join(sys.argv) else 0

# Store a special value to indicate when a setting is not set in the database.
SETTING_CACHE_NOTSET = '___notset___'

@@ -406,6 +410,7 @@ class SettingsWrapper(UserSettingsHolder):
    def SETTINGS_MODULE(self):
        return self._get_default('SETTINGS_MODULE')

+    @cachetools.cached(cache=cachetools.TTLCache(maxsize=2048, ttl=SETTING_MEMORY_TTL))
    def __getattr__(self, name):
        value = empty
        if name in self.all_supported_settings:
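For context, `cachetools.cached` with a `TTLCache` memoizes the wrapped callable; with `SETTING_MEMORY_TTL` of 0 entries expire immediately, so the in-memory cache is effectively active only in the callback receiver process. A minimal, self-contained sketch of the pattern (names hypothetical):

```python
import time
import cachetools

# Memoize an expensive lookup for five seconds, mirroring the decorator
# applied to SettingsWrapper.__getattr__ above.
@cachetools.cached(cache=cachetools.TTLCache(maxsize=2048, ttl=5))
def get_setting(name):
    print('cache miss:', name)   # shows when the lookup actually runs
    return name.lower()          # stand-in for a database read

get_setting('AWX_FOO')   # miss
get_setting('AWX_FOO')   # hit: served from the TTL cache
time.sleep(5)
get_setting('AWX_FOO')   # miss again once the entry has expired
```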
File diff suppressed because it is too large (3 files)
@@ -1103,11 +1103,6 @@ class CredentialTypeAccess(BaseAccess):
    def can_use(self, obj):
        return True

-    def get_method_capability(self, method, obj, parent_obj):
-        if obj.managed_by_tower:
-            return False
-        return super(CredentialTypeAccess, self).get_method_capability(method, obj, parent_obj)

    def filtered_queryset(self):
        return self.model.objects.all()

@@ -1182,6 +1177,8 @@ class CredentialAccess(BaseAccess):
    def get_user_capabilities(self, obj, **kwargs):
        user_capabilities = super(CredentialAccess, self).get_user_capabilities(obj, **kwargs)
        user_capabilities['use'] = self.can_use(obj)
+        if getattr(obj, 'managed_by_tower', False) is True:
+            user_capabilities['edit'] = user_capabilities['delete'] = False
        return user_capabilities


@@ -2753,6 +2750,9 @@ class WorkflowApprovalTemplateAccess(BaseAccess):
        else:
            return (self.check_related('workflow_approval_template', UnifiedJobTemplate, role_field='admin_role'))

+    def can_change(self, obj, data):
+        return self.user.can_access(WorkflowJobTemplate, 'change', obj.workflow_job_template, data={})

    def can_start(self, obj, validate_license=False):
        # for copying WFJTs that contain approval nodes
        if self.user.is_superuser:
@@ -1 +1 @@
-from .core import register, gather, ship, table_version  # noqa
+from .core import all_collectors, expensive_collectors, register, gather, ship  # noqa

@@ -20,7 +20,7 @@ from django.conf import settings
BROADCAST_WEBSOCKET_REDIS_KEY_NAME = 'broadcast_websocket_stats'


-logger = logging.getLogger('awx.main.analytics.broadcast_websocket')
+logger = logging.getLogger('awx.analytics.broadcast_websocket')


def dt_to_seconds(dt):
@@ -1,3 +1,4 @@
+import io
import os
import os.path
import platform
@@ -6,13 +7,14 @@ from django.db import connection
from django.db.models import Count
from django.conf import settings
from django.utils.timezone import now
+from django.utils.translation import ugettext_lazy as _

from awx.conf.license import get_license
from awx.main.utils import (get_awx_version, get_ansible_version,
                            get_custom_venv_choices, camelcase_to_underscore)
from awx.main import models
from django.contrib.sessions.models import Session
-from awx.main.analytics import register, table_version
+from awx.main.analytics import register

'''
This module is used to define metrics collected by awx.main.analytics.gather()
@@ -31,8 +33,8 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
'''
-@register('config', '1.1')
-def config(since):
+@register('config', '1.1', description=_('General platform configuration.'))
+def config(since, **kwargs):
    license_info = get_license(show_key=False)
    install_type = 'traditional'
    if os.environ.get('container') == 'oci':
@@ -63,8 +65,8 @@ def config(since):
    }


-@register('counts', '1.0')
-def counts(since):
+@register('counts', '1.0', description=_('Counts of objects such as organizations, inventories, and projects'))
+def counts(since, **kwargs):
    counts = {}
    for cls in (models.Organization, models.Team, models.User,
                models.Inventory, models.Credential, models.Project,
@@ -98,8 +100,8 @@ def counts(since):
    return counts


-@register('org_counts', '1.0')
-def org_counts(since):
+@register('org_counts', '1.0', description=_('Counts of users and teams by organization'))
+def org_counts(since, **kwargs):
    counts = {}
    for org in models.Organization.objects.annotate(num_users=Count('member_role__members', distinct=True),
                                                    num_teams=Count('teams', distinct=True)).values('name', 'id', 'num_users', 'num_teams'):
@@ -110,8 +112,8 @@ def org_counts(since):
    return counts


-@register('cred_type_counts', '1.0')
-def cred_type_counts(since):
+@register('cred_type_counts', '1.0', description=_('Counts of credentials by credential type'))
+def cred_type_counts(since, **kwargs):
    counts = {}
    for cred_type in models.CredentialType.objects.annotate(num_credentials=Count(
            'credentials', distinct=True)).values('name', 'id', 'managed_by_tower', 'num_credentials'):
@@ -122,8 +124,8 @@ def cred_type_counts(since):
    return counts


-@register('inventory_counts', '1.2')
-def inventory_counts(since):
+@register('inventory_counts', '1.2', description=_('Inventories, their inventory sources, and host counts'))
+def inventory_counts(since, **kwargs):
    counts = {}
    for inv in models.Inventory.objects.filter(kind='').annotate(num_sources=Count('inventory_sources', distinct=True),
                                                                 num_hosts=Count('hosts', distinct=True)).only('id', 'name', 'kind'):
@@ -147,8 +149,8 @@ def inventory_counts(since):
    return counts


-@register('projects_by_scm_type', '1.0')
-def projects_by_scm_type(since):
+@register('projects_by_scm_type', '1.0', description=_('Counts of projects by source control type'))
+def projects_by_scm_type(since, **kwargs):
    counts = dict(
        (t[0] or 'manual', 0)
        for t in models.Project.SCM_TYPE_CHOICES
@@ -166,8 +168,8 @@ def _get_isolated_datetime(last_check):
    return last_check


-@register('instance_info', '1.0')
-def instance_info(since, include_hostnames=False):
+@register('instance_info', '1.0', description=_('Cluster topology and capacity'))
+def instance_info(since, include_hostnames=False, **kwargs):
    info = {}
    instances = models.Instance.objects.values_list('hostname').values(
        'uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'hostname', 'last_isolated_check', 'enabled')
@@ -192,8 +194,8 @@ def instance_info(since, include_hostnames=False):
    return info


-@register('job_counts', '1.0')
-def job_counts(since):
+@register('job_counts', '1.0', description=_('Counts of jobs by status'))
+def job_counts(since, **kwargs):
    counts = {}
    counts['total_jobs'] = models.UnifiedJob.objects.exclude(launch_type='sync').count()
    counts['status'] = dict(models.UnifiedJob.objects.exclude(launch_type='sync').values_list('status').annotate(Count('status')).order_by())
@@ -202,8 +204,8 @@ def job_counts(since):
    return counts


-@register('job_instance_counts', '1.0')
-def job_instance_counts(since):
+@register('job_instance_counts', '1.0', description=_('Counts of jobs by execution node'))
+def job_instance_counts(since, **kwargs):
    counts = {}
    job_types = models.UnifiedJob.objects.exclude(launch_type='sync').values_list(
        'execution_node', 'launch_type').annotate(job_launch_type=Count('launch_type')).order_by()
@@ -217,30 +219,71 @@ def job_instance_counts(since):
    return counts


-@register('query_info', '1.0')
-def query_info(since, collection_type):
+@register('query_info', '1.0', description=_('Metadata about the analytics collected'))
+def query_info(since, collection_type, until, **kwargs):
    query_info = {}
    query_info['last_run'] = str(since)
-    query_info['current_time'] = str(now())
+    query_info['current_time'] = str(until)
    query_info['collection_type'] = collection_type
    return query_info
-# Copies Job Events from db to a .csv to be shipped
-@table_version('events_table.csv', '1.1')
-@table_version('unified_jobs_table.csv', '1.1')
-@table_version('unified_job_template_table.csv', '1.0')
-@table_version('workflow_job_node_table.csv', '1.0')
-@table_version('workflow_job_template_node_table.csv', '1.0')
-def copy_tables(since, full_path, subset=None):
-    def _copy_table(table, query, path):
-        file_path = os.path.join(path, table + '_table.csv')
-        file = open(file_path, 'w', encoding='utf-8')
-        with connection.cursor() as cursor:
-            cursor.copy_expert(query, file)
-        file.close()
-        return file_path
+'''
+The event table can be *very* large, and we have a 100MB upload limit.
+
+Split large table dumps at dump time into a series of files.
+'''
+MAX_TABLE_SIZE = 200 * 1048576
+
+
+class FileSplitter(io.StringIO):
+    def __init__(self, filespec=None, *args, **kwargs):
+        self.filespec = filespec
+        self.files = []
+        self.currentfile = None
+        self.header = None
+        self.counter = 0
+        self.cycle_file()
+
+    def cycle_file(self):
+        if self.currentfile:
+            self.currentfile.close()
+        self.counter = 0
+        fname = '{}_split{}'.format(self.filespec, len(self.files))
+        self.currentfile = open(fname, 'w', encoding='utf-8')
+        self.files.append(fname)
+        if self.header:
+            self.currentfile.write('{}\n'.format(self.header))
+
+    def file_list(self):
+        self.currentfile.close()
+        # Check for an empty dump
+        if len(self.header) + 1 == self.counter:
+            os.remove(self.files[-1])
+            self.files = self.files[:-1]
+        # If we only have one file, remove the suffix
+        if len(self.files) == 1:
+            os.rename(self.files[0], self.files[0].replace('_split0', ''))
+        return self.files
+
+    def write(self, s):
+        if not self.header:
+            self.header = s[0:s.index('\n')]
+        self.counter += self.currentfile.write(s)
+        if self.counter >= MAX_TABLE_SIZE:
+            self.cycle_file()
+
+
+def _copy_table(table, query, path):
+    file_path = os.path.join(path, table + '_table.csv')
+    file = FileSplitter(filespec=file_path)
+    with connection.cursor() as cursor:
+        cursor.copy_expert(query, file)
+    return file.file_list()
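A hypothetical usage of the `FileSplitter` defined above; the path and rows are made up. The first `write()` captures the CSV header so it can be replayed at the top of every chunk file:

```python
splitter = FileSplitter(filespec='/tmp/events_table.csv')
splitter.write('id,created,uuid\n')            # header line is remembered
for i in range(3):
    splitter.write('{},2020-09-30,uuid-{}\n'.format(i, i))
files = splitter.file_list()
# Under MAX_TABLE_SIZE this yields a single file, renamed back to
# '/tmp/events_table.csv' because the '_split0' suffix is stripped.
print(files)
```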
+@register('events_table', '1.1', format='csv', description=_('Automation task records'), expensive=True)
+def events_table(since, full_path, until, **kwargs):
    events_query = '''COPY (SELECT main_jobevent.id,
                            main_jobevent.created,
                            main_jobevent.uuid,
@@ -262,18 +305,21 @@ def copy_tables(since, full_path, subset=None):
                            main_jobevent.event_data::json->'res'->'warnings' AS warnings,
                            main_jobevent.event_data::json->'res'->'deprecations' AS deprecations
                            FROM main_jobevent
-                            WHERE main_jobevent.created > {}
-                            ORDER BY main_jobevent.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
-    if not subset or 'events' in subset:
-        _copy_table(table='events', query=events_query, path=full_path)
+                            WHERE (main_jobevent.created > '{}' AND main_jobevent.created <= '{}')
+                            ORDER BY main_jobevent.id ASC) TO STDOUT WITH CSV HEADER
+                            '''.format(since.isoformat(), until.isoformat())
+    return _copy_table(table='events', query=events_query, path=full_path)


+@register('unified_jobs_table', '1.1', format='csv', description=_('Data on jobs run'), expensive=True)
+def unified_jobs_table(since, full_path, until, **kwargs):
    unified_job_query = '''COPY (SELECT main_unifiedjob.id,
                            main_unifiedjob.polymorphic_ctype_id,
                            django_content_type.model,
+                            main_unifiedjob.organization_id,
+                            main_organization.name as organization_name,
                            main_job.inventory_id,
-                            main_inventory.name,
+                            main_inventory.name as inventory_name,
                            main_unifiedjob.created,
                            main_unifiedjob.name,
                            main_unifiedjob.unified_job_template_id,
@@ -294,12 +340,16 @@ def copy_tables(since, full_path, subset=None):
                            LEFT JOIN main_job ON main_unifiedjob.id = main_job.unifiedjob_ptr_id
                            LEFT JOIN main_inventory ON main_job.inventory_id = main_inventory.id
+                            LEFT JOIN main_organization ON main_organization.id = main_unifiedjob.organization_id
-                            WHERE (main_unifiedjob.created > {0} OR main_unifiedjob.finished > {0})
+                            WHERE ((main_unifiedjob.created > '{0}' AND main_unifiedjob.created <= '{1}')
+                                OR (main_unifiedjob.finished > '{0}' AND main_unifiedjob.finished <= '{1}'))
                            AND main_unifiedjob.launch_type != 'sync'
-                            ORDER BY main_unifiedjob.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
-    if not subset or 'unified_jobs' in subset:
-        _copy_table(table='unified_jobs', query=unified_job_query, path=full_path)
+                            ORDER BY main_unifiedjob.id ASC) TO STDOUT WITH CSV HEADER
+                            '''.format(since.isoformat(), until.isoformat())
+    return _copy_table(table='unified_jobs', query=unified_job_query, path=full_path)


+@register('unified_job_template_table', '1.0', format='csv', description=_('Data on job templates'))
+def unified_job_template_table(since, full_path, **kwargs):
    unified_job_template_query = '''COPY (SELECT main_unifiedjobtemplate.id,
                            main_unifiedjobtemplate.polymorphic_ctype_id,
                            django_content_type.model,
@@ -318,9 +368,11 @@ def copy_tables(since, full_path, subset=None):
                            FROM main_unifiedjobtemplate, django_content_type
                            WHERE main_unifiedjobtemplate.polymorphic_ctype_id = django_content_type.id
                            ORDER BY main_unifiedjobtemplate.id ASC) TO STDOUT WITH CSV HEADER'''
-    if not subset or 'unified_job_template' in subset:
-        _copy_table(table='unified_job_template', query=unified_job_template_query, path=full_path)
+    return _copy_table(table='unified_job_template', query=unified_job_template_query, path=full_path)


+@register('workflow_job_node_table', '1.0', format='csv', description=_('Data on workflow runs'), expensive=True)
+def workflow_job_node_table(since, full_path, until, **kwargs):
    workflow_job_node_query = '''COPY (SELECT main_workflowjobnode.id,
                            main_workflowjobnode.created,
                            main_workflowjobnode.modified,
@@ -349,11 +401,14 @@ def copy_tables(since, full_path, subset=None):
                            FROM main_workflowjobnode_always_nodes
                            GROUP BY from_workflowjobnode_id
                            ) always_nodes ON main_workflowjobnode.id = always_nodes.from_workflowjobnode_id
-                            WHERE main_workflowjobnode.modified > {}
-                            ORDER BY main_workflowjobnode.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
-    if not subset or 'workflow_job_node' in subset:
-        _copy_table(table='workflow_job_node', query=workflow_job_node_query, path=full_path)
+                            WHERE (main_workflowjobnode.modified > '{}' AND main_workflowjobnode.modified <= '{}')
+                            ORDER BY main_workflowjobnode.id ASC) TO STDOUT WITH CSV HEADER
+                            '''.format(since.isoformat(), until.isoformat())
+    return _copy_table(table='workflow_job_node', query=workflow_job_node_query, path=full_path)


+@register('workflow_job_template_node_table', '1.0', format='csv', description=_('Data on workflows'))
+def workflow_job_template_node_table(since, full_path, **kwargs):
    workflow_job_template_node_query = '''COPY (SELECT main_workflowjobtemplatenode.id,
                            main_workflowjobtemplatenode.created,
                            main_workflowjobtemplatenode.modified,
@@ -381,7 +436,4 @@ def copy_tables(since, full_path, subset=None):
                            GROUP BY from_workflowjobtemplatenode_id
                            ) always_nodes ON main_workflowjobtemplatenode.id = always_nodes.from_workflowjobtemplatenode_id
                            ORDER BY main_workflowjobtemplatenode.id ASC) TO STDOUT WITH CSV HEADER'''
-    if not subset or 'workflow_job_template_node' in subset:
-        _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path)
-
-    return
+    return _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path)
@@ -14,17 +14,13 @@ from rest_framework.exceptions import PermissionDenied
from awx.conf.license import get_license
from awx.main.models import Job
from awx.main.access import access_registry
-from awx.main.models.ha import TowerAnalyticsState
from awx.main.utils import get_awx_http_client_headers, set_environ


-__all__ = ['register', 'gather', 'ship', 'table_version']
+__all__ = ['register', 'gather', 'ship']


logger = logging.getLogger('awx.main.analytics')

-manifest = dict()


def _valid_license():
    try:
@@ -37,11 +33,38 @@ def _valid_license():
    return True
-def register(key, version):
+def all_collectors():
+    from awx.main.analytics import collectors
+
+    collector_dict = {}
+    module = collectors
+    for name, func in inspect.getmembers(module):
+        if inspect.isfunction(func) and hasattr(func, '__awx_analytics_key__'):
+            key = func.__awx_analytics_key__
+            desc = func.__awx_analytics_description__ or ''
+            version = func.__awx_analytics_version__
+            collector_dict[key] = {'name': key, 'version': version, 'description': desc}
+    return collector_dict
+
+
+def expensive_collectors():
+    from awx.main.analytics import collectors
+
+    ret = []
+    module = collectors
+    for name, func in inspect.getmembers(module):
+        if inspect.isfunction(func) and hasattr(func, '__awx_analytics_key__') and func.__awx_expensive__:
+            ret.append(func.__awx_analytics_key__)
+    return ret
+
+
+def register(key, version, description=None, format='json', expensive=False):
    """
    A decorator used to register a function as a metric collector.

-    Decorated functions should return JSON-serializable objects.
+    Decorated functions should do the following based on format:
+    - json: return JSON-serializable objects.
+    - csv: write CSV data to a filename named 'key'

    @register('projects_by_scm_type', 1)
    def projects_by_scm_type():
@@ -51,100 +74,153 @@ def register(key, version):
    def decorate(f):
        f.__awx_analytics_key__ = key
        f.__awx_analytics_version__ = version
+        f.__awx_analytics_description__ = description
+        f.__awx_analytics_type__ = format
+        f.__awx_expensive__ = expensive
        return f

    return decorate
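A hypothetical collector written against the extended `register()` signature above; the keys, versions, and bodies are invented for illustration:

```python
@register('example_counts', '1.0', description='Example JSON collector')
def example_counts(since, **kwargs):
    # json collectors return a JSON-serializable object
    return {'example': 1}


@register('example_table', '1.0', format='csv',
          description='Example CSV collector', expensive=True)
def example_table(since, full_path, until, **kwargs):
    # csv collectors write file(s) under full_path and return their paths;
    # expensive=True lets callers skip them via expensive_collectors()
    return []
```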
-def table_version(file_name, version):
-
-    global manifest
-    manifest[file_name] = version
-
-    def decorate(f):
-        return f
-
-    return decorate
-def gather(dest=None, module=None, collection_type='scheduled'):
+def gather(dest=None, module=None, subset=None, since=None, until=now(), collection_type='scheduled'):
    """
    Gather all defined metrics and write them as JSON files in a .tgz

    :param dest: the (optional) absolute path to write a compressed tarball
-    :pararm module: the module to search for registered analytic collector
+    :param module: the module to search for registered analytic collector
                    functions; defaults to awx.main.analytics.collectors
    """
    def _write_manifest(destdir, manifest):
        path = os.path.join(destdir, 'manifest.json')
        with open(path, 'w', encoding='utf-8') as f:
            try:
                json.dump(manifest, f)
            except Exception:
                f.close()
                os.remove(f.name)
                logger.exception("Could not generate manifest.json")

-    run_now = now()
-    state = TowerAnalyticsState.get_solo()
-    last_run = state.last_run
-    logger.debug("Last analytics run was: {}".format(last_run))
+    last_run = since or settings.AUTOMATION_ANALYTICS_LAST_GATHER or (now() - timedelta(weeks=4))
+    logger.debug("Last analytics run was: {}".format(settings.AUTOMATION_ANALYTICS_LAST_GATHER))

    max_interval = now() - timedelta(weeks=4)
    if last_run < max_interval or not last_run:
        last_run = max_interval

    if _valid_license() is False:
        logger.exception("Invalid License provided, or No License Provided")
-        return "Error: Invalid License provided, or No License Provided"
+        return None

    if collection_type != 'dry-run' and not settings.INSIGHTS_TRACKING_STATE:
        logger.error("Automation Analytics not enabled. Use --dry-run to gather locally without sending.")
-        return
+        return None

-    if module is None:
+    collector_list = []
+    if module:
+        collector_module = module
+    else:
        from awx.main.analytics import collectors
-        module = collectors
+        collector_module = collectors
+    for name, func in inspect.getmembers(collector_module):
+        if (
+            inspect.isfunction(func) and
+            hasattr(func, '__awx_analytics_key__') and
+            (not subset or name in subset)
+        ):
+            collector_list.append((name, func))

+    manifest = dict()
    dest = dest or tempfile.mkdtemp(prefix='awx_analytics')
-    for name, func in inspect.getmembers(module):
-        if inspect.isfunction(func) and hasattr(func, '__awx_analytics_key__'):
+    gather_dir = os.path.join(dest, 'stage')
+    os.mkdir(gather_dir, 0o700)
+    num_splits = 1
+    for name, func in collector_list:
+        if func.__awx_analytics_type__ == 'json':
            key = func.__awx_analytics_key__
-            manifest['{}.json'.format(key)] = func.__awx_analytics_version__
-            path = '{}.json'.format(os.path.join(dest, key))
+            path = '{}.json'.format(os.path.join(gather_dir, key))
            with open(path, 'w', encoding='utf-8') as f:
                try:
-                    if func.__name__ == 'query_info':
-                        json.dump(func(last_run, collection_type=collection_type), f)
-                    else:
-                        json.dump(func(last_run), f)
+                    json.dump(func(last_run, collection_type=collection_type, until=until), f)
+                    manifest['{}.json'.format(key)] = func.__awx_analytics_version__
                except Exception:
                    logger.exception("Could not generate metric {}.json".format(key))
                    f.close()
                    os.remove(f.name)
-
-            path = os.path.join(dest, 'manifest.json')
-            with open(path, 'w', encoding='utf-8') as f:
-                try:
-                    json.dump(manifest, f)
-                except Exception:
-                    logger.exception("Could not generate manifest.json")
-                    f.close()
-                    os.remove(f.name)
+        elif func.__awx_analytics_type__ == 'csv':
+            key = func.__awx_analytics_key__
+            try:
+                files = func(last_run, full_path=gather_dir, until=until)
+                if files:
+                    manifest['{}.csv'.format(key)] = func.__awx_analytics_version__
+                if len(files) > num_splits:
+                    num_splits = len(files)
+            except Exception:
+                logger.exception("Could not generate metric {}.csv".format(key))

-    try:
-        collectors.copy_tables(since=last_run, full_path=dest)
-    except Exception:
-        logger.exception("Could not copy tables")
-
-    # can't use isoformat() since it has colons, which GNU tar doesn't like
-    tarname = '_'.join([
-        settings.SYSTEM_UUID,
-        run_now.strftime('%Y-%m-%d-%H%M%S%z')
-    ])
-    try:
-        tgz = shutil.make_archive(
-            os.path.join(os.path.dirname(dest), tarname),
-            'gztar',
-            dest
-        )
-        return tgz
-    except Exception:
-        logger.exception("Failed to write analytics archive file")
-    finally:
-        shutil.rmtree(dest)
+    if not manifest:
+        # No data was collected
+        logger.warning("No data from {} to {}".format(last_run, until))
+        shutil.rmtree(dest)
+        return None

+    # Always include config.json if we're using our collectors
+    if 'config.json' not in manifest.keys() and not module:
+        from awx.main.analytics import collectors
+        config = collectors.config
+        path = '{}.json'.format(os.path.join(gather_dir, config.__awx_analytics_key__))
+        with open(path, 'w', encoding='utf-8') as f:
+            try:
+                json.dump(collectors.config(last_run), f)
+                manifest['config.json'] = config.__awx_analytics_version__
+            except Exception:
+                logger.exception("Could not generate metric {}.json".format(key))
+                f.close()
+                os.remove(f.name)
+                shutil.rmtree(dest)
+                return None

+    stage_dirs = [gather_dir]
+    if num_splits > 1:
+        for i in range(0, num_splits):
+            split_path = os.path.join(dest, 'split{}'.format(i))
+            os.mkdir(split_path, 0o700)
+            filtered_manifest = {}
+            shutil.copy(os.path.join(gather_dir, 'config.json'), split_path)
+            filtered_manifest['config.json'] = manifest['config.json']
+            suffix = '_split{}'.format(i)
+            for file in os.listdir(gather_dir):
+                if file.endswith(suffix):
+                    old_file = os.path.join(gather_dir, file)
+                    new_filename = file.replace(suffix, '')
+                    new_file = os.path.join(split_path, new_filename)
+                    shutil.move(old_file, new_file)
+                    filtered_manifest[new_filename] = manifest[new_filename]
+            _write_manifest(split_path, filtered_manifest)
+            stage_dirs.append(split_path)

+    for item in list(manifest.keys()):
+        if not os.path.exists(os.path.join(gather_dir, item)):
+            manifest.pop(item)
+    _write_manifest(gather_dir, manifest)

+    tarfiles = []
+    try:
+        for i in range(0, len(stage_dirs)):
+            stage_dir = stage_dirs[i]
+            # can't use isoformat() since it has colons, which GNU tar doesn't like
+            tarname = '_'.join([
+                settings.SYSTEM_UUID,
+                until.strftime('%Y-%m-%d-%H%M%S%z'),
+                str(i)
+            ])
+            tgz = shutil.make_archive(
+                os.path.join(os.path.dirname(dest), tarname),
+                'gztar',
+                stage_dir
+            )
+            tarfiles.append(tgz)
+    except Exception:
+        shutil.rmtree(stage_dir, ignore_errors=True)
+        logger.exception("Failed to write analytics archive file")
+    finally:
+        shutil.rmtree(dest, ignore_errors=True)
+    return tarfiles


def ship(path):
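A hypothetical invocation of the reworked `gather()`; the subset names and date are placeholders. A dry run collects locally without requiring Insights tracking to be enabled:

```python
from datetime import datetime, timezone
from awx.main.analytics import gather

tarballs = gather(
    subset=['config', 'counts'],                       # only these collectors
    since=datetime(2020, 9, 1, tzinfo=timezone.utc),   # overrides the last-gather setting
    collection_type='dry-run',                         # skip the INSIGHTS_TRACKING_STATE check
)
print(tarballs)   # a list of .tar.gz paths, one per split
```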
@@ -154,6 +230,9 @@ def ship(path):
    if not path:
        logger.error('Automation Analytics TAR not found')
        return
+    if not os.path.exists(path):
+        logger.error('Automation Analytics TAR {} not found'.format(path))
+        return
    if "Error:" in str(path):
        return
    try:
@@ -184,10 +263,7 @@ def ship(path):
        if response.status_code >= 300:
            return logger.exception('Upload failed with status {}, {}'.format(response.status_code,
                                                                              response.text))
-        run_now = now()
-        state = TowerAnalyticsState.get_solo()
-        state.last_run = run_now
-        state.save()
    finally:
        # cleanup tar.gz
-        os.remove(path)
+        if os.path.exists(path):
+            os.remove(path)
awx/main/conf.py (179 changed lines)
@@ -2,7 +2,6 @@
import json
import logging
import os
-from distutils.version import LooseVersion as Version

# Django
from django.utils.translation import ugettext_lazy as _
@@ -149,7 +148,7 @@ register(
    default='https://example.com',
    schemes=('http', 'https'),
    allow_plain_hostname=True,  # Allow hostname only without TLD.
-    label=_('Automation Analytics upload URL.'),
+    label=_('Automation Analytics upload URL'),
    help_text=_('This setting is used to to configure data collection for the Automation Analytics dashboard'),
    category=_('System'),
    category_slug='system',
@@ -254,6 +253,7 @@ register(
    help_text=_('The number of seconds to sleep between status checks for jobs running on isolated instances.'),
    category=_('Jobs'),
    category_slug='jobs',
+    unit=_('seconds'),
)

register(
@@ -265,6 +265,7 @@ register(
                'This includes the time needed to copy source control files (playbooks) to the isolated instance.'),
    category=_('Jobs'),
    category_slug='jobs',
+    unit=_('seconds'),
)

register(
@@ -277,6 +278,7 @@ register(
                'Value should be substantially greater than expected network latency.'),
    category=_('Jobs'),
    category_slug='jobs',
+    unit=_('seconds'),
)

register(
@@ -436,93 +438,12 @@ register(
    category_slug='jobs',
)
-register(
-    'PRIMARY_GALAXY_URL',
-    field_class=fields.URLField,
-    required=False,
-    allow_blank=True,
-    label=_('Primary Galaxy Server URL'),
-    help_text=_(
-        'For organizations that run their own Galaxy service, this gives the option to specify a '
-        'host as the primary galaxy server. Requirements will be downloaded from the primary if the '
-        'specific role or collection is available there. If the content is not avilable in the primary, '
-        'or if this field is left blank, it will default to galaxy.ansible.com.'
-    ),
-    category=_('Jobs'),
-    category_slug='jobs'
-)
-
-register(
-    'PRIMARY_GALAXY_USERNAME',
-    field_class=fields.CharField,
-    required=False,
-    allow_blank=True,
-    label=_('Primary Galaxy Server Username'),
-    help_text=_('(This setting is deprecated and will be removed in a future release) '
-                'For using a galaxy server at higher precedence than the public Ansible Galaxy. '
-                'The username to use for basic authentication against the Galaxy instance, '
-                'this is mutually exclusive with PRIMARY_GALAXY_TOKEN.'),
-    category=_('Jobs'),
-    category_slug='jobs'
-)
-
-register(
-    'PRIMARY_GALAXY_PASSWORD',
-    field_class=fields.CharField,
-    encrypted=True,
-    required=False,
-    allow_blank=True,
-    label=_('Primary Galaxy Server Password'),
-    help_text=_('(This setting is deprecated and will be removed in a future release) '
-                'For using a galaxy server at higher precedence than the public Ansible Galaxy. '
-                'The password to use for basic authentication against the Galaxy instance, '
-                'this is mutually exclusive with PRIMARY_GALAXY_TOKEN.'),
-    category=_('Jobs'),
-    category_slug='jobs'
-)
-
-register(
-    'PRIMARY_GALAXY_TOKEN',
-    field_class=fields.CharField,
-    encrypted=True,
-    required=False,
-    allow_blank=True,
-    label=_('Primary Galaxy Server Token'),
-    help_text=_('For using a galaxy server at higher precedence than the public Ansible Galaxy. '
-                'The token to use for connecting with the Galaxy instance, '
-                'this is mutually exclusive with corresponding username and password settings.'),
-    category=_('Jobs'),
-    category_slug='jobs'
-)
-
-register(
-    'PRIMARY_GALAXY_AUTH_URL',
-    field_class=fields.CharField,
-    required=False,
-    allow_blank=True,
-    label=_('Primary Galaxy Authentication URL'),
-    help_text=_('For using a galaxy server at higher precedence than the public Ansible Galaxy. '
-                'The token_endpoint of a Keycloak server.'),
-    category=_('Jobs'),
-    category_slug='jobs'
-)
-
-register(
-    'PUBLIC_GALAXY_ENABLED',
-    field_class=fields.BooleanField,
-    default=True,
-    label=_('Allow Access to Public Galaxy'),
-    help_text=_('Allow or deny access to the public Ansible Galaxy during project updates.'),
-    category=_('Jobs'),
-    category_slug='jobs'
-)
-
register(
    'GALAXY_IGNORE_CERTS',
    field_class=fields.BooleanField,
    default=False,
    label=_('Ignore Ansible Galaxy SSL Certificate Verification'),
-    help_text=_('If set to true, certificate validation will not be done when'
+    help_text=_('If set to true, certificate validation will not be done when '
                'installing content from any Galaxy server.'),
    category=_('Jobs'),
    category_slug='jobs'
@@ -579,6 +500,7 @@ register(
                'timeout should be imposed. A timeout set on an individual job template will override this.'),
    category=_('Jobs'),
    category_slug='jobs',
+    unit=_('seconds'),
)

register(
@@ -591,6 +513,7 @@ register(
                'timeout should be imposed. A timeout set on an individual inventory source will override this.'),
    category=_('Jobs'),
    category_slug='jobs',
+    unit=_('seconds'),
)

register(
@@ -603,6 +526,7 @@ register(
                'timeout should be imposed. A timeout set on an individual project will override this.'),
    category=_('Jobs'),
    category_slug='jobs',
+    unit=_('seconds'),
)

register(
@@ -617,6 +541,7 @@ register(
                'Use a value of 0 to indicate that no timeout should be imposed.'),
    category=_('Jobs'),
    category_slug='jobs',
+    unit=_('seconds'),
)

register(
@@ -624,7 +549,7 @@ register(
    field_class=fields.IntegerField,
    allow_null=False,
    default=200,
-    label=_('Maximum number of forks per job.'),
+    label=_('Maximum number of forks per job'),
    help_text=_('Saving a Job Template with more than this number of forks will result in an error. '
                'When set to 0, no limit is applied.'),
    category=_('Jobs'),
@@ -754,6 +679,7 @@ register(
                'aggregator protocols.'),
    category=_('Logging'),
    category_slug='logging',
+    unit=_('seconds'),
)
register(
    'LOG_AGGREGATOR_VERIFY_CERT',
@@ -834,7 +760,8 @@ register(
    default=14400,  # every 4 hours
    min_value=1800,  # every 30 minutes
    category=_('System'),
-    category_slug='system'
+    category_slug='system',
+    unit=_('seconds'),
)


@@ -856,84 +783,4 @@ def logging_validate(serializer, attrs):
    return attrs


-def galaxy_validate(serializer, attrs):
-    """Ansible Galaxy config options have mutual exclusivity rules, these rules
-    are enforced here on serializer validation so that users will not be able
-    to save settings which obviously break all project updates.
-    """
-    prefix = 'PRIMARY_GALAXY_'
-    errors = {}
-
-    def _new_value(setting_name):
-        if setting_name in attrs:
-            return attrs[setting_name]
-        elif not serializer.instance:
-            return ''
-        return getattr(serializer.instance, setting_name, '')
-
-    if not _new_value('PRIMARY_GALAXY_URL'):
-        if _new_value('PUBLIC_GALAXY_ENABLED') is False:
-            msg = _('A URL for Primary Galaxy must be defined before disabling public Galaxy.')
-            # put error in both keys because UI has trouble with errors in toggles
-            for key in ('PRIMARY_GALAXY_URL', 'PUBLIC_GALAXY_ENABLED'):
-                errors.setdefault(key, [])
-                errors[key].append(msg)
-            raise serializers.ValidationError(errors)
-
-    from awx.main.constants import GALAXY_SERVER_FIELDS
-    if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
-        return attrs
-
-    galaxy_data = {}
-    for subfield in GALAXY_SERVER_FIELDS:
-        galaxy_data[subfield] = _new_value('{}{}'.format(prefix, subfield.upper()))
-    if not galaxy_data['url']:
-        for k, v in galaxy_data.items():
-            if v:
-                setting_name = '{}{}'.format(prefix, k.upper())
-                errors.setdefault(setting_name, [])
-                errors[setting_name].append(_(
-                    'Cannot provide field if PRIMARY_GALAXY_URL is not set.'
-                ))
-    for k in GALAXY_SERVER_FIELDS:
-        if galaxy_data[k]:
-            setting_name = '{}{}'.format(prefix, k.upper())
-            if (not serializer.instance) or (not getattr(serializer.instance, setting_name, '')):
-                # new auth is applied, so check if compatible with version
-                from awx.main.utils import get_ansible_version
-                current_version = get_ansible_version()
-                min_version = '2.9'
-                if Version(current_version) < Version(min_version):
-                    errors.setdefault(setting_name, [])
-                    errors[setting_name].append(_(
-                        'Galaxy server settings are not available until Ansible {min_version}, '
-                        'you are running {current_version}.'
-                    ).format(min_version=min_version, current_version=current_version))
-    if (galaxy_data['password'] or galaxy_data['username']) and (galaxy_data['token'] or galaxy_data['auth_url']):
-        for k in ('password', 'username', 'token', 'auth_url'):
-            setting_name = '{}{}'.format(prefix, k.upper())
-            if setting_name in attrs:
-                errors.setdefault(setting_name, [])
-                errors[setting_name].append(_(
-                    'Setting Galaxy token and authentication URL is mutually exclusive with username and password.'
-                ))
-    if bool(galaxy_data['username']) != bool(galaxy_data['password']):
-        msg = _('If authenticating via username and password, both must be provided.')
-        for k in ('username', 'password'):
-            setting_name = '{}{}'.format(prefix, k.upper())
-            errors.setdefault(setting_name, [])
-            errors[setting_name].append(msg)
-    if bool(galaxy_data['token']) != bool(galaxy_data['auth_url']):
-        msg = _('If authenticating via token, both token and authentication URL must be provided.')
-        for k in ('token', 'auth_url'):
-            setting_name = '{}{}'.format(prefix, k.upper())
-            errors.setdefault(setting_name, [])
-            errors[setting_name].append(msg)
-
-    if errors:
-        raise serializers.ValidationError(errors)
-    return attrs
-
-
register_validate('logging', logging_validate)
-register_validate('jobs', galaxy_validate)

@@ -50,7 +50,3 @@ LOGGER_BLOCKLIST = (
    # loggers that may be called getting logging settings
    'awx.conf'
)

-# these correspond to both AWX and Ansible settings to keep naming consistent
-# for instance, settings.PRIMARY_GALAXY_AUTH_URL vs env var ANSIBLE_GALAXY_SERVER_FOO_AUTH_URL
-GALAXY_SERVER_FIELDS = ('url', 'username', 'password', 'token', 'auth_url')
@@ -1,5 +1,3 @@
-import collections
-import functools
import json
import logging
import time
@@ -14,40 +12,12 @@ from django.contrib.auth.models import User
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from channels.layers import get_channel_layer
from channels.db import database_sync_to_async
-from channels_redis.core import RedisChannelLayer


logger = logging.getLogger('awx.main.consumers')
XRF_KEY = '_auth_user_xrf'


-class BoundedQueue(asyncio.Queue):
-
-    def put_nowait(self, item):
-        if self.full():
-            # dispose the oldest item
-            # if we actually get into this code block, it likely means that
-            # this specific consumer has stopped reading
-            # unfortunately, channels_redis will just happily continue to
-            # queue messages specific to their channel until the heat death
-            # of the sun: https://github.com/django/channels_redis/issues/212
-            # this isn't a huge deal for browser clients that disconnect,
-            # but it *does* cause a problem for our global broadcast topic
-            # that's used to broadcast messages to peers in a cluster
-            # if we get into this code block, it's better to drop messages
-            # than to continue to malloc() forever
-            self.get_nowait()
-        return super(BoundedQueue, self).put_nowait(item)
-
-
-class ExpiringRedisChannelLayer(RedisChannelLayer):
-    def __init__(self, *args, **kw):
-        super(ExpiringRedisChannelLayer, self).__init__(*args, **kw)
-        self.receive_buffer = collections.defaultdict(
-            functools.partial(BoundedQueue, self.capacity)
-        )
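The removed workaround above bounded each channel's receive buffer by discarding the oldest message instead of growing without limit; it became unnecessary once channels_redis fixed the leak upstream. A minimal standalone sketch of that drop-oldest behavior (class name hypothetical):

```python
import asyncio

class DropOldestQueue(asyncio.Queue):
    # same idea as the removed BoundedQueue: never block, never grow
    def put_nowait(self, item):
        if self.full():
            self.get_nowait()          # evict the oldest queued item
        return super().put_nowait(item)

q = DropOldestQueue(2)
for i in range(5):
    q.put_nowait(i)
print(q.get_nowait(), q.get_nowait())  # -> 3 4 (older items were dropped)
```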
class WebsocketSecretAuthHelper:
    """
    Middlewareish for websockets to verify node websocket broadcast interconnect.
@@ -40,6 +40,13 @@ base_inputs = {
        'multiline': False,
        'secret': True,
        'help_text': _('The Secret ID for AppRole Authentication')
+    }, {
+        'id': 'default_auth_path',
+        'label': _('Path to Approle Auth'),
+        'type': 'string',
+        'multiline': False,
+        'default': 'approle',
+        'help_text': _('The AppRole Authentication path to use if one isn\'t provided in the metadata when linking to an input field. Defaults to \'approle\'')
    }
    ],
    'metadata': [{
@@ -47,10 +54,11 @@
        'label': _('Path to Secret'),
        'type': 'string',
        'help_text': _('The path to the secret stored in the secret backend e.g, /some/secret/')
-    },{
+    }, {
        'id': 'auth_path',
        'label': _('Path to Auth'),
        'type': 'string',
        'multiline': False,
        'help_text': _('The path where the Authentication method is mounted e.g, approle')
    }],
    'required': ['url', 'secret_path'],
@@ -118,7 +126,9 @@ def handle_auth(**kwargs):
def approle_auth(**kwargs):
    role_id = kwargs['role_id']
    secret_id = kwargs['secret_id']
-    auth_path = kwargs.get('auth_path') or 'approle'
+    # we first try to use the 'auth_path' from the metadata
+    # if not found we try to fetch the 'default_auth_path' from inputs
+    auth_path = kwargs.get('auth_path') or kwargs['default_auth_path']

    url = urljoin(kwargs['url'], 'v1')
    cacert = kwargs.get('cacert', None)
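The precedence implemented above (per-lookup metadata first, then the credential type's `default_auth_path` input) can be sketched in isolation; the function name and values are hypothetical:

```python
def resolve_auth_path(metadata, inputs):
    # metadata wins when it provides auth_path; otherwise fall back to
    # the input's default, which itself defaults to 'approle'
    return metadata.get('auth_path') or inputs['default_auth_path']

assert resolve_auth_path({'auth_path': 'my-approle'}, {'default_auth_path': 'approle'}) == 'my-approle'
assert resolve_auth_path({}, {'default_auth_path': 'approle'}) == 'approle'
```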
@@ -2,6 +2,9 @@ import logging
import uuid
import json

from django.conf import settings
import redis

from awx.main.dispatch import get_local_queuename

from . import pg_bus_conn
@@ -21,7 +24,15 @@ class Control(object):
        self.queuename = host or get_local_queuename()

    def status(self, *args, **kwargs):
        return self.control_with_reply('status', *args, **kwargs)
        r = redis.Redis.from_url(settings.BROKER_URL)
        if self.service == 'dispatcher':
            stats = r.get(f'awx_{self.service}_statistics') or b''
            return stats.decode('utf-8')
        else:
            workers = []
            for key in r.keys('awx_callback_receiver_statistics_*'):
                workers.append(r.get(key).decode('utf-8'))
            return '\n'.join(workers)

    def running(self, *args, **kwargs):
        return self.control_with_reply('running', *args, **kwargs)
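Status reporting now reads pre-published statistics keys out of redis instead of doing a round-trip to the running service. A sketch of the consumer side, assuming a broker reachable at the URL below (AWX reads it from `settings.BROKER_URL`):

```python
import redis

r = redis.Redis.from_url('redis://localhost:6379/0')  # assumed broker URL

# each callback-receiver worker publishes its own statistics key
for key in r.keys('awx_callback_receiver_statistics_*'):
    print(r.get(key).decode('utf-8'))
```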
@@ -5,6 +5,7 @@ import signal
import sys
import time
import traceback
from datetime import datetime
from uuid import uuid4

import collections
@@ -27,6 +28,12 @@ else:
logger = logging.getLogger('awx.main.dispatch')


class NoOpResultQueue(object):

    def put(self, item):
        pass


class PoolWorker(object):
    '''
    Used to track a worker child process and its pending and finished messages.
@@ -56,11 +63,13 @@ class PoolWorker(object):
    It is "idle" when self.managed_tasks is empty.
    '''

    def __init__(self, queue_size, target, args):
    track_managed_tasks = False

    def __init__(self, queue_size, target, args, **kwargs):
        self.messages_sent = 0
        self.messages_finished = 0
        self.managed_tasks = collections.OrderedDict()
        self.finished = MPQueue(queue_size)
        self.finished = MPQueue(queue_size) if self.track_managed_tasks else NoOpResultQueue()
        self.queue = MPQueue(queue_size)
        self.process = Process(target=target, args=(self.queue, self.finished) + args)
        self.process.daemon = True
@@ -74,7 +83,8 @@ class PoolWorker(object):
        if not body.get('uuid'):
            body['uuid'] = str(uuid4())
        uuid = body['uuid']
        self.managed_tasks[uuid] = body
        if self.track_managed_tasks:
            self.managed_tasks[uuid] = body
        self.queue.put(body, block=True, timeout=5)
        self.messages_sent += 1
        self.calculate_managed_tasks()
@@ -111,6 +121,8 @@ class PoolWorker(object):
        return str(self.process.exitcode)

    def calculate_managed_tasks(self):
        if not self.track_managed_tasks:
            return
        # look to see if any tasks were finished
        finished = []
        for _ in range(self.finished.qsize()):
@@ -135,6 +147,8 @@ class PoolWorker(object):

    @property
    def current_task(self):
        if not self.track_managed_tasks:
            return None
        self.calculate_managed_tasks()
        # the task at [0] is the one that's running right now (or is about to
        # be running)
@@ -145,6 +159,8 @@ class PoolWorker(object):

    @property
    def orphaned_tasks(self):
        if not self.track_managed_tasks:
            return []
        orphaned = []
        if not self.alive:
            # if this process had a running task that never finished,
@@ -179,6 +195,11 @@ class PoolWorker(object):
        return not self.busy


class StatefulPoolWorker(PoolWorker):

    track_managed_tasks = True
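The split between `PoolWorker` and `StatefulPoolWorker` hinges on a single class attribute: the base class skips all per-task bookkeeping (one of the callback-receiver optimizations in this release), and a subclass opts back in without touching `__init__`. A minimal sketch of the pattern with illustrative names:

```python
class Worker:
    track_managed_tasks = False  # cheap default: skip the bookkeeping

    def __init__(self):
        self.managed_tasks = {}

    def record(self, task_id, body):
        if self.track_managed_tasks:
            self.managed_tasks[task_id] = body

class StatefulWorker(Worker):
    track_managed_tasks = True   # subclass opts back in; no __init__ changes needed

w, s = Worker(), StatefulWorker()
w.record(1, 'a'); s.record(1, 'a')
print(len(w.managed_tasks), len(s.managed_tasks))  # -> 0 1
```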
class WorkerPool(object):
    '''
    Creates a pool of forked PoolWorkers.
@@ -200,6 +221,7 @@ class WorkerPool(object):
    )
    '''

    pool_cls = PoolWorker
    debug_meta = ''

    def __init__(self, min_workers=None, queue_size=None):
@@ -225,7 +247,7 @@ class WorkerPool(object):
        # for the DB and cache connections (that way lies race conditions)
        django_connection.close()
        django_cache.close()
        worker = PoolWorker(self.queue_size, self.target, (idx,) + self.target_args)
        worker = self.pool_cls(self.queue_size, self.target, (idx,) + self.target_args)
        self.workers.append(worker)
        try:
            worker.start()
@@ -236,13 +258,13 @@ class WorkerPool(object):
        return idx, worker

    def debug(self, *args, **kwargs):
        self.cleanup()
        tmpl = Template(
            'Recorded at: {{ dt }} \n'
            '{{ pool.name }}[pid:{{ pool.pid }}] workers total={{ workers|length }} {{ meta }} \n'
            '{% for w in workers %}'
            '. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
            ' sent={{ w.messages_sent }}'
            ' finished={{ w.messages_finished }}'
            '{% if w.messages_finished %} finished={{ w.messages_finished }}{% endif %}'
            ' qsize={{ w.managed_tasks|length }}'
            ' rss={{ w.mb }}MB'
            '{% for task in w.managed_tasks.values() %}'
@@ -260,7 +282,11 @@ class WorkerPool(object):
            '\n'
            '{% endfor %}'
        )
        return tmpl.render(pool=self, workers=self.workers, meta=self.debug_meta)
        now = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')
        return tmpl.render(
            pool=self, workers=self.workers, meta=self.debug_meta,
            dt=now
        )

    def write(self, preferred_queue, body):
        queue_order = sorted(range(len(self.workers)), key=lambda x: -1 if x==preferred_queue else x)
@@ -293,6 +319,8 @@ class AutoscalePool(WorkerPool):
    down based on demand
    '''

    pool_cls = StatefulPoolWorker

    def __init__(self, *args, **kwargs):
        self.max_workers = kwargs.pop('max_workers', None)
        super(AutoscalePool, self).__init__(*args, **kwargs)
@@ -309,6 +337,10 @@ class AutoscalePool(WorkerPool):
        # max workers can't be less than min_workers
        self.max_workers = max(self.min_workers, self.max_workers)

    def debug(self, *args, **kwargs):
        self.cleanup()
        return super(AutoscalePool, self).debug(*args, **kwargs)

    @property
    def should_grow(self):
        if len(self.workers) < self.min_workers:

@@ -43,6 +43,9 @@ class WorkerSignalHandler:


class AWXConsumerBase(object):

    last_stats = time.time()

    def __init__(self, name, worker, queues=[], pool=None):
        self.should_stop = False

@@ -54,6 +57,7 @@ class AWXConsumerBase(object):
        if pool is None:
            self.pool = WorkerPool()
        self.pool.init_workers(self.worker.work_loop)
        self.redis = redis.Redis.from_url(settings.BROKER_URL)

    @property
    def listening_on(self):
@@ -99,6 +103,16 @@ class AWXConsumerBase(object):
            queue = 0
        self.pool.write(queue, body)
        self.total_messages += 1
        self.record_statistics()

    def record_statistics(self):
        if time.time() - self.last_stats > 1:  # buffer stat recording to once per second
            try:
                self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
                self.last_stats = time.time()
            except Exception:
                logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
                self.last_stats = time.time()

    def run(self, *args, **kwargs):
        signal.signal(signal.SIGINT, self.stop)
@@ -118,23 +132,9 @@ class AWXConsumerRedis(AWXConsumerBase):
        super(AWXConsumerRedis, self).run(*args, **kwargs)
        self.worker.on_start()

        time_to_sleep = 1
        while True:
            queue = redis.Redis.from_url(settings.BROKER_URL)
            while True:
                try:
                    res = queue.blpop(self.queues)
                    time_to_sleep = 1
                    res = json.loads(res[1])
                    self.process_task(res)
                except redis.exceptions.RedisError:
                    time_to_sleep = min(time_to_sleep * 2, 30)
                    logger.exception(f"encountered an error communicating with redis. Reconnect attempt in {time_to_sleep} seconds")
                    time.sleep(time_to_sleep)
                except (json.JSONDecodeError, KeyError):
                    logger.exception("failed to decode JSON message from redis")
            if self.should_stop:
                return
            logger.debug(f'{os.getpid()} is alive')
            time.sleep(60)
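The removed reconnect loop used a classic capped exponential backoff: reset the delay to one second on success, double it on each redis failure, and never wait more than 30 seconds. The delay schedule in isolation:

```python
def backoff_delays(base=1, cap=30):
    """Yield reconnect delays: base, 2*base, 4*base, ... capped at `cap` seconds."""
    delay = base
    while True:
        yield delay
        delay = min(delay * 2, cap)

delays = backoff_delays()
print([next(delays) for _ in range(7)])  # -> [1, 2, 4, 8, 16, 30, 30]
```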
class AWXConsumerPG(AWXConsumerBase):

@@ -1,4 +1,5 @@
import cProfile
import json
import logging
import os
import pstats
@@ -6,12 +7,15 @@ import signal
import tempfile
import time
import traceback
from queue import Empty as QueueEmpty

from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError, IntegrityError
from django.db.utils import InterfaceError, InternalError

import psutil

import redis

from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
@@ -24,10 +28,6 @@ from .base import BaseWorker

logger = logging.getLogger('awx.main.commands.run_callback_receiver')

# the number of seconds to buffer events in memory before flushing
# using JobEvent.objects.bulk_create()
BUFFER_SECONDS = .1


class CallbackBrokerWorker(BaseWorker):
    '''
@@ -39,21 +39,57 @@ class CallbackBrokerWorker(BaseWorker):
    '''

    MAX_RETRIES = 2
    last_stats = time.time()
    total = 0
    last_event = ''
    prof = None

    def __init__(self):
        self.buff = {}
        self.pid = os.getpid()
        self.redis = redis.Redis.from_url(settings.BROKER_URL)
        for key in self.redis.keys('awx_callback_receiver_statistics_*'):
            self.redis.delete(key)

    def read(self, queue):
        try:
            return queue.get(block=True, timeout=BUFFER_SECONDS)
        except QueueEmpty:
            return {'event': 'FLUSH'}
            res = self.redis.blpop(settings.CALLBACK_QUEUE, timeout=settings.JOB_EVENT_BUFFER_SECONDS)
            if res is None:
                return {'event': 'FLUSH'}
            self.total += 1
            return json.loads(res[1])
        except redis.exceptions.RedisError:
            logger.exception("encountered an error communicating with redis")
            time.sleep(1)
        except (json.JSONDecodeError, KeyError):
            logger.exception("failed to decode JSON message from redis")
        finally:
            self.record_statistics()
        return {'event': 'FLUSH'}

    def record_statistics(self):
        # buffer stat recording to once per (by default) 5s
        if time.time() - self.last_stats > settings.JOB_EVENT_STATISTICS_INTERVAL:
            try:
                self.redis.set(f'awx_callback_receiver_statistics_{self.pid}', self.debug())
                self.last_stats = time.time()
            except Exception:
                logger.exception("encountered an error communicating with redis")
                self.last_stats = time.time()

    def debug(self):
        return f'. worker[pid:{self.pid}] sent={self.total} rss={self.mb}MB {self.last_event}'

    @property
    def mb(self):
        return '{:0.3f}'.format(
            psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
        )

    def toggle_profiling(self, *args):
        if self.prof:
            self.prof.disable()
            filename = f'callback-{os.getpid()}.pstats'
            filename = f'callback-{self.pid}.pstats'
            filepath = os.path.join(tempfile.gettempdir(), filename)
            with open(filepath, 'w') as f:
                pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
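The rewritten `read()` above uses `BLPOP`'s timeout as a flush signal: when no event arrives within the buffer window, the worker returns a sentinel so buffered events get written out. A standalone sketch of that shape (queue name and broker URL are assumptions; AWX takes them from settings):

```python
import json
import redis

r = redis.Redis.from_url('redis://localhost:6379/0')  # assumed broker URL

def read_event(queue_name='callback_tasks', buffer_seconds=1):
    # BLPOP returns None on timeout; treat that as a "flush the buffer" signal
    res = r.blpop(queue_name, timeout=buffer_seconds)
    if res is None:
        return {'event': 'FLUSH'}
    return json.loads(res[1])
```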
@@ -84,20 +120,12 @@ class CallbackBrokerWorker(BaseWorker):
                    e.modified = now
                try:
                    cls.objects.bulk_create(events)
                except Exception as exc:
                except Exception:
                    # if an exception occurs, we should re-attempt to save the
                    # events one-by-one, because something in the list is
                    # broken/stale (e.g., an IntegrityError on a specific event)
                    # broken/stale
                    for e in events:
                        try:
                            if (
                                isinstance(exc, IntegrityError) and
                                getattr(e, 'host_id', '')
                            ):
                                # this is one potential IntegrityError we can
                                # work around - if the host disappears before
                                # the event can be processed
                                e.host_id = None
                            e.save()
                        except Exception:
                            logger.exception('Database Error Saving Job Event')
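The underlying pattern is bulk insert with a per-row fallback: try one round-trip for the whole batch, and only when that fails retry row by row so a single poisoned event can't discard the rest. A self-contained sketch with toy stand-ins for `bulk_create`/`save`:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('bulk-fallback')

def save_all(events, bulk_save, single_save):
    try:
        bulk_save(events)        # fast path: one round-trip for the batch
    except Exception:
        for e in events:         # slow path: isolate the broken row(s)
            try:
                single_save(e)
            except Exception:
                logger.exception('failed to save event %r', e)

def failing_bulk(events):
    raise ValueError('one row is broken')

store = []
save_all([1, 'bad', 3], bulk_save=failing_bulk, single_save=lambda e: store.append(int(e)))
print(store)  # -> [1, 3]; the bad row was logged and skipped
```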
@@ -108,6 +136,8 @@ class CallbackBrokerWorker(BaseWorker):
    def perform_work(self, body):
        try:
            flush = body.get('event') == 'FLUSH'
            if flush:
                self.last_event = ''
            if not flush:
                event_map = {
                    'job_id': JobEvent,
@@ -123,6 +153,8 @@ class CallbackBrokerWorker(BaseWorker):
                        job_identifier = body[key]
                        break

                self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})'  # noqa

                if body.get('event') == 'EOF':
                    try:
                        final_counter = body.get('final_counter', 0)

@@ -42,6 +42,16 @@ class Command(BaseCommand):
            },
            created_by=superuser)
        c.admin_role.members.add(superuser)
        public_galaxy_credential = Credential(
            name='Ansible Galaxy',
            managed_by_tower=True,
            credential_type=CredentialType.objects.get(kind='galaxy'),
            inputs = {
                'url': 'https://galaxy.ansible.com/'
            }
        )
        public_galaxy_credential.save()
        o.galaxy_credentials.add(public_galaxy_credential)
        i = Inventory.objects.create(name='Demo Inventory',
                                     organization=o,
                                     created_by=superuser)

@@ -1,6 +1,9 @@
import logging

from awx.main.analytics import gather, ship
from dateutil import parser
from django.core.management.base import BaseCommand
from django.utils.timezone import now


class Command(BaseCommand):
@@ -15,6 +18,10 @@ class Command(BaseCommand):
                            help='Gather analytics without shipping. Works even if analytics are disabled in settings.')
        parser.add_argument('--ship', dest='ship', action='store_true',
                            help='Enable to ship metrics to the Red Hat Cloud')
        parser.add_argument('--since', dest='since', action='store',
                            help='Start date for collection')
        parser.add_argument('--until', dest='until', action='store',
                            help='End date for collection')

    def init_logging(self):
        self.logger = logging.getLogger('awx.main.analytics')
@@ -28,11 +35,28 @@ class Command(BaseCommand):
        self.init_logging()
        opt_ship = options.get('ship')
        opt_dry_run = options.get('dry-run')
        opt_since = options.get('since') or None
        opt_until = options.get('until') or None

        if opt_since:
            since = parser.parse(opt_since)
        else:
            since = None
        if opt_until:
            until = parser.parse(opt_until)
        else:
            until = now()

        if opt_ship and opt_dry_run:
            self.logger.error('Both --ship and --dry-run cannot be processed at the same time.')
            return
        tgz = gather(collection_type='manual' if not opt_dry_run else 'dry-run')
        if tgz:
            self.logger.debug(tgz)
        tgzfiles = gather(collection_type='manual' if not opt_dry_run else 'dry-run', since = since, until = until)
        if tgzfiles:
            for tgz in tgzfiles:
                self.logger.info(tgz)
        else:
            self.logger.error('No analytics collected')
        if opt_ship:
            ship(tgz)
            if tgzfiles:
                for tgz in tgzfiles:
                    ship(tgz)
@@ -4,6 +4,7 @@
from django.conf import settings
from django.core.management.base import BaseCommand

from awx.main.dispatch.control import Control
from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker


@@ -15,7 +16,14 @@ class Command(BaseCommand):
    '''
    help = 'Launch the job callback receiver'

    def add_arguments(self, parser):
        parser.add_argument('--status', dest='status', action='store_true',
                            help='print the internal state of any running dispatchers')

    def handle(self, *arg, **options):
        if options.get('status'):
            print(Control('callback_receiver').status())
            return
        consumer = None
        try:
            consumer = AWXConsumerRedis(

awx/main/migrations/0120_galaxy_credentials.py (new file, 51 lines)
@@ -0,0 +1,51 @@
# Generated by Django 2.2.11 on 2020-08-04 15:19

import logging

import awx.main.fields
from awx.main.utils.encryption import encrypt_field, decrypt_field

from django.db import migrations, models
from django.utils.timezone import now
import django.db.models.deletion

from awx.main.migrations import _galaxy as galaxy
from awx.main.models import CredentialType as ModernCredentialType
from awx.main.utils.common import set_current_apps

logger = logging.getLogger('awx.main.migrations')


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0119_inventory_plugins'),
    ]

    operations = [
        migrations.AlterField(
            model_name='credentialtype',
            name='kind',
            field=models.CharField(choices=[('ssh', 'Machine'), ('vault', 'Vault'), ('net', 'Network'), ('scm', 'Source Control'), ('cloud', 'Cloud'), ('token', 'Personal Access Token'), ('insights', 'Insights'), ('external', 'External'), ('kubernetes', 'Kubernetes'), ('galaxy', 'Galaxy/Automation Hub')], max_length=32),
        ),
        migrations.CreateModel(
            name='OrganizationGalaxyCredentialMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('credential', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Credential')),
                ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Organization')),
            ],
        ),
        migrations.AddField(
            model_name='organization',
            name='galaxy_credentials',
            field=awx.main.fields.OrderedManyToManyField(blank=True, related_name='organization_galaxy_credentials', through='main.OrganizationGalaxyCredentialMembership', to='main.Credential'),
        ),
        migrations.AddField(
            model_name='credential',
            name='managed_by_tower',
            field=models.BooleanField(default=False, editable=False),
        ),
        migrations.RunPython(galaxy.migrate_galaxy_settings)
    ]

awx/main/migrations/0121_delete_toweranalyticsstate.py (new file, 16 lines)
@@ -0,0 +1,16 @@
# Generated by Django 2.2.11 on 2020-07-24 17:41

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0120_galaxy_credentials'),
    ]

    operations = [
        migrations.DeleteModel(
            name='TowerAnalyticsState',
        ),
    ]

awx/main/migrations/_galaxy.py (new file, 125 lines)
@@ -0,0 +1,125 @@
# Generated by Django 2.2.11 on 2020-08-04 15:19

import logging

from awx.main.utils.encryption import encrypt_field, decrypt_field

from django.conf import settings
from django.utils.timezone import now

from awx.main.models import CredentialType as ModernCredentialType
from awx.main.utils.common import set_current_apps

logger = logging.getLogger('awx.main.migrations')


def migrate_galaxy_settings(apps, schema_editor):
    Organization = apps.get_model('main', 'Organization')
    if Organization.objects.count() == 0:
        # nothing to migrate
        return
    set_current_apps(apps)
    ModernCredentialType.setup_tower_managed_defaults()
    CredentialType = apps.get_model('main', 'CredentialType')
    Credential = apps.get_model('main', 'Credential')
    Setting = apps.get_model('conf', 'Setting')

    galaxy_type = CredentialType.objects.get(kind='galaxy')
    private_galaxy_url = Setting.objects.filter(key='PRIMARY_GALAXY_URL').first()

    # by default, prior versions of AWX/Tower automatically pulled content
    # from galaxy.ansible.com
    public_galaxy_enabled = True
    public_galaxy_setting = Setting.objects.filter(key='PUBLIC_GALAXY_ENABLED').first()
    if public_galaxy_setting and public_galaxy_setting.value is False:
        # ...UNLESS this behavior was explicitly disabled via this setting
        public_galaxy_enabled = False

    public_galaxy_credential = Credential(
        created=now(),
        modified=now(),
        name='Ansible Galaxy',
        managed_by_tower=True,
        credential_type=galaxy_type,
        inputs = {
            'url': 'https://galaxy.ansible.com/'
        }
    )
    public_galaxy_credential.save()

    for org in Organization.objects.all():
        if private_galaxy_url and private_galaxy_url.value:
            # If a setting exists for a private Galaxy URL, make a credential for it
            username = Setting.objects.filter(key='PRIMARY_GALAXY_USERNAME').first()
            password = Setting.objects.filter(key='PRIMARY_GALAXY_PASSWORD').first()
            if (username and username.value) or (password and password.value):
                logger.error(
                    f'Specifying HTTP basic auth for the Ansible Galaxy API '
                    f'({private_galaxy_url.value}) is no longer supported. '
                    'Please provide an API token instead after your upgrade '
                    'has completed',
                )
            inputs = {
                'url': private_galaxy_url.value
            }
            token = Setting.objects.filter(key='PRIMARY_GALAXY_TOKEN').first()
            if token and token.value:
                inputs['token'] = decrypt_field(token, 'value')
            auth_url = Setting.objects.filter(key='PRIMARY_GALAXY_AUTH_URL').first()
            if auth_url and auth_url.value:
                inputs['auth_url'] = auth_url.value
            name = f'Private Galaxy ({private_galaxy_url.value})'
            if 'cloud.redhat.com' in inputs['url']:
                name = f'Ansible Automation Hub ({private_galaxy_url.value})'
            cred = Credential(
                created=now(),
                modified=now(),
                name=name,
                organization=org,
                credential_type=galaxy_type,
                inputs=inputs
            )
            cred.save()
            if token and token.value:
                # encrypt based on the primary key from the prior save
                cred.inputs['token'] = encrypt_field(cred, 'token')
                cred.save()
            org.galaxy_credentials.add(cred)

        fallback_servers = getattr(settings, 'FALLBACK_GALAXY_SERVERS', [])
        for fallback in fallback_servers:
            url = fallback.get('url', None)
            auth_url = fallback.get('auth_url', None)
            username = fallback.get('username', None)
            password = fallback.get('password', None)
            token = fallback.get('token', None)
            if username or password:
                logger.error(
                    f'Specifying HTTP basic auth for the Ansible Galaxy API '
                    f'({url}) is no longer supported. '
                    'Please provide an API token instead after your upgrade '
                    'has completed',
                )
            inputs = {'url': url}
            if token:
                inputs['token'] = token
            if auth_url:
                inputs['auth_url'] = auth_url
            cred = Credential(
                created=now(),
                modified=now(),
                name=f'Ansible Galaxy ({url})',
                organization=org,
                credential_type=galaxy_type,
                inputs=inputs
            )
            cred.save()
            if token:
                # encrypt based on the primary key from the prior save
                cred.inputs['token'] = encrypt_field(cred, 'token')
                cred.save()
            org.galaxy_credentials.add(cred)

        if public_galaxy_enabled:
            # If public Galaxy was enabled, associate it to the org
            org.galaxy_credentials.add(public_galaxy_credential)
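`migrate_galaxy_settings` follows the standard Django data-migration shape: a `RunPython` callable that resolves models through `apps.get_model()` (the historical model state) instead of importing them directly. A minimal, illustrative sketch of that shape; the app/model names below are placeholders, not the AWX migration itself:

```python
from django.db import migrations

def forwards(apps, schema_editor):
    # use the historical model state, never `from myapp.models import ...`
    Organization = apps.get_model('main', 'Organization')
    for org in Organization.objects.all():
        org.description = org.description or ''
        org.save(update_fields=['description'])

class Migration(migrations.Migration):
    dependencies = [('main', '0119_inventory_plugins')]
    operations = [migrations.RunPython(forwards, migrations.RunPython.noop)]
```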
@@ -96,6 +96,10 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
        help_text=_('Specify the type of credential you want to create. Refer '
                    'to the Ansible Tower documentation for details on each type.')
    )
    managed_by_tower = models.BooleanField(
        default=False,
        editable=False
    )
    organization = models.ForeignKey(
        'Organization',
        null=True,
@@ -331,6 +335,7 @@ class CredentialType(CommonModelNameNotUnique):
        ('insights', _('Insights')),
        ('external', _('External')),
        ('kubernetes', _('Kubernetes')),
        ('galaxy', _('Galaxy/Automation Hub')),
    )

    kind = models.CharField(
@@ -1173,6 +1178,38 @@ ManagedCredentialType(
)


ManagedCredentialType(
    namespace='galaxy_api_token',
    kind='galaxy',
    name=ugettext_noop('Ansible Galaxy/Automation Hub API Token'),
    inputs={
        'fields': [{
            'id': 'url',
            'label': ugettext_noop('Galaxy Server URL'),
            'type': 'string',
            'help_text': ugettext_noop('The URL of the Galaxy instance to connect to.')
        },{
            'id': 'auth_url',
            'label': ugettext_noop('Auth Server URL'),
            'type': 'string',
            'help_text': ugettext_noop(
                'The URL of a Keycloak server token_endpoint, if using '
                'SSO auth.'
            )
        },{
            'id': 'token',
            'label': ugettext_noop('API Token'),
            'type': 'string',
            'secret': True,
            'help_text': ugettext_noop(
                'A token to use for authentication against the Galaxy instance.'
            )
        }],
        'required': ['url'],
    }
)


class CredentialInputSource(PrimordialModel):

    class Meta:
@@ -4,6 +4,8 @@ import datetime
import logging
from collections import defaultdict

from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, DatabaseError, connection
from django.utils.dateparse import parse_datetime
from django.utils.text import Truncator
@@ -57,7 +59,18 @@ def create_host_status_counts(event_data):
    return dict(host_status_counts)


MINIMAL_EVENTS = set([
    'playbook_on_play_start', 'playbook_on_task_start',
    'playbook_on_stats', 'EOF'
])


def emit_event_detail(event):
    if (
        settings.UI_LIVE_UPDATES_ENABLED is False and
        event.event not in MINIMAL_EVENTS
    ):
        return
    cls = event.__class__
    relation = {
        JobEvent: 'job_id',
@@ -337,41 +350,47 @@ class BasePlaybookEvent(CreatedModifiedModel):
                    pass

                if isinstance(self, JobEvent):
                    hostnames = self._hostnames()
                    self._update_host_summary_from_stats(set(hostnames))
                    if self.job.inventory:
                        try:
                            self.job.inventory.update_computed_fields()
                        except DatabaseError:
                            logger.exception('Computed fields database error saving event {}'.format(self.pk))
                    try:
                        job = self.job
                    except ObjectDoesNotExist:
                        job = None
                    if job:
                        hostnames = self._hostnames()
                        self._update_host_summary_from_stats(set(hostnames))
                        if job.inventory:
                            try:
                                job.inventory.update_computed_fields()
                            except DatabaseError:
                                logger.exception('Computed fields database error saving event {}'.format(self.pk))

                    # find parent links and progagate changed=T and failed=T
                    changed = self.job.job_events.filter(changed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()  # noqa
                    failed = self.job.job_events.filter(failed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()  # noqa
                        # find parent links and progagate changed=T and failed=T
                        changed = job.job_events.filter(changed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()  # noqa
                        failed = job.job_events.filter(failed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()  # noqa

                    JobEvent.objects.filter(
                        job_id=self.job_id, uuid__in=changed
                    ).update(changed=True)
                    JobEvent.objects.filter(
                        job_id=self.job_id, uuid__in=failed
                    ).update(failed=True)
                        JobEvent.objects.filter(
                            job_id=self.job_id, uuid__in=changed
                        ).update(changed=True)
                        JobEvent.objects.filter(
                            job_id=self.job_id, uuid__in=failed
                        ).update(failed=True)

                    # send success/failure notifications when we've finished handling the playbook_on_stats event
                    from awx.main.tasks import handle_success_and_failure_notifications  # circular import
                        # send success/failure notifications when we've finished handling the playbook_on_stats event
                        from awx.main.tasks import handle_success_and_failure_notifications  # circular import

                    def _send_notifications():
                        handle_success_and_failure_notifications.apply_async([self.job.id])
                    connection.on_commit(_send_notifications)
                        def _send_notifications():
                            handle_success_and_failure_notifications.apply_async([job.id])
                        connection.on_commit(_send_notifications)

        for field in ('playbook', 'play', 'task', 'role'):
            value = force_text(event_data.get(field, '')).strip()
            if value != getattr(self, field):
                setattr(self, field, value)
        analytics_logger.info(
            'Event data saved.',
            extra=dict(python_objects=dict(job_event=self))
        )
        if settings.LOG_AGGREGATOR_ENABLED:
            analytics_logger.info(
                'Event data saved.',
                extra=dict(python_objects=dict(job_event=self))
            )

    @classmethod
    def create_from_data(cls, **kwargs):
@@ -484,7 +503,11 @@ class JobEvent(BasePlaybookEvent):

    def _update_host_summary_from_stats(self, hostnames):
        with ignore_inventory_computed_fields():
            if not self.job or not self.job.inventory:
            try:
                if not self.job or not self.job.inventory:
                    logger.info('Event {} missing job or inventory, host summaries not updated'.format(self.pk))
                    return
            except ObjectDoesNotExist:
                logger.info('Event {} missing job or inventory, host summaries not updated'.format(self.pk))
                return
            job = self.job
@@ -520,13 +543,21 @@ class JobEvent(BasePlaybookEvent):
                (summary['host_id'], summary['id'])
                for summary in JobHostSummary.objects.filter(job_id=job.id).values('id', 'host_id')
            )
            updated_hosts = set()
            for h in all_hosts:
                # if the hostname *shows up* in the playbook_on_stats event
                if h.name in hostnames:
                    h.last_job_id = job.id
                    updated_hosts.add(h)
                if h.id in host_mapping:
                    h.last_job_host_summary_id = host_mapping[h.id]
            Host.objects.bulk_update(all_hosts, ['last_job_id', 'last_job_host_summary_id'])
                    updated_hosts.add(h)

            Host.objects.bulk_update(
                list(updated_hosts),
                ['last_job_id', 'last_job_host_summary_id'],
                batch_size=100
            )

    @property
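The hunk above is the deadlock fix from the changelog (ansible/awx#8145): only the hosts that actually changed are passed to `bulk_update`, and `batch_size=100` bounds each UPDATE statement. The batching behaves like this simple chunker:

```python
def batches(items, size):
    # bulk_update(batch_size=...) issues one UPDATE per chunk of this size
    for i in range(0, len(items), size):
        yield items[i:i + size]

changed = list(range(250))  # stand-in for the touched Host rows
print([len(b) for b in batches(changed, 100)])  # -> [100, 100, 50]
```

Rows that were never touched no longer hit the database at all, which shrinks the lock footprint when many playbooks finish against the same shared inventory.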
@@ -12,6 +12,7 @@ from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils.timezone import now, timedelta

import redis
from solo.models import SingletonModel

from awx import __version__ as awx_application_version
@@ -23,7 +24,7 @@ from awx.main.models.unified_jobs import UnifiedJob
from awx.main.utils import get_cpu_capacity, get_mem_capacity, get_system_task_capacity
from awx.main.models.mixins import RelatedJobsMixin

__all__ = ('Instance', 'InstanceGroup', 'TowerScheduleState', 'TowerAnalyticsState')
__all__ = ('Instance', 'InstanceGroup', 'TowerScheduleState')


class HasPolicyEditsMixin(HasEditsMixin):
@@ -152,6 +153,14 @@ class Instance(HasPolicyEditsMixin, BaseModel):
            self.capacity = get_system_task_capacity(self.capacity_adjustment)
        else:
            self.capacity = 0

        try:
            # if redis is down for some reason, that means we can't persist
            # playbook event data; we should consider this a zero capacity event
            redis.Redis.from_url(settings.BROKER_URL).ping()
        except redis.ConnectionError:
            self.capacity = 0

        self.cpu = cpu[0]
        self.memory = mem[0]
        self.cpu_capacity = cpu[1]
@@ -287,10 +296,6 @@ class TowerScheduleState(SingletonModel):
    schedule_last_run = models.DateTimeField(auto_now_add=True)


class TowerAnalyticsState(SingletonModel):
    last_run = models.DateTimeField(auto_now_add=True)


def schedule_policy_task():
    from awx.main.tasks import apply_cluster_membership_policies
    connection.on_commit(lambda: apply_cluster_membership_policies.apply_async())

@@ -393,7 +393,11 @@ class JobNotificationMixin(object):
            'job': job_context,
            'job_friendly_name': self.get_notification_friendly_name(),
            'url': self.get_ui_url(),
            'job_metadata': json.dumps(self.notification_data(), indent=4)
            'job_metadata': json.dumps(
                self.notification_data(),
                ensure_ascii=False,
                indent=4
            )
        }

    def build_context(node, fields, allowed_fields):
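The `ensure_ascii=False` change keeps non-ASCII job names readable in notification metadata instead of `\uXXXX` escapes (the unicode test added later in this diff exercises exactly this):

```python
import json

data = {'name': '批量安装项目'}
print(json.dumps(data))                      # {"name": "\u6279\u91cf\u5b89\u88c5\u9879\u76ee"}
print(json.dumps(data, ensure_ascii=False))  # {"name": "批量安装项目"}
```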
@@ -45,6 +45,12 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
        blank=True,
        through='OrganizationInstanceGroupMembership'
    )
    galaxy_credentials = OrderedManyToManyField(
        'Credential',
        blank=True,
        through='OrganizationGalaxyCredentialMembership',
        related_name='%(class)s_galaxy_credentials'
    )
    max_hosts = models.PositiveIntegerField(
        blank=True,
        default=0,
@@ -108,6 +114,23 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
        return UnifiedJob.objects.non_polymorphic().filter(organization=self)


class OrganizationGalaxyCredentialMembership(models.Model):

    organization = models.ForeignKey(
        'Organization',
        on_delete=models.CASCADE
    )
    credential = models.ForeignKey(
        'Credential',
        on_delete=models.CASCADE
    )
    position = models.PositiveIntegerField(
        null=True,
        default=None,
        db_index=True,
    )


class Team(CommonModelNameNotUnique, ResourceMixin):
    '''
    A team is a group of users that work on common projects.

@@ -205,10 +205,15 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
                'A valid TZID must be provided (e.g., America/New_York)'
            )

        if fast_forward and ('MINUTELY' in rrule or 'HOURLY' in rrule):
        if (
            fast_forward and
            ('MINUTELY' in rrule or 'HOURLY' in rrule) and
            'COUNT=' not in rrule
        ):
            try:
                first_event = x[0]
                if first_event < now():
                    # If the first event was over a week ago...
                    if (now() - first_event).days > 7:
                        # hourly/minutely rrules with far-past DTSTART values
                        # are *really* slow to precompute
                        # start *from* one week ago to speed things up drastically
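The new `'COUNT=' not in rrule` guard is the scheduling fix from the changelog (ansible/awx#8071): a `COUNT` rule is anchored to `DTSTART`, so rewriting the start one week back would change which occurrences exist. A quick dateutil illustration:

```python
from dateutil.rrule import rrulestr

# COUNT rules are anchored to DTSTART: fast-forwarding the start would
# invent occurrences that the original rule never produced
rule = 'DTSTART:20200101T000000Z\nRRULE:FREQ=HOURLY;INTERVAL=1;COUNT=3'
print(list(rrulestr(rule)))
# -> exactly three datetimes (2020-01-01 00:00, 01:00, 02:00 UTC),
#    no matter how far in the past DTSTART lies
```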
@@ -776,6 +776,10 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
        self.send_approval_notification('running')
        return can_start

    @property
    def event_processing_finished(self):
        return True

    def send_approval_notification(self, approval_status):
        from awx.main.tasks import send_notifications  # avoid circular import
        if self.workflow_job_template is None:

@@ -1,8 +1,6 @@
import re
import urllib.parse as urlparse

from django.conf import settings

REPLACE_STR = '$encrypted$'


@@ -12,12 +10,6 @@ class UriCleaner(object):

    @staticmethod
    def remove_sensitive(cleartext):
        # exclude_list contains the items that will _not_ be redacted
        exclude_list = [settings.PUBLIC_GALAXY_SERVER['url']]
        if settings.PRIMARY_GALAXY_URL:
            exclude_list += [settings.PRIMARY_GALAXY_URL]
        if settings.FALLBACK_GALAXY_SERVERS:
            exclude_list += [server['url'] for server in settings.FALLBACK_GALAXY_SERVERS]
        redactedtext = cleartext
        text_index = 0
        while True:
@@ -25,10 +17,6 @@ class UriCleaner(object):
            if not match:
                break
            uri_str = match.group(1)
            # Do not redact items from the exclude list
            if any(uri_str.startswith(exclude_uri) for exclude_uri in exclude_list):
                text_index = match.start() + len(uri_str)
                continue
            try:
                # May raise a ValueError if invalid URI for one reason or another
                o = urlparse.urlsplit(uri_str)

@@ -12,6 +12,7 @@ import random
from django.db import transaction, connection
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.utils.timezone import now as tz_now
from django.conf import settings

# AWX
from awx.main.dispatch.reaper import reap_job
@@ -45,6 +46,12 @@ class TaskManager():

    def __init__(self):
        self.graph = dict()
        # start task limit indicates how many pending jobs can be started on this
        # .schedule() run. Starting jobs is expensive, and there is code in place to reap
        # the task manager after 5 minutes. At scale, the task manager can easily take more than
        # 5 minutes to start pending jobs. If this limit is reached, pending jobs
        # will no longer be started and will be started on the next task manager cycle.
        self.start_task_limit = settings.START_TASK_LIMIT
        for rampart_group in InstanceGroup.objects.prefetch_related('instances'):
            self.graph[rampart_group.name] = dict(graph=DependencyGraph(rampart_group.name),
                                                  capacity_total=rampart_group.capacity,
@@ -189,6 +196,10 @@ class TaskManager():
        return result

    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        self.start_task_limit -= 1
        if self.start_task_limit == 0:
            # schedule another run immediately after this task manager
            schedule_task_manager()
        from awx.main.tasks import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []
@@ -448,6 +459,8 @@ class TaskManager():
    def process_pending_tasks(self, pending_tasks):
        running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
        for task in pending_tasks:
            if self.start_task_limit <= 0:
                break
            if self.is_job_blocked(task):
                logger.debug("{} is blocked from running".format(task.log_format))
                continue
@@ -51,8 +51,9 @@ import ansible_runner

# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.access import access_registry
from awx.main.analytics import all_collectors, expensive_collectors
from awx.main.redact import UriCleaner
from awx.main.models import (
    Schedule, TowerScheduleState, Instance, InstanceGroup,
@@ -355,6 +356,26 @@ def send_notifications(notification_list, job_id=None):

@task(queue=get_local_queuename)
def gather_analytics():
    def _gather_and_ship(subset, since, until):
        tgzfiles = []
        try:
            tgzfiles = analytics.gather(subset=subset, since=since, until=until)
            # empty analytics without raising an exception is not an error
            if not tgzfiles:
                return True
            logger.info('Gathered analytics from {} to {}: {}'.format(since, until, tgzfiles))
            for tgz in tgzfiles:
                analytics.ship(tgz)
        except Exception:
            logger.exception('Error gathering and sending analytics for {} to {}.'.format(since,until))
            return False
        finally:
            if tgzfiles:
                for tgz in tgzfiles:
                    if os.path.exists(tgz):
                        os.remove(tgz)
        return True

    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField
    if not settings.INSIGHTS_TRACKING_STATE:
@@ -373,16 +394,29 @@ def gather_analytics():
    if acquired is False:
        logger.debug('Not gathering analytics, another task holds lock')
        return
    try:
        tgz = analytics.gather()
        if not tgz:
            return
        logger.info('gathered analytics: {}'.format(tgz))
        analytics.ship(tgz)
        settings.AUTOMATION_ANALYTICS_LAST_GATHER = gather_time
    finally:
        if os.path.exists(tgz):
            os.remove(tgz)
    subset = list(all_collectors().keys())
    incremental_collectors = []
    for collector in expensive_collectors():
        if collector in subset:
            subset.remove(collector)
            incremental_collectors.append(collector)

    # Cap gathering at 4 weeks of data if there has been no data gathering
    since = last_time or (gather_time - timedelta(weeks=4))

    if incremental_collectors:
        start = since
        until = None
        while start < gather_time:
            until = start + timedelta(hours = 4)
            if (until > gather_time):
                until = gather_time
            if not _gather_and_ship(incremental_collectors, since=start, until=until):
                break
            start = until
            settings.AUTOMATION_ANALYTICS_LAST_GATHER = until
    if subset:
        _gather_and_ship(subset, since=since, until=gather_time)
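The expensive collectors are gathered in four-hour windows so each pass stays bounded and the high-water mark can advance after every successful chunk. The windowing logic in isolation:

```python
from datetime import datetime, timedelta

def four_hour_windows(since, until):
    """Yield (start, end) pairs that split [since, until] into <=4h chunks."""
    start = since
    while start < until:
        end = min(start + timedelta(hours=4), until)
        yield start, end
        start = end

windows = four_hour_windows(datetime(2020, 9, 30, 0, 0), datetime(2020, 9, 30, 10, 0))
print([(s.hour, e.hour) for s, e in windows])  # -> [(0, 4), (4, 8), (8, 10)]
```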
@task(queue=get_local_queuename)
@@ -1472,6 +1506,8 @@ class BaseTask(object):
                self.instance.job_explanation = "Job terminated due to timeout"
                status = 'failed'
                extra_update_fields['job_explanation'] = self.instance.job_explanation
                # ensure failure notification sends even if playbook_on_stats event is not triggered
                handle_success_and_failure_notifications.apply_async([self.instance.job.id])

        except InvalidVirtualenvError as e:
            extra_update_fields['job_explanation'] = e.message
@@ -1632,11 +1668,6 @@ class RunJob(BaseTask):
        # callbacks to work.
        env['JOB_ID'] = str(job.pk)
        env['INVENTORY_ID'] = str(job.inventory.pk)
        if job.use_fact_cache:
            library_source = self.get_path_to('..', 'plugins', 'library')
            library_dest = os.path.join(private_data_dir, 'library')
            copy_tree(library_source, library_dest)
            env['ANSIBLE_LIBRARY'] = library_dest
        if job.project:
            env['PROJECT_REVISION'] = job.project.scm_revision
        env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
@@ -2020,35 +2051,25 @@ class RunProjectUpdate(BaseTask):
        env['PROJECT_UPDATE_ID'] = str(project_update.pk)
        if settings.GALAXY_IGNORE_CERTS:
            env['ANSIBLE_GALAXY_IGNORE'] = True
        # Set up the public Galaxy server, if enabled
        galaxy_configured = False
        if settings.PUBLIC_GALAXY_ENABLED:
            galaxy_servers = [settings.PUBLIC_GALAXY_SERVER]  # static setting
        else:
            galaxy_configured = True
            galaxy_servers = []
        # Set up fallback Galaxy servers, if configured
        if settings.FALLBACK_GALAXY_SERVERS:
            galaxy_configured = True
            galaxy_servers = settings.FALLBACK_GALAXY_SERVERS + galaxy_servers
        # Set up the primary Galaxy server, if configured
        if settings.PRIMARY_GALAXY_URL:
            galaxy_configured = True
            galaxy_servers = [{'id': 'primary_galaxy'}] + galaxy_servers
            for key in GALAXY_SERVER_FIELDS:
                value = getattr(settings, 'PRIMARY_GALAXY_{}'.format(key.upper()))
                if value:
                    galaxy_servers[0][key] = value
        if galaxy_configured:
            for server in galaxy_servers:
                for key in GALAXY_SERVER_FIELDS:
                    if not server.get(key):
                        continue
                    env_key = ('ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key)).upper()
                    env[env_key] = server[key]
            if galaxy_servers:
                # now set the precedence of galaxy servers
                env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])

        # build out env vars for Galaxy credentials (in order)
        galaxy_server_list = []
        if project_update.project.organization:
            for i, cred in enumerate(
                project_update.project.organization.galaxy_credentials.all()
            ):
                env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
                auth_url = cred.get_input('auth_url', default=None)
                token = cred.get_input('token', default=None)
                if token:
                    env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
                if auth_url:
                    env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
                galaxy_server_list.append(f'server{i}')

            if galaxy_server_list:
                env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)

        return env

    def _build_scm_url_extra_vars(self, project_update):
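The settings-driven Galaxy configuration is replaced by the organization's ordered credential list, mapped onto ansible-galaxy's `ANSIBLE_GALAXY_SERVER_*` environment variables. A standalone sketch of that mapping (the credential dicts below are made up):

```python
creds = [
    {'url': 'https://hub.example.com/api/galaxy/', 'token': 's3cret'},  # hypothetical
    {'url': 'https://galaxy.ansible.com/'},
]

env, server_list = {}, []
for i, cred in enumerate(creds):
    env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred['url']
    if cred.get('token'):
        env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = cred['token']
    server_list.append(f'server{i}')

# ansible-galaxy consults servers in this order
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(server_list)
print(env['ANSIBLE_GALAXY_SERVER_LIST'])  # -> server0,server1
```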
@@ -2121,6 +2142,19 @@ class RunProjectUpdate(BaseTask):
            raise RuntimeError('Could not determine a revision to run from project.')
        elif not scm_branch:
            scm_branch = {'hg': 'tip'}.get(project_update.scm_type, 'HEAD')

        galaxy_creds_are_defined = (
            project_update.project.organization and
            project_update.project.organization.galaxy_credentials.exists()
        )
        if not galaxy_creds_are_defined and (
            settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED
        ):
            logger.debug(
                'Galaxy role/collection syncing is enabled, but no '
                f'credentials are configured for {project_update.project.organization}.'
            )

        extra_vars.update({
            'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
            'local_path': os.path.basename(project_update.project.local_path),
@@ -2131,8 +2165,8 @@ class RunProjectUpdate(BaseTask):
            'scm_url': scm_url,
            'scm_branch': scm_branch,
            'scm_clean': project_update.scm_clean,
            'roles_enabled': settings.AWX_ROLES_ENABLED,
            'collections_enabled': settings.AWX_COLLECTIONS_ENABLED,
            'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
            'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
        })
        # apply custom refspec from user for PR refs and the like
        if project_update.scm_refspec:

@@ -52,11 +52,11 @@ patterns
--------

`mk` functions are single object fixtures. They should create only a single object with the minimum deps.
They should also accept a `persited` flag, if they must be persisted to work, they raise an error if persisted=False
They should also accept a `persisted` flag, if they must be persisted to work, they raise an error if persisted=False

`generate` and `apply` functions are helpers that build up the various parts of a `create` functions objects. These
should be useful for more than one create function to use and should explicitly accept all of the values needed
to execute. These functions should also be robust and have very speciifc error reporting about constraints and/or
to execute. These functions should also be robust and have very specific error reporting about constraints and/or
bad values.

`create` functions compose many of the `mk` and `generate` functions to make different object
@@ -1,6 +1,7 @@
import pytest
import tempfile
import os
import re
import shutil
import csv

@@ -27,7 +28,8 @@ def sqlite_copy_expert(request):

    def write_stdout(self, sql, fd):
        # Would be cool if we instead properly disected the SQL query and verified
        # it that way. But instead, we just take the nieve approach here.
        # it that way. But instead, we just take the naive approach here.
        sql = sql.strip()
        assert sql.startswith("COPY (")
        assert sql.endswith(") TO STDOUT WITH CSV HEADER")

@@ -35,6 +37,10 @@ def sqlite_copy_expert(request):
        sql = sql.replace(") TO STDOUT WITH CSV HEADER", "")
        # sqlite equivalent
        sql = sql.replace("ARRAY_AGG", "GROUP_CONCAT")
        # SQLite doesn't support isoformatted dates, because that would be useful
        sql = sql.replace("+00:00", "")
        i = re.compile(r'(?P<date>\d\d\d\d-\d\d-\d\d)T')
        sql = i.sub(r'\g<date> ', sql)

        # Remove JSON style queries
        # TODO: could replace JSON style queries with sqlite kind of equivalents
@@ -86,7 +92,7 @@ def test_copy_tables_unified_job_query(
    job_name = job_template.create_unified_job().name

    with tempfile.TemporaryDirectory() as tmpdir:
        collectors.copy_tables(time_start, tmpdir, subset="unified_jobs")
        collectors.unified_jobs_table(time_start, tmpdir, until = now() + timedelta(seconds=1))
        with open(os.path.join(tmpdir, "unified_jobs_table.csv")) as f:
            lines = "".join([line for line in f])

@@ -134,7 +140,7 @@ def test_copy_tables_workflow_job_node_query(sqlite_copy_expert, workflow_job):
    time_start = now() - timedelta(hours=9)

    with tempfile.TemporaryDirectory() as tmpdir:
        collectors.copy_tables(time_start, tmpdir, subset="workflow_job_node_query")
        collectors.workflow_job_node_table(time_start, tmpdir, until = now() + timedelta(seconds=1))
        with open(os.path.join(tmpdir, "workflow_job_node_table.csv")) as f:
            reader = csv.reader(f)
            # Pop the headers

@@ -10,17 +10,17 @@ from awx.main.analytics import gather, register


@register('example', '1.0')
def example(since):
def example(since, **kwargs):
    return {'awx': 123}


@register('bad_json', '1.0')
def bad_json(since):
def bad_json(since, **kwargs):
    return set()


@register('throws_error', '1.0')
def throws_error(since):
def throws_error(since, **kwargs):
    raise ValueError()


@@ -39,9 +39,9 @@ def mock_valid_license():
def test_gather(mock_valid_license):
    settings.INSIGHTS_TRACKING_STATE = True

    tgz = gather(module=importlib.import_module(__name__))
    tgzfiles = gather(module=importlib.import_module(__name__))
    files = {}
    with tarfile.open(tgz, "r:gz") as archive:
    with tarfile.open(tgzfiles[0], "r:gz") as archive:
        for member in archive.getmembers():
            files[member.name] = archive.extractfile(member)

@@ -53,7 +53,8 @@ def test_gather(mock_valid_license):
    assert './bad_json.json' not in files.keys()
    assert './throws_error.json' not in files.keys()
    try:
        os.remove(tgz)
        for tgz in tgzfiles:
            os.remove(tgz)
    except Exception:
        pass
@@ -220,7 +220,7 @@ def test_create_valid_kind(kind, get, post, admin):


@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['ssh', 'vault', 'scm', 'insights', 'kubernetes'])
@pytest.mark.parametrize('kind', ['ssh', 'vault', 'scm', 'insights', 'kubernetes', 'galaxy'])
def test_create_invalid_kind(kind, get, post, admin):
    response = post(reverse('api:credential_type_list'), {
        'kind': kind,

@@ -359,6 +359,71 @@ def test_job_launch_fails_with_missing_vault_password(machine_credential, vault_
    assert response.data['passwords_needed_to_start'] == ['vault_password']


@pytest.mark.django_db
def test_job_launch_with_added_cred_and_vault_password(credential, machine_credential, vault_credential,
                                                       deploy_jobtemplate, post, admin):
    # see: https://github.com/ansible/awx/issues/8202
    vault_credential.inputs['vault_password'] = 'ASK'
    vault_credential.save()
    payload = {
        'credentials': [vault_credential.id, machine_credential.id],
        'credential_passwords': {'vault_password': 'vault-me'},
    }

    deploy_jobtemplate.ask_credential_on_launch = True
    deploy_jobtemplate.credentials.remove(credential)
    deploy_jobtemplate.credentials.add(vault_credential)
    deploy_jobtemplate.save()

    with mock.patch.object(Job, 'signal_start') as signal_start:
        post(
            reverse('api:job_template_launch', kwargs={'pk': deploy_jobtemplate.pk}),
            payload,
            admin,
            expect=201,
        )
        signal_start.assert_called_with(**{
            'vault_password': 'vault-me'
        })


@pytest.mark.django_db
def test_job_launch_with_multiple_launch_time_passwords(credential, machine_credential, vault_credential,
                                                        deploy_jobtemplate, post, admin):
    # see: https://github.com/ansible/awx/issues/8202
    deploy_jobtemplate.ask_credential_on_launch = True
    deploy_jobtemplate.credentials.remove(credential)
    deploy_jobtemplate.credentials.add(machine_credential)
    deploy_jobtemplate.credentials.add(vault_credential)
    deploy_jobtemplate.save()

    second_machine_credential = Credential(
        name='SSH #2',
        credential_type=machine_credential.credential_type,
        inputs={'password': 'ASK'}
    )
    second_machine_credential.save()

    vault_credential.inputs['vault_password'] = 'ASK'
    vault_credential.save()
    payload = {
        'credentials': [vault_credential.id, second_machine_credential.id],
        'credential_passwords': {'ssh_password': 'ssh-me', 'vault_password': 'vault-me'},
    }

    with mock.patch.object(Job, 'signal_start') as signal_start:
        post(
            reverse('api:job_template_launch', kwargs={'pk': deploy_jobtemplate.pk}),
            payload,
            admin,
            expect=201,
        )
        signal_start.assert_called_with(**{
            'ssh_password': 'ssh-me',
            'vault_password': 'vault-me',
        })


@pytest.mark.django_db
@pytest.mark.parametrize('launch_kwargs', [
    {'vault_password.abc': 'vault-me-1', 'vault_password.xyz': 'vault-me-2'},
@ -9,7 +9,7 @@ from django.conf import settings
|
||||
import pytest
|
||||
|
||||
# AWX
|
||||
from awx.main.models import ProjectUpdate
|
||||
from awx.main.models import ProjectUpdate, CredentialType, Credential
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
|
||||
@@ -288,3 +288,90 @@ def test_organization_delete_with_active_jobs(delete, admin, organization, organ

    assert resp.data['error'] == u"Resource is being used by running jobs."
    assert resp_sorted == expect_sorted


@pytest.mark.django_db
def test_galaxy_credential_association_forbidden(alice, organization, post):
    galaxy = CredentialType.defaults['galaxy_api_token']()
    galaxy.save()

    cred = Credential.objects.create(
        credential_type=galaxy,
        name='Public Galaxy',
        organization=organization,
        inputs={
            'url': 'https://galaxy.ansible.com/'
        }
    )
    url = reverse('api:organization_galaxy_credentials_list', kwargs={'pk': organization.id})
    post(
        url,
        {'associate': True, 'id': cred.pk},
        user=alice,
        expect=403
    )


@pytest.mark.django_db
def test_galaxy_credential_type_enforcement(admin, organization, post):
    ssh = CredentialType.defaults['ssh']()
    ssh.save()

    cred = Credential.objects.create(
        credential_type=ssh,
        name='SSH Credential',
        organization=organization,
    )
    url = reverse('api:organization_galaxy_credentials_list', kwargs={'pk': organization.id})
    resp = post(
        url,
        {'associate': True, 'id': cred.pk},
        user=admin,
        expect=400
    )
    assert resp.data['msg'] == 'Credential must be a Galaxy credential, not Machine.'


@pytest.mark.django_db
def test_galaxy_credential_association(alice, admin, organization, post, get):
    galaxy = CredentialType.defaults['galaxy_api_token']()
    galaxy.save()

    for i in range(5):
        cred = Credential.objects.create(
            credential_type=galaxy,
            name=f'Public Galaxy {i + 1}',
            organization=organization,
            inputs={
                'url': 'https://galaxy.ansible.com/'
            }
        )
        url = reverse('api:organization_galaxy_credentials_list', kwargs={'pk': organization.id})
        post(
            url,
            {'associate': True, 'id': cred.pk},
            user=admin,
            expect=204
        )
    resp = get(url, user=admin)
    assert [cred['name'] for cred in resp.data['results']] == [
        'Public Galaxy 1',
        'Public Galaxy 2',
        'Public Galaxy 3',
        'Public Galaxy 4',
        'Public Galaxy 5',
    ]

    post(
        url,
        {'disassociate': True, 'id': Credential.objects.get(name='Public Galaxy 3').pk},
        user=admin,
        expect=204
    )
    resp = get(url, user=admin)
    assert [cred['name'] for cred in resp.data['results']] == [
        'Public Galaxy 1',
        'Public Galaxy 2',
        'Public Galaxy 4',
        'Public Galaxy 5',
    ]

@@ -123,6 +123,15 @@ class TestJobNotificationMixin(object):
        context = job.context(job_serialization)
        check_structure(TestJobNotificationMixin.CONTEXT_STRUCTURE, context)

    @pytest.mark.django_db
    def test_context_job_metadata_with_unicode(self):
        job = Job.objects.create(name='批量安装项目')
        job_serialization = UnifiedJobSerializer(job).to_representation(job)
        context = job.context(job_serialization)
        assert '批量安装项目' in context['job_metadata']

    def test_context_stub(self):
        """The context stub is a fake context used to validate custom notification messages. Ensure that
        this also has the expected structure. Furthermore, ensure that the stub context contains

@@ -1,7 +1,7 @@
import pytest
from unittest import mock

from awx.main.models import Project
from awx.main.models import Project, Credential, CredentialType
from awx.main.models.organization import Organization

@@ -57,3 +57,31 @@ def test_foreign_key_change_changes_modified_by(project, organization):
def test_project_related_jobs(project):
    update = project.create_unified_job()
    assert update.id in [u.id for u in project._get_related_jobs()]


@pytest.mark.django_db
def test_galaxy_credentials(project):
    org = project.organization
    galaxy = CredentialType.defaults['galaxy_api_token']()
    galaxy.save()
    for i in range(5):
        cred = Credential.objects.create(
            name=f'Ansible Galaxy {i + 1}',
            organization=org,
            credential_type=galaxy,
            inputs={
                'url': 'https://galaxy.ansible.com/'
            }
        )
        cred.save()
        org.galaxy_credentials.add(cred)

    assert [
        cred.name for cred in org.galaxy_credentials.all()
    ] == [
        'Ansible Galaxy 1',
        'Ansible Galaxy 2',
        'Ansible Galaxy 3',
        'Ansible Galaxy 4',
        'Ansible Galaxy 5',
    ]

@@ -1,4 +1,4 @@
from datetime import datetime
from datetime import datetime, timedelta
from contextlib import contextmanager

from django.utils.timezone import now
@@ -161,6 +161,58 @@ class TestComputedFields:
        assert job_template.next_schedule == expected_schedule


@pytest.mark.django_db
@pytest.mark.parametrize('freq, delta', (
    ('MINUTELY', 1),
    ('HOURLY', 1)
))
def test_past_week_rrule(job_template, freq, delta):
    # see: https://github.com/ansible/awx/issues/8071
    recent = (datetime.utcnow() - timedelta(days=3))
    recent = recent.replace(hour=0, minute=0, second=0, microsecond=0)
    recent_dt = recent.strftime('%Y%m%d')
    rrule = f'DTSTART;TZID=America/New_York:{recent_dt}T000000 RRULE:FREQ={freq};INTERVAL={delta};COUNT=5'  # noqa
    sched = Schedule.objects.create(
        name='example schedule',
        rrule=rrule,
        unified_job_template=job_template
    )
    first_event = sched.rrulestr(sched.rrule)[0]
    assert first_event.replace(tzinfo=None) == recent


@pytest.mark.django_db
@pytest.mark.parametrize('freq, delta', (
    ('MINUTELY', 1),
    ('HOURLY', 1)
))
def test_really_old_dtstart(job_template, freq, delta):
    # see: https://github.com/ansible/awx/issues/8071
    # If an event is per-minute/per-hour and was created a *really long*
    # time ago, we should just bump forward to start counting "in the last week"
    rrule = f'DTSTART;TZID=America/New_York:20150101T000000 RRULE:FREQ={freq};INTERVAL={delta}'  # noqa
    sched = Schedule.objects.create(
        name='example schedule',
        rrule=rrule,
        unified_job_template=job_template
    )
    last_week = (datetime.utcnow() - timedelta(days=7)).date()
    first_event = sched.rrulestr(sched.rrule)[0]
    assert last_week == first_event.date()

    # the next few scheduled events should be the next minute/hour incremented
    next_five_events = list(sched.rrulestr(sched.rrule).xafter(now(), count=5))

    assert next_five_events[0] > now()
    last = None
    for event in next_five_events:
        if last:
            assert event == last + (
                timedelta(minutes=1) if freq == 'MINUTELY' else timedelta(hours=1)
            )
        last = event

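A standalone sketch of what the two schedule tests above exercise, using plain python-dateutil rather than AWX's Schedule.rrulestr helper; the fast-forward shown here is an illustrative assumption in the spirit of the bump-forward comment above, not AWX's exact implementation:

from datetime import datetime, timedelta
from dateutil.rrule import MINUTELY, rrule

# A per-minute rule anchored years in the past: finding "the next occurrence"
# must step through every occurrence since DTSTART.
old = rrule(MINUTELY, dtstart=datetime(2015, 1, 1))

# Bumping DTSTART to roughly a week ago (rrule.replace needs dateutil >= 2.6)
# makes the same query cheap.
recent = old.replace(dtstart=datetime.utcnow() - timedelta(days=7))
print(recent.after(datetime.utcnow()))  # next occurrence, computed quickly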

@pytest.mark.django_db
def test_repeats_forever(job_template):
    s = Schedule(

@@ -81,6 +81,7 @@ def test_default_cred_types():
    'azure_rm',
    'cloudforms',
    'conjur',
    'galaxy_api_token',
    'gce',
    'github_token',
    'gitlab_token',

@@ -10,7 +10,7 @@ import pytest

from awx.main.models import Job, WorkflowJob, Instance
from awx.main.dispatch import reaper
from awx.main.dispatch.pool import PoolWorker, WorkerPool, AutoscalePool
from awx.main.dispatch.pool import StatefulPoolWorker, WorkerPool, AutoscalePool
from awx.main.dispatch.publish import task
from awx.main.dispatch.worker import BaseWorker, TaskWorker

@@ -80,7 +80,7 @@ class SlowResultWriter(BaseWorker):
class TestPoolWorker:

    def setup_method(self, test_method):
        self.worker = PoolWorker(1000, self.tick, tuple())
        self.worker = StatefulPoolWorker(1000, self.tick, tuple())

    def tick(self):
        self.worker.finished.put(self.worker.queue.get()['uuid'])

awx/main/tests/functional/test_galaxy_credential_migration.py (new file, 115 lines)
@@ -0,0 +1,115 @@
import importlib

from django.conf import settings
from django.contrib.contenttypes.models import ContentType
import pytest

from awx.main.models import Credential, Organization
from awx.conf.models import Setting
from awx.main.migrations import _galaxy as galaxy


class FakeApps(object):
    def get_model(self, app, model):
        if app == 'contenttypes':
            return ContentType
        return getattr(importlib.import_module(f'awx.{app}.models'), model)


apps = FakeApps()


@pytest.mark.django_db
def test_default_public_galaxy():
    org = Organization.objects.create()
    assert org.galaxy_credentials.count() == 0
    galaxy.migrate_galaxy_settings(apps, None)
    assert org.galaxy_credentials.count() == 1
    creds = org.galaxy_credentials.all()
    assert creds[0].name == 'Ansible Galaxy'
    assert creds[0].inputs['url'] == 'https://galaxy.ansible.com/'


@pytest.mark.django_db
def test_public_galaxy_disabled():
    Setting.objects.create(key='PUBLIC_GALAXY_ENABLED', value=False)
    org = Organization.objects.create()
    assert org.galaxy_credentials.count() == 0
    galaxy.migrate_galaxy_settings(apps, None)
    assert org.galaxy_credentials.count() == 0


@pytest.mark.django_db
def test_rh_automation_hub():
    Setting.objects.create(key='PRIMARY_GALAXY_URL', value='https://cloud.redhat.com/api/automation-hub/')
    Setting.objects.create(key='PRIMARY_GALAXY_TOKEN', value='secret123')
    org = Organization.objects.create()
    assert org.galaxy_credentials.count() == 0
    galaxy.migrate_galaxy_settings(apps, None)
    assert org.galaxy_credentials.count() == 2
    assert org.galaxy_credentials.first().name == 'Ansible Automation Hub (https://cloud.redhat.com/api/automation-hub/)'  # noqa


@pytest.mark.django_db
def test_multiple_galaxies():
    for i in range(5):
        Organization.objects.create(name=f'Org {i}')

    Setting.objects.create(key='PRIMARY_GALAXY_URL', value='https://example.org/')
    Setting.objects.create(key='PRIMARY_GALAXY_AUTH_URL', value='https://auth.example.org/')
    Setting.objects.create(key='PRIMARY_GALAXY_USERNAME', value='user')
    Setting.objects.create(key='PRIMARY_GALAXY_PASSWORD', value='pass')
    Setting.objects.create(key='PRIMARY_GALAXY_TOKEN', value='secret123')

    for org in Organization.objects.all():
        assert org.galaxy_credentials.count() == 0

    galaxy.migrate_galaxy_settings(apps, None)

    for org in Organization.objects.all():
        assert org.galaxy_credentials.count() == 2
        creds = org.galaxy_credentials.all()
        assert creds[0].name == 'Private Galaxy (https://example.org/)'
        assert creds[0].inputs['url'] == 'https://example.org/'
        assert creds[0].inputs['auth_url'] == 'https://auth.example.org/'
        assert creds[0].inputs['token'].startswith('$encrypted$')
        assert creds[0].get_input('token') == 'secret123'

        assert creds[1].name == 'Ansible Galaxy'
        assert creds[1].inputs['url'] == 'https://galaxy.ansible.com/'

    public_galaxy_creds = Credential.objects.filter(name='Ansible Galaxy')
    assert public_galaxy_creds.count() == 1
    assert public_galaxy_creds.first().managed_by_tower is True


@pytest.mark.django_db
def test_fallback_galaxies():
    org = Organization.objects.create()
    assert org.galaxy_credentials.count() == 0
    Setting.objects.create(key='PRIMARY_GALAXY_URL', value='https://example.org/')
    Setting.objects.create(key='PRIMARY_GALAXY_AUTH_URL', value='https://auth.example.org/')
    Setting.objects.create(key='PRIMARY_GALAXY_TOKEN', value='secret123')
    try:
        settings.FALLBACK_GALAXY_SERVERS = [{
            'id': 'abc123',
            'url': 'https://some-other-galaxy.example.org/',
            'auth_url': 'https://some-other-galaxy.sso.example.org/',
            'username': 'user',
            'password': 'pass',
            'token': 'fallback123',
        }]
        galaxy.migrate_galaxy_settings(apps, None)
    finally:
        settings.FALLBACK_GALAXY_SERVERS = []
    assert org.galaxy_credentials.count() == 3
    creds = org.galaxy_credentials.all()
    assert creds[0].name == 'Private Galaxy (https://example.org/)'
    assert creds[0].inputs['url'] == 'https://example.org/'
    assert creds[1].name == 'Ansible Galaxy (https://some-other-galaxy.example.org/)'
    assert creds[1].inputs['url'] == 'https://some-other-galaxy.example.org/'
    assert creds[1].inputs['auth_url'] == 'https://some-other-galaxy.sso.example.org/'
    assert creds[1].inputs['token'].startswith('$encrypted$')
    assert creds[1].get_input('token') == 'fallback123'
    assert creds[2].name == 'Ansible Galaxy'
    assert creds[2].inputs['url'] == 'https://galaxy.ansible.com/'
@@ -1,3 +1,4 @@
import redis
import pytest
from unittest import mock
import json
@@ -25,7 +26,8 @@ def test_orphan_unified_job_creation(instance, inventory):
@mock.patch('awx.main.utils.common.get_mem_capacity', lambda: (8000,62))
def test_job_capacity_and_with_inactive_node():
    i = Instance.objects.create(hostname='test-1')
    i.refresh_capacity()
    with mock.patch.object(redis.client.Redis, 'ping', lambda self: True):
        i.refresh_capacity()
    assert i.capacity == 62
    i.enabled = False
    i.save()
@@ -35,6 +37,19 @@ def test_job_capacity_and_with_inactive_node():
    assert i.capacity == 0


@pytest.mark.django_db
@mock.patch('awx.main.utils.common.get_cpu_capacity', lambda: (2,8))
@mock.patch('awx.main.utils.common.get_mem_capacity', lambda: (8000,62))
def test_job_capacity_with_redis_disabled():
    i = Instance.objects.create(hostname='test-1')

    def _raise(self):
        raise redis.ConnectionError()
    with mock.patch.object(redis.client.Redis, 'ping', _raise):
        i.refresh_capacity()
    assert i.capacity == 0


@pytest.mark.django_db
def test_job_type_name():
    job = Job.objects.create()

@@ -25,6 +25,7 @@ from awx.main.models import (
    Job,
    JobTemplate,
    Notification,
    Organization,
    Project,
    ProjectUpdate,
    UnifiedJob,
@@ -59,6 +60,19 @@ def patch_Job():
    yield


@pytest.fixture
def patch_Organization():
    _credentials = []
    credentials_mock = mock.Mock(**{
        'all': lambda: _credentials,
        'add': _credentials.append,
        'exists': lambda: len(_credentials) > 0,
        'spec_set': ['all', 'add', 'exists'],
    })
    with mock.patch.object(Organization, 'galaxy_credentials', credentials_mock):
        yield

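The mock.Mock(**{...}) construction in the fixture above maps each dict key onto an attribute of the mock. A tiny standalone illustration of the idiom (plain unittest.mock, nothing AWX-specific):

from unittest import mock

_items = []
fake_m2m = mock.Mock(**{
    'all': lambda: _items,              # fake_m2m.all() returns the backing list
    'add': _items.append,               # fake_m2m.add(x) appends to it
    'exists': lambda: len(_items) > 0,  # fake_m2m.exists() reports non-emptiness
})

fake_m2m.add('galaxy-credential')
assert fake_m2m.all() == ['galaxy-credential']
assert fake_m2m.exists()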
@pytest.fixture
def job():
    return Job(
@@ -131,7 +145,6 @@ def test_send_notifications_list(mock_notifications_filter, mock_job_get, mocker
    ('SECRET_KEY', 'SECRET'),
    ('VMWARE_PASSWORD', 'SECRET'),
    ('API_SECRET', 'SECRET'),
    ('ANSIBLE_GALAXY_SERVER_PRIMARY_GALAXY_PASSWORD', 'SECRET'),
    ('ANSIBLE_GALAXY_SERVER_PRIMARY_GALAXY_TOKEN', 'SECRET'),
])
def test_safe_env_filtering(key, value):
@@ -1780,10 +1793,108 @@ class TestJobCredentials(TestJobExecution):
        assert env['FOO'] == 'BAR'


@pytest.mark.usefixtures("patch_Organization")
class TestProjectUpdateGalaxyCredentials(TestJobExecution):

    @pytest.fixture
    def project_update(self):
        org = Organization(pk=1)
        proj = Project(pk=1, organization=org)
        project_update = ProjectUpdate(pk=1, project=proj, scm_type='git')
        project_update.websocket_emit_status = mock.Mock()
        return project_update

    parametrize = {
        'test_galaxy_credentials_ignore_certs': [
            dict(ignore=True),
            dict(ignore=False),
        ],
    }

    def test_galaxy_credentials_ignore_certs(self, private_data_dir, project_update, ignore):
        settings.GALAXY_IGNORE_CERTS = ignore
        task = tasks.RunProjectUpdate()
        env = task.build_env(project_update, private_data_dir)
        if ignore:
            assert env['ANSIBLE_GALAXY_IGNORE'] is True
        else:
            assert 'ANSIBLE_GALAXY_IGNORE' not in env

    def test_galaxy_credentials_empty(self, private_data_dir, project_update):

        class RunProjectUpdate(tasks.RunProjectUpdate):
            __vars__ = {}

            def _write_extra_vars_file(self, private_data_dir, extra_vars, *kw):
                self.__vars__ = extra_vars

        task = RunProjectUpdate()
        env = task.build_env(project_update, private_data_dir)
        task.build_extra_vars_file(project_update, private_data_dir)
        assert task.__vars__['roles_enabled'] is False
        assert task.__vars__['collections_enabled'] is False
        for k in env:
            assert not k.startswith('ANSIBLE_GALAXY_SERVER')

    def test_single_public_galaxy(self, private_data_dir, project_update):
        class RunProjectUpdate(tasks.RunProjectUpdate):
            __vars__ = {}

            def _write_extra_vars_file(self, private_data_dir, extra_vars, *kw):
                self.__vars__ = extra_vars

        credential_type = CredentialType.defaults['galaxy_api_token']()
        public_galaxy = Credential(pk=1, credential_type=credential_type, inputs={
            'url': 'https://galaxy.ansible.com/',
        })
        project_update.project.organization.galaxy_credentials.add(public_galaxy)
        task = RunProjectUpdate()
        env = task.build_env(project_update, private_data_dir)
        task.build_extra_vars_file(project_update, private_data_dir)
        assert task.__vars__['roles_enabled'] is True
        assert task.__vars__['collections_enabled'] is True
        assert sorted([
            (k, v) for k, v in env.items()
            if k.startswith('ANSIBLE_GALAXY')
        ]) == [
            ('ANSIBLE_GALAXY_SERVER_LIST', 'server0'),
            ('ANSIBLE_GALAXY_SERVER_SERVER0_URL', 'https://galaxy.ansible.com/'),
        ]

    def test_multiple_galaxy_endpoints(self, private_data_dir, project_update):
        credential_type = CredentialType.defaults['galaxy_api_token']()
        public_galaxy = Credential(pk=1, credential_type=credential_type, inputs={
            'url': 'https://galaxy.ansible.com/',
        })
        rh = Credential(pk=2, credential_type=credential_type, inputs={
            'url': 'https://cloud.redhat.com/api/automation-hub/',
            'auth_url': 'https://sso.redhat.com/example/openid-connect/token/',
            'token': 'secret123'
        })
        project_update.project.organization.galaxy_credentials.add(public_galaxy)
        project_update.project.organization.galaxy_credentials.add(rh)
        task = tasks.RunProjectUpdate()
        env = task.build_env(project_update, private_data_dir)
        assert sorted([
            (k, v) for k, v in env.items()
            if k.startswith('ANSIBLE_GALAXY')
        ]) == [
            ('ANSIBLE_GALAXY_SERVER_LIST', 'server0,server1'),
            ('ANSIBLE_GALAXY_SERVER_SERVER0_URL', 'https://galaxy.ansible.com/'),
            ('ANSIBLE_GALAXY_SERVER_SERVER1_AUTH_URL', 'https://sso.redhat.com/example/openid-connect/token/'),  # noqa
            ('ANSIBLE_GALAXY_SERVER_SERVER1_TOKEN', 'secret123'),
            ('ANSIBLE_GALAXY_SERVER_SERVER1_URL', 'https://cloud.redhat.com/api/automation-hub/'),
        ]

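The expected environment above follows a positional server0,server1,... naming scheme. As a rough sketch of that mapping (a hypothetical helper written for illustration, not AWX's actual RunProjectUpdate.build_env):

def build_galaxy_env(credentials):
    # credentials: ordered list of dicts with a required 'url' and optional
    # 'auth_url'/'token'/'username'/'password' keys (assumed shape)
    env = {}
    server_ids = []
    for i, cred in enumerate(credentials):
        server_ids.append(f'server{i}')
        prefix = f'ANSIBLE_GALAXY_SERVER_SERVER{i}_'
        env[prefix + 'URL'] = cred['url']
        for key in ('auth_url', 'token', 'username', 'password'):
            if cred.get(key):
                env[prefix + key.upper()] = cred[key]
    env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(server_ids)
    return env


assert build_galaxy_env([{'url': 'https://galaxy.ansible.com/'}]) == {
    'ANSIBLE_GALAXY_SERVER_SERVER0_URL': 'https://galaxy.ansible.com/',
    'ANSIBLE_GALAXY_SERVER_LIST': 'server0',
}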

@pytest.mark.usefixtures("patch_Organization")
class TestProjectUpdateCredentials(TestJobExecution):
    @pytest.fixture
    def project_update(self):
        project_update = ProjectUpdate(pk=1, project=Project(pk=1))
        project_update = ProjectUpdate(
            pk=1,
            project=Project(pk=1, organization=Organization(pk=1)),
        )
        project_update.websocket_emit_status = mock.Mock()
        return project_update

@@ -1,36 +0,0 @@
---
- hosts: all
  vars:
    scan_use_checksum: false
    scan_use_recursive: false
  tasks:

  - name: "Scan packages (Unix/Linux)"
    scan_packages:
      os_family: '{{ ansible_os_family }}'
    when: ansible_os_family != "Windows"
  - name: "Scan services (Unix/Linux)"
    scan_services:
    when: ansible_os_family != "Windows"
  - name: "Scan files (Unix/Linux)"
    scan_files:
      paths: '{{ scan_file_paths }}'
      get_checksum: '{{ scan_use_checksum }}'
      recursive: '{{ scan_use_recursive }}'
    when: scan_file_paths is defined and ansible_os_family != "Windows"
  - name: "Scan Insights for Machine ID (Unix/Linux)"
    scan_insights:
    when: ansible_os_family != "Windows"

  - name: "Scan packages (Windows)"
    win_scan_packages:
    when: ansible_os_family == "Windows"
  - name: "Scan services (Windows)"
    win_scan_services:
    when: ansible_os_family == "Windows"
  - name: "Scan files (Windows)"
    win_scan_files:
      paths: '{{ scan_file_paths }}'
      get_checksum: '{{ scan_use_checksum }}'
      recursive: '{{ scan_use_recursive }}'
    when: scan_file_paths is defined and ansible_os_family == "Windows"
@@ -1,166 +0,0 @@
#!/usr/bin/env python

import os
import stat
from ansible.module_utils.basic import * # noqa

DOCUMENTATION = '''
---
module: scan_files
short_description: Return file state information as fact data for a directory tree
description:
    - Return file state information recursively for a directory tree on the filesystem
version_added: "1.9"
options:
  path:
    description: The path containing files to be analyzed
    required: true
    default: null
  recursive:
    description: scan this directory and all subdirectories
    required: false
    default: no
  get_checksum:
    description: Checksum files that you can access
    required: false
    default: false
requirements: [ ]
author: Matthew Jones
'''

EXAMPLES = '''
# Example fact output:
# host | success >> {
#     "ansible_facts": {
#         "files": [
#             {
#                 "atime": 1427313854.0755742,
#                 "checksum": "cf7566e6149ad9af91e7589e0ea096a08de9c1e5",
#                 "ctime": 1427129299.22948,
#                 "dev": 51713,
#                 "gid": 0,
#                 "inode": 149601,
#                 "isblk": false,
#                 "ischr": false,
#                 "isdir": false,
#                 "isfifo": false,
#                 "isgid": false,
#                 "islnk": false,
#                 "isreg": true,
#                 "issock": false,
#                 "isuid": false,
#                 "mode": "0644",
#                 "mtime": 1427112663.0321455,
#                 "nlink": 1,
#                 "path": "/var/log/dmesg.1.gz",
#                 "rgrp": true,
#                 "roth": true,
#                 "rusr": true,
#                 "size": 28,
#                 "uid": 0,
#                 "wgrp": false,
#                 "woth": false,
#                 "wusr": true,
#                 "xgrp": false,
#                 "xoth": false,
#                 "xusr": false
#             },
#             {
#                 "atime": 1427314385.1155744,
#                 "checksum": "16fac7be61a6e4591a33ef4b729c5c3302307523",
#                 "ctime": 1427384148.5755742,
#                 "dev": 51713,
#                 "gid": 43,
#                 "inode": 149564,
#                 "isblk": false,
#                 "ischr": false,
#                 "isdir": false,
#                 "isfifo": false,
#                 "isgid": false,
#                 "islnk": false,
#                 "isreg": true,
#                 "issock": false,
#                 "isuid": false,
#                 "mode": "0664",
#                 "mtime": 1427384148.5755742,
#                 "nlink": 1,
#                 "path": "/var/log/wtmp",
#                 "rgrp": true,
#                 "roth": true,
#                 "rusr": true,
#                 "size": 48768,
#                 "uid": 0,
#                 "wgrp": true,
#                 "woth": false,
#                 "wusr": true,
#                 "xgrp": false,
#                 "xoth": false,
#                 "xusr": false
#             },
'''


def main():
    module = AnsibleModule( # noqa
        argument_spec = dict(paths=dict(required=True, type='list'),
                             recursive=dict(required=False, default='no', type='bool'),
                             get_checksum=dict(required=False, default='no', type='bool')))
    files = []
    paths = module.params.get('paths')
    for path in paths:
        path = os.path.expanduser(path)
        if not os.path.exists(path) or not os.path.isdir(path):
            module.fail_json(msg = "Given path must exist and be a directory")

        get_checksum = module.params.get('get_checksum')
        should_recurse = module.params.get('recursive')
        if not should_recurse:
            path_list = [os.path.join(path, subpath) for subpath in os.listdir(path)]
        else:
            path_list = [os.path.join(w_path, f) for w_path, w_names, w_file in os.walk(path) for f in w_file]
        for filepath in path_list:
            try:
                st = os.stat(filepath)
            except OSError:
                continue

            mode = st.st_mode
            d = {
                'path' : filepath,
                'mode' : "%04o" % stat.S_IMODE(mode),
                'isdir' : stat.S_ISDIR(mode),
                'ischr' : stat.S_ISCHR(mode),
                'isblk' : stat.S_ISBLK(mode),
                'isreg' : stat.S_ISREG(mode),
                'isfifo' : stat.S_ISFIFO(mode),
                'islnk' : stat.S_ISLNK(mode),
                'issock' : stat.S_ISSOCK(mode),
                'uid' : st.st_uid,
                'gid' : st.st_gid,
                'size' : st.st_size,
                'inode' : st.st_ino,
                'dev' : st.st_dev,
                'nlink' : st.st_nlink,
                'atime' : st.st_atime,
                'mtime' : st.st_mtime,
                'ctime' : st.st_ctime,
                'wusr' : bool(mode & stat.S_IWUSR),
                'rusr' : bool(mode & stat.S_IRUSR),
                'xusr' : bool(mode & stat.S_IXUSR),
                'wgrp' : bool(mode & stat.S_IWGRP),
                'rgrp' : bool(mode & stat.S_IRGRP),
                'xgrp' : bool(mode & stat.S_IXGRP),
                'woth' : bool(mode & stat.S_IWOTH),
                'roth' : bool(mode & stat.S_IROTH),
                'xoth' : bool(mode & stat.S_IXOTH),
                'isuid' : bool(mode & stat.S_ISUID),
                'isgid' : bool(mode & stat.S_ISGID),
            }
            if get_checksum and stat.S_ISREG(mode) and os.access(filepath, os.R_OK):
                d['checksum'] = module.sha1(filepath)
            files.append(d)
    results = dict(ansible_facts=dict(files=files))
    module.exit_json(**results)


main()
@@ -1,66 +0,0 @@
#!/usr/bin/env python

from ansible.module_utils.basic import * # noqa

DOCUMENTATION = '''
---
module: scan_insights
short_description: Return insights id as fact data
description:
    - Inspects the /etc/redhat-access-insights/machine-id file for insights id and returns the found id as fact data
version_added: "2.3"
options:
requirements: [ ]
author: Chris Meyers
'''

EXAMPLES = '''
# Example fact output:
# host | success >> {
#     "ansible_facts": {
#         "insights": {
#             "system_id": "4da7d1f8-14f3-4cdc-acd5-a3465a41f25d"
#         }, ... }
'''


INSIGHTS_SYSTEM_ID_FILE = '/etc/redhat-access-insights/machine-id'


def get_system_id(filename):
    system_id = None
    try:
        f = open(filename, "r")
    except IOError:
        return None
    else:
        try:
            data = f.readline()
            system_id = str(data)
        except (IOError, ValueError):
            pass
        finally:
            f.close()
    if system_id:
        system_id = system_id.strip()
    return system_id


def main():
    module = AnsibleModule( # noqa
        argument_spec = dict()
    )

    system_id = get_system_id(INSIGHTS_SYSTEM_ID_FILE)

    results = {
        'ansible_facts': {
            'insights': {
                'system_id': system_id
            }
        }
    }
    module.exit_json(**results)


main()
@@ -1,111 +0,0 @@
#!/usr/bin/env python

from ansible.module_utils.basic import * # noqa

DOCUMENTATION = '''
---
module: scan_packages
short_description: Return installed packages information as fact data
description:
    - Return information about installed packages as fact data
version_added: "1.9"
options:
requirements: [ ]
author: Matthew Jones
'''

EXAMPLES = '''
# Example fact output:
# host | success >> {
#     "ansible_facts": {
#         "packages": {
#             "libbz2-1.0": [
#                 {
#                     "version": "1.0.6-5",
#                     "source": "apt",
#                     "arch": "amd64",
#                     "name": "libbz2-1.0"
#                 }
#             ],
#             "patch": [
#                 {
#                     "version": "2.7.1-4ubuntu1",
#                     "source": "apt",
#                     "arch": "amd64",
#                     "name": "patch"
#                 }
#             ],
#             "gcc-4.8-base": [
#                 {
#                     "version": "4.8.2-19ubuntu1",
#                     "source": "apt",
#                     "arch": "amd64",
#                     "name": "gcc-4.8-base"
#                 },
#                 {
#                     "version": "4.9.2-19ubuntu1",
#                     "source": "apt",
#                     "arch": "amd64",
#                     "name": "gcc-4.8-base"
#                 }
#             ]
#         }
'''


def rpm_package_list():
    import rpm
    trans_set = rpm.TransactionSet()
    installed_packages = {}
    for package in trans_set.dbMatch():
        package_details = dict(name=package[rpm.RPMTAG_NAME],
                               version=package[rpm.RPMTAG_VERSION],
                               release=package[rpm.RPMTAG_RELEASE],
                               epoch=package[rpm.RPMTAG_EPOCH],
                               arch=package[rpm.RPMTAG_ARCH],
                               source='rpm')
        if package_details['name'] not in installed_packages:
            installed_packages[package_details['name']] = [package_details]
        else:
            installed_packages[package_details['name']].append(package_details)
    return installed_packages


def deb_package_list():
    import apt
    apt_cache = apt.Cache()
    installed_packages = {}
    apt_installed_packages = [pk for pk in apt_cache.keys() if apt_cache[pk].is_installed]
    for package in apt_installed_packages:
        ac_pkg = apt_cache[package].installed
        package_details = dict(name=package,
                               version=ac_pkg.version,
                               arch=ac_pkg.architecture,
                               source='apt')
        if package_details['name'] not in installed_packages:
            installed_packages[package_details['name']] = [package_details]
        else:
            installed_packages[package_details['name']].append(package_details)
    return installed_packages


def main():
    module = AnsibleModule( # noqa
        argument_spec = dict(os_family=dict(required=True))
    )
    ans_os = module.params['os_family']
    if ans_os in ('RedHat', 'Suse', 'openSUSE Leap'):
        packages = rpm_package_list()
    elif ans_os == 'Debian':
        packages = deb_package_list()
    else:
        packages = None

    if packages is not None:
        results = dict(ansible_facts=dict(packages=packages))
    else:
        results = dict(skipped=True, msg="Unsupported Distribution")
    module.exit_json(**results)


main()
@@ -1,190 +0,0 @@
#!/usr/bin/env python

import re
from ansible.module_utils.basic import * # noqa

DOCUMENTATION = '''
---
module: scan_services
short_description: Return service state information as fact data
description:
    - Return service state information as fact data for various service management utilities
version_added: "1.9"
options:
requirements: [ ]
author: Matthew Jones
'''

EXAMPLES = '''
- monit: scan_services
# Example fact output:
# host | success >> {
#     "ansible_facts": {
#         "services": {
#             "network": {
#                 "source": "sysv",
#                 "state": "running",
#                 "name": "network"
#             },
#             "arp-ethers.service": {
#                 "source": "systemd",
#                 "state": "stopped",
#                 "name": "arp-ethers.service"
#             }
#         }
#     }
# }
'''


class BaseService(object):

    def __init__(self, module):
        self.module = module
        self.incomplete_warning = False


class ServiceScanService(BaseService):

    def gather_services(self):
        services = {}
        service_path = self.module.get_bin_path("service")
        if service_path is None:
            return None
        initctl_path = self.module.get_bin_path("initctl")
        chkconfig_path = self.module.get_bin_path("chkconfig")

        # sysvinit
        if service_path is not None and chkconfig_path is None:
            rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
            for line in stdout.split("\n"):
                line_data = line.split()
                if len(line_data) < 4:
                    continue  # Skipping because we expected more data
                service_name = " ".join(line_data[3:])
                if line_data[1] == "+":
                    service_state = "running"
                else:
                    service_state = "stopped"
                services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}

        # Upstart
        if initctl_path is not None and chkconfig_path is None:
            p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
            rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
            real_stdout = stdout.replace("\r","")
            for line in real_stdout.split("\n"):
                m = p.match(line)
                if not m:
                    continue
                service_name = m.group('name')
                service_goal = m.group('goal')
                service_state = m.group('state')
                if m.group('pid'):
                    pid = m.group('pid')
                else:
                    pid = None  # NOQA
                payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
                services[service_name] = payload

        # RH sysvinit
        elif chkconfig_path is not None:
            #print '%s --status-all | grep -E "is (running|stopped)"' % service_path
            p = re.compile(
                r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
                r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
            rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
            # Check for special cases where stdout does not fit pattern
            match_any = False
            for line in stdout.split('\n'):
                if p.match(line):
                    match_any = True
            if not match_any:
                p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
                match_any = False
                for line in stdout.split('\n'):
                    if p_simple.match(line):
                        match_any = True
                if match_any:
                    # Try extra flags " -l --allservices" needed for SLES11
                    rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
                elif '--list' in stderr:
                    # Extra flag needed for RHEL5
                    rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
            for line in stdout.split('\n'):
                m = p.match(line)
                if m:
                    service_name = m.group('service')
                    service_state = 'stopped'
                    if m.group('rl3') == 'on':
                        rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
                        service_state = rc
                        if rc in (0,):
                            service_state = 'running'
                        #elif rc in (1,3):
                        else:
                            if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
                                self.incomplete_warning = True
                                continue
                            else:
                                service_state = 'stopped'
                    service_data = {"name": service_name, "state": service_state, "source": "sysv"}
                    services[service_name] = service_data
        return services


class SystemctlScanService(BaseService):

    def systemd_enabled(self):
        # Check if init is the systemd command, using comm as cmdline could be symlink
        try:
            f = open('/proc/1/comm', 'r')
        except IOError:
            # If comm doesn't exist, old kernel, no systemd
            return False
        for line in f:
            if 'systemd' in line:
                return True
        return False

    def gather_services(self):
        services = {}
        if not self.systemd_enabled():
            return None
        systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
        if systemctl_path is None:
            return None
        rc, stdout, stderr = self.module.run_command("%s list-unit-files --type=service | tail -n +2 | head -n -2" % systemctl_path, use_unsafe_shell=True)
        for line in stdout.split("\n"):
            line_data = line.split()
            if len(line_data) != 2:
                continue
            if line_data[1] == "enabled":
                state_val = "running"
            else:
                state_val = "stopped"
            services[line_data[0]] = {"name": line_data[0], "state": state_val, "source": "systemd"}
        return services


def main():
    module = AnsibleModule(argument_spec = dict()) # noqa
    service_modules = (ServiceScanService, SystemctlScanService)
    all_services = {}
    incomplete_warning = False
    for svc_module in service_modules:
        svcmod = svc_module(module)
        svc = svcmod.gather_services()
        if svc is not None:
            all_services.update(svc)
            if svcmod.incomplete_warning:
                incomplete_warning = True
    if len(all_services) == 0:
        results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
    else:
        results = dict(ansible_facts=dict(services=all_services))
        if incomplete_warning:
            results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
    module.exit_json(**results)


main()
@@ -1,102 +0,0 @@
#!powershell
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# WANT_JSON
# POWERSHELL_COMMON

$params = Parse-Args $args $true;

$paths = Get-Attr $params "paths" $FALSE;
If ($paths -eq $FALSE)
{
    Fail-Json (New-Object psobject) "missing required argument: paths";
}

$get_checksum = Get-Attr $params "get_checksum" $false | ConvertTo-Bool;
$recursive = Get-Attr $params "recursive" $false | ConvertTo-Bool;

function Date_To_Timestamp($start_date, $end_date)
{
    If($start_date -and $end_date)
    {
        Write-Output (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds
    }
}

$files = @()

ForEach ($path In $paths)
{
    "Path: " + $path
    ForEach ($file in Get-ChildItem $path -Recurse: $recursive)
    {
        "File: " + $file.FullName
        $fileinfo = New-Object psobject
        Set-Attr $fileinfo "path" $file.FullName
        $info = Get-Item $file.FullName;
        $iscontainer = Get-Attr $info "PSIsContainer" $null;
        $length = Get-Attr $info "Length" $null;
        $extension = Get-Attr $info "Extension" $null;
        $attributes = Get-Attr $info "Attributes" "";
        If ($info)
        {
            $accesscontrol = $info.GetAccessControl();
        }
        Else
        {
            $accesscontrol = $null;
        }
        $owner = Get-Attr $accesscontrol "Owner" $null;
        $creationtime = Get-Attr $info "CreationTime" $null;
        $lastaccesstime = Get-Attr $info "LastAccessTime" $null;
        $lastwritetime = Get-Attr $info "LastWriteTime" $null;

        $epoch_date = Get-Date -Date "01/01/1970"
        If ($iscontainer)
        {
            Set-Attr $fileinfo "isdir" $TRUE;
        }
        Else
        {
            Set-Attr $fileinfo "isdir" $FALSE;
            Set-Attr $fileinfo "size" $length;
        }
        Set-Attr $fileinfo "extension" $extension;
        Set-Attr $fileinfo "attributes" $attributes.ToString();
        # Set-Attr $fileinfo "owner" $getaccesscontrol.Owner;
        # Set-Attr $fileinfo "owner" $info.GetAccessControl().Owner;
        Set-Attr $fileinfo "owner" $owner;
        Set-Attr $fileinfo "creationtime" (Date_To_Timestamp $epoch_date $creationtime);
        Set-Attr $fileinfo "lastaccesstime" (Date_To_Timestamp $epoch_date $lastaccesstime);
        Set-Attr $fileinfo "lastwritetime" (Date_To_Timestamp $epoch_date $lastwritetime);

        If (($get_checksum) -and -not $fileinfo.isdir)
        {
            $hash = Get-FileChecksum($file.FullName);
            Set-Attr $fileinfo "checksum" $hash;
        }

        $files += $fileinfo
    }
}

$result = New-Object psobject @{
    ansible_facts = New-Object psobject @{
        files = $files
    }
}

Exit-Json $result;
@@ -1,66 +0,0 @@
#!powershell
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# WANT_JSON
# POWERSHELL_COMMON

$uninstall_native_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall"
$uninstall_wow6432_path = "HKLM:\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall"

if ([System.IntPtr]::Size -eq 4) {

    # This is a 32-bit Windows system, so we only check for 32-bit programs, which will be
    # at the native registry location.

    [PSObject []]$packages = Get-ChildItem -Path $uninstall_native_path |
        Get-ItemProperty |
        Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}},
                                @{Name="version"; Expression={$_."DisplayVersion"}},
                                @{Name="publisher"; Expression={$_."Publisher"}},
                                @{Name="arch"; Expression={ "Win32" }} |
        Where-Object { $_.name }

} else {

    # This is a 64-bit Windows system, so we check for 64-bit programs in the native
    # registry location, and also for 32-bit programs under Wow6432Node.

    [PSObject []]$packages = Get-ChildItem -Path $uninstall_native_path |
        Get-ItemProperty |
        Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}},
                                @{Name="version"; Expression={$_."DisplayVersion"}},
                                @{Name="publisher"; Expression={$_."Publisher"}},
                                @{Name="arch"; Expression={ "Win64" }} |
        Where-Object { $_.name }

    $packages += Get-ChildItem -Path $uninstall_wow6432_path |
        Get-ItemProperty |
        Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}},
                                @{Name="version"; Expression={$_."DisplayVersion"}},
                                @{Name="publisher"; Expression={$_."Publisher"}},
                                @{Name="arch"; Expression={ "Win32" }} |
        Where-Object { $_.name }

}

$result = New-Object psobject @{
    ansible_facts = New-Object psobject @{
        packages = $packages
    }
    changed = $false
}

Exit-Json $result;
@@ -1,30 +0,0 @@
#!powershell
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# WANT_JSON
# POWERSHELL_COMMON

$result = New-Object psobject @{
    ansible_facts = New-Object psobject @{
        services = Get-Service |
            Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}},
                                    @{Name="win_svc_name"; Expression={$_."Name"}},
                                    @{Name="state"; Expression={$_."Status".ToString().ToLower()}}
    }
    changed = $false
}

Exit-Json $result;
@@ -197,12 +197,23 @@ LOCAL_STDOUT_EXPIRE_TIME = 2592000
# events into the database
JOB_EVENT_WORKERS = 4

# The number of seconds (must be an integer) to buffer callback receiver bulk
# writes in memory before flushing via JobEvent.objects.bulk_create()
JOB_EVENT_BUFFER_SECONDS = 1

# The interval at which callback receiver statistics should be
# recorded
JOB_EVENT_STATISTICS_INTERVAL = 5

# The maximum size of the job event worker queue before requests are blocked
JOB_EVENT_MAX_QUEUE_SIZE = 10000

# The number of job events to migrate per-transaction when moving from int -> bigint
JOB_EVENT_MIGRATION_CHUNK_SIZE = 1000000

# The maximum allowed jobs to start on a given task manager cycle
START_TASK_LIMIT = 100

# Disallow sending session cookies over insecure connections
SESSION_COOKIE_SECURE = True

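A minimal sketch of the buffering pattern those settings tune (the class and every name below other than the settings themselves are illustrative assumptions, not AWX's actual callback receiver code):

import time

class EventBuffer:
    def __init__(self, flush_seconds=1, max_queue_size=10000):
        self.flush_seconds = flush_seconds      # cf. JOB_EVENT_BUFFER_SECONDS
        self.max_queue_size = max_queue_size    # cf. JOB_EVENT_MAX_QUEUE_SIZE
        self.events = []
        self.last_flush = time.monotonic()

    def add(self, event, bulk_create):
        # flush early rather than let the in-memory queue grow unbounded
        if len(self.events) >= self.max_queue_size:
            self.flush(bulk_create)
        self.events.append(event)
        if time.monotonic() - self.last_flush >= self.flush_seconds:
            self.flush(bulk_create)

    def flush(self, bulk_create):
        if self.events:
            bulk_create(self.events)  # one DB round-trip, e.g. JobEvent.objects.bulk_create
            self.events = []
        self.last_flush = time.monotonic()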
@@ -477,6 +488,7 @@ SOCIAL_AUTH_SAML_PIPELINE = _SOCIAL_AUTH_PIPELINE_BASE + (
    'awx.sso.pipeline.update_user_orgs',
    'awx.sso.pipeline.update_user_teams',
)
SAML_AUTO_CREATE_OBJECTS = True

SOCIAL_AUTH_LOGIN_URL = '/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/sso/complete/'
@@ -567,28 +579,9 @@ AWX_COLLECTIONS_ENABLED = True
# Follow symlinks when scanning for playbooks
AWX_SHOW_PLAYBOOK_LINKS = False

# Settings for primary galaxy server, should be set in the UI
PRIMARY_GALAXY_URL = ''
PRIMARY_GALAXY_USERNAME = ''
PRIMARY_GALAXY_TOKEN = ''
PRIMARY_GALAXY_PASSWORD = ''
PRIMARY_GALAXY_AUTH_URL = ''

# Settings for the public galaxy server(s).
PUBLIC_GALAXY_ENABLED = True
PUBLIC_GALAXY_SERVER = {
    'id': 'galaxy',
    'url': 'https://galaxy.ansible.com'
}

# Applies to any galaxy server
GALAXY_IGNORE_CERTS = False

# List of dicts of fallback (additional) Galaxy servers. If configured, these
# will be higher precedence than public Galaxy, but lower than primary Galaxy.
# Available options: 'id', 'url', 'username', 'password', 'token', 'auth_url'
FALLBACK_GALAXY_SERVERS = []

# Enable bubblewrap support for running jobs (playbook runs only).
# Note: This setting may be overridden by database settings.
AWX_PROOT_ENABLED = True
@@ -789,7 +782,7 @@ ASGI_APPLICATION = "awx.main.routing.application"

CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "awx.main.consumers.ExpiringRedisChannelLayer",
        "BACKEND": "channels_redis.core.RedisChannelLayer",
        "CONFIG": {
            "hosts": [BROKER_URL],
            "capacity": 10000,
@@ -1002,6 +995,11 @@ LOGGING = {
            'handlers': ['task_system', 'external_logger'],
            'propagate': False
        },
        'awx.main.analytics': {
            'handlers': ['task_system', 'external_logger'],
            'level': 'INFO',
            'propagate': False
        },
        'awx.main.scheduler': {
            'handlers': ['task_system', 'external_logger'],
            'propagate': False

@@ -515,6 +515,7 @@ register(
    help_text=_('TACACS+ session timeout value in seconds, 0 disables timeout.'),
    category=_('TACACS+'),
    category_slug='tacacsplus',
    unit=_('seconds'),
)

register(
@@ -575,7 +576,7 @@ register(
    'SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS',
    field_class=fields.StringListField,
    default=[],
    label=_('Google OAuth2 Whitelisted Domains'),
    label=_('Google OAuth2 Allowed Domains'),
    help_text=_('Update this setting to restrict the domains who are allowed to '
                'login using Google OAuth2.'),
    category=_('Google OAuth2'),
@@ -919,6 +920,17 @@ def get_saml_entity_id():
    return settings.TOWER_URL_BASE


register(
    'SAML_AUTO_CREATE_OBJECTS',
    field_class=fields.BooleanField,
    default=True,
    label=_('Automatically Create Organizations and Teams on SAML Login'),
    help_text=_('When enabled (the default), mapped Organizations and Teams '
                'will be created automatically on successful SAML login.'),
    category=_('SAML'),
    category_slug='saml',
)

register(
    'SOCIAL_AUTH_SAML_CALLBACK_URL',
    field_class=fields.CharField,

@@ -10,6 +10,7 @@ import logging
from social_core.exceptions import AuthException

# Django
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.db.models import Q

@@ -80,11 +81,18 @@ def _update_m2m_from_expression(user, related, expr, remove=True):

def _update_org_from_attr(user, related, attr, remove, remove_admins, remove_auditors):
    from awx.main.models import Organization
    from django.conf import settings

    org_ids = []

    for org_name in attr:
        org = Organization.objects.get_or_create(name=org_name)[0]
        try:
            if settings.SAML_AUTO_CREATE_OBJECTS:
                org = Organization.objects.get_or_create(name=org_name)[0]
            else:
                org = Organization.objects.get(name=org_name)
        except ObjectDoesNotExist:
            continue

        org_ids.append(org.id)
        getattr(org, related).members.add(user)
@@ -199,11 +207,24 @@ def update_user_teams_by_saml_attr(backend, details, user=None, *args, **kwargs)

        if organization_alias:
            organization_name = organization_alias
        org = Organization.objects.get_or_create(name=organization_name)[0]

        try:
            if settings.SAML_AUTO_CREATE_OBJECTS:
                org = Organization.objects.get_or_create(name=organization_name)[0]
            else:
                org = Organization.objects.get(name=organization_name)
        except ObjectDoesNotExist:
            continue

        if team_alias:
            team_name = team_alias
        team = Team.objects.get_or_create(name=team_name, organization=org)[0]
        try:
            if settings.SAML_AUTO_CREATE_OBJECTS:
                team = Team.objects.get_or_create(name=team_name, organization=org)[0]
            else:
                team = Team.objects.get(name=team_name, organization=org)
        except ObjectDoesNotExist:
            continue

        team_ids.append(team.id)
        team.member_role.members.add(user)

@@ -174,8 +174,15 @@ class TestSAMLAttr():
        return (o1, o2, o3)

    @pytest.fixture
    def mock_settings(self):
    def mock_settings(self, request):
        fixture_args = request.node.get_closest_marker('fixture_args')
        if fixture_args and 'autocreate' in fixture_args.kwargs:
            autocreate = fixture_args.kwargs['autocreate']
        else:
            autocreate = True

        class MockSettings():
            SAML_AUTO_CREATE_OBJECTS = autocreate
            SOCIAL_AUTH_SAML_ORGANIZATION_ATTR = {
                'saml_attr': 'memberOf',
                'saml_admin_attr': 'admins',
@@ -304,3 +311,41 @@ class TestSAMLAttr():
        assert Team.objects.get(
            name='Yellow_Alias', organization__name='Default4_Alias').member_role.members.count() == 1

    @pytest.mark.fixture_args(autocreate=False)
    def test_autocreate_disabled(self, users, kwargs, mock_settings):
        kwargs['response']['attributes']['memberOf'] = ['Default1', 'Default2', 'Default3']
        kwargs['response']['attributes']['groups'] = ['Blue', 'Red', 'Green']
        with mock.patch('django.conf.settings', mock_settings):
            for u in users:
                update_user_orgs_by_saml_attr(None, None, u, **kwargs)
                update_user_teams_by_saml_attr(None, None, u, **kwargs)
            assert Organization.objects.count() == 0
            assert Team.objects.count() == 0

            # precreate everything
            o1 = Organization.objects.create(name='Default1')
            o2 = Organization.objects.create(name='Default2')
            o3 = Organization.objects.create(name='Default3')
            Team.objects.create(name='Blue', organization_id=o1.id)
            Team.objects.create(name='Blue', organization_id=o2.id)
            Team.objects.create(name='Blue', organization_id=o3.id)
            Team.objects.create(name='Red', organization_id=o1.id)
            Team.objects.create(name='Green', organization_id=o1.id)
            Team.objects.create(name='Green', organization_id=o3.id)

            for u in users:
                update_user_orgs_by_saml_attr(None, None, u, **kwargs)
                update_user_teams_by_saml_attr(None, None, u, **kwargs)

            assert o1.member_role.members.count() == 3
            assert o2.member_role.members.count() == 3
            assert o3.member_role.members.count() == 3

            assert Team.objects.get(name='Blue', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Blue', organization__name='Default2').member_role.members.count() == 3
            assert Team.objects.get(name='Blue', organization__name='Default3').member_role.members.count() == 3

            assert Team.objects.get(name='Red', organization__name='Default1').member_role.members.count() == 3

            assert Team.objects.get(name='Green', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Green', organization__name='Default3').member_role.members.count() == 3

@@ -119,6 +119,10 @@ function OutputStream ($q) {
        this.counters.ready = ready;
        this.counters.used = used;
        this.counters.missing = missing;

        if (!window.liveUpdates) {
            this.counters.ready = event.counter;
        }
    };

    this.bufferEmpty = threshold => {
@@ -141,6 +145,10 @@ function OutputStream ($q) {
        const { total } = this.counters;
        const readyCount = this.getReadyCount();

        if (!window.liveUpdates) {
            return true;
        }

        if (readyCount <= 0) {
            return false;
        }

@@ -23,12 +23,12 @@
                icon="external"
                tag="state._tagValue"
                remove-tag="state._onRemoveTag(state)"
            />
            ></at-tag>
            <at-tag
                ng-show="state._disabled && state._tagValue"
                icon="external"
                tag="state._tagValue"
            />
            ></at-tag>
        </div>
    </span>
    <input ng-if="!state.asTag" type="{{ type }}"

@@ -65,6 +65,9 @@ export default ['i18n', function(i18n) {
        PROJECT_UPDATE_VVV: {
            type: 'toggleSwitch',
        },
        GALAXY_IGNORE_CERTS: {
            type: 'toggleSwitch',
        },
        AWX_ROLES_ENABLED: {
            type: 'toggleSwitch',
        },
@@ -74,31 +77,6 @@ export default ['i18n', function(i18n) {
        AWX_SHOW_PLAYBOOK_LINKS: {
            type: 'toggleSwitch',
        },
        PRIMARY_GALAXY_URL: {
            type: 'text',
            reset: 'PRIMARY_GALAXY_URL',
        },
        PRIMARY_GALAXY_USERNAME: {
            type: 'text',
            reset: 'PRIMARY_GALAXY_USERNAME',
        },
        PRIMARY_GALAXY_PASSWORD: {
            type: 'sensitive',
            hasShowInputButton: true,
            reset: 'PRIMARY_GALAXY_PASSWORD',
        },
        PRIMARY_GALAXY_TOKEN: {
            type: 'sensitive',
            hasShowInputButton: true,
            reset: 'PRIMARY_GALAXY_TOKEN',
        },
        PRIMARY_GALAXY_AUTH_URL: {
            type: 'text',
            reset: 'PRIMARY_GALAXY_AUTH_URL',
        },
        PUBLIC_GALAXY_ENABLED: {
            type: 'toggleSwitch',
        },
        AWX_TASK_ENV: {
            type: 'textarea',
            reset: 'AWX_TASK_ENV',

@@ -11,8 +11,8 @@
* Controller for handling third party supported login options.
*/

export default ['$window', '$scope', 'thirdPartySignOnService',
function ($window, $scope, thirdPartySignOnService) {
export default ['$window', '$scope', 'thirdPartySignOnService', '$cookies', 'Authorization',
function ($window, $scope, thirdPartySignOnService, $cookies, Authorization) {

thirdPartySignOnService(
{scope: $scope, url: "api/v2/auth/"}).then(function (data) {
@@ -29,8 +29,16 @@ export default ['$window', '$scope', 'thirdPartySignOnService',
});

$scope.goTo = function(link) {
// this is used because $location only lets you navigate inside
// the "/#/" path, and these are API URLs.
$window.location.href = link;
// clear out any prior auth state that might exist (e.g. from other
// tabs, etc.) before redirecting to the auth service
Authorization.logout().then(() => {
angular.forEach($cookies.getAll(), (val, name) => {
$cookies.remove(name);
});
$window.location.reload();
// this is used because $location only lets you navigate inside
// the "/#/" path, and these are API URLs.
$window.location.href = link;
});
};
}];
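The sign-on change above wipes stale session state before leaving the page. A minimal framework-free sketch of the same idea (goToAuthProvider and link are assumed names, not part of the commit):

function goToAuthProvider (link) {
    // expire every cookie so a half-authenticated session from another tab
    // cannot leak into the new sign-on attempt
    document.cookie.split(';').forEach(cookie => {
        const name = cookie.split('=')[0].trim();
        document.cookie = name + '=; expires=Thu, 01 Jan 1970 00:00:00 GMT; path=/';
    });
    // $location only navigates inside "/#/", so a plain href assignment is used
    window.location.href = link;
}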
@@ -4,11 +4,12 @@
* All Rights Reserved
*************************************************/

export default ['$scope', '$rootScope', '$location', '$stateParams',
'OrganizationForm', 'GenerateForm', 'Rest', 'Alert',
'ProcessErrors', 'GetBasePath', 'Wait', 'CreateSelect2', '$state', 'InstanceGroupsService', 'ConfigData',
function($scope, $rootScope, $location, $stateParams, OrganizationForm,
GenerateForm, Rest, Alert, ProcessErrors, GetBasePath, Wait, CreateSelect2, $state, InstanceGroupsService, ConfigData) {
export default ['$scope', '$rootScope', '$location', '$stateParams', 'OrganizationForm',
'GenerateForm', 'Rest', 'Alert', 'ProcessErrors', 'GetBasePath', 'Wait', 'CreateSelect2',
'$state', 'InstanceGroupsService', 'ConfigData', 'MultiCredentialService', 'defaultGalaxyCredential',
function($scope, $rootScope, $location, $stateParams, OrganizationForm,
GenerateForm, Rest, Alert, ProcessErrors, GetBasePath, Wait, CreateSelect2,
$state, InstanceGroupsService, ConfigData, MultiCredentialService, defaultGalaxyCredential) {

Rest.setUrl(GetBasePath('organizations'));
Rest.options()
@@ -37,6 +38,8 @@ export default ['$scope', '$rootScope', '$location', '$stateParams',

// apply form definition's default field values
GenerateForm.applyDefaults(form, $scope);

$scope.credentials = defaultGalaxyCredential || [];
}

// Save
@@ -57,18 +60,32 @@ export default ['$scope', '$rootScope', '$location', '$stateParams',
const organization_id = data.id,
instance_group_url = data.related.instance_groups;

InstanceGroupsService.addInstanceGroups(instance_group_url, $scope.instance_groups)
MultiCredentialService
.saveRelatedSequentially({
related: {
credentials: data.related.galaxy_credentials
}
}, $scope.credentials)
.then(() => {
Wait('stop');
$rootScope.$broadcast("EditIndicatorChange", "organizations", organization_id);
$state.go('organizations.edit', {organization_id: organization_id}, {reload: true});
})
.catch(({data, status}) => {
InstanceGroupsService.addInstanceGroups(instance_group_url, $scope.instance_groups)
.then(() => {
Wait('stop');
$rootScope.$broadcast("EditIndicatorChange", "organizations", organization_id);
$state.go('organizations.edit', {organization_id: organization_id}, {reload: true});
})
.catch(({data, status}) => {
ProcessErrors($scope, data, status, form, {
hdr: 'Error!',
msg: 'Failed to save instance groups. POST returned status: ' + status
});
});
}).catch(({data, status}) => {
ProcessErrors($scope, data, status, form, {
hdr: 'Error!',
msg: 'Failed to save instance groups. POST returned status: ' + status
msg: 'Failed to save Galaxy credentials. POST returned status: ' + status
});
});

})
.catch(({data, status}) => {
let explanation = _.has(data, "name") ? data.name[0] : "";
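The save path above persists two related collections one after the other, Galaxy credentials first and instance groups second, each with its own error message. The shape of that chain, reduced to a sketch (saveCredentials and saveInstanceGroups are assumed stand-ins for the service calls):

function saveRelatedResources (saveCredentials, saveInstanceGroups, onDone, onError) {
    return saveCredentials()
        .then(() => saveInstanceGroups()
            .then(onDone)
            // instance-group failures are reported separately...
            .catch(status => onError('Failed to save instance groups. POST returned status: ' + status)))
        // ...from credential failures, exactly as in the controller above
        .catch(status => onError('Failed to save Galaxy credentials. POST returned status: ' + status));
}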
@@ -6,10 +6,12 @@

export default ['$scope', '$location', '$stateParams', 'isOrgAdmin', 'isNotificationAdmin',
'OrganizationForm', 'Rest', 'ProcessErrors', 'Prompt', 'i18n', 'isOrgAuditor',
'GetBasePath', 'Wait', '$state', 'ToggleNotification', 'CreateSelect2', 'InstanceGroupsService', 'InstanceGroupsData', 'ConfigData',
'GetBasePath', 'Wait', '$state', 'ToggleNotification', 'CreateSelect2', 'InstanceGroupsService',
'InstanceGroupsData', 'ConfigData', 'GalaxyCredentialsData', 'MultiCredentialService',
function($scope, $location, $stateParams, isOrgAdmin, isNotificationAdmin,
OrganizationForm, Rest, ProcessErrors, Prompt, i18n, isOrgAuditor,
GetBasePath, Wait, $state, ToggleNotification, CreateSelect2, InstanceGroupsService, InstanceGroupsData, ConfigData) {
GetBasePath, Wait, $state, ToggleNotification, CreateSelect2, InstanceGroupsService,
InstanceGroupsData, ConfigData, GalaxyCredentialsData, MultiCredentialService) {

let form = OrganizationForm(),
defaultUrl = GetBasePath('organizations'),
@@ -29,6 +31,7 @@ export default ['$scope', '$location', '$stateParams', 'isOrgAdmin', 'isNotifica
});

$scope.instance_groups = InstanceGroupsData;
$scope.credentials = GalaxyCredentialsData;
const virtualEnvs = ConfigData.custom_virtualenvs || [];
$scope.custom_virtualenvs_visible = virtualEnvs.length > 1;
$scope.custom_virtualenvs_options = virtualEnvs.filter(
@@ -100,7 +103,14 @@ export default ['$scope', '$location', '$stateParams', 'isOrgAdmin', 'isNotifica
Rest.setUrl(defaultUrl + id + '/');
Rest.put(params)
.then(() => {
InstanceGroupsService.editInstanceGroups(instance_group_url, $scope.instance_groups)
MultiCredentialService
.saveRelatedSequentially({
related: {
credentials: $scope.organization_obj.related.galaxy_credentials
}
}, $scope.credentials)
.then(() => {
InstanceGroupsService.editInstanceGroups(instance_group_url, $scope.instance_groups)
.then(() => {
Wait('stop');
$state.go($state.current, {}, { reload: true });
@@ -111,6 +121,12 @@ export default ['$scope', '$location', '$stateParams', 'isOrgAdmin', 'isNotifica
msg: 'Failed to update instance groups. POST returned status: ' + status
});
});
}).catch(({data, status}) => {
ProcessErrors($scope, data, status, form, {
hdr: 'Error!',
msg: 'Failed to save Galaxy credentials. POST returned status: ' + status
});
});
$scope.organization_name = $scope.name;
main = params;
})
@@ -0,0 +1,123 @@
export default ['templateUrl', '$window', function(templateUrl, $window) {
    return {
        restrict: 'E',
        scope: {
            galaxyCredentials: '='
        },
        templateUrl: templateUrl('organizations/galaxy-credentials-multiselect/galaxy-credentials-modal/galaxy-credentials-modal'),

        link: function(scope, element) {

            $('#galaxy-credentials-modal').on('hidden.bs.modal', function () {
                $('#galaxy-credentials-modal').off('hidden.bs.modal');
                $(element).remove();
            });

            scope.showModal = function() {
                $('#galaxy-credentials-modal').modal('show');
            };

            scope.destroyModal = function() {
                $('#galaxy-credentials-modal').modal('hide');
            };
        },

        controller: ['$scope', '$compile', 'QuerySet', 'GetBasePath', 'generateList', 'CredentialList', function($scope, $compile, qs, GetBasePath, GenerateList, CredentialList) {

            function init() {

                $scope.credential_queryset = {
                    order_by: 'name',
                    page_size: 5,
                    credential_type__kind: 'galaxy'
                };

                $scope.credential_default_params = {
                    order_by: 'name',
                    page_size: 5,
                    credential_type__kind: 'galaxy'
                };

                qs.search(GetBasePath('credentials'), $scope.credential_queryset)
                    .then(res => {
                        $scope.credential_dataset = res.data;
                        $scope.credentials = $scope.credential_dataset.results;

                        let credentialList = _.cloneDeep(CredentialList);

                        credentialList.listTitle = false;
                        credentialList.well = false;
                        credentialList.multiSelect = true;
                        credentialList.multiSelectPreview = {
                            selectedRows: 'credTags',
                            availableRows: 'credentials'
                        };
                        credentialList.fields.name.ngClick = "linkoutCredential(credential)";
                        credentialList.fields.name.columnClass = 'col-md-11 col-sm-11 col-xs-11';
                        delete credentialList.fields.consumed_capacity;
                        delete credentialList.fields.jobs_running;

                        let html = `${GenerateList.build({
                            list: credentialList,
                            input_type: 'galaxy-credentials-modal-body',
                            hideViewPerPage: true,
                            mode: 'lookup'
                        })}`;

                        $scope.list = credentialList;
                        $('#galaxy-credentials-modal-body').append($compile(html)($scope));

                        if ($scope.galaxyCredentials) {
                            $scope.galaxyCredentials = $scope.galaxyCredentials.map( (item) => {
                                item.isSelected = true;
                                if (!$scope.credTags) {
                                    $scope.credTags = [];
                                }
                                $scope.credTags.push(item);
                                return item;
                            });
                        }

                        $scope.showModal();
                    });

                $scope.$watch('credentials', function(){
                    angular.forEach($scope.credentials, function(credentialRow) {
                        angular.forEach($scope.credTags, function(selectedCredential){
                            if(selectedCredential.id === credentialRow.id) {
                                credentialRow.isSelected = true;
                            }
                        });
                    });
                });
            }

            init();

            $scope.$on("selectedOrDeselected", function(e, value) {
                let item = value.value;
                if (value.isSelected) {
                    if(!$scope.credTags) {
                        $scope.credTags = [];
                    }
                    $scope.credTags.push(item);
                } else {
                    _.remove($scope.credTags, { id: item.id });
                }
            });

            $scope.linkoutCredential = function(credential) {
                $window.open('/#/credentials/' + credential.id, '_blank');
            };

            $scope.cancelForm = function() {
                $scope.destroyModal();
            };

            $scope.saveForm = function() {
                $scope.galaxyCredentials = $scope.credTags;
                $scope.destroyModal();
            };
        }]
    };
}];
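The "selectedOrDeselected" handler in the controller above keeps credTags in sync with the checkbox list. The same bookkeeping as a plain function (a sketch; names mirror the directive):

function toggleTag (credTags, item, isSelected) {
    credTags = credTags || [];
    if (isSelected) {
        credTags.push(item); // row was checked: remember it in selection order
    } else {
        // row was unchecked: drop it by id (vanilla equivalent of _.remove)
        const index = credTags.findIndex(tag => tag.id === item.id);
        if (index !== -1) {
            credTags.splice(index, 1);
        }
    }
    return credTags;
}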
@@ -0,0 +1,22 @@
<div id="galaxy-credentials-modal" class="Lookup modal fade">
    <div class="modal-dialog">
        <div class="modal-content">
            <div class="modal-header Form-header">
                <div class="Form-title Form-title--uppercase" translate>Select Galaxy Credentials</div>
                <div class="Form-header--fields"></div>
                <div class="Form-exitHolder">
                    <button aria-label="{{'Close'|translate}}" type="button" class="Form-exit" ng-click="cancelForm()">
                        <i class="fa fa-times-circle"></i>
                    </button>
                </div>
            </div>
            <div class="modal-body">
                <div id="galaxy-credentials-modal-body"> {{ credential }} </div>
            </div>
            <div class="modal-footer">
                <button type="button" ng-click="cancelForm()" class="btn btn-default" translate>CANCEL</button>
                <button type="button" ng-click="saveForm()" ng-disabled="!credentials || credentials.length === 0" class="Lookup-save btn btn-primary" translate>SAVE</button>
            </div>
        </div>
    </div>
</div>
@@ -0,0 +1,14 @@
export default ['$scope',
    function($scope) {

        $scope.galaxyCredentialsTags = [];

        $scope.$watch('galaxyCredentials', function() {
            $scope.galaxyCredentialsTags = $scope.galaxyCredentials;
        }, true);

        $scope.deleteTag = function(tag){
            _.remove($scope.galaxyCredentials, {id: tag.id});
        };
    }
];

@@ -0,0 +1,15 @@
#instance-groups-panel {
    table {
        overflow: hidden;
    }
    .List-header {
        margin-bottom: 20px;
    }
    .isActive {
        border-left: 10px solid @list-row-select-bord;
    }
    .instances-list,
    .instance-jobs-list {
        margin-top: 20px;
    }
}
@@ -0,0 +1,19 @@
import galaxyCredentialsMultiselectController from './galaxy-credentials-multiselect.controller';
export default ['templateUrl', '$compile',
    function(templateUrl, $compile) {
        return {
            scope: {
                galaxyCredentials: '=',
                fieldIsDisabled: '='
            },
            restrict: 'E',
            templateUrl: templateUrl('organizations/galaxy-credentials-multiselect/galaxy-credentials'),
            controller: galaxyCredentialsMultiselectController,
            link: function(scope) {
                scope.openInstanceGroupsModal = function() {
                    $('#content-container').append($compile('<galaxy-credentials-modal galaxy-credentials="galaxyCredentials"></galaxy-credentials-modal>')(scope));
                };
            }
        };
    }
];

@@ -0,0 +1,18 @@
<div class="input-group Form-mixedInputGroup">
    <span class="input-group-btn input-group-prepend Form-variableHeightButtonGroup">
        <button aria-label="{{'Open Galaxy credentials'|translate}}" type="button" class="Form-lookupButton Form-lookupButton--variableHeight btn btn-default" ng-click="openInstanceGroupsModal()"
            ng-disabled="fieldIsDisabled">
            <i class="fa fa-search"></i>
        </button>
    </span>
    <span id="InstanceGroups" class="form-control Form-textInput Form-textInput--variableHeight input-medium lookup LabelList-lookupTags"
        ng-disabled="fieldIsDisabled"
        ng-class="{'LabelList-lookupTags--disabled' : fieldIsDisabled}">
        <div ng-if="!fieldIsDisabled" class="LabelList-tagContainer" ng-repeat="tag in galaxyCredentialsTags">
            <at-tag tag="tag.name" remove-tag="deleteTag(tag)"></at-tag>
        </div>
        <div ng-if="fieldIsDisabled" class="LabelList-tag" ng-repeat="tag in galaxyCredentialsTags">
            <span class="LabelList-name">{{tag.name | sanitize}}</span>
        </div>
    </span>
</div>
@@ -12,8 +12,10 @@ import organizationsLinkout from './linkout/main';
import OrganizationsLinkoutStates from './linkout/organizations-linkout.route';
import OrganizationForm from './organizations.form';
import OrganizationList from './organizations.list';
import { N_ } from '../i18n';
import galaxyCredentialsMultiselect from './galaxy-credentials-multiselect/galaxy-credentials.directive';
import galaxyCredentialsModal from './galaxy-credentials-multiselect/galaxy-credentials-modal/galaxy-credentials-modal.directive';

import { N_ } from '../i18n';

export default
angular.module('Organizations', [
@@ -24,6 +26,8 @@ angular.module('Organizations', [
.controller('OrganizationsEdit', OrganizationsEdit)
.factory('OrganizationForm', OrganizationForm)
.factory('OrganizationList', OrganizationList)
.directive('galaxyCredentialsMultiselect', galaxyCredentialsMultiselect)
.directive('galaxyCredentialsModal', galaxyCredentialsModal)
.config(['$stateProvider', 'stateDefinitionsProvider', '$stateExtenderProvider',
function($stateProvider, stateDefinitionsProvider, $stateExtenderProvider) {
let stateExtender = $stateExtenderProvider.$get(),
@@ -67,7 +71,29 @@ angular.module('Organizations', [
});
});

}]
}],
defaultGalaxyCredential: ['Rest', 'GetBasePath', 'ProcessErrors',
function(Rest, GetBasePath, ProcessErrors){
Rest.setUrl(GetBasePath('credentials'));
return Rest.get({
params: {
credential_type__kind: 'galaxy',
managed_by_tower: true
}
})
.then(({data}) => {
if (data.results.length > 0) {
return data.results;
}
})
.catch(({data, status}) => {
ProcessErrors(null, data, status, null, {
hdr: 'Error!',
msg: 'Failed to get default Galaxy credential. GET returned ' +
'status: ' + status
});
});
}],
},
edit: {
ConfigData: ['ConfigService', 'ProcessErrors', (ConfigService, ProcessErrors) => {
@@ -81,6 +107,24 @@ angular.module('Organizations', [
});
});
}],
GalaxyCredentialsData: ['$stateParams', 'Rest', 'GetBasePath', 'ProcessErrors',
function($stateParams, Rest, GetBasePath, ProcessErrors){
let path = `${GetBasePath('organizations')}${$stateParams.organization_id}/galaxy_credentials/`;
Rest.setUrl(path);
return Rest.get()
.then(({data}) => {
if (data.results.length > 0) {
return data.results;
}
})
.catch(({data, status}) => {
ProcessErrors(null, data, status, null, {
hdr: 'Error!',
msg: 'Failed to get credentials. GET returned ' +
'status: ' + status
});
});
}],
InstanceGroupsData: ['$stateParams', 'Rest', 'GetBasePath', 'ProcessErrors',
function($stateParams, Rest, GetBasePath, ProcessErrors){
let path = `${GetBasePath('organizations')}${$stateParams.organization_id}/instance_groups/`;
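The defaultGalaxyCredential and GalaxyCredentialsData resolvers above both follow the same fetch-and-unwrap pattern. A hedged sketch of it with plain fetch(), using the query parameters shown in the resolver (the helper name is hypothetical):

async function fetchManagedGalaxyCredentials () {
    const url = '/api/v2/credentials/?credential_type__kind=galaxy&managed_by_tower=true';
    const res = await fetch(url);
    if (!res.ok) {
        throw new Error('GET returned status: ' + res.status);
    }
    const data = await res.json();
    // mirror the resolver: only a non-empty result list is returned
    return data.results.length > 0 ? data.results : undefined;
}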
@@ -55,6 +55,15 @@ export default ['NotificationsList', 'i18n',
ngDisabled: '!(organization_obj.summary_fields.user_capabilities.edit || canAdd)',
ngShow: 'custom_virtualenvs_visible'
},
credential: {
label: i18n._('Galaxy Credentials'),
type: 'custom',
awPopOver: "<p>" + i18n._("Select Galaxy credentials. The selection order sets the order in which Tower will download roles/collections using `ansible-galaxy`.") + "</p>",
dataTitle: i18n._('Galaxy Credentials'),
dataContainer: 'body',
dataPlacement: 'right',
control: '<galaxy-credentials-multiselect galaxy-credentials="credentials" field-is-disabled="!(organization_obj.summary_fields.user_capabilities.edit || canAdd) || (!current_user.is_superuser && isOrgAdmin)"></galaxy-credentials-multiselect>',
},
max_hosts: {
label: i18n._('Max Hosts'),
type: 'number',
@@ -69,7 +78,7 @@ export default ['NotificationsList', 'i18n',
awPopOver: "<p>" + i18n._("The maximum number of hosts allowed to be managed by this organization. Value defaults to 0 which means no limit. Refer to the Ansible documentation for more details.") + "</p>",
ngDisabled: '!current_user.is_superuser',
ngShow: 'BRAND_NAME === "Tower"'
}
},
},

buttons: { //for now always generates <button> tags
@@ -203,7 +203,7 @@ angular.module('ModalDialog', ['Utilities'])
* })
*
* Use to resize a textarea field contained on a modal. Has only been tested where the
* form contains 1 textarea and the the textarea is at the bottom of the form/modal.
* form contains 1 textarea and the textarea is at the bottom of the form/modal.
*
**/
.factory('TextareaResize', ['ParseTypeChange', 'Wait', function(ParseTypeChange, Wait){
@@ -122,7 +122,7 @@
selected-id="webhookCredential.modalSelectedId"
on-ready="handleWebhookCredentialModalReady"
on-item-select="handleWebhookCredentialModalItemSelect"
/>
></at-lookup-list>
<at-action-group col="12" pos="right">
<at-action-button
variant="tertiary"
@@ -171,7 +171,7 @@ export default
selected-id="webhookCredential.modalSelectedId"
on-ready="handleWebhookCredentialModalReady"
on-item-select="handleWebhookCredentialModalItemSelect"
/>
></at-lookup-list>
<at-action-group col="12" pos="right">
<at-action-button
variant="tertiary"
@@ -46,6 +46,52 @@ function MultiCredentialService (Rest, ProcessErrors, $q, GetBasePath) {
});
};

this.saveRelatedSequentially = ({ related }, credentials) => {
    Rest.setUrl(related.credentials);
    return Rest
        .get()
        .then(res => {
            const { data: { results = [] } } = res;
            const updatedCredentialIds = (credentials || []).map(({ id }) => id);
            const currentCredentialIds = results.map(({ id }) => id);
            const credentialIdsToAssociate = [];
            const credentialIdsToDisassociate = [];
            let disassociateRemainingIds = false;

            currentCredentialIds.forEach((currentId, position) => {
                if (!disassociateRemainingIds && updatedCredentialIds[position] !== currentId) {
                    disassociateRemainingIds = true;
                }

                if (disassociateRemainingIds) {
                    credentialIdsToDisassociate.push(currentId);
                }
            });

            updatedCredentialIds.forEach(updatedId => {
                if (credentialIdsToDisassociate.includes(updatedId)) {
                    credentialIdsToAssociate.push(updatedId);
                } else if (!currentCredentialIds.includes(updatedId)) {
                    credentialIdsToAssociate.push(updatedId);
                }
            });

            let disassociationPromise = Promise.resolve();
            credentialIdsToDisassociate.forEach(id => {
                disassociationPromise = disassociationPromise.then(() => disassociate({ related }, id));
            });

            return disassociationPromise
                .then(() => {
                    let associationPromise = Promise.resolve();
                    credentialIdsToAssociate.forEach(id => {
                        associationPromise = associationPromise.then(() => associate({ related }, id));
                    });
                    return associationPromise;
                });
        });
};

this.getRelated = ({ related }, params = { permitted: [] }) => {
    Rest.setUrl(related.credentials);
    return Rest
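saveRelatedSequentially above keeps the server-side credential list in the same order as the UI selection: from the first position where the two lists diverge, every currently attached id is disassociated, and every new or displaced id is then re-associated in order. The set computation, isolated as a pure function (names mirror the hunk; the REST plumbing is left out):

function diffOrderedIds (currentIds, updatedIds) {
    const idsToDisassociate = [];
    let disassociateRemainingIds = false;

    currentIds.forEach((currentId, position) => {
        if (!disassociateRemainingIds && updatedIds[position] !== currentId) {
            disassociateRemainingIds = true; // order diverges here
        }
        if (disassociateRemainingIds) {
            idsToDisassociate.push(currentId);
        }
    });

    // re-associate everything that was displaced, plus anything brand new
    const idsToAssociate = updatedIds.filter(id =>
        idsToDisassociate.includes(id) || !currentIds.includes(id));

    return { idsToDisassociate, idsToAssociate };
}

// e.g. diffOrderedIds([1, 2, 3], [2, 1]) returns
// { idsToDisassociate: [1, 2, 3], idsToAssociate: [2, 1] }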
@@ -140,7 +140,7 @@ export default [
selected-id="webhookCredential.modalSelectedId"
on-ready="handleWebhookCredentialModalReady"
on-item-select="handleWebhookCredentialModalItemSelect"
/>
></at-lookup-list>
<at-action-group col="12" pos="right">
<at-action-button
variant="tertiary"
@@ -179,7 +179,7 @@ export default [
selected-id="webhookCredential.modalSelectedId"
on-ready="handleWebhookCredentialModalReady"
on-item-select="handleWebhookCredentialModalItemSelect"
/>
></at-lookup-list>
<at-action-group col="12" pos="right">
<at-action-button
variant="tertiary"
awx/ui/po/ja.po (640 lines): file diff suppressed because it is too large.
@@ -152,7 +152,7 @@ export default {
"date": "2017-05-25T14:01:19.000Z",
"rule": {
"summary_html": "<p>A flaw was found in the Linux kernel's memory subsystem. An unprivileged local user could use this flaw to write to files they would normally only have read-only access to and thus increase their privileges on the system.</p>\n",
"generic_html": "<p>A race condition was found in the way Linux kernel's memory subsystem handled breakage of the the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.</p>\n<p>A process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. </p>\n<p>Red Hat recommends that you update the kernel package or apply mitigations.</p>\n",
"generic_html": "<p>A race condition was found in the way Linux kernel's memory subsystem handled breakage of the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.</p>\n<p>A process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. </p>\n<p>Red Hat recommends that you update the kernel package or apply mitigations.</p>\n",
"more_info_html": "<ul>\n<li>For more information about the flaw see <a href=\"https://access.redhat.com/security/cve/CVE-2016-5195\">CVE-2016-5195</a></li>\n<li>To learn how to upgrade packages, see &quot;<a href=\"https://access.redhat.com/solutions/9934\">What is yum and how do I use it?</a>&quot;</li>\n<li>The Customer Portal page for the <a href=\"https://access.redhat.com/security/\">Red Hat Security Team</a> contains more information about policies, procedures, and alerts for Red Hat Products.</li>\n<li>The Security Team also maintains a frequently updated blog at <a href=\"https://securityblog.redhat.com\">securityblog.redhat.com</a>.</li>\n</ul>\n",
"severity": "WARN",
"ansible": true,
@@ -163,7 +163,7 @@ export default {
"plugin": "CVE_2016_5195_kernel",
"description": "Kernel vulnerable to privilege escalation via permission bypass (CVE-2016-5195)",
"summary": "A flaw was found in the Linux kernel's memory subsystem. An unprivileged local user could use this flaw to write to files they would normally only have read-only access to and thus increase their privileges on the system.",
"generic": "A race condition was found in the way Linux kernel's memory subsystem handled breakage of the the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.\n\nA process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. \n\nRed Hat recommends that you update the kernel package or apply mitigations.",
"generic": "A race condition was found in the way Linux kernel's memory subsystem handled breakage of the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.\n\nA process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. \n\nRed Hat recommends that you update the kernel package or apply mitigations.",
"reason": "<p>A flaw was found in the Linux kernel's memory subsystem. An unprivileged local user could use this flaw to write to files they would normally have read-only access to and thus increase their privileges on the system.</p>\n<p>This host is affected because it is running kernel <strong>3.10.0-123.el7</strong>. </p>\n<p>There is currently no mitigation applied and your system is vulnerable.</p>\n",
"type": null,
"more_info": "* For more information about the flaw see [CVE-2016-5195](https://access.redhat.com/security/cve/CVE-2016-5195)\n* To learn how to upgrade packages, see \"[What is yum and how do I use it?](https://access.redhat.com/solutions/9934)\"\n* The Customer Portal page for the [Red Hat Security Team](https://access.redhat.com/security/) contains more information about policies, procedures, and alerts for Red Hat Products.\n* The Security Team also maintains a frequently updated blog at [securityblog.redhat.com](https://securityblog.redhat.com).",

@@ -95,7 +95,7 @@ export default [
"date": "2017-05-25T14:01:19.000Z",
"rule": {
"summary_html": "<p>A flaw was found in the Linux kernel's memory subsystem. An unprivileged local user could use this flaw to write to files they would normally only have read-only access to and thus increase their privileges on the system.</p>\n",
"generic_html": "<p>A race condition was found in the way Linux kernel's memory subsystem handled breakage of the the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.</p>\n<p>A process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. </p>\n<p>Red Hat recommends that you update the kernel package or apply mitigations.</p>\n",
"generic_html": "<p>A race condition was found in the way Linux kernel's memory subsystem handled breakage of the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.</p>\n<p>A process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. </p>\n<p>Red Hat recommends that you update the kernel package or apply mitigations.</p>\n",
"more_info_html": "<ul>\n<li>For more information about the flaw see <a href=\"https://access.redhat.com/security/cve/CVE-2016-5195\">CVE-2016-5195</a></li>\n<li>To learn how to upgrade packages, see &quot;<a href=\"https://access.redhat.com/solutions/9934\">What is yum and how do I use it?</a>&quot;</li>\n<li>The Customer Portal page for the <a href=\"https://access.redhat.com/security/\">Red Hat Security Team</a> contains more information about policies, procedures, and alerts for Red Hat Products.</li>\n<li>The Security Team also maintains a frequently updated blog at <a href=\"https://securityblog.redhat.com\">securityblog.redhat.com</a>.</li>\n</ul>\n",
"severity": "WARN",
"ansible": true,
@@ -106,7 +106,7 @@ export default [
"plugin": "CVE_2016_5195_kernel",
"description": "Kernel vulnerable to privilege escalation via permission bypass (CVE-2016-5195)",
"summary": "A flaw was found in the Linux kernel's memory subsystem. An unprivileged local user could use this flaw to write to files they would normally only have read-only access to and thus increase their privileges on the system.",
"generic": "A race condition was found in the way Linux kernel's memory subsystem handled breakage of the the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.\n\nA process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. \n\nRed Hat recommends that you update the kernel package or apply mitigations.",
"generic": "A race condition was found in the way Linux kernel's memory subsystem handled breakage of the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.\n\nA process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. \n\nRed Hat recommends that you update the kernel package or apply mitigations.",
"reason": "<p>A flaw was found in the Linux kernel's memory subsystem. An unprivileged local user could use this flaw to write to files they would normally have read-only access to and thus increase their privileges on the system.</p>\n<p>This host is affected because it is running kernel <strong>3.10.0-123.el7</strong>. </p>\n<p>There is currently no mitigation applied and your system is vulnerable.</p>\n",
"type": null,
"more_info": "* For more information about the flaw see [CVE-2016-5195](https://access.redhat.com/security/cve/CVE-2016-5195)\n* To learn how to upgrade packages, see \"[What is yum and how do I use it?](https://access.redhat.com/solutions/9934)\"\n* The Customer Portal page for the [Red Hat Security Team](https://access.redhat.com/security/) contains more information about policies, procedures, and alerts for Red Hat Products.\n* The Security Team also maintains a frequently updated blog at [securityblog.redhat.com](https://securityblog.redhat.com).",

@@ -13,7 +13,7 @@ export default [
"date": "2017-05-25T14:01:19.000Z",
"rule": {
"summary_html": "<p>A flaw was found in the Linux kernel's memory subsystem. An unprivileged local user could use this flaw to write to files they would normally only have read-only access to and thus increase their privileges on the system.</p>\n",
"generic_html": "<p>A race condition was found in the way Linux kernel's memory subsystem handled breakage of the the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.</p>\n<p>A process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. </p>\n<p>Red Hat recommends that you update the kernel package or apply mitigations.</p>\n",
"generic_html": "<p>A race condition was found in the way Linux kernel's memory subsystem handled breakage of the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.</p>\n<p>A process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. </p>\n<p>Red Hat recommends that you update the kernel package or apply mitigations.</p>\n",
"more_info_html": "<ul>\n<li>For more information about the flaw see <a href=\"https://access.redhat.com/security/cve/CVE-2016-5195\">CVE-2016-5195</a></li>\n<li>To learn how to upgrade packages, see &quot;<a href=\"https://access.redhat.com/solutions/9934\">What is yum and how do I use it?</a>&quot;</li>\n<li>The Customer Portal page for the <a href=\"https://access.redhat.com/security/\">Red Hat Security Team</a> contains more information about policies, procedures, and alerts for Red Hat Products.</li>\n<li>The Security Team also maintains a frequently updated blog at <a href=\"https://securityblog.redhat.com\">securityblog.redhat.com</a>.</li>\n</ul>\n",
"severity": "WARN",
"ansible": true,
@@ -24,7 +24,7 @@ export default [
"plugin": "CVE_2016_5195_kernel",
"description": "Kernel vulnerable to privilege escalation via permission bypass (CVE-2016-5195)",
"summary": "A flaw was found in the Linux kernel's memory subsystem. An unprivileged local user could use this flaw to write to files they would normally only have read-only access to and thus increase their privileges on the system.",
"generic": "A race condition was found in the way Linux kernel's memory subsystem handled breakage of the the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.\n\nA process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. \n\nRed Hat recommends that you update the kernel package or apply mitigations.",
"generic": "A race condition was found in the way Linux kernel's memory subsystem handled breakage of the read only shared mappings COW situation on write access. An unprivileged local user could use this flaw to write to files they should normally have read-only access to, and thus increase their privileges on the system.\n\nA process that is able to mmap a file is able to race Copy on Write (COW) page creation (within get_user_pages) with madvise(MADV_DONTNEED) kernel system calls. This would allow modified pages to bypass the page protection mechanism and modify the mapped file. The vulnerability could be abused by allowing an attacker to modify existing setuid files with instructions to elevate permissions. This attack has been found in the wild. \n\nRed Hat recommends that you update the kernel package or apply mitigations.",
"reason": "<p>A flaw was found in the Linux kernel's memory subsystem. An unprivileged local user could use this flaw to write to files they would normally have read-only access to and thus increase their privileges on the system.</p>\n<p>This host is affected because it is running kernel <strong>3.10.0-123.el7</strong>. </p>\n<p>There is currently no mitigation applied and your system is vulnerable.</p>\n",
"type": null,
"more_info": "* For more information about the flaw see [CVE-2016-5195](https://access.redhat.com/security/cve/CVE-2016-5195)\n* To learn how to upgrade packages, see \"[What is yum and how do I use it?](https://access.redhat.com/solutions/9934)\"\n* The Customer Portal page for the [Red Hat Security Team](https://access.redhat.com/security/) contains more information about policies, procedures, and alerts for Red Hat Products.\n* The Security Team also maintains a frequently updated blog at [securityblog.redhat.com](https://securityblog.redhat.com).",
@@ -51,7 +51,13 @@ describe('Output | StreamService', () => {
});

describe('isReadyToRender', () => {
it("it's never ready to render unless the result of getReadyCount is greater than 0", () => {
it("it's never ready to render when live updates are enabled unless the result of getReadyCount is greater than 0", () => {
delete window.liveUpdates;
Object.defineProperty(window, 'liveUpdates', {
    value: true,
    writable: false
});

const params = [
    [-1, false],
    [0, false],
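The spec above pins window.liveUpdates as a read-only value before exercising the stream logic. The delete-then-defineProperty pattern in isolation (a sketch; configurable: true is an assumption added here so other specs could re-pin the flag):

function pinWindowFlag (name, value) {
    delete window[name]; // clear any plain property left by an earlier test
    Object.defineProperty(window, name, {
        value,
        writable: false,    // stray assignments in the code under test are ignored
        configurable: true  // assumed here so the flag can be redefined later
    });
}

// usage inside a spec: pinWindowFlag('liveUpdates', true);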
awx/ui_next/package-lock.json (386 lines, generated)
@ -2849,19 +2849,19 @@
|
||||
}
|
||||
},
|
||||
"@lingui/babel-plugin-extract-messages": {
|
||||
"version": "2.9.1",
|
||||
"resolved": "https://registry.npmjs.org/@lingui/babel-plugin-extract-messages/-/babel-plugin-extract-messages-2.9.1.tgz",
|
||||
"integrity": "sha512-ZguvJK/ByupNgmmxvlO43DewGTMVtPsolA/Uxm24YTLg0jf7cu/GRaqYxYt+SojWHuo2/mn6dzDJZPFcK1A2og==",
|
||||
"version": "2.9.2",
|
||||
"resolved": "https://registry.npmjs.org/@lingui/babel-plugin-extract-messages/-/babel-plugin-extract-messages-2.9.2.tgz",
|
||||
"integrity": "sha512-nkRufTupyWjRpzX5ZXB1qMKWT9B+gAuMXYD4blZ/HHCJlEOXeds9W5bugVd3N8Ts5m4o9iRoqeaCuVcH7sJ8Wg==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"@lingui/conf": "2.9.1",
|
||||
"@lingui/conf": "2.9.2",
|
||||
"babel-generator": "^6.26.1"
|
||||
}
|
||||
},
|
||||
"@lingui/babel-plugin-transform-js": {
|
||||
"version": "2.9.1",
|
||||
"resolved": "https://registry.npmjs.org/@lingui/babel-plugin-transform-js/-/babel-plugin-transform-js-2.9.1.tgz",
|
||||
"integrity": "sha512-m1RAKUKffyxfWQ2Y0KfGHhYofdHdM+0aSsi2kgcebqzsuE8Hwuy+r4GZr593cSIqBu6Ugb6/WKoAUGUoEF9ZHw==",
|
||||
"version": "2.9.2",
|
||||
"resolved": "https://registry.npmjs.org/@lingui/babel-plugin-transform-js/-/babel-plugin-transform-js-2.9.2.tgz",
|
||||
"integrity": "sha512-yWoyhOfjRa9744TbVb/WN1OWxZYFLuXcWH5aVCu/sZ2b1YpsGCtfhplc5lRVWN8QcsfpjYmFiPqzU6swE5OFdQ==",
|
||||
"dev": true
|
||||
},
|
||||
"@lingui/babel-plugin-transform-react": {
|
||||
@ -2871,15 +2871,15 @@
|
||||
"dev": true
|
||||
},
|
||||
"@lingui/cli": {
|
||||
"version": "2.9.1",
|
||||
"resolved": "https://registry.npmjs.org/@lingui/cli/-/cli-2.9.1.tgz",
|
||||
"integrity": "sha512-Ruzg4UxZzqnJDMpdGE04G8NnXFRAd5nH5dZ7rAYBurSddlLEqE3DVrxMToYC1BfCpbmWznHguPwusljrCUkMeg==",
|
||||
"version": "2.9.2",
|
||||
"resolved": "https://registry.npmjs.org/@lingui/cli/-/cli-2.9.2.tgz",
|
||||
"integrity": "sha512-j46vUe8hSgvsm3j2V4sPLxOdd2HacacGC5E+bWx4wHEhd/yxV4nwPfWpuC7wLoBwM/y2bcF8Q2V7ahEznKSO6A==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"@lingui/babel-plugin-extract-messages": "2.9.1",
|
||||
"@lingui/babel-plugin-transform-js": "2.9.1",
|
||||
"@lingui/babel-plugin-transform-react": "2.9.1",
|
||||
"@lingui/conf": "2.9.1",
|
||||
"@lingui/babel-plugin-extract-messages": "2.9.2",
|
||||
"@lingui/babel-plugin-transform-js": "2.9.2",
|
||||
"@lingui/babel-plugin-transform-react": "2.9.2",
|
||||
"@lingui/conf": "2.9.2",
|
||||
"babel-generator": "^6.26.1",
|
||||
"babel-plugin-syntax-jsx": "^6.18.0",
|
||||
"babel-runtime": "^6.26.0",
|
||||
@ -2896,13 +2896,18 @@
|
||||
"make-plural": "^4.1.1",
|
||||
"messageformat-parser": "^2.0.0",
|
||||
"mkdirp": "^0.5.1",
|
||||
"opencollective": "^1.0.3",
|
||||
"ora": "^3.4.0",
|
||||
"pofile": "^1.0.11",
|
||||
"pseudolocale": "^1.1.0",
|
||||
"ramda": "^0.26.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"@lingui/babel-plugin-transform-react": {
|
||||
"version": "2.9.2",
|
||||
"resolved": "https://registry.npmjs.org/@lingui/babel-plugin-transform-react/-/babel-plugin-transform-react-2.9.2.tgz",
|
||||
"integrity": "sha512-bxvrepiS6J9vZqRtpRiAgBIASQscjvu7aFmPqH4Y6001TDXrYuyhhNRt1BI3k2E6C2SckHh5vRtSpsqpjEiY3A==",
|
||||
"dev": true
|
||||
},
|
||||
"ansi-escapes": {
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz",
|
||||
@ -3015,9 +3020,9 @@
|
||||
}
|
||||
},
|
||||
"@lingui/conf": {
|
||||
"version": "2.9.1",
|
||||
"resolved": "https://registry.npmjs.org/@lingui/conf/-/conf-2.9.1.tgz",
|
||||
"integrity": "sha512-33mEShmFemYy5tH+fgvAH+mNaO9MbOyDM1lt+frx/ozXBMbGsPrEReDFGtCY2CGEITn5Q9SGJbcRscnfQ2DubQ==",
|
||||
"version": "2.9.2",
|
||||
"resolved": "https://registry.npmjs.org/@lingui/conf/-/conf-2.9.2.tgz",
|
||||
"integrity": "sha512-xHfH+zLhM7PaMawqeK1G+Pq+reVPYR8eU7XixH4VRHWK8n/itTb4fRl24xc5IUgeXJx+NX1qCzBYVz0i13xlVg==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"chalk": "^2.3.0",
|
||||
@ -3025,46 +3030,6 @@
|
||||
"jest-regex-util": "^24.3.0",
|
||||
"jest-validate": "^24.8.0",
|
||||
"pkg-conf": "^3.1.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"cosmiconfig": {
|
||||
"version": "5.2.1",
|
||||
"resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz",
|
||||
"integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"import-fresh": "^2.0.0",
|
||||
"is-directory": "^0.3.1",
|
||||
"js-yaml": "^3.13.1",
|
||||
"parse-json": "^4.0.0"
|
||||
}
|
||||
},
|
||||
"import-fresh": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz",
|
||||
"integrity": "sha1-2BNVwVYS04bGH53dOSLUMEgipUY=",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"caller-path": "^2.0.0",
|
||||
"resolve-from": "^3.0.0"
|
||||
}
|
||||
},
|
||||
"parse-json": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz",
|
||||
"integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"error-ex": "^1.3.1",
|
||||
"json-parse-better-errors": "^1.0.1"
|
||||
}
|
||||
},
|
||||
"resolve-from": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz",
|
||||
"integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=",
|
||||
"dev": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"@lingui/core": {
|
||||
@ -4299,12 +4264,6 @@
|
||||
"resolved": "https://registry.npmjs.org/jsesc/-/jsesc-1.3.0.tgz",
|
||||
"integrity": "sha1-RsP+yMGJKxKwgz25vHYiF226s0s=",
|
||||
"dev": true
|
||||
},
|
||||
"source-map": {
|
||||
"version": "0.5.7",
|
||||
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
|
||||
"integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
|
||||
"dev": true
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -4516,25 +4475,6 @@
|
||||
"integrity": "sha512-eqj0hVcJUR57/Ug2zE1Yswsw4LhuqqHhD+8v120T1cl3kjg76QwtyBrdIk4WVwK+lAhBJVYCd/v+4nc4y+8JsA==",
|
||||
"dev": true
|
||||
},
|
||||
"babel-polyfill": {
|
||||
"version": "6.23.0",
|
||||
"resolved": "https://registry.npmjs.org/babel-polyfill/-/babel-polyfill-6.23.0.tgz",
|
||||
"integrity": "sha1-g2TKYt+Or7gwSZ9pkXdGbDsDSZ0=",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"babel-runtime": "^6.22.0",
|
||||
"core-js": "^2.4.0",
|
||||
"regenerator-runtime": "^0.10.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"regenerator-runtime": {
|
||||
"version": "0.10.5",
|
||||
"resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz",
|
||||
"integrity": "sha1-M2w+/BIgrc7dosn6tntaeVWjNlg=",
|
||||
"dev": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"babel-preset-jest": {
|
||||
"version": "24.9.0",
|
||||
"resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-24.9.0.tgz",
|
||||
@ -4993,7 +4933,8 @@
|
||||
"brorand": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz",
|
||||
"integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8="
|
||||
"integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=",
|
||||
"dev": true
|
||||
},
|
||||
"browser-process-hrtime": {
|
||||
"version": "1.0.0",
|
||||
@ -5581,9 +5522,9 @@
|
||||
}
|
||||
},
|
||||
"cli-spinners": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.3.0.tgz",
|
||||
"integrity": "sha512-Xs2Hf2nzrvJMFKimOR7YR0QwZ8fc0u98kdtwN1eNAZzNQgH3vK2pXzff6GJtKh7S5hoJ87ECiAiZFS2fb5Ii2w==",
|
||||
"version": "2.4.0",
|
||||
"resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.4.0.tgz",
|
||||
"integrity": "sha512-sJAofoarcm76ZGpuooaO0eDy8saEy+YoZBLjC4h8srt4jeBnkYeOgqxgsJQTpyt2LjI5PTfLJHSL+41Yu4fEJA==",
|
||||
"dev": true
|
||||
},
|
||||
"cli-table": {
|
||||
@ -7199,6 +7140,7 @@
|
||||
"version": "6.5.3",
|
||||
"resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz",
|
||||
"integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"bn.js": "^4.4.0",
|
||||
"brorand": "^1.0.1",
|
||||
@ -7212,7 +7154,8 @@
|
||||
"bn.js": {
|
||||
"version": "4.11.9",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
|
||||
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
|
||||
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==",
|
||||
"dev": true
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -7234,15 +7177,6 @@
|
||||
"integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=",
|
||||
"dev": true
|
||||
},
|
||||
"encoding": {
|
||||
"version": "0.1.12",
|
||||
"resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.12.tgz",
|
||||
"integrity": "sha1-U4tm8+5izRq1HsMjgp0flIDHS+s=",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"iconv-lite": "~0.4.13"
|
||||
}
|
||||
},
|
||||
"end-of-stream": {
|
||||
"version": "1.4.4",
|
||||
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
|
||||
@ -9117,6 +9051,7 @@
|
||||
"version": "1.1.7",
|
||||
"resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz",
|
||||
"integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"inherits": "^2.0.3",
|
||||
"minimalistic-assert": "^1.0.1"
|
||||
@ -9151,6 +9086,7 @@
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz",
|
||||
"integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"hash.js": "^1.0.3",
|
||||
"minimalistic-assert": "^1.0.0",
|
||||
@ -9567,7 +9503,8 @@
|
||||
"inherits": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
|
||||
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
|
||||
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
|
||||
"dev": true
|
||||
},
|
||||
"ini": {
|
||||
"version": "1.3.5",
|
||||
@ -11458,12 +11395,14 @@
|
||||
"minimalistic-assert": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz",
|
||||
"integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A=="
|
||||
"integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==",
|
||||
"dev": true
|
||||
},
|
||||
"minimalistic-crypto-utils": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz",
|
||||
"integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo="
|
||||
"integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=",
|
||||
"dev": true
|
||||
},
|
||||
"minimatch": {
|
||||
"version": "3.0.4",
|
||||
@ -11732,20 +11671,10 @@
|
||||
"tslib": "^1.10.0"
|
||||
}
|
||||
},
|
||||
"node-fetch": {
|
||||
"version": "1.6.3",
|
||||
"resolved": "http://registry.npmjs.org/node-fetch/-/node-fetch-1.6.3.tgz",
|
||||
"integrity": "sha1-3CNO3WSJmC1Y6PDbT2lQKavNjAQ=",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"encoding": "^0.1.11",
|
||||
"is-stream": "^1.0.1"
|
||||
}
|
||||
},
|
||||
"node-forge": {
|
||||
"version": "0.9.0",
|
||||
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.9.0.tgz",
|
||||
"integrity": "sha512-7ASaDa3pD+lJ3WvXFsxekJQelBKRpne+GOVbLbtHYdd7pFspyeuJHnWfLplGf3SwKGbfs/aYl5V/JCIaHVUKKQ==",
|
||||
"version": "0.10.0",
|
||||
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz",
|
||||
"integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==",
|
||||
"dev": true
|
||||
},
|
||||
"node-int64": {
|
||||
@@ -12180,213 +12109,6 @@
        }
      }
    },
    "opencollective": {
      "version": "1.0.3",
      "resolved": "https://registry.npmjs.org/opencollective/-/opencollective-1.0.3.tgz",
      "integrity": "sha1-ruY3K8KBRFg2kMPKja7PwSDdDvE=",
      "dev": true,
      "requires": {
        "babel-polyfill": "6.23.0",
        "chalk": "1.1.3",
        "inquirer": "3.0.6",
        "minimist": "1.2.0",
        "node-fetch": "1.6.3",
        "opn": "4.0.2"
      },
      "dependencies": {
        "ansi-escapes": {
          "version": "1.4.0",
          "resolved": "http://registry.npmjs.org/ansi-escapes/-/ansi-escapes-1.4.0.tgz",
          "integrity": "sha1-06ioOzGapneTZisT52HHkRQiMG4=",
          "dev": true
        },
        "ansi-regex": {
          "version": "2.1.1",
          "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
          "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
          "dev": true
        },
        "ansi-styles": {
          "version": "2.2.1",
          "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz",
          "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
          "dev": true
        },
        "chalk": {
          "version": "1.1.3",
          "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz",
          "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=",
          "dev": true,
          "requires": {
            "ansi-styles": "^2.2.1",
            "escape-string-regexp": "^1.0.2",
            "has-ansi": "^2.0.0",
            "strip-ansi": "^3.0.0",
            "supports-color": "^2.0.0"
          }
        },
        "chardet": {
          "version": "0.4.2",
          "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.4.2.tgz",
          "integrity": "sha1-tUc7M9yXxCTl2Y3IfVXU2KKci/I=",
          "dev": true
        },
        "cli-cursor": {
          "version": "2.1.0",
          "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz",
          "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=",
          "dev": true,
          "requires": {
            "restore-cursor": "^2.0.0"
          }
        },
        "external-editor": {
          "version": "2.2.0",
          "resolved": "http://registry.npmjs.org/external-editor/-/external-editor-2.2.0.tgz",
          "integrity": "sha512-bSn6gvGxKt+b7+6TKEv1ZycHleA7aHhRHyAqJyp5pbUFuYYNIzpZnQDk7AsYckyWdEnTeAnay0aCy2aV6iTk9A==",
          "dev": true,
          "requires": {
            "chardet": "^0.4.0",
            "iconv-lite": "^0.4.17",
            "tmp": "^0.0.33"
          }
        },
        "figures": {
          "version": "2.0.0",
          "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz",
          "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=",
          "dev": true,
          "requires": {
            "escape-string-regexp": "^1.0.5"
          }
        },
        "has-ansi": {
          "version": "2.0.0",
          "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz",
          "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=",
          "dev": true,
          "requires": {
            "ansi-regex": "^2.0.0"
          }
        },
        "inquirer": {
          "version": "3.0.6",
          "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-3.0.6.tgz",
          "integrity": "sha1-4EqqnQW3o8ubD0B9BDdfBEcZA0c=",
          "dev": true,
          "requires": {
            "ansi-escapes": "^1.1.0",
            "chalk": "^1.0.0",
            "cli-cursor": "^2.1.0",
            "cli-width": "^2.0.0",
            "external-editor": "^2.0.1",
            "figures": "^2.0.0",
            "lodash": "^4.3.0",
            "mute-stream": "0.0.7",
            "run-async": "^2.2.0",
            "rx": "^4.1.0",
            "string-width": "^2.0.0",
            "strip-ansi": "^3.0.0",
            "through": "^2.3.6"
          }
        },
        "is-fullwidth-code-point": {
          "version": "2.0.0",
          "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
          "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
          "dev": true
        },
        "mimic-fn": {
          "version": "1.2.0",
          "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz",
          "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==",
          "dev": true
        },
        "minimist": {
          "version": "1.2.0",
          "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
          "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=",
          "dev": true
        },
        "mute-stream": {
          "version": "0.0.7",
          "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.7.tgz",
          "integrity": "sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s=",
          "dev": true
        },
        "onetime": {
          "version": "2.0.1",
          "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz",
          "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=",
          "dev": true,
          "requires": {
            "mimic-fn": "^1.0.0"
          }
        },
        "opn": {
          "version": "4.0.2",
          "resolved": "http://registry.npmjs.org/opn/-/opn-4.0.2.tgz",
          "integrity": "sha1-erwi5kTf9jsKltWrfyeQwPAavJU=",
          "dev": true,
          "requires": {
            "object-assign": "^4.0.1",
            "pinkie-promise": "^2.0.0"
          }
        },
        "restore-cursor": {
          "version": "2.0.0",
          "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz",
          "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=",
          "dev": true,
          "requires": {
            "onetime": "^2.0.0",
            "signal-exit": "^3.0.2"
          }
        },
        "string-width": {
          "version": "2.1.1",
          "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz",
          "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==",
          "dev": true,
          "requires": {
            "is-fullwidth-code-point": "^2.0.0",
            "strip-ansi": "^4.0.0"
          },
          "dependencies": {
            "ansi-regex": {
              "version": "3.0.0",
              "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
              "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
              "dev": true
            },
            "strip-ansi": {
              "version": "4.0.0",
              "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
              "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
              "dev": true,
              "requires": {
                "ansi-regex": "^3.0.0"
              }
            }
          }
        },
        "strip-ansi": {
          "version": "3.0.1",
          "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
          "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
          "dev": true,
          "requires": {
            "ansi-regex": "^2.0.0"
          }
        },
        "supports-color": {
          "version": "2.0.0",
          "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
          "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=",
          "dev": true
        }
      }
    },
    "opn": {
      "version": "5.5.0",
      "resolved": "https://registry.npmjs.org/opn/-/opn-5.5.0.tgz",
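The 200-plus lines dropped in the hunk above are the `opencollective` entry and its bundled dependency tree. Removing it also makes some lock entries that matched its tree disappear: the `node-fetch@1.6.3` entry above is the exact version `opencollective` requires, and the `rx@4.1.0` entry pruned in a hunk below is what its nested `inquirer` asked for. As a rough way to see which top-level lock entries a package pulls in, a small script can walk the `requires` maps of a v1 lock file (this helper is illustrative, not part of the commit):

```js
// walk-requires.js — list top-level lock entries reachable from a package's "requires"
// (rough sketch for the npm v1 lock format; nested dedupe trees are ignored)
const fs = require('fs');

const lock = JSON.parse(fs.readFileSync('package-lock.json', 'utf8'));
const deps = lock.dependencies || {};

function reachable(name, seen = new Set()) {
  if (seen.has(name) || !deps[name]) return seen;
  seen.add(name);
  // "requires" records the ranges this entry was resolved against
  for (const dep of Object.keys(deps[name].requires || {})) {
    reachable(dep, seen);
  }
  return seen;
}

console.log([...reachable('opencollective')].sort().join('\n'));
```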
@@ -12822,16 +12544,6 @@
      "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
      "dev": true
    },
    "parse-json": {
      "version": "4.0.0",
      "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz",
      "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=",
      "dev": true,
      "requires": {
        "error-ex": "^1.3.1",
        "json-parse-better-errors": "^1.0.1"
      }
    },
    "pify": {
      "version": "4.0.1",
      "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz",
@@ -15440,12 +15152,6 @@
      "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz",
      "integrity": "sha1-P4Yt+pGrdmsUiF700BEkv9oHT7Q="
    },
    "rx": {
      "version": "4.1.0",
      "resolved": "https://registry.npmjs.org/rx/-/rx-4.1.0.tgz",
      "integrity": "sha1-pfE/957zt0D+MKqAP7CfmIBdR4I=",
      "dev": true
    },
    "rxjs": {
      "version": "6.5.5",
      "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.5.5.tgz",
@@ -15581,12 +15287,12 @@
      "dev": true
    },
    "selfsigned": {
      "version": "1.10.7",
      "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.7.tgz",
      "integrity": "sha512-8M3wBCzeWIJnQfl43IKwOmC4H/RAp50S8DF60znzjW5GVqTcSe2vWclt7hmYVPkKPlHWOu5EaWOMZ2Y6W8ZXTA==",
      "version": "1.10.8",
      "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz",
      "integrity": "sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==",
      "dev": true,
      "requires": {
        "node-forge": "0.9.0"
        "node-forge": "^0.10.0"
      }
    },
    "semver": {
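The `selfsigned` bump is what lets the lock file settle on a single `node-forge`: 1.10.7 pinned `node-forge` to exactly `0.9.0`, while 1.10.8 accepts the caret range `^0.10.0`, which the updated top-level `node-forge@0.10.0` entry satisfies. A quick check with the `semver` package (illustrative, not part of the diff) confirms the range arithmetic:

```js
// check-range.js — confirm node-forge 0.10.0 satisfies selfsigned's new range
// (requires the "semver" package: npm install semver)
const semver = require('semver');

console.log(semver.satisfies('0.10.0', '^0.10.0')); // true  — new selfsigned range
console.log(semver.satisfies('0.10.0', '0.9.0'));   // false — old exact pin
```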
@@ -30,7 +30,7 @@
  },
  "devDependencies": {
    "@babel/polyfill": "^7.8.7",
    "@lingui/cli": "^2.9.1",
    "@lingui/cli": "^2.9.2",
    "@lingui/macro": "^2.9.1",
    "@nteract/mockument": "^1.0.4",
    "babel-core": "^7.0.0-bridge.0",
@@ -17,12 +17,14 @@ import Jobs from './models/Jobs';
import Labels from './models/Labels';
import Me from './models/Me';
import NotificationTemplates from './models/NotificationTemplates';
import Notifications from './models/Notifications';
import Organizations from './models/Organizations';
import ProjectUpdates from './models/ProjectUpdates';
import Projects from './models/Projects';
import Roles from './models/Roles';
import Root from './models/Root';
import Schedules from './models/Schedules';
import Settings from './models/Settings';
import SystemJobs from './models/SystemJobs';
import Teams from './models/Teams';
import Tokens from './models/Tokens';
@@ -53,12 +55,14 @@ const JobsAPI = new Jobs();
const LabelsAPI = new Labels();
const MeAPI = new Me();
const NotificationTemplatesAPI = new NotificationTemplates();
const NotificationsAPI = new Notifications();
const OrganizationsAPI = new Organizations();
const ProjectUpdatesAPI = new ProjectUpdates();
const ProjectsAPI = new Projects();
const RolesAPI = new Roles();
const RootAPI = new Root();
const SchedulesAPI = new Schedules();
const SettingsAPI = new Settings();
const SystemJobsAPI = new SystemJobs();
const TeamsAPI = new Teams();
const TokensAPI = new Tokens();
@@ -90,12 +94,14 @@ export {
  LabelsAPI,
  MeAPI,
  NotificationTemplatesAPI,
  NotificationsAPI,
  OrganizationsAPI,
  ProjectUpdatesAPI,
  ProjectsAPI,
  RolesAPI,
  RootAPI,
  SchedulesAPI,
  SettingsAPI,
  SystemJobsAPI,
  TeamsAPI,
  TokensAPI,
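These three hunks follow the file's one-instance-per-resource pattern: each model is a class wrapping a REST resource, instantiated exactly once here and re-exported so the rest of the UI shares a single client per resource. A minimal sketch of the shape this implies — the `Base` class, the axios client, and the endpoint path are assumptions for illustration, not AWX's actual model code:

```js
// Sketch of the model-singleton pattern suggested by api/index.js above.
import axios from 'axios';

class Base {
  constructor(baseURL) {
    // one shared HTTP client per resource endpoint
    this.http = axios.create({ baseURL });
  }

  read(params) {
    // GET the collection, e.g. /api/v2/project_updates/?page_size=5
    return this.http.get('/', { params });
  }
}

class ProjectUpdates extends Base {
  constructor() {
    super('/api/v2/project_updates/');
  }
}

// Instantiated once and shared everywhere, mirroring the hunks above.
const ProjectUpdatesAPI = new ProjectUpdates();
export { ProjectUpdatesAPI };
```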