mirror of https://github.com/ansible/awx.git
synced 2026-01-11 01:57:35 -03:30

Merge remote-tracking branch 'tower/release_3.3.0' into devel

This commit is contained in:
commit 18f6f68540

1	.gitignore (vendored)
@@ -67,6 +67,7 @@ pep8.txt
 scratch
 testem.log
 awx/awx_test.sqlite3-journal
+.pytest_cache/
 
 # Mac OS X
 *.DS_Store
 
14	Makefile
@@ -30,6 +30,8 @@ DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
 # Comma separated list
 SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
 
+CURWD = $(shell pwd)
+
 # Determine appropriate shasum command
 UNAME_S := $(shell uname -s)
 ifeq ($(UNAME_S),Linux)
@@ -219,7 +221,7 @@ init:
 	if [ "$(AWX_GROUP_QUEUES)" == "tower,thepentagon" ]; then \
 		$(MANAGEMENT_COMMAND) provision_instance --hostname=isolated; \
 		$(MANAGEMENT_COMMAND) register_queue --queuename='thepentagon' --hostnames=isolated --controller=tower; \
-		$(MANAGEMENT_COMMAND) generate_isolated_key | ssh -o "StrictHostKeyChecking no" root@isolated 'cat > /root/.ssh/authorized_keys'; \
+		$(MANAGEMENT_COMMAND) generate_isolated_key | ssh -o "StrictHostKeyChecking no" root@isolated 'cat >> /root/.ssh/authorized_keys'; \
 	fi;
 
 # Refresh development environment after pulling new code.
@@ -372,7 +374,7 @@ awx-link:
 	sed -i "s/placeholder/$(shell git describe --long | sed 's/\./\\./g')/" /awx_devel/awx.egg-info/PKG-INFO
 	cp /tmp/awx.egg-link /venv/awx/lib/python2.7/site-packages/awx.egg-link
 
-TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests awx/network_ui/tests/unit
+TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
 
 # Run all API unit tests.
 test:
@@ -387,7 +389,7 @@ test_unit:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
 	fi; \
-	py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit awx/network_ui/tests/unit
+	py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit
 
 test_ansible:
 	@if [ "$(VENV_BASE)" ]; then \
@@ -560,7 +562,7 @@ docker-isolated:
 	TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml create
 	docker start tools_awx_1
 	docker start tools_isolated_1
-	echo "__version__ = '`python setup.py --version`'" | docker exec -i tools_isolated_1 /bin/bash -c "cat > /venv/awx/lib/python2.7/site-packages/awx.py"
+	echo "__version__ = '`git describe --long | cut -d - -f 1-1`'" | docker exec -i tools_isolated_1 /bin/bash -c "cat > /venv/awx/lib/python2.7/site-packages/awx.py"
 	if [ "`docker exec -i -t tools_isolated_1 cat /root/.ssh/authorized_keys`" == "`docker exec -t tools_awx_1 cat /root/.ssh/id_rsa.pub`" ]; then \
 		echo "SSH keys already copied to isolated instance"; \
 	else \
@@ -607,6 +609,10 @@ docker-compose-elk: docker-auth
 docker-compose-cluster-elk: docker-auth
 	TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
 
+minishift-dev:
+	ansible-playbook -i localhost, -e devtree_directory=$(CURWD) tools/clusterdevel/start_minishift_dev.yml
+
 clean-elk:
 	docker stop tools_kibana_1
 	docker stop tools_logstash_1
@@ -7,11 +7,18 @@ import sys
 import warnings
 
 from pkg_resources import get_distribution
-from .celery import app as celery_app  # noqa
 
 __version__ = get_distribution('awx').version
+__all__ = ['__version__']
+
+
+# Isolated nodes do not have celery installed
+try:
+    from .celery import app as celery_app  # noqa
+    __all__.append('celery_app')
+except ImportError:
+    pass
-__all__ = ['__version__', 'celery_app']
 
 # Check for the presence/absence of "devonly" module to determine if running
 # from a source code checkout or release packaage.
@@ -11,7 +11,7 @@ from django.utils.encoding import smart_text
 # Django REST Framework
 from rest_framework import authentication
 
-# Django OAuth Toolkit
+# Django-OAuth-Toolkit
 from oauth2_provider.contrib.rest_framework import OAuth2Authentication
 
 logger = logging.getLogger('awx.api.authentication')
@@ -25,7 +25,7 @@ class LoggedBasicAuthentication(authentication.BasicAuthentication):
         ret = super(LoggedBasicAuthentication, self).authenticate(request)
         if ret:
             username = ret[0].username if ret[0] else '<none>'
-            logger.debug(smart_text(u"User {} performed a {} to {} through the API".format(username, request.method, request.path)))
+            logger.info(smart_text(u"User {} performed a {} to {} through the API".format(username, request.method, request.path)))
         return ret
 
     def authenticate_header(self, request):
@@ -39,9 +39,6 @@ class SessionAuthentication(authentication.SessionAuthentication):
     def authenticate_header(self, request):
         return 'Session'
 
-    def enforce_csrf(self, request):
-        return None
-
 
 class LoggedOAuth2Authentication(OAuth2Authentication):
 
@@ -50,8 +47,8 @@ class LoggedOAuth2Authentication(OAuth2Authentication):
         if ret:
             user, token = ret
             username = user.username if user else '<none>'
-            logger.debug(smart_text(
-                u"User {} performed a {} to {} through the API using OAuth token {}.".format(
+            logger.info(smart_text(
+                u"User {} performed a {} to {} through the API using OAuth 2 token {}.".format(
                     username, request.method, request.path, token.pk
                 )
             ))
@@ -47,3 +47,15 @@ register(
     category=_('Authentication'),
     category_slug='authentication',
 )
+register(
+    'ALLOW_OAUTH2_FOR_EXTERNAL_USERS',
+    field_class=fields.BooleanField,
+    default=False,
+    label=_('Allow External Users to Create OAuth2 Tokens'),
+    help_text=_('For security reasons, users from external auth providers (LDAP, SAML, '
+                'SSO, Radius, and others) are not allowed to create OAuth2 tokens. '
+                'To change this behavior, enable this setting. Existing tokens will '
+                'not be deleted when this setting is toggled off.'),
+    category=_('Authentication'),
+    category_slug='authentication',
+)
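A minimal sketch of how a flag registered this way can be consulted at token-creation time. The helper and its is_external argument are illustrative assumptions, not AWX's actual enforcement path:

    from django.conf import settings
    from rest_framework.exceptions import PermissionDenied

    def ensure_token_creation_allowed(user, is_external):
        # is_external: the user originates from LDAP/SAML/RADIUS/etc. (assumed flag)
        if is_external and not settings.ALLOW_OAUTH2_FOR_EXTERNAL_USERS:
            raise PermissionDenied('External users may not create OAuth2 tokens.')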
@@ -12,7 +12,11 @@ class ActiveJobConflict(ValidationError):
     status_code = 409
 
     def __init__(self, active_jobs):
-        super(ActiveJobConflict, self).__init__({
+        # During APIException.__init__(), Django Rest Framework
+        # turns everything in self.detail into strings by using force_text.
+        # Declaring detail afterwards circumvents this behavior.
+        super(ActiveJobConflict, self).__init__()
+        self.detail = {
             "error": _("Resource is being used by running jobs."),
             "active_jobs": active_jobs
-        })
+        }
@@ -4,6 +4,7 @@
 # Python
 import re
 import json
+from functools import reduce
 
 # Django
 from django.core.exceptions import FieldError, ValidationError
@@ -238,7 +239,11 @@ class FieldLookupBackend(BaseFilterBackend):
         or_filters = []
         chain_filters = []
         role_filters = []
-        search_filters = []
+        search_filters = {}
+        # Can only have two values: 'AND', 'OR'
+        # If 'AND' is used, an item must satisfy all conditions to show up in the results.
+        # If 'OR' is used, an item just needs to satisfy one condition to appear in results.
+        search_filter_relation = 'OR'
         for key, values in request.query_params.lists():
             if key in self.RESERVED_NAMES:
                 continue
@@ -262,11 +267,13 @@ class FieldLookupBackend(BaseFilterBackend):
 
             # Search across related objects.
             if key.endswith('__search'):
+                if values and ',' in values[0]:
+                    search_filter_relation = 'AND'
+                    values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
                 for value in values:
                     search_value, new_keys = self.value_to_python(queryset.model, key, force_text(value))
                     assert isinstance(new_keys, list)
-                    for new_key in new_keys:
-                        search_filters.append((new_key, search_value))
+                    search_filters[search_value] = new_keys
                 continue
 
             # Custom chain__ and or__ filters, mutually exclusive (both can
@@ -355,11 +362,18 @@ class FieldLookupBackend(BaseFilterBackend):
                 else:
                     q |= Q(**{k:v})
             args.append(q)
-        if search_filters:
+        if search_filters and search_filter_relation == 'OR':
             q = Q()
-            for k,v in search_filters:
-                q |= Q(**{k:v})
+            for term, constrains in search_filters.iteritems():
+                for constrain in constrains:
+                    q |= Q(**{constrain: term})
             args.append(q)
+        elif search_filters and search_filter_relation == 'AND':
+            for term, constrains in search_filters.iteritems():
+                q_chain = Q()
+                for constrain in constrains:
+                    q_chain |= Q(**{constrain: term})
+                queryset = queryset.filter(q_chain)
         for n,k,v in chain_filters:
             if n:
                 q = ~Q(**{k:v})
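A standalone sketch of the OR-vs-AND semantics the rewritten search filter implements (plain Django ORM; the model and lookups are invented for illustration). Each term expands to several field lookups OR-ed together; under the 'AND' relation each term's group becomes its own filter() call, so successive terms are conjoined:

    from django.db.models import Q

    def apply_search(queryset, search_filters, relation='OR'):
        # search_filters maps term -> [field lookups], e.g.
        # {'web': ['name__icontains', 'description__icontains']}
        if relation == 'OR':
            q = Q()
            for term, lookups in search_filters.items():
                for lookup in lookups:
                    q |= Q(**{lookup: term})
            return queryset.filter(q)
        for term, lookups in search_filters.items():
            q_term = Q()
            for lookup in lookups:
                q_term |= Q(**{lookup: term})
            queryset = queryset.filter(q_term)  # chained filters AND the terms
        return queryset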
@@ -23,14 +23,14 @@ from django.utils.translation import ugettext_lazy as _
 from django.contrib.auth import views as auth_views
 
 # Django REST Framework
 from rest_framework.authentication import get_authorization_header
-from rest_framework.exceptions import PermissionDenied, AuthenticationFailed, ParseError
+from rest_framework.exceptions import PermissionDenied, AuthenticationFailed, ParseError, NotAcceptable, UnsupportedMediaType
 from rest_framework import generics
 from rest_framework.response import Response
 from rest_framework import status
 from rest_framework import views
 from rest_framework.permissions import AllowAny
-from rest_framework.renderers import JSONRenderer
+from rest_framework.renderers import StaticHTMLRenderer, JSONRenderer
+from rest_framework.negotiation import DefaultContentNegotiation
 
 # cryptography
 from cryptography.fernet import InvalidToken
@@ -64,21 +64,36 @@ analytics_logger = logging.getLogger('awx.analytics.performance')
 
 class LoggedLoginView(auth_views.LoginView):
 
+    def get(self, request, *args, **kwargs):
+        # The django.auth.contrib login form doesn't perform the content
+        # negotiation we've come to expect from DRF; add in code to catch
+        # situations where Accept != text/html (or */*) and reply with
+        # an HTTP 406
+        try:
+            DefaultContentNegotiation().select_renderer(
+                request,
+                [StaticHTMLRenderer],
+                'html'
+            )
+        except NotAcceptable:
+            resp = Response(status=status.HTTP_406_NOT_ACCEPTABLE)
+            resp.accepted_renderer = StaticHTMLRenderer()
+            resp.accepted_media_type = 'text/plain'
+            resp.renderer_context = {}
+            return resp
+        return super(LoggedLoginView, self).get(request, *args, **kwargs)
+
     def post(self, request, *args, **kwargs):
-        original_user = getattr(request, 'user', None)
         ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
-        current_user = getattr(request, 'user', None)
-
-        if current_user and getattr(current_user, 'pk', None) and current_user != original_user:
-            logger.info("User {} logged in.".format(current_user.username))
         if request.user.is_authenticated:
-            logger.info(smart_text(u"User {} logged in".format(self.request.user.username)))
+            logger.info(smart_text(u"User {} logged in.".format(self.request.user.username)))
             ret.set_cookie('userLoggedIn', 'true')
+            current_user = UserSerializer(self.request.user)
+            current_user = JSONRenderer().render(current_user.data)
+            current_user = urllib.quote('%s' % current_user, '')
+            ret.set_cookie('current_user', current_user)
+
             return ret
         else:
            ret.status_code = 401
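Illustration of the new GET negotiation path (the host is an assumption; the exact response body depends on the DRF version): a non-HTML Accept header on the login form now draws an HTTP 406 instead of the HTML form.

    import requests

    r = requests.get('https://awx.example.org/api/login/',
                     headers={'Accept': 'application/json'})
    print(r.status_code)  # expected: 406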
@@ -175,9 +190,13 @@ class APIView(views.APIView):
             request.drf_request_user = getattr(drf_request, 'user', False)
         except AuthenticationFailed:
             request.drf_request_user = None
-        except ParseError as exc:
+        except (PermissionDenied, ParseError) as exc:
             request.drf_request_user = None
             self.__init_request_error__ = exc
+        except UnsupportedMediaType as exc:
+            exc.detail = _('You did not use correct Content-Type in your HTTP request. '
+                           'If you are using our REST API, the Content-Type must be application/json')
+            self.__init_request_error__ = exc
         return drf_request
 
     def finalize_response(self, request, response, *args, **kwargs):
@@ -190,6 +209,7 @@ class APIView(views.APIView):
         if hasattr(self, '__init_request_error__'):
             response = self.handle_exception(self.__init_request_error__)
         if response.status_code == 401:
+            response.data['detail'] += ' To establish a login session, visit /api/login/.'
             logger.info(status_msg)
         else:
             logger.warn(status_msg)
@@ -208,26 +228,35 @@ class APIView(views.APIView):
         return response
 
     def get_authenticate_header(self, request):
         """
         Determine the WWW-Authenticate header to use for 401 responses. Try to
         use the request header as an indication for which authentication method
         was attempted.
         """
-        for authenticator in self.get_authenticators():
-            resp_hdr = authenticator.authenticate_header(request)
-            if not resp_hdr:
-                continue
-            req_hdr = get_authorization_header(request)
-            if not req_hdr:
-                continue
-            if resp_hdr.split()[0] and resp_hdr.split()[0] == req_hdr.split()[0]:
-                return resp_hdr
-        # If it can't be determined from the request, use the last
-        # authenticator (should be Basic).
-        try:
-            return authenticator.authenticate_header(request)
-        except NameError:
-            pass
+        # HTTP Basic auth is insecure by default, because the basic auth
+        # backend does not provide CSRF protection.
+        #
+        # If you visit `/api/v2/job_templates/` and we return
+        # `WWW-Authenticate: Basic ...`, your browser will prompt you for an
+        # HTTP basic auth username+password and will store it _in the browser_
+        # for subsequent requests.  Because basic auth does not require CSRF
+        # validation (because it's commonly used with e.g., tower-cli and other
+        # non-browser clients), browsers that save basic auth in this way are
+        # vulnerable to cross-site request forgery:
+        #
+        # 1. Visit `/api/v2/job_templates/` and specify a user+pass for basic auth.
+        # 2. Visit a nefarious website and submit a
+        #    `<form action='POST' method='https://tower.example.org/api/v2/job_templates/N/launch/'>`
+        # 3. The browser will use your persisted user+pass and your login
+        #    session is effectively hijacked.
+        #
+        # To prevent this, we will _no longer_ send `WWW-Authenticate: Basic ...`
+        # headers in responses; this means that unauthenticated /api/v2/... requests
+        # will now return HTTP 401 in-browser, rather than popping up an auth dialog.
+        #
+        # This means that people who wish to use the interactive API browser
+        # must _first_ log in via `/api/login/` to establish a session (which
+        # _does_ enforce CSRF).
+        #
+        # CLI users can _still_ specify basic auth credentials explicitly via
+        # a header or in the URL e.g.,
+        # `curl https://user:pass@tower.example.org/api/v2/job_templates/N/launch/`
+        return 'Bearer realm=api authorization_url=/api/o/authorize/'
 
     def get_view_description(self, html=False):
         """
@@ -298,6 +327,12 @@ class APIView(views.APIView):
             kwargs.pop('version')
         return super(APIView, self).dispatch(request, *args, **kwargs)
 
+    def check_permissions(self, request):
+        if request.method not in ('GET', 'OPTIONS', 'HEAD'):
+            if 'write' not in getattr(request.user, 'oauth_scopes', ['write']):
+                raise PermissionDenied()
+        return super(APIView, self).check_permissions(request)
+
 
 class GenericAPIView(generics.GenericAPIView, APIView):
     # Base class for all model-based views.
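Sketch of what the new check_permissions guard means for clients (host and token are assumptions): a token carrying only the 'read' scope can GET, but any mutating method is rejected before the view's own permission checks run.

    import requests

    BASE = 'https://awx.example.org/api/v2'
    HEADERS = {'Authorization': 'Bearer READ_SCOPED_TOKEN'}

    print(requests.get(BASE + '/job_templates/', headers=HEADERS).status_code)             # 200
    print(requests.post(BASE + '/job_templates/', headers=HEADERS, json={}).status_code)   # 403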
@@ -726,6 +761,7 @@ class DeleteLastUnattachLabelMixin(object):
     when the last disassociate is called should inherit from this class. Further,
     the model should implement is_detached()
     '''
+
     def unattach(self, request, *args, **kwargs):
         (sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request)
         if res:
@@ -801,6 +837,10 @@ class CopyAPIView(GenericAPIView):
     new_in_330 = True
     new_in_api_v2 = True
 
+    def v1_not_allowed(self):
+        return Response({'detail': 'Action only possible starting with v2 API.'},
+                        status=status.HTTP_404_NOT_FOUND)
+
     def _get_copy_return_serializer(self, *args, **kwargs):
         if not self.copy_return_serializer_class:
             return self.get_serializer(*args, **kwargs)
@@ -885,9 +925,11 @@ class CopyAPIView(GenericAPIView):
         # not work properly in non-request-response-cycle context.
         new_obj.created_by = creater
         new_obj.save()
-        for m2m in m2m_to_preserve:
-            for related_obj in m2m_to_preserve[m2m].all():
-                getattr(new_obj, m2m).add(related_obj)
+        from awx.main.signals import disable_activity_stream
+        with disable_activity_stream():
+            for m2m in m2m_to_preserve:
+                for related_obj in m2m_to_preserve[m2m].all():
+                    getattr(new_obj, m2m).add(related_obj)
         if not old_parent:
             sub_objects = []
             for o2m in o2m_to_preserve:
@@ -902,13 +944,21 @@ class CopyAPIView(GenericAPIView):
         return ret
 
     def get(self, request, *args, **kwargs):
+        if get_request_version(request) < 2:
+            return self.v1_not_allowed()
         obj = self.get_object()
         if not request.user.can_access(obj.__class__, 'read', obj):
             raise PermissionDenied()
         create_kwargs = self._build_create_dict(obj)
         for key in create_kwargs:
             create_kwargs[key] = getattr(create_kwargs[key], 'pk', None) or create_kwargs[key]
-        return Response({'can_copy': request.user.can_access(self.model, 'add', create_kwargs)})
+        can_copy = request.user.can_access(self.model, 'add', create_kwargs) and \
+            request.user.can_access(self.model, 'copy_related', obj)
+        return Response({'can_copy': can_copy})
 
     def post(self, request, *args, **kwargs):
+        if get_request_version(request) < 2:
+            return self.v1_not_allowed()
         obj = self.get_object()
         create_kwargs = self._build_create_dict(obj)
         create_kwargs_check = {}
@@ -916,6 +966,8 @@ class CopyAPIView(GenericAPIView):
             create_kwargs_check[key] = getattr(create_kwargs[key], 'pk', None) or create_kwargs[key]
         if not request.user.can_access(self.model, 'add', create_kwargs_check):
             raise PermissionDenied()
+        if not request.user.can_access(self.model, 'copy_related', obj):
+            raise PermissionDenied()
         serializer = self.get_serializer(data=request.data)
         if not serializer.is_valid():
             return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@@ -937,4 +989,5 @@ class CopyAPIView(GenericAPIView):
             permission_check_func=permission_check_func
         )
         serializer = self._get_copy_return_serializer(new_obj)
-        return Response(serializer.data, status=status.HTTP_201_CREATED)
+        headers = {'Location': new_obj.get_absolute_url(request=request)}
+        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
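With the new Location header, a client can follow a successful copy straight to the created object (host, ids, and credentials are assumptions):

    import requests

    r = requests.post('https://awx.example.org/api/v2/job_templates/42/copy/',
                      auth=('admin', 'password'), json={'name': 'Copy of JT'})
    print(r.status_code)           # 201
    print(r.headers['Location'])   # e.g. /api/v2/job_templates/43/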
@@ -67,6 +67,8 @@ class Metadata(metadata.SimpleMetadata):
                 if field.field_name == model_field.name:
                     field_info['filterable'] = True
                     break
+            else:
+                field_info['filterable'] = False
 
         # Indicate if a field has a default value.
         # FIXME: Still isn't showing all default values?
File diff suppressed because it is too large

@@ -1,18 +0,0 @@
-# Copyright (c) 2017 Ansible, Inc.
-# All Rights Reserved.
-
-from django.conf.urls import url
-
-from oauth2_provider.urls import base_urlpatterns
-
-from awx.api.views import (
-    ApiOAuthAuthorizationRootView,
-)
-
-
-urls = [
-    url(r'^$', ApiOAuthAuthorizationRootView.as_view(), name='oauth_authorization_root_view'),
-] + base_urlpatterns
-
-
-__all__ = ['urls']
@@ -11,7 +11,6 @@ from awx.api.views import (
     OAuth2TokenList,
     OAuth2TokenDetail,
     OAuth2TokenActivityStreamList,
-    OAuth2PersonalTokenList
 )
@@ -42,8 +41,7 @@ urls = [
         r'^tokens/(?P<pk>[0-9]+)/activity_stream/$',
         OAuth2TokenActivityStreamList.as_view(),
         name='o_auth2_token_activity_stream_list'
     ),
-    url(r'^personal_tokens/$', OAuth2PersonalTokenList.as_view(), name='o_auth2_personal_token_list'),
 ]
 
 __all__ = ['urls']
31	awx/api/urls/oauth2_root.py (new file)
@@ -0,0 +1,31 @@
+# Copyright (c) 2017 Ansible, Inc.
+# All Rights Reserved.
+
+from django.conf.urls import url
+
+from oauthlib import oauth2
+from oauth2_provider import views
+
+from awx.api.views import (
+    ApiOAuthAuthorizationRootView,
+)
+
+
+class TokenView(views.TokenView):
+
+    def create_token_response(self, request):
+        try:
+            return super(TokenView, self).create_token_response(request)
+        except oauth2.AccessDeniedError as e:
+            return request.build_absolute_uri(), {}, str(e), '403'
+
+
+urls = [
+    url(r'^$', ApiOAuthAuthorizationRootView.as_view(), name='oauth_authorization_root_view'),
+    url(r"^authorize/$", views.AuthorizationView.as_view(), name="authorize"),
+    url(r"^token/$", TokenView.as_view(), name="token"),
+    url(r"^revoke_token/$", views.RevokeTokenView.as_view(), name="revoke-token"),
+]
+
+
+__all__ = ['urls']
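Sketch of exercising the new token endpoint with the standard django-oauth-toolkit password grant (host, client credentials, and user are assumptions; which grants are available depends on the Application's configuration):

    import requests

    r = requests.post(
        'https://awx.example.org/api/o/token/',
        data={'grant_type': 'password', 'username': 'alice',
              'password': 'secret', 'scope': 'read write'},
        auth=('CLIENT_ID', 'CLIENT_SECRET'),
    )
    print(r.status_code, r.json().get('access_token'))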
@@ -67,8 +67,8 @@ from .schedule import urls as schedule_urls
 from .activity_stream import urls as activity_stream_urls
 from .instance import urls as instance_urls
 from .instance_group import urls as instance_group_urls
-from .user_oauth import urls as user_oauth_urls
-from .oauth import urls as oauth_urls
+from .oauth2 import urls as oauth2_urls
+from .oauth2_root import urls as oauth2_root_urls
 
 
 v1_urls = [
@@ -130,7 +130,7 @@ v2_urls = [
     url(r'^applications/(?P<pk>[0-9]+)/$', OAuth2ApplicationDetail.as_view(), name='o_auth2_application_detail'),
     url(r'^applications/(?P<pk>[0-9]+)/tokens/$', ApplicationOAuth2TokenList.as_view(), name='application_o_auth2_token_list'),
     url(r'^tokens/$', OAuth2TokenList.as_view(), name='o_auth2_token_list'),
-    url(r'^', include(user_oauth_urls)),
+    url(r'^', include(oauth2_urls)),
 ]
 
 app_name = 'api'
@@ -145,7 +145,7 @@ urlpatterns = [
     url(r'^logout/$', LoggedLogoutView.as_view(
         next_page='/api/', redirect_field_name='next'
     ), name='logout'),
-    url(r'^o/', include(oauth_urls)),
+    url(r'^o/', include(oauth2_root_urls)),
 ]
 if settings.SETTINGS_MODULE == 'awx.settings.development':
     from awx.api.swagger import SwaggerSchemaView
@@ -16,7 +16,7 @@ from awx.api.views import (
     UserAccessList,
     OAuth2ApplicationList,
     OAuth2UserTokenList,
-    OAuth2PersonalTokenList,
+    UserPersonalTokenList,
     UserAuthorizedTokenList,
 )
@@ -34,7 +34,7 @@ urls = [
     url(r'^(?P<pk>[0-9]+)/applications/$', OAuth2ApplicationList.as_view(), name='o_auth2_application_list'),
     url(r'^(?P<pk>[0-9]+)/tokens/$', OAuth2UserTokenList.as_view(), name='o_auth2_token_list'),
     url(r'^(?P<pk>[0-9]+)/authorized_tokens/$', UserAuthorizedTokenList.as_view(), name='user_authorized_token_list'),
-    url(r'^(?P<pk>[0-9]+)/personal_tokens/$', OAuth2PersonalTokenList.as_view(), name='o_auth2_personal_token_list'),
+    url(r'^(?P<pk>[0-9]+)/personal_tokens/$', UserPersonalTokenList.as_view(), name='user_personal_token_list'),
 ]
221	awx/api/views.py
@@ -24,7 +24,8 @@ from django.shortcuts import get_object_or_404
 from django.utils.encoding import smart_text
 from django.utils.safestring import mark_safe
 from django.utils.timezone import now
-from django.views.decorators.csrf import csrf_exempt
+from django.utils.decorators import method_decorator
+from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
 from django.template.loader import render_to_string
 from django.http import HttpResponse
 from django.contrib.contenttypes.models import ContentType
@@ -60,7 +61,7 @@ import pytz
 from wsgiref.util import FileWrapper
 
 # AWX
-from awx.main.tasks import send_notifications, handle_ha_toplogy_changes
+from awx.main.tasks import send_notifications
 from awx.main.access import get_user_queryset
 from awx.main.ha import is_ha_environment
 from awx.api.filters import V1CredentialFilterBackend
@@ -104,6 +105,8 @@ def api_exception_handler(exc, context):
         exc = ParseError(exc.args[0])
     if isinstance(exc, FieldError):
         exc = ParseError(exc.args[0])
+    if isinstance(context['view'], UnifiedJobStdout):
+        context['view'].renderer_classes = [BrowsableAPIRenderer, renderers.JSONRenderer]
     return exception_handler(exc, context)
@@ -176,29 +179,64 @@ class InstanceGroupMembershipMixin(object):
         sub_id, res = self.attach_validate(request)
         if status.is_success(response.status_code):
             if self.parent_model is Instance:
                 ig_obj = get_object_or_400(self.model, pk=sub_id)
                 inst_name = ig_obj.hostname
             else:
                 ig_obj = self.get_parent_object()
                 inst_name = get_object_or_400(self.model, pk=sub_id).hostname
-            if inst_name not in ig_obj.policy_instance_list:
-                ig_obj.policy_instance_list.append(inst_name)
-                ig_obj.save()
+            with transaction.atomic():
+                ig_qs = InstanceGroup.objects.select_for_update()
+                if self.parent_model is Instance:
+                    ig_obj = get_object_or_400(ig_qs, pk=sub_id)
+                else:
+                    # similar to get_parent_object, but selected for update
+                    parent_filter = {
+                        self.lookup_field: self.kwargs.get(self.lookup_field, None),
+                    }
+                    ig_obj = get_object_or_404(ig_qs, **parent_filter)
+                if inst_name not in ig_obj.policy_instance_list:
+                    ig_obj.policy_instance_list.append(inst_name)
+                    ig_obj.save(update_fields=['policy_instance_list'])
         return response
 
+    def is_valid_relation(self, parent, sub, created=False):
+        if sub.is_isolated():
+            return {'error': _('Isolated instances may not be added or removed from instances groups via the API.')}
+        if self.parent_model is InstanceGroup:
+            ig_obj = self.get_parent_object()
+            if ig_obj.controller_id is not None:
+                return {'error': _('Isolated instance group membership may not be managed via the API.')}
+        return None
+
+    def unattach_validate(self, request):
+        (sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
+        if res:
+            return (sub_id, res)
+        sub = get_object_or_400(self.model, pk=sub_id)
+        attach_errors = self.is_valid_relation(None, sub)
+        if attach_errors:
+            return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
+        return (sub_id, res)
+
     def unattach(self, request, *args, **kwargs):
         response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
-        sub_id, res = self.attach_validate(request)
         if status.is_success(response.status_code):
+            sub_id = request.data.get('id', None)
             if self.parent_model is Instance:
                 ig_obj = get_object_or_400(self.model, pk=sub_id)
                 inst_name = self.get_parent_object().hostname
             else:
                 ig_obj = self.get_parent_object()
                 inst_name = get_object_or_400(self.model, pk=sub_id).hostname
-            if inst_name in ig_obj.policy_instance_list:
-                ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
-                ig_obj.save()
+            with transaction.atomic():
+                ig_qs = InstanceGroup.objects.select_for_update()
+                if self.parent_model is Instance:
+                    ig_obj = get_object_or_400(ig_qs, pk=sub_id)
+                else:
+                    # similar to get_parent_object, but selected for update
+                    parent_filter = {
+                        self.lookup_field: self.kwargs.get(self.lookup_field, None),
+                    }
+                    ig_obj = get_object_or_404(ig_qs, **parent_filter)
+                if inst_name in ig_obj.policy_instance_list:
+                    ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
+                    ig_obj.save(update_fields=['policy_instance_list'])
         return response
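A condensed sketch of the row-locking pattern both methods adopt (model import assumed from the surrounding module): select_for_update() holds a row lock until the transaction commits, so the read-modify-write of policy_instance_list cannot lose a concurrent update.

    from django.db import transaction

    def append_instance(group_pk, hostname):
        with transaction.atomic():
            ig = InstanceGroup.objects.select_for_update().get(pk=group_pk)
            if hostname not in ig.policy_instance_list:
                ig.policy_instance_list.append(hostname)
                ig.save(update_fields=['policy_instance_list'])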
@@ -227,20 +265,20 @@ class ApiRootView(APIView):
     versioning_class = None
     swagger_topic = 'Versioning'
 
+    @method_decorator(ensure_csrf_cookie)
     def get(self, request, format=None):
         ''' List supported API versions '''
 
         v1 = reverse('api:api_v1_root_view', kwargs={'version': 'v1'})
         v2 = reverse('api:api_v2_root_view', kwargs={'version': 'v2'})
-        data = dict(
-            description = _('AWX REST API'),
-            current_version = v2,
-            available_versions = dict(v1 = v1, v2 = v2),
-        )
+        data = OrderedDict()
+        data['description'] = _('AWX REST API')
+        data['current_version'] = v2
+        data['available_versions'] = dict(v1 = v1, v2 = v2)
+        data['oauth2'] = drf_reverse('api:oauth_authorization_root_view')
         if feature_enabled('rebranding'):
             data['custom_logo'] = settings.CUSTOM_LOGO
             data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
-        data['oauth2'] = drf_reverse('api:oauth_authorization_root_view')
         return Response(data)
@@ -631,7 +669,6 @@ class InstanceDetail(RetrieveUpdateAPIView):
         else:
             obj.capacity = 0
         obj.save()
-        handle_ha_toplogy_changes.apply_async()
         r.data = InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj)
         return r
 
@@ -640,7 +677,7 @@ class InstanceUnifiedJobsList(SubListAPIView):
 
     view_name = _("Instance Jobs")
     model = UnifiedJob
-    serializer_class = UnifiedJobSerializer
+    serializer_class = UnifiedJobListSerializer
     parent_model = Instance
 
     def get_queryset(self):
@@ -687,7 +724,7 @@ class InstanceGroupUnifiedJobsList(SubListAPIView):
 
     view_name = _("Instance Group Running Jobs")
     model = UnifiedJob
-    serializer_class = UnifiedJobSerializer
+    serializer_class = UnifiedJobListSerializer
     parent_model = InstanceGroup
     relationship = "unifiedjob_set"
@@ -720,6 +757,7 @@ class SchedulePreview(GenericAPIView):
     model = Schedule
     view_name = _('Schedule Recurrence Rule Preview')
     serializer_class = SchedulePreviewSerializer
+    permission_classes = (IsAuthenticated,)
 
     def post(self, request):
         serializer = self.get_serializer(data=request.data)
@@ -797,7 +835,7 @@ class ScheduleCredentialsList(LaunchConfigCredentialsBase):
 class ScheduleUnifiedJobsList(SubListAPIView):
 
     model = UnifiedJob
-    serializer_class = UnifiedJobSerializer
+    serializer_class = UnifiedJobListSerializer
     parent_model = Schedule
     relationship = 'unifiedjob_set'
     view_name = _('Schedule Jobs List')
@@ -1055,7 +1093,7 @@ class OrganizationProjectsList(SubListCreateAttachDetachAPIView):
 class OrganizationWorkflowJobTemplatesList(SubListCreateAttachDetachAPIView):
 
     model = WorkflowJobTemplate
-    serializer_class = WorkflowJobTemplateListSerializer
+    serializer_class = WorkflowJobTemplateSerializer
     parent_model = Organization
     relationship = 'workflows'
     parent_key = 'organization'
@@ -1144,11 +1182,6 @@ class TeamList(ListCreateAPIView):
     model = Team
     serializer_class = TeamSerializer
 
-    def get_queryset(self):
-        qs = Team.accessible_objects(self.request.user, 'read_role').order_by()
-        qs = qs.select_related('admin_role', 'read_role', 'member_role', 'organization')
-        return qs
-
 
 class TeamDetail(RetrieveUpdateDestroyAPIView):
 
@@ -1186,8 +1219,8 @@ class TeamRolesList(SubListAttachDetachAPIView):
 
         role = get_object_or_400(Role, pk=sub_id)
         org_content_type = ContentType.objects.get_for_model(Organization)
-        if role.content_type == org_content_type:
-            data = dict(msg=_("You cannot assign an Organization role as a child role for a Team."))
+        if role.content_type == org_content_type and role.role_field in ['member_role', 'admin_role']:
+            data = dict(msg=_("You cannot assign an Organization participation role as a child role for a Team."))
             return Response(data, status=status.HTTP_400_BAD_REQUEST)
 
         if role.is_singleton():
@@ -1377,7 +1410,7 @@ class ProjectNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView):
 class ProjectUpdatesList(SubListAPIView):
 
     model = ProjectUpdate
-    serializer_class = ProjectUpdateSerializer
+    serializer_class = ProjectUpdateListSerializer
     parent_model = Project
     relationship = 'project_updates'
 
@@ -1415,7 +1448,7 @@ class ProjectUpdateList(ListAPIView):
 class ProjectUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
 
     model = ProjectUpdate
-    serializer_class = ProjectUpdateSerializer
+    serializer_class = ProjectUpdateDetailSerializer
 
 
 class ProjectUpdateEventsList(SubListAPIView):
@@ -1488,7 +1521,7 @@ class ProjectUpdateScmInventoryUpdates(SubListCreateAPIView):
 
     view_name = _("Project Update SCM Inventory Updates")
     model = InventoryUpdate
-    serializer_class = InventoryUpdateSerializer
+    serializer_class = InventoryUpdateListSerializer
     parent_model = ProjectUpdate
     relationship = 'scm_inventory_updates'
     parent_key = 'source_project_update'
@@ -1568,6 +1601,10 @@ class OAuth2ApplicationDetail(RetrieveUpdateDestroyAPIView):
     serializer_class = OAuth2ApplicationSerializer
     swagger_topic = 'Authentication'
 
+    def update_raw_data(self, data):
+        data.pop('client_secret', None)
+        return super(OAuth2ApplicationDetail, self).update_raw_data(data)
+
 
 class ApplicationOAuth2TokenList(SubListCreateAPIView):
 
@@ -1610,29 +1647,14 @@ class OAuth2UserTokenList(SubListCreateAPIView):
     relationship = 'main_oauth2accesstoken'
     parent_key = 'user'
     swagger_topic = 'Authentication'
 
 
-class OAuth2AuthorizedTokenList(SubListCreateAPIView):
-
-    view_name = _("OAuth2 Authorized Access Tokens")
-
-    model = OAuth2AccessToken
-    serializer_class = OAuth2AuthorizedTokenSerializer
-    parent_model = OAuth2Application
-    relationship = 'oauth2accesstoken_set'
-    parent_key = 'application'
-    swagger_topic = 'Authentication'
-
-    def get_queryset(self):
-        return get_access_token_model().objects.filter(application__isnull=False, user=self.request.user)
-
-
 class UserAuthorizedTokenList(SubListCreateAPIView):
 
     view_name = _("OAuth2 User Authorized Access Tokens")
 
     model = OAuth2AccessToken
-    serializer_class = OAuth2AuthorizedTokenSerializer
+    serializer_class = UserAuthorizedTokenSerializer
     parent_model = User
     relationship = 'oauth2accesstoken_set'
     parent_key = 'user'
@@ -1640,12 +1662,12 @@ class UserAuthorizedTokenList(SubListCreateAPIView):
 
     def get_queryset(self):
         return get_access_token_model().objects.filter(application__isnull=False, user=self.request.user)
 
 
 class OrganizationApplicationList(SubListCreateAPIView):
 
     view_name = _("Organization OAuth2 Applications")
 
     model = OAuth2Application
     serializer_class = OAuth2ApplicationSerializer
     parent_model = Organization
@@ -1654,17 +1676,17 @@ class OrganizationApplicationList(SubListCreateAPIView):
     swagger_topic = 'Authentication'
 
 
-class OAuth2PersonalTokenList(SubListCreateAPIView):
+class UserPersonalTokenList(SubListCreateAPIView):
 
     view_name = _("OAuth2 Personal Access Tokens")
 
     model = OAuth2AccessToken
-    serializer_class = OAuth2PersonalTokenSerializer
+    serializer_class = UserPersonalTokenSerializer
     parent_model = User
     relationship = 'main_oauth2accesstoken'
     parent_key = 'user'
     swagger_topic = 'Authentication'
 
     def get_queryset(self):
         return get_access_token_model().objects.filter(application__isnull=True, user=self.request.user)
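Sketch of creating a personal access token through the renamed endpoint (host, user id, and credentials are assumptions; explicit basic auth remains permitted for non-browser clients, per the comment block above):

    import requests

    r = requests.post('https://awx.example.org/api/v2/users/1/personal_tokens/',
                      auth=('admin', 'password'),
                      json={'description': 'cli token', 'scope': 'write'})
    print(r.status_code, r.json().get('token'))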
@@ -2233,6 +2255,12 @@ class HostDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
     model = Host
     serializer_class = HostSerializer
 
+    def delete(self, request, *args, **kwargs):
+        if self.get_object().inventory.pending_deletion:
+            return Response({"error": _("The inventory for this host is already being deleted.")},
+                            status=status.HTTP_400_BAD_REQUEST)
+        return super(HostDetail, self).delete(request, *args, **kwargs)
+
 
 class HostAnsibleFactsDetail(RetrieveAPIView):
@@ -2842,7 +2870,7 @@ class InventorySourceGroupsList(SubListDestroyAPIView):
 class InventorySourceUpdatesList(SubListAPIView):
 
     model = InventoryUpdate
-    serializer_class = InventoryUpdateSerializer
+    serializer_class = InventoryUpdateListSerializer
     parent_model = InventorySource
     relationship = 'inventory_updates'
 
@@ -3011,12 +3039,12 @@ class JobTemplateLaunch(RetrieveAPIView):
             if fd not in modern_data and id_fd in modern_data:
                 modern_data[fd] = modern_data[id_fd]
 
-        # This block causes `extra_credentials` to _always_ be ignored for
+        # This block causes `extra_credentials` to _always_ raise error if
         # the launch endpoint if we're accessing `/api/v1/`
         if get_request_version(self.request) == 1 and 'extra_credentials' in modern_data:
             extra_creds = modern_data.pop('extra_credentials', None)
             if extra_creds is not None:
-                ignored_fields['extra_credentials'] = extra_creds
+                raise ParseError({"extra_credentials": _(
+                    "Field is not allowed for use with v1 API."
+                )})
 
         # Automatically convert legacy launch credential arguments into a list of `.credentials`
         if 'credentials' in modern_data and (
@@ -3037,10 +3065,10 @@ class JobTemplateLaunch(RetrieveAPIView):
         existing_credentials = obj.credentials.all()
         template_credentials = list(existing_credentials) # save copy of existing
         new_credentials = []
-        for key, conditional in (
-            ('credential', lambda cred: cred.credential_type.kind != 'ssh'),
-            ('vault_credential', lambda cred: cred.credential_type.kind != 'vault'),
-            ('extra_credentials', lambda cred: cred.credential_type.kind not in ('cloud', 'net'))
+        for key, conditional, _type, type_repr in (
+            ('credential', lambda cred: cred.credential_type.kind != 'ssh', int, 'pk value'),
+            ('vault_credential', lambda cred: cred.credential_type.kind != 'vault', int, 'pk value'),
+            ('extra_credentials', lambda cred: cred.credential_type.kind not in ('cloud', 'net'), Iterable, 'a list')
         ):
             if key in modern_data:
                 # if a specific deprecated key is specified, remove all
@@ -3049,6 +3077,13 @@ class JobTemplateLaunch(RetrieveAPIView):
                 existing_credentials = filter(conditional, existing_credentials)
                 prompted_value = modern_data.pop(key)
 
+                # validate type, since these are not covered by a serializer
+                if not isinstance(prompted_value, _type):
+                    msg = _(
+                        "Incorrect type. Expected {}, received {}."
+                    ).format(type_repr, prompted_value.__class__.__name__)
+                    raise ParseError({key: [msg], 'credentials': [msg]})
+
                 # add the deprecated credential specified in the request
                 if not isinstance(prompted_value, Iterable) or isinstance(prompted_value, basestring):
                     prompted_value = [prompted_value]
@@ -3108,7 +3143,8 @@ class JobTemplateLaunch(RetrieveAPIView):
         data['job'] = new_job.id
         data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
         data.update(JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
-        return Response(data, status=status.HTTP_201_CREATED)
+        headers = {'Location': new_job.get_absolute_url(request)}
+        return Response(data, status=status.HTTP_201_CREATED, headers=headers)
 
 
     def sanitize_for_response(self, data):
@@ -3320,6 +3356,9 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
         if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
             return {"error": _("Cannot assign multiple {credential_type} credentials.".format(
                 credential_type=sub.unique_hash(display=True)))}
+        kind = sub.credential_type.kind
+        if kind not in ('ssh', 'vault', 'cloud', 'net'):
+            return {'error': _('Cannot assign a Credential of kind `{}`.').format(kind)}
 
         return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)
 
@@ -3713,7 +3752,7 @@ class WorkflowJobNodeAlwaysNodesList(WorkflowJobNodeChildrenBaseList):
 class WorkflowJobTemplateList(WorkflowsEnforcementMixin, ListCreateAPIView):
 
     model = WorkflowJobTemplate
-    serializer_class = WorkflowJobTemplateListSerializer
+    serializer_class = WorkflowJobTemplateSerializer
     always_allow_superuser = False
 
 
@@ -3730,7 +3769,11 @@ class WorkflowJobTemplateCopy(WorkflowsEnforcementMixin, CopyAPIView):
     copy_return_serializer_class = WorkflowJobTemplateSerializer
 
     def get(self, request, *args, **kwargs):
+        if get_request_version(request) < 2:
+            return self.v1_not_allowed()
         obj = self.get_object()
+        if not request.user.can_access(obj.__class__, 'read', obj):
+            raise PermissionDenied()
         can_copy, messages = request.user.can_access_with_errors(self.model, 'copy', obj)
         data = OrderedDict([
             ('can_copy', can_copy), ('can_copy_without_user_input', can_copy),
@@ -3806,7 +3849,8 @@ class WorkflowJobTemplateLaunch(WorkflowsEnforcementMixin, RetrieveAPIView):
         data['workflow_job'] = new_job.id
         data['ignored_fields'] = ignored_fields
         data.update(WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
-        return Response(data, status=status.HTTP_201_CREATED)
+        headers = {'Location': new_job.get_absolute_url(request)}
+        return Response(data, status=status.HTTP_201_CREATED, headers=headers)
 
 
 class WorkflowJobRelaunch(WorkflowsEnforcementMixin, GenericAPIView):
@@ -4022,7 +4066,8 @@ class SystemJobTemplateLaunch(GenericAPIView):
         data = OrderedDict()
         data['system_job'] = new_job.id
         data.update(SystemJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
-        return Response(data, status=status.HTTP_201_CREATED)
+        headers = {'Location': new_job.get_absolute_url(request)}
+        return Response(data, status=status.HTTP_201_CREATED, headers=headers)
 
 
 class SystemJobTemplateSchedulesList(SubListCreateAPIView):
@@ -4094,7 +4139,30 @@ class JobDetail(UnifiedJobDeletionMixin, RetrieveUpdateDestroyAPIView):
 
     model = Job
     metadata_class = JobTypeMetadata
-    serializer_class = JobSerializer
+    serializer_class = JobDetailSerializer
+
+    # NOTE: When removing the V1 API in 3.4, delete the following four methods,
+    # and let this class inherit from RetrieveDestroyAPIView instead of
+    # RetrieveUpdateDestroyAPIView.
+    @property
+    def allowed_methods(self):
+        methods = super(JobDetail, self).allowed_methods
+        if get_request_version(getattr(self, 'request', None)) > 1:
+            methods.remove('PUT')
+            methods.remove('PATCH')
+        return methods
+
+    def put(self, request, *args, **kwargs):
+        if get_request_version(self.request) > 1:
+            return Response({"error": _("PUT not allowed for Job Details in version 2 of the API")},
+                            status=status.HTTP_405_METHOD_NOT_ALLOWED)
+        return super(JobDetail, self).put(request, *args, **kwargs)
+
+    def patch(self, request, *args, **kwargs):
+        if get_request_version(self.request) > 1:
+            return Response({"error": _("PATCH not allowed for Job Details in version 2 of the API")},
+                            status=status.HTTP_405_METHOD_NOT_ALLOWED)
+        return super(JobDetail, self).patch(request, *args, **kwargs)
 
     def update(self, request, *args, **kwargs):
         obj = self.get_object()
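Effect of the v2-only restriction above, sketched with an assumed host and job id: PUT/PATCH against a job detail now answers 405 under /api/v2/, while /api/v1/ keeps the old writable behavior.

    import requests

    r = requests.patch('https://awx.example.org/api/v2/jobs/7/',
                       auth=('admin', 'password'), json={'job_env': {}})
    print(r.status_code)  # 405 on v2; the same call under /api/v1/ is still accepted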
@@ -4220,7 +4288,6 @@ class JobRelaunch(RetrieveAPIView):
         data.pop('credential_passwords', None)
         return data
 
-    @csrf_exempt
     @transaction.non_atomic_requests
     def dispatch(self, *args, **kwargs):
         return super(JobRelaunch, self).dispatch(*args, **kwargs)
@@ -4466,7 +4533,6 @@ class AdHocCommandList(ListCreateAPIView):
     serializer_class = AdHocCommandListSerializer
     always_allow_superuser = False
 
-    @csrf_exempt
     @transaction.non_atomic_requests
     def dispatch(self, *args, **kwargs):
         return super(AdHocCommandList, self).dispatch(*args, **kwargs)
@@ -4538,7 +4604,7 @@ class HostAdHocCommandsList(AdHocCommandList, SubListCreateAPIView):
 class AdHocCommandDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
 
     model = AdHocCommand
-    serializer_class = AdHocCommandSerializer
+    serializer_class = AdHocCommandDetailSerializer
 
 
 class AdHocCommandCancel(RetrieveAPIView):
@@ -4564,7 +4630,6 @@ class AdHocCommandRelaunch(GenericAPIView):
 
     # FIXME: Figure out why OPTIONS request still shows all fields.
 
-    @csrf_exempt
     @transaction.non_atomic_requests
     def dispatch(self, *args, **kwargs):
         return super(AdHocCommandRelaunch, self).dispatch(*args, **kwargs)
@@ -5041,8 +5106,8 @@ class RoleTeamsList(SubListAttachDetachAPIView):
         role = Role.objects.get(pk=self.kwargs['pk'])
 
         organization_content_type = ContentType.objects.get_for_model(Organization)
-        if role.content_type == organization_content_type:
-            data = dict(msg=_("You cannot assign an Organization role as a child role for a Team."))
+        if role.content_type == organization_content_type and role.role_field in ['member_role', 'admin_role']:
+            data = dict(msg=_("You cannot assign an Organization participation role as a child role for a Team."))
             return Response(data, status=status.HTTP_400_BAD_REQUEST)
 
         credential_content_type = ContentType.objects.get_for_model(Credential)
26	awx/conf/migrations/0005_v330_rename_two_session_settings.py (new file)
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+from django.db import migrations
+from awx.conf.migrations import _rename_setting
+
+
+def copy_session_settings(apps, schema_editor):
+    _rename_setting.rename_setting(apps, schema_editor, old_key='AUTH_TOKEN_PER_USER', new_key='SESSIONS_PER_USER')
+    _rename_setting.rename_setting(apps, schema_editor, old_key='AUTH_TOKEN_EXPIRATION', new_key='SESSION_COOKIE_AGE')
+
+
+def reverse_copy_session_settings(apps, schema_editor):
+    _rename_setting.rename_setting(apps, schema_editor, old_key='SESSION_COOKIE_AGE', new_key='AUTH_TOKEN_EXPIRATION')
+    _rename_setting.rename_setting(apps, schema_editor, old_key='SESSIONS_PER_USER', new_key='AUTH_TOKEN_PER_USER')
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('conf', '0004_v320_reencrypt'),
+    ]
+
+    operations = [
+        migrations.RunPython(copy_session_settings, reverse_copy_session_settings),
+    ]
32	awx/conf/migrations/_rename_setting.py (new file)
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+import logging
+from django.utils.timezone import now
+from django.conf import settings
+
+logger = logging.getLogger('awx.conf.settings')
+
+__all__ = ['rename_setting']
+
+
+def rename_setting(apps, schema_editor, old_key, new_key):
+
+    old_setting = None
+    Setting = apps.get_model('conf', 'Setting')
+    if Setting.objects.filter(key=new_key).exists() or hasattr(settings, new_key):
+        logger.info('Setting ' + new_key + ' unexpectedly exists before this migration, it will be replaced by the value of the ' + old_key + ' setting.')
+        Setting.objects.filter(key=new_key).delete()
+    # Look for db setting, which wouldn't be picked up by SettingsWrapper because the register method is gone
+    if Setting.objects.filter(key=old_key).exists():
+        old_setting = Setting.objects.filter(key=old_key).last().value
+        Setting.objects.filter(key=old_key).delete()
+    # Look for "on-disk" setting (/etc/tower/conf.d)
+    if hasattr(settings, old_key):
+        old_setting = getattr(settings, old_key)
+    if old_setting is not None:
+        Setting.objects.create(key=new_key,
+                               value=old_setting,
+                               created=now(),
+                               modified=now()
+                               )
@@ -78,6 +78,14 @@ class Setting(CreatedModifiedModel):
     def get_cache_id_key(self, key):
         return '{}_ID'.format(key)
 
+    def display_value(self):
+        if self.key == 'LICENSE' and 'license_key' in self.value:
+            # don't log the license key in activity stream
+            value = self.value.copy()
+            value['license_key'] = '********'
+            return value
+        return self.value
+
 
 import awx.conf.signals  # noqa
 
@ -15,7 +15,7 @@ from django.conf import LazySettings
|
||||
from django.conf import settings, UserSettingsHolder
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db import ProgrammingError, OperationalError
|
||||
from django.db import ProgrammingError, OperationalError, transaction, connection
|
||||
from django.utils.functional import cached_property
|
||||
|
||||
# Django REST Framework
|
||||
@ -61,24 +61,66 @@ __all__ = ['SettingsWrapper', 'get_settings_to_cache', 'SETTING_CACHE_NOTSET']
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _log_database_error():
|
||||
def _ctit_db_wrapper(trans_safe=False):
|
||||
'''
|
||||
Wrapper to avoid undesired actions by Django ORM when managing settings
|
||||
if only getting a setting, can use trans_safe=True, which will avoid
|
||||
throwing errors if the prior context was a broken transaction.
|
||||
Any database errors will be logged, but exception will be suppressed.
|
||||
'''
|
||||
rollback_set = None
|
||||
is_atomic = None
|
||||
try:
|
||||
if trans_safe:
|
||||
is_atomic = connection.in_atomic_block
|
||||
if is_atomic:
|
||||
rollback_set = transaction.get_rollback()
|
||||
if rollback_set:
|
||||
logger.debug('Obtaining database settings in spite of broken transaction.')
|
||||
transaction.set_rollback(False)
|
||||
yield
|
||||
except (ProgrammingError, OperationalError):
|
||||
if 'migrate' in sys.argv and get_tower_migration_version() < '310':
|
||||
logger.info('Using default settings until version 3.1 migration.')
|
||||
else:
|
||||
# Somewhat ugly - craming the full stack trace into the log message
|
||||
# the available exc_info does not give information about the real caller
|
||||
# TODO: replace in favor of stack_info kwarg in python 3
|
||||
sio = StringIO.StringIO()
|
||||
traceback.print_stack(file=sio)
|
||||
sinfo = sio.getvalue()
|
||||
sio.close()
|
||||
sinfo = sinfo.strip('\n')
|
||||
logger.warning('Database settings are not available, using defaults, logged from:\n{}'.format(sinfo))
|
||||
# We want the _full_ traceback with the context
|
||||
# First we get the current call stack, which constitutes the "top",
|
||||
# it has the context up to the point where the context manager is used
|
||||
top_stack = StringIO.StringIO()
|
||||
traceback.print_stack(file=top_stack)
|
||||
top_lines = top_stack.getvalue().strip('\n').split('\n')
|
||||
top_stack.close()
|
||||
# Get "bottom" stack from the local error that happened
|
||||
# inside of the "with" block this wraps
|
||||
exc_type, exc_value, exc_traceback = sys.exc_info()
|
||||
bottom_stack = StringIO.StringIO()
|
||||
traceback.print_tb(exc_traceback, file=bottom_stack)
|
||||
bottom_lines = bottom_stack.getvalue().strip('\n').split('\n')
|
||||
# Glue together top and bottom where overlap is found
|
||||
bottom_cutoff = 0
|
||||
for i, line in enumerate(bottom_lines):
|
||||
if line in top_lines:
|
||||
# start of overlapping section, take overlap from bottom
|
||||
top_lines = top_lines[:top_lines.index(line)]
|
||||
bottom_cutoff = i
|
||||
break
|
||||
bottom_lines = bottom_lines[bottom_cutoff:]
|
||||
tb_lines = top_lines + bottom_lines
|
||||
|
||||
tb_string = '\n'.join(
|
||||
['Traceback (most recent call last):'] +
|
||||
tb_lines +
|
||||
['{}: {}'.format(exc_type.__name__, str(exc_value))]
|
||||
)
|
||||
bottom_stack.close()
|
||||
# Log the combined stack
|
||||
if trans_safe:
|
||||
logger.warning('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
|
||||
else:
|
||||
logger.error('Error modifying something related to database settings.\n{}'.format(tb_string))
|
||||
finally:
|
||||
pass
|
||||
if trans_safe and is_atomic and rollback_set:
|
||||
transaction.set_rollback(rollback_set)
|
||||
|
||||
|
||||
def filter_sensitive(registry, key, value):
@@ -398,7 +440,7 @@ class SettingsWrapper(UserSettingsHolder):
    def __getattr__(self, name):
        value = empty
        if name in self.all_supported_settings:
            with _log_database_error():
            with _ctit_db_wrapper(trans_safe=True):
                value = self._get_local(name)
            if value is not empty:
                return value
@@ -430,7 +472,7 @@ class SettingsWrapper(UserSettingsHolder):

    def __setattr__(self, name, value):
        if name in self.all_supported_settings:
            with _log_database_error():
            with _ctit_db_wrapper():
                self._set_local(name, value)
        else:
            setattr(self.default_settings, name, value)
@@ -446,14 +488,14 @@ class SettingsWrapper(UserSettingsHolder):

    def __delattr__(self, name):
        if name in self.all_supported_settings:
            with _log_database_error():
            with _ctit_db_wrapper():
                self._del_local(name)
        else:
            delattr(self.default_settings, name)

    def __dir__(self):
        keys = []
        with _log_database_error():
        with _ctit_db_wrapper(trans_safe=True):
            for setting in Setting.objects.filter(
                    key__in=self.all_supported_settings, user__isnull=True):
                # Skip returning settings that have been overridden but are
@@ -470,7 +512,7 @@ class SettingsWrapper(UserSettingsHolder):
    def is_overridden(self, setting):
        set_locally = False
        if setting in self.all_supported_settings:
            with _log_database_error():
            with _ctit_db_wrapper(trans_safe=True):
                set_locally = Setting.objects.filter(key=setting, user__isnull=True).exists()
        set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
        return (set_locally or set_on_default)
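All of these wrappers follow the same read/write split: reads and introspection pass trans_safe=True so a broken database degrades to defaults with a warning, while writes keep the default and surface an error. A simplified stand-in for _ctit_db_wrapper, under the assumption that it only needs to swallow database errors (the real implementation also handles migration state and transaction rollback):

    import logging
    from contextlib import contextmanager

    from django.db import DatabaseError

    logger = logging.getLogger(__name__)

    @contextmanager
    def db_fallback(trans_safe=False):
        # trans_safe=True: reads fall back to defaults with a warning.
        # trans_safe=False: writes log the failure as an error instead.
        try:
            yield
        except DatabaseError:
            if trans_safe:
                logger.warning('database unavailable, using defaults')
            else:
                logger.error('error modifying database settings')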
[Six file diffs suppressed because they are too large to display.]
@@ -5,6 +5,8 @@
import os
import sys
import logging

import six
from functools import reduce

# Django
from django.conf import settings
@@ -96,8 +98,6 @@ def check_user_access(user, model_class, action, *args, **kwargs):
    Return True if user can perform action against model_class with the
    provided parameters.
    '''
    if 'write' not in getattr(user, 'oauth_scopes', ['write']) and action != 'read':
        return False
    access_class = access_registry[model_class]
    access_instance = access_class(user)
    access_method = getattr(access_instance, 'can_%s' % action)
@@ -217,6 +217,15 @@ class BaseAccess(object):
    def can_copy(self, obj):
        return self.can_add({'reference_obj': obj})

    def can_copy_related(self, obj):
        '''
        can_copy_related() should only be used to check if the user has access to related
        many-to-many credentials when copying the object. It does not check if the user
        has permission for any other related objects. Therefore, when checking if the user
        can copy an object, it should always be used in conjunction with can_add().
        '''
        return True

    def can_attach(self, obj, sub_obj, relationship, data,
                   skip_sub_obj_read_check=False):
        if skip_sub_obj_read_check:
@@ -391,21 +400,24 @@ class BaseAccess(object):
        return user_capabilities

    def get_method_capability(self, method, obj, parent_obj):
        if method in ['change']:  # 3 args
            return self.can_change(obj, {})
        elif method in ['delete', 'run_ad_hoc_commands', 'copy']:
            access_method = getattr(self, "can_%s" % method)
            return access_method(obj)
        elif method in ['start']:
            return self.can_start(obj, validate_license=False)
        elif method in ['attach', 'unattach']:  # parent/sub-object call
            access_method = getattr(self, "can_%s" % method)
            if type(parent_obj) == Team:
                relationship = 'parents'
                parent_obj = parent_obj.member_role
            else:
                relationship = 'members'
            return access_method(obj, parent_obj, relationship, skip_sub_obj_read_check=True, data={})
        try:
            if method in ['change']:  # 3 args
                return self.can_change(obj, {})
            elif method in ['delete', 'run_ad_hoc_commands', 'copy']:
                access_method = getattr(self, "can_%s" % method)
                return access_method(obj)
            elif method in ['start']:
                return self.can_start(obj, validate_license=False)
            elif method in ['attach', 'unattach']:  # parent/sub-object call
                access_method = getattr(self, "can_%s" % method)
                if type(parent_obj) == Team:
                    relationship = 'parents'
                    parent_obj = parent_obj.member_role
                else:
                    relationship = 'members'
                return access_method(obj, parent_obj, relationship, skip_sub_obj_read_check=True, data={})
        except (ParseError, ObjectDoesNotExist):
            return False
        return False
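The effect of the new try/except is that a malformed request payload or a related object that has vanished mid-check now reads as "no capability" instead of bubbling a 500 out of the capability prefetch. The same pattern in isolation (the helper name is hypothetical):

    from django.core.exceptions import ObjectDoesNotExist
    from rest_framework.exceptions import ParseError

    def safe_capability(check, *args, **kwargs):
        # Treat bad input or missing related objects as "not allowed"
        # rather than letting the exception escape to the API layer.
        try:
            return check(*args, **kwargs)
        except (ParseError, ObjectDoesNotExist):
            return False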
@@ -516,29 +528,28 @@ class UserAccess(BaseAccess):
            return False
        return bool(self.user == obj or self.can_admin(obj, data))

    def user_membership_roles(self, u):
        return Role.objects.filter(
            content_type=ContentType.objects.get_for_model(Organization),
            role_field__in=[
                'admin_role', 'member_role',
                'execute_role', 'project_admin_role', 'inventory_admin_role',
                'credential_admin_role', 'workflow_admin_role',
                'notification_admin_role'
            ],
            members=u
        )
    @staticmethod
    def user_organizations(u):
        '''
        Returns all organizations that count `u` as a member
        '''
        return Organization.accessible_objects(u, 'member_role')

    def is_all_org_admin(self, u):
        return not self.user_membership_roles(u).exclude(
            ancestors__in=self.user.roles.filter(role_field='admin_role')
        '''
        Returns True if `u` is a member of any organization that is
        not also an organization that `self.user` admins
        '''
        return not self.user_organizations(u).exclude(
            pk__in=Organization.accessible_pk_qs(self.user, 'admin_role')
        ).exists()

    def user_is_orphaned(self, u):
        return not self.user_membership_roles(u).exists()
        return not self.user_organizations(u).exists()

    @check_superuser
    def can_admin(self, obj, data, allow_orphans=False):
        if not settings.MANAGE_ORGANIZATION_AUTH:
    def can_admin(self, obj, data, allow_orphans=False, check_setting=True):
        if check_setting and (not settings.MANAGE_ORGANIZATION_AUTH):
            return False
        if obj.is_superuser or obj.is_system_auditor:
            # must be superuser to admin users with system roles
@@ -742,12 +753,13 @@ class InventoryAccess(BaseAccess):
        # If no data is specified, just checking for generic add permission?
        if not data:
            return Organization.accessible_objects(self.user, 'inventory_admin_role').exists()

        return self.check_related('organization', Organization, data, role_field='inventory_admin_role')
        return (self.check_related('organization', Organization, data, role_field='inventory_admin_role') and
                self.check_related('insights_credential', Credential, data, role_field='use_role'))

    @check_superuser
    def can_change(self, obj, data):
        return self.can_admin(obj, data)
        return (self.can_admin(obj, data) and
                self.check_related('insights_credential', Credential, data, obj=obj, role_field='use_role'))

    @check_superuser
    def can_admin(self, obj, data):
@@ -1071,7 +1083,7 @@ class CredentialAccess(BaseAccess):
            return True
        if data and data.get('user', None):
            user_obj = get_object_from_data('user', User, data)
            return check_user_access(self.user, User, 'change', user_obj, None)
            return bool(self.user == user_obj or UserAccess(self.user).can_admin(user_obj, None, check_setting=False))
        if data and data.get('team', None):
            team_obj = get_object_from_data('team', Team, data)
            return check_user_access(self.user, Team, 'change', team_obj, None)
@@ -1114,6 +1126,9 @@ class TeamAccess(BaseAccess):
    select_related = ('created_by', 'modified_by', 'organization',)

    def filtered_queryset(self):
        if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and \
                (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
            return self.model.objects.all()
        return self.model.accessible_objects(self.user, 'read_role')

    @check_superuser
@@ -1197,14 +1212,15 @@ class ProjectAccess(BaseAccess):
    @check_superuser
    def can_add(self, data):
        if not data:  # So the browsable API will work
            return Organization.accessible_objects(self.user, 'project_admin_role').exists()
        return self.check_related('organization', Organization, data, role_field='project_admin_role', mandatory=True)
            return Organization.accessible_objects(self.user, 'admin_role').exists()
        return (self.check_related('organization', Organization, data, role_field='project_admin_role', mandatory=True) and
                self.check_related('credential', Credential, data, role_field='use_role'))

    @check_superuser
    def can_change(self, obj, data):
        if not self.check_related('organization', Organization, data, obj=obj, role_field='project_admin_role'):
            return False
        return self.user in obj.admin_role
        return (self.check_related('organization', Organization, data, obj=obj, role_field='project_admin_role') and
                self.user in obj.admin_role and
                self.check_related('credential', Credential, data, obj=obj, role_field='use_role'))

    @check_superuser
    def can_start(self, obj, validate_license=True):
@@ -1320,6 +1336,17 @@ class JobTemplateAccess(BaseAccess):
            return self.user in project.use_role
        else:
            return False

    @check_superuser
    def can_copy_related(self, obj):
        '''
        Check if we have access to all the credentials related to Job Templates.
        Does not verify the user's permission for any other related fields (projects, inventories, etc.).
        '''

        # obj.credentials.all() is accessible ONLY when the object is saved (has a valid id)
        credential_manager = getattr(obj, 'credentials', None) if getattr(obj, 'id', False) else Credential.objects.none()
        return reduce(lambda prev, cred: prev and self.user in cred.use_role, credential_manager.all(), True)
    def can_start(self, obj, validate_license=True):
        # Check license.
@@ -1488,7 +1515,7 @@ class JobAccess(BaseAccess):
        # Obtain prompts used to start original job
        JobLaunchConfig = obj._meta.get_field('launch_config').related_model
        try:
            config = obj.launch_config
            config = JobLaunchConfig.objects.prefetch_related('credentials').get(job=obj)
        except JobLaunchConfig.DoesNotExist:
            config = None
@@ -1496,6 +1523,12 @@ class JobAccess(BaseAccess):
        if obj.job_template is not None:
            if config is None:
                prompts_access = False
            elif not config.has_user_prompts(obj.job_template):
                prompts_access = True
            elif obj.created_by_id != self.user.pk:
                prompts_access = False
                if self.save_messages:
                    self.messages['detail'] = _('Job was launched with prompts provided by another user.')
            else:
                prompts_access = (
                    JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}) and
@@ -1507,13 +1540,13 @@ class JobAccess(BaseAccess):
        elif not jt_access:
            return False

        org_access = obj.inventory and self.user in obj.inventory.organization.inventory_admin_role
        org_access = bool(obj.inventory) and self.user in obj.inventory.organization.inventory_admin_role
        project_access = obj.project is None or self.user in obj.project.admin_role
        credential_access = all([self.user in cred.use_role for cred in obj.credentials.all()])

        # job can be relaunched if user could make an equivalent JT
        ret = org_access and credential_access and project_access
        if not ret and self.save_messages:
        if not ret and self.save_messages and not self.messages:
            if not obj.job_template:
                pretext = _('Job has been orphaned from its job template.')
            elif config is None:
@@ -1918,12 +1951,22 @@ class WorkflowJobAccess(BaseAccess):
        if not wfjt:
            return False

        # execute permission to WFJT is mandatory for any relaunch
        if self.user not in wfjt.execute_role:
            return False
        # If the job was launched by another user, it could have survey passwords
        if obj.created_by_id != self.user.pk:
            # Obtain prompts used to start the original job
            JobLaunchConfig = obj._meta.get_field('launch_config').related_model
            try:
                config = JobLaunchConfig.objects.get(job=obj)
            except JobLaunchConfig.DoesNotExist:
                config = None

        # user's WFJT access doesn't guarantee permission to launch, introspect nodes
        return self.can_recreate(obj)
            if config is None or config.prompts_dict():
                if self.save_messages:
                    self.messages['detail'] = _('Job was launched with prompts provided by another user.')
                return False

        # execute permission to WFJT is mandatory for any relaunch
        return (self.user in wfjt.execute_role)

    def can_recreate(self, obj):
        node_qs = obj.workflow_job_nodes.all().prefetch_related('inventory', 'credentials', 'unified_job_template')
@@ -2342,9 +2385,7 @@ class LabelAccess(BaseAccess):
    prefetch_related = ('modified_by', 'created_by', 'organization',)

    def filtered_queryset(self):
        return self.model.objects.filter(
            organization__in=Organization.accessible_pk_qs(self.user, 'read_role')
        )
        return self.model.objects.all()

    @check_superuser
    def can_read(self, obj):
@@ -2531,7 +2572,11 @@ class RoleAccess(BaseAccess):
        # administrators of that Organization the ability to edit that user. To prevent
        # unwanted escalations, let's ensure that the Organization administrator has the ability
        # to admin the user being added to the role.
        if isinstance(obj.content_object, Organization) and obj.role_field in ['member_role', 'admin_role']:
        if (isinstance(obj.content_object, Organization) and
                obj.role_field in (Organization.member_role.field.parent_role + ['member_role'])):
            if not isinstance(sub_obj, User):
                logger.error(six.text_type('Unexpected attempt to associate {} with organization role.').format(sub_obj))
                return False
            if not UserAccess(self.user).can_admin(sub_obj, None, allow_orphans=True):
                return False
@@ -38,7 +38,8 @@ register(
    'ORG_ADMINS_CAN_SEE_ALL_USERS',
    field_class=fields.BooleanField,
    label=_('All Users Visible to Organization Admins'),
    help_text=_('Controls whether any Organization Admin can view all users, even those not associated with their Organization.'),
    help_text=_('Controls whether any Organization Admin can view all users and teams, '
                'even those not associated with their Organization.'),
    category=_('System'),
    category_slug='system',
)
@@ -81,7 +82,7 @@ register(
    help_text=_('HTTP headers and meta keys to search to determine remote host '
                'name or IP. Add additional items to this list, such as '
                '"HTTP_X_FORWARDED_FOR", if behind a reverse proxy. '
                'See the "Proxy Support" section of the Administrator guide for'
                'See the "Proxy Support" section of the Administrator guide for '
                'more details.'),
    category=_('System'),
    category_slug='system',
@@ -482,10 +483,12 @@ register(
register(
    'LOG_AGGREGATOR_PROTOCOL',
    field_class=fields.ChoiceField,
    choices=[('https', 'HTTPS'), ('tcp', 'TCP'), ('udp', 'UDP')],
    choices=[('https', 'HTTPS/HTTP'), ('tcp', 'TCP'), ('udp', 'UDP')],
    default='https',
    label=_('Logging Aggregator Protocol'),
    help_text=_('Protocol used to communicate with log aggregator.'),
    help_text=_('Protocol used to communicate with log aggregator. '
                'HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in '
                'the Logging Aggregator hostname.'),
    category=_('Logging'),
    category_slug='logging',
)
@@ -7,7 +7,7 @@ from django.utils.translation import ugettext_lazy as _

__all__ = [
    'CLOUD_PROVIDERS', 'SCHEDULEABLE_PROVIDERS', 'PRIVILEGE_ESCALATION_METHODS',
    'ANSI_SGR_PATTERN', 'CAN_CANCEL', 'ACTIVE_STATES'
    'ANSI_SGR_PATTERN', 'CAN_CANCEL', 'ACTIVE_STATES', 'STANDARD_INVENTORY_UPDATE_ENV'
]


@@ -20,6 +20,12 @@ PRIVILEGE_ESCALATION_METHODS = [
]
CHOICES_PRIVILEGE_ESCALATION_METHODS = [('', _('None'))] + PRIVILEGE_ESCALATION_METHODS
ANSI_SGR_PATTERN = re.compile(r'\x1b\[[0-9;]*m')
STANDARD_INVENTORY_UPDATE_ENV = {
    # Failure to parse inventory should always be fatal
    'ANSIBLE_INVENTORY_UNPARSED_FAILED': 'True',
    # Always use the --export option for ansible-inventory
    'ANSIBLE_INVENTORY_EXPORT': 'True'
}
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL
TOKEN_CENSOR = '************'
CENSOR_VALUE = '************'
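These defaults are consumed by the inventory import command further down, which copies them into the subprocess environment only when the user has not already set them. A small sketch of that merge (the helper name is hypothetical):

    import os

    from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV

    def apply_standard_inventory_env(env=None):
        # Existing values win; the standard flags only fill in the gaps.
        env = dict(os.environ if env is None else env)
        for key, value in STANDARD_INVENTORY_UPDATE_ENV.items():
            env.setdefault(key, value)
        return env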
@@ -4,10 +4,12 @@ import logging

from channels import Group
from channels.auth import channel_session_user_from_http, channel_session_user

from django.http.cookie import parse_cookie
from django.core.serializers.json import DjangoJSONEncoder


logger = logging.getLogger('awx.main.consumers')
XRF_KEY = '_auth_user_xrf'


def discard_groups(message):
@@ -18,12 +20,20 @@ def discard_groups(message):

@channel_session_user_from_http
def ws_connect(message):
    headers = dict(message.content.get('headers', ''))
    message.reply_channel.send({"accept": True})
    message.content['method'] = 'FAKE'
    if message.user.is_authenticated():
        message.reply_channel.send(
            {"text": json.dumps({"accept": True, "user": message.user.id})}
        )
        # store the valid CSRF token from the cookie so we can compare it later
        # on ws_receive
        cookie_token = parse_cookie(
            headers.get('cookie')
        ).get('csrftoken')
        if cookie_token:
            message.channel_session[XRF_KEY] = cookie_token
    else:
        logger.error("Request user is not authenticated to use websocket.")
        message.reply_channel.send({"close": True})
@@ -42,6 +52,20 @@ def ws_receive(message):
    raw_data = message.content['text']
    data = json.loads(raw_data)

    xrftoken = data.get('xrftoken')
    if (
        not xrftoken or
        XRF_KEY not in message.channel_session or
        xrftoken != message.channel_session[XRF_KEY]
    ):
        logger.error(
            "access denied to channel, XRF mismatch for {}".format(user.username)
        )
        message.reply_channel.send({
            "text": json.dumps({"error": "access denied to channel"})
        })
        return

    if 'groups' in data:
        discard_groups(message)
        groups = data['groups']
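The two hunks above form a simple CSRF handshake for websockets: ws_connect captures the csrftoken cookie into the channel session, and every ws_receive frame must echo the same token or the subscription is refused. A condensed sketch of the receive-side check (the function name is hypothetical):

    import json

    def xrf_token_valid(channel_session, frame_text):
        # The client must echo the CSRF token captured at connect time.
        data = json.loads(frame_text)
        token = data.get('xrftoken')
        return bool(token) and channel_session.get(XRF_KEY) == token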
@@ -318,7 +318,7 @@ class IsolatedManager(object):

        path = self.path_to('artifacts', 'stdout')
        if os.path.exists(path):
            with codecs.open(path, 'r', encoding='utf-8') as f:
            with open(path, 'r') as f:
                f.seek(seek)
                for line in f:
                    self.stdout_handle.write(line)
@@ -434,6 +434,7 @@ class IsolatedManager(object):
            task_result = {}
        if 'capacity_cpu' in task_result and 'capacity_mem' in task_result:
            cls.update_capacity(instance, task_result, awx_application_version)
            logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
        elif instance.capacity == 0:
            logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
                instance.hostname))
@@ -468,13 +469,11 @@ class IsolatedManager(object):

        return OutputEventFilter(job_event_callback)

    def run(self, instance, host, private_data_dir, proot_temp_dir):
    def run(self, instance, private_data_dir, proot_temp_dir):
        """
        Run a job on an isolated host.

        :param instance: a `model.Job` instance
        :param host: the hostname (or IP address) to run the
                     isolated job on
        :param private_data_dir: an absolute path on the local file system
                                 where job-specific data should be written
                                 (i.e., `/tmp/ansible_awx_xyz/`)
@@ -486,14 +485,11 @@ class IsolatedManager(object):
            `ansible-playbook` run.
        """
        self.instance = instance
        self.host = host
        self.host = instance.execution_node
        self.private_data_dir = private_data_dir
        self.proot_temp_dir = proot_temp_dir
        status, rc = self.dispatch()
        if status == 'successful':
            status, rc = self.check()
        else:
            # If dispatch fails, attempt to consume artifacts that *might* exist
            self.check()
        self.cleanup()
        return status, rc
@@ -4,7 +4,7 @@ import argparse
import base64
import codecs
import collections
import cStringIO
import StringIO
import logging
import json
import os
@@ -18,6 +18,7 @@ import time

import pexpect
import psutil
import six


logger = logging.getLogger('awx.main.utils.expect')
@@ -99,6 +100,12 @@ def run_pexpect(args, cwd, env, logfile,
    password_patterns = expect_passwords.keys()
    password_values = expect_passwords.values()

    # pexpect needs all env vars to be utf-8 encoded strings
    # https://github.com/pexpect/pexpect/issues/512
    for k, v in env.items():
        if isinstance(v, six.text_type):
            env[k] = v.encode('utf-8')

    child = pexpect.spawn(
        args[0], args[1:], cwd=cwd, env=env, ignore_sighup=True,
        encoding='utf-8', echo=False, use_poll=True
@@ -240,7 +247,7 @@ def handle_termination(pid, args, proot_cmd, is_cancel=True):


def __run__(private_data_dir):
    buff = cStringIO.StringIO()
    buff = StringIO.StringIO()
    with open(os.path.join(private_data_dir, 'env'), 'r') as f:
        for line in f:
            buff.write(line)
@@ -218,6 +218,7 @@ class ImplicitRoleField(models.ForeignKey):
        kwargs.setdefault('to', 'Role')
        kwargs.setdefault('related_name', '+')
        kwargs.setdefault('null', 'True')
        kwargs.setdefault('editable', False)
        super(ImplicitRoleField, self).__init__(*args, **kwargs)

    def deconstruct(self):
@@ -4,6 +4,7 @@
from django.core.management.base import BaseCommand
from crum import impersonate
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
from awx.main.signals import disable_computed_fields


class Command(BaseCommand):
@@ -22,33 +23,34 @@ class Command(BaseCommand):
        except IndexError:
            superuser = None
        with impersonate(superuser):
            o = Organization.objects.create(name='Default')
            p = Project(name='Demo Project',
                        scm_type='git',
                        scm_url='https://github.com/ansible/ansible-tower-samples',
                        scm_update_on_launch=True,
                        scm_update_cache_timeout=0,
                        organization=o)
            p.save(skip_update=True)
            ssh_type = CredentialType.from_v1_kind('ssh')
            c = Credential.objects.create(credential_type=ssh_type,
                                          name='Demo Credential',
                                          inputs={
                                              'username': superuser.username
                                          },
                                          created_by=superuser)
            c.admin_role.members.add(superuser)
            i = Inventory.objects.create(name='Demo Inventory',
                                         organization=o,
                                         created_by=superuser)
            Host.objects.create(name='localhost',
                                inventory=i,
                                variables="ansible_connection: local",
                                created_by=superuser)
            jt = JobTemplate.objects.create(name='Demo Job Template',
                                            playbook='hello_world.yml',
                                            project=p,
                                            inventory=i)
            jt.credentials.add(c)
            with disable_computed_fields():
                o = Organization.objects.create(name='Default')
                p = Project(name='Demo Project',
                            scm_type='git',
                            scm_url='https://github.com/ansible/ansible-tower-samples',
                            scm_update_on_launch=True,
                            scm_update_cache_timeout=0,
                            organization=o)
                p.save(skip_update=True)
                ssh_type = CredentialType.from_v1_kind('ssh')
                c = Credential.objects.create(credential_type=ssh_type,
                                              name='Demo Credential',
                                              inputs={
                                                  'username': superuser.username
                                              },
                                              created_by=superuser)
                c.admin_role.members.add(superuser)
                i = Inventory.objects.create(name='Demo Inventory',
                                             organization=o,
                                             created_by=superuser)
                Host.objects.create(name='localhost',
                                    inventory=i,
                                    variables="ansible_connection: local",
                                    created_by=superuser)
                jt = JobTemplate.objects.create(name='Demo Job Template',
                                                playbook='hello_world.yml',
                                                project=p,
                                                inventory=i)
                jt.credentials.add(c)
        print('Default organization added.')
        print('Demo Credential, Inventory, and Job Template added.')
awx/main/management/commands/expire_sessions.py (new file, 37 lines)
@@ -0,0 +1,37 @@
# Python
from importlib import import_module

# Django
from django.utils import timezone
from django.conf import settings
from django.contrib.auth import logout
from django.http import HttpRequest
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.core.exceptions import ObjectDoesNotExist


class Command(BaseCommand):
    """Expire Django auth sessions for a user/all users"""
    help = 'Expire Django auth sessions. Will expire all auth sessions if --user option is not supplied.'

    def add_arguments(self, parser):
        parser.add_argument('--user', dest='user', type=str)

    def handle(self, *args, **options):
        # Check whether the user exists
        try:
            user = User.objects.get(username=options['user']) if options['user'] else None
        except ObjectDoesNotExist:
            raise CommandError('The user does not exist.')
        # We use the following hack to filter out sessions that are still active,
        # with consideration for timezones.
        start = timezone.now()
        sessions = Session.objects.filter(expire_date__gte=start).iterator()
        request = HttpRequest()
        for session in sessions:
            user_id = session.get_decoded().get('_auth_user_id')
            if (user is None) or (user_id and user.id == int(user_id)):
                request.session = import_module(settings.SESSION_ENGINE).SessionStore(session.session_key)
                logout(request)
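Usage is straightforward once the command ships; with no --user argument every active session is expired. For example, from other management code (the username is a placeholder):

    from django.core.management import call_command

    # Expire all of alice's sessions; omit the user kwarg to expire everyone's.
    call_command('expire_sessions', user='alice')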
@@ -30,6 +30,7 @@ from awx.main.utils import (
)
from awx.main.utils.mem_inventory import MemInventory, dict_to_mem_data
from awx.main.signals import disable_activity_stream
from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV

logger = logging.getLogger('awx.main.commands.inventory_import')

@@ -82,7 +83,10 @@ class AnsibleInventoryLoader(object):
        env = dict(os.environ.items())
        env['VIRTUAL_ENV'] = settings.ANSIBLE_VENV_PATH
        env['PATH'] = os.path.join(settings.ANSIBLE_VENV_PATH, "bin") + ":" + env['PATH']
        env['ANSIBLE_INVENTORY_UNPARSED_FAILED'] = '1'
        # Set configuration items that should always be used for updates
        for key, value in STANDARD_INVENTORY_UPDATE_ENV.items():
            if key not in env:
                env[key] = value
        venv_libdir = os.path.join(settings.ANSIBLE_VENV_PATH, "lib")
        env.pop('PYTHONPATH', None)  # default to none if no python_ver matches
        if os.path.isdir(os.path.join(venv_libdir, "python2.7")):
@@ -1001,37 +1005,43 @@ class Command(BaseCommand):
            self.all_group.debug_tree()

        with batch_role_ancestor_rebuilding():
            # Ensure that this is managed as an atomic SQL transaction,
            # and thus properly rolled back if there is an issue.
            with transaction.atomic():
                # Merge/overwrite inventory into database.
                if settings.SQL_DEBUG:
                    logger.warning('loading into database...')
                with ignore_inventory_computed_fields():
                    if getattr(settings, 'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC', True):
                        self.load_into_database()
                    else:
                        with disable_activity_stream():
            # When using transaction.atomic() together with try ... except,
            # transaction.atomic() must be inside the try section of the code, as per the Django docs.
            try:
                # Ensure that this is managed as an atomic SQL transaction,
                # and thus properly rolled back if there is an issue.
                with transaction.atomic():
                    # Merge/overwrite inventory into database.
                    if settings.SQL_DEBUG:
                        logger.warning('loading into database...')
                    with ignore_inventory_computed_fields():
                        if getattr(settings, 'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC', True):
                            self.load_into_database()
                if settings.SQL_DEBUG:
                    queries_before2 = len(connection.queries)
                self.inventory.update_computed_fields()
                if settings.SQL_DEBUG:
                    logger.warning('update computed fields took %d queries',
                                   len(connection.queries) - queries_before2)
            try:
                        else:
                            with disable_activity_stream():
                                self.load_into_database()
                        if settings.SQL_DEBUG:
                            queries_before2 = len(connection.queries)
                        self.inventory.update_computed_fields()
                        if settings.SQL_DEBUG:
                            logger.warning('update computed fields took %d queries',
                                           len(connection.queries) - queries_before2)
                    # Check if the license is valid.
                    # If the license is not valid, a CommandError will be thrown,
                    # and the inventory update will be marked as invalid.
                    # transaction.atomic() will roll back the changes.
                    self.check_license()
            except CommandError as e:
                self.mark_license_failure(save=True)
                raise e
            except CommandError as e:
                self.mark_license_failure()
                raise e

            if settings.SQL_DEBUG:
                logger.warning('Inventory import completed for %s in %0.1fs',
                               self.inventory_source.name, time.time() - begin)
            else:
                logger.info('Inventory import completed for %s in %0.1fs',
                            self.inventory_source.name, time.time() - begin)
            status = 'successful'
            if settings.SQL_DEBUG:
                logger.warning('Inventory import completed for %s in %0.1fs',
                               self.inventory_source.name, time.time() - begin)
            else:
                logger.info('Inventory import completed for %s in %0.1fs',
                            self.inventory_source.name, time.time() - begin)
            status = 'successful'

            # If we're in debug mode, then log the queries and time
            # used to do the operation.
@@ -1058,6 +1068,8 @@ class Command(BaseCommand):
            self.inventory_update.result_traceback = tb
            self.inventory_update.status = status
            self.inventory_update.save(update_fields=['status', 'result_traceback'])
            self.inventory_source.status = status
            self.inventory_source.save(update_fields=['status'])

        if exc and isinstance(exc, CommandError):
            sys.exit(1)
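The comment above points at a subtle Django rule: to catch an exception raised inside an atomic block, the except clause must sit outside the block, i.e. transaction.atomic() goes inside the try. A stripped-down sketch of the license-check flow under that rule (function names follow the code above but are simplified):

    from django.core.management.base import CommandError
    from django.db import transaction

    def run_import(load_into_database, check_license, mark_license_failure):
        try:
            with transaction.atomic():
                load_into_database()
                # Raising CommandError here rolls back everything loaded above.
                check_license()
        except CommandError:
            # By this point the rollback has already happened.
            mark_license_failure()
            raise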
@@ -22,7 +22,7 @@ class Command(BaseCommand):
        parser.add_argument('--queuename', dest='queuename', type=lambda s: six.text_type(s, 'utf8'),
                            help='Queue to create/update')
        parser.add_argument('--hostnames', dest='hostnames', type=lambda s: six.text_type(s, 'utf8'),
                            help='Comma-Delimited Hosts to add to the Queue')
                            help='Comma-Delimited Hosts to add to the Queue (will not remove already assigned instances)')
        parser.add_argument('--controller', dest='controller', type=lambda s: six.text_type(s, 'utf8'),
                            default='', help='The controlling group (makes this an isolated group)')
        parser.add_argument('--instance_percent', dest='instance_percent', type=int, default=0,
@@ -44,6 +44,9 @@ class Command(BaseCommand):
            ig.policy_instance_minimum = instance_min
            changed = True

        if changed:
            ig.save()

        return (ig, created, changed)

    def update_instance_group_controller(self, ig, controller):
@@ -72,16 +75,16 @@ class Command(BaseCommand):
        else:
            raise InstanceNotFound(six.text_type("Instance does not exist: {}").format(inst_name), changed)

        ig.instances = instances
        ig.instances.add(*instances)

        instance_list_before = set(ig.policy_instance_list)
        instance_list_after = set(instance_list_unique)
        if len(instance_list_before) != len(instance_list_after) or \
                len(set(instance_list_before) - set(instance_list_after)) != 0:
        instance_list_before = ig.policy_instance_list
        instance_list_after = instance_list_unique
        new_instances = set(instance_list_after) - set(instance_list_before)
        if new_instances:
            changed = True
            ig.policy_instance_list = ig.policy_instance_list + list(new_instances)
            ig.save()

        ig.policy_instance_list = list(instance_list_unique)
        ig.save()
        return (instances, changed)

    def handle(self, **options):
@@ -97,25 +100,27 @@ class Command(BaseCommand):
        hostname_list = options.get('hostnames').split(",")

        with advisory_lock(six.text_type('instance_group_registration_{}').format(queuename)):
            (ig, created, changed) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
            changed2 = False
            changed3 = False
            (ig, created, changed1) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
            if created:
                print(six.text_type("Creating instance group {}".format(ig.name)))
            elif not created:
                print(six.text_type("Instance Group already registered {}").format(ig.name))

            if ctrl:
                (ig_ctrl, changed) = self.update_instance_group_controller(ig, ctrl)
                if changed:
                (ig_ctrl, changed2) = self.update_instance_group_controller(ig, ctrl)
                if changed2:
                    print(six.text_type("Set controller group {} on {}.").format(ctrl, queuename))

            try:
                (instances, changed) = self.add_instances_to_group(ig, hostname_list)
                (instances, changed3) = self.add_instances_to_group(ig, hostname_list)
                for i in instances:
                    print(six.text_type("Added instance {} to {}").format(i.hostname, ig.name))
            except InstanceNotFound as e:
                instance_not_found_err = e

            if changed:
            if any([changed1, changed2, changed3]):
                print('(changed: True)')

            if instance_not_found_err:
@@ -162,6 +162,7 @@ class CallbackBrokerWorker(ConsumerMixin):

        if body.get('event') == 'EOF':
            try:
                final_counter = body.get('final_counter', 0)
                logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
                # EOF events are sent when stdout for the running task is
                # closed. don't actually persist them to the database; we
@@ -169,7 +170,7 @@ class CallbackBrokerWorker(ConsumerMixin):
                # approximation for when a job is "done"
                emit_channel_notification(
                    'jobs-summary',
                    dict(group_name='jobs', unified_job_id=job_identifier)
                    dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
                )
                # Additionally, when we've processed all events, we should
                # have all the data we need to send out success/failure
@@ -3,7 +3,6 @@ import shutil
import subprocess
import sys
import tempfile
from optparse import make_option

from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
@@ -15,10 +14,9 @@ class Command(BaseCommand):
    """Tests SSH connectivity between a controller and target isolated node"""
    help = 'Tests SSH connectivity between a controller and target isolated node'

    option_list = BaseCommand.option_list + (
        make_option('--hostname', dest='hostname', type='string',
                    help='Hostname of an isolated node'),
    )
    def add_arguments(self, parser):
        parser.add_argument('--hostname', dest='hostname', type=str,
                            help='Hostname of an isolated node')

    def handle(self, *args, **options):
        hostname = options.get('hostname')
@@ -30,7 +28,7 @@ class Command(BaseCommand):
        args = [
            'ansible', 'all', '-i', '{},'.format(hostname), '-u',
            settings.AWX_ISOLATED_USERNAME, '-T5', '-m', 'shell',
            '-a', 'hostname', '-vvv'
            '-a', 'awx-expect -h', '-vvv'
        ]
        if all([
            getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True,
awx/main/management/commands/watch_celery.py (new file, 66 lines)
@@ -0,0 +1,66 @@
import datetime
import os
import signal
import subprocess
import sys
import time

from celery import Celery
from django.core.management.base import BaseCommand
from django.conf import settings


class Command(BaseCommand):
    """Watch local celery workers"""
    help = ("Sends a periodic ping to the local celery process over AMQP to ensure "
            "it's responsive; this command is only intended to run in an environment "
            "where celeryd is running")

    #
    # Just because celery is _running_ doesn't mean it's _working_; it's
    # imperative that celery workers are _actually_ handling AMQP messages on
    # their appropriate queues for awx to function. Unfortunately, we've been
    # plagued by a variety of bugs in celery that cause it to hang and become
    # an unresponsive zombie, such as:
    #
    # https://github.com/celery/celery/issues/4185
    # https://github.com/celery/celery/issues/4457
    #
    # The goal of this code is to periodically send a broadcast AMQP message to
    # the celery process on the local host via celery.app.control.ping;
    # if that _fails_, we attempt to determine the pid of the celery process
    # and send SIGHUP (which tends to resolve these sorts of issues for us).
    #

    INTERVAL = 60

    def _log(self, msg):
        sys.stderr.write(datetime.datetime.utcnow().isoformat())
        sys.stderr.write(' ')
        sys.stderr.write(msg)
        sys.stderr.write('\n')

    def handle(self, **options):
        app = Celery('awx')
        app.config_from_object('django.conf:settings')
        while True:
            try:
                pongs = app.control.ping(['celery@{}'.format(settings.CLUSTER_HOST_ID)], timeout=30)
            except Exception:
                pongs = []
            if not pongs:
                self._log('celery is not responsive to ping over local AMQP')
                pid = self.getpid()
                if pid:
                    self._log('sending SIGHUP to {}'.format(pid))
                    os.kill(pid, signal.SIGHUP)
            time.sleep(self.INTERVAL)

    def getpid(self):
        cmd = 'supervisorctl pid tower-processes:awx-celeryd'
        if os.path.exists('/supervisor_task.conf'):
            cmd = 'supervisorctl -c /supervisor_task.conf pid tower-processes:celery'
        try:
            return int(subprocess.check_output(cmd, shell=True))
        except Exception:
            self._log('could not detect celery pid')
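Outside of the management command, the same liveness probe is just a broadcast ping addressed to a single worker. A minimal sketch (the worker name is an assumption; in the command above it is derived from CLUSTER_HOST_ID):

    from celery import Celery

    app = Celery('awx')
    app.config_from_object('django.conf:settings')

    # An empty reply list means no worker answered within the timeout,
    # which the watcher treats as a hung process worth a SIGHUP.
    pongs = app.control.ping(['celery@tower-1'], timeout=30)
    print('responsive' if pongs else 'unresponsive')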
@@ -96,7 +96,7 @@ class InstanceManager(models.Manager):
        instance = self.filter(hostname=hostname)
        if instance.exists():
            return (False, instance[0])
        instance = self.create(uuid=uuid, hostname=hostname)
        instance = self.create(uuid=uuid, hostname=hostname, capacity=0)
        return (True, instance)

    def get_or_register(self):
@@ -119,6 +119,20 @@ class ActivityStreamMiddleware(threading.local):
        self.instance_ids.append(instance.id)


class SessionTimeoutMiddleware(object):
    """
    Resets the session timeout for both the UI and the actual session for the API
    to the value of SESSION_COOKIE_AGE on every request if there is a valid session.
    """

    def process_response(self, request, response):
        req_session = getattr(request, 'session', None)
        if req_session and not req_session.is_empty():
            request.session.set_expiry(request.session.get_expiry_age())
            response['Session-Timeout'] = int(settings.SESSION_COOKIE_AGE)
        return response
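Because the middleware both slides the server-side expiry and advertises it in a Session-Timeout response header, a client can read that header to schedule its own idle-logout countdown. A hypothetical client-side check (URL and cookie are placeholders):

    import requests

    resp = requests.get('https://awx.example.com/api/v2/me/',
                        cookies={'sessionid': '...'})
    # Seconds of inactivity before the session expires, reset on each request.
    print(resp.headers.get('Session-Timeout'))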
def _customize_graph():
    from awx.main.models import Instance, Schedule, UnifiedJobTemplate
    for model in [Schedule, UnifiedJobTemplate]:
@@ -484,7 +484,7 @@ class Migration(migrations.Migration):
    migrations.AddField(
        model_name='instance',
        name='last_isolated_check',
        field=models.DateTimeField(auto_now_add=True, null=True),
        field=models.DateTimeField(editable=False, null=True),
    ),
    # Migrations that don't change the db schema but simply make the Django ORM happy,
    # e.g. choice updates, help_text updates, etc.
@@ -2,6 +2,7 @@
from __future__ import unicode_literals

# AWX
from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations import _credentialtypes as credentialtypes

from django.db import migrations, models
@@ -14,6 +15,7 @@ class Migration(migrations.Migration):
    ]

    operations = [
        migrations.RunPython(migration_utils.set_current_apps_for_migrations),
        migrations.RunPython(credentialtypes.create_rhv_tower_credtype),
        migrations.AlterField(
            model_name='inventorysource',

@@ -2,6 +2,7 @@
from __future__ import unicode_literals

# AWX
from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations import _credentialtypes as credentialtypes

from django.db import migrations
@@ -14,5 +15,6 @@ class Migration(migrations.Migration):
    ]

    operations = [
        migrations.RunPython(migration_utils.set_current_apps_for_migrations),
        migrations.RunPython(credentialtypes.add_azure_cloud_environment_field),
    ]
@@ -5,7 +5,7 @@ from django.db import migrations, models

from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations import _credentialtypes as credentialtypes
from awx.main.migrations._multi_cred import migrate_to_multi_cred
from awx.main.migrations._multi_cred import migrate_to_multi_cred, migrate_back_from_multi_cred


class Migration(migrations.Migration):
@@ -13,6 +13,13 @@ class Migration(migrations.Migration):
    dependencies = [
        ('main', '0012_v322_update_cred_types'),
    ]
    run_before = [
        # Django-vendored migrations will make reference to settings;
        # this migration was introduced in the Django 1.11 / Tower 3.3 upgrade.
        # Migration main-0009 changed the setting model and is not backward compatible,
        # so we ensure that at least all of the Tower 3.2 migrations are finished before running it.
        ('auth', '0008_alter_user_username_max_length')
    ]

    operations = [
        migrations.AddField(
@@ -25,8 +32,8 @@ class Migration(migrations.Migration):
            name='credentials',
            field=models.ManyToManyField(related_name='unifiedjobtemplates', to='main.Credential'),
        ),
        migrations.RunPython(migration_utils.set_current_apps_for_migrations),
        migrations.RunPython(migrate_to_multi_cred),
        migrations.RunPython(migration_utils.set_current_apps_for_migrations, migrate_back_from_multi_cred),
        migrations.RunPython(migrate_to_multi_cred, migration_utils.set_current_apps_for_migrations),
        migrations.RemoveField(
            model_name='job',
            name='credential',
@@ -51,5 +58,6 @@ class Migration(migrations.Migration):
            model_name='jobtemplate',
            name='vault_credential',
        ),
        migrations.RunPython(credentialtypes.add_vault_id_field)
        migrations.RunPython(migration_utils.set_current_apps_for_migrations, credentialtypes.remove_vault_id_field),
        migrations.RunPython(credentialtypes.add_vault_id_field, migration_utils.set_current_apps_for_migrations)
    ]
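The re-pairing of RunPython operations above is what makes this migration reversible: each forward function carries, as its reverse_code, the step that must run just before it when unapplying, because Django executes operations (and their reverse_code) in reverse order. Schematically (function bodies elided; set_apps stands in for set_current_apps_for_migrations):

    from django.db import migrations

    def set_apps(apps, schema_editor):
        # stand-in for migration_utils.set_current_apps_for_migrations
        pass

    def forward(apps, schema_editor):
        pass

    def backward(apps, schema_editor):
        pass

    operations = [
        # Applying runs: set_apps, then forward.
        # Unapplying runs: backward, then set_apps.
        migrations.RunPython(set_apps, backward),
        migrations.RunPython(forward, set_apps),
    ]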
@@ -20,6 +20,11 @@ class Migration(migrations.Migration):
        name='execute_role',
        field=awx.main.fields.ImplicitRoleField(null=b'True', on_delete=django.db.models.deletion.CASCADE, parent_role=b'admin_role', related_name='+', to='main.Role'),
    ),
    migrations.AddField(
        model_name='organization',
        name='job_template_admin_role',
        field=awx.main.fields.ImplicitRoleField(editable=False, null=b'True', on_delete=django.db.models.deletion.CASCADE, parent_role=b'admin_role', related_name='+', to='main.Role'),
    ),
    migrations.AddField(
        model_name='organization',
        name='credential_admin_role',
@@ -73,7 +78,7 @@ class Migration(migrations.Migration):
    migrations.AlterField(
        model_name='jobtemplate',
        name='admin_role',
        field=awx.main.fields.ImplicitRoleField(null=b'True', on_delete=django.db.models.deletion.CASCADE, parent_role=[b'project.organization.project_admin_role', b'inventory.organization.inventory_admin_role'], related_name='+', to='main.Role'),
        field=awx.main.fields.ImplicitRoleField(editable=False, null=b'True', on_delete=django.db.models.deletion.CASCADE, parent_role=[b'project.organization.job_template_admin_role', b'inventory.organization.job_template_admin_role'], related_name='+', to='main.Role'),
    ),
    migrations.AlterField(
        model_name='jobtemplate',
@@ -83,6 +88,7 @@ class Migration(migrations.Migration):
    migrations.AlterField(
        model_name='organization',
        name='member_role',
        field=awx.main.fields.ImplicitRoleField(null=b'True', on_delete=django.db.models.deletion.CASCADE, parent_role=[b'admin_role', b'project_admin_role', b'inventory_admin_role', b'workflow_admin_role', b'notification_admin_role', b'credential_admin_role', b'execute_role'], related_name='+', to='main.Role'),
        field=awx.main.fields.ImplicitRoleField(editable=False, null=b'True', on_delete=django.db.models.deletion.CASCADE, parent_role=[b'admin_role', b'execute_role', b'project_admin_role', b'inventory_admin_role', b'workflow_admin_role', b'notification_admin_role', b'credential_admin_role', b'job_template_admin_role'], related_name='+', to='main.Role'),
    ),

]
@@ -19,8 +19,8 @@ class Migration(migrations.Migration):

    operations = [
        # Run the data migration before removing the old credential field
        migrations.RunPython(migration_utils.set_current_apps_for_migrations, migrations.RunPython.noop),
        migrations.RunPython(migrate_inventory_source_cred, migrate_inventory_source_cred_reverse),
        migrations.RunPython(migration_utils.set_current_apps_for_migrations, migrate_inventory_source_cred_reverse),
        migrations.RunPython(migrate_inventory_source_cred, migration_utils.set_current_apps_for_migrations),
        migrations.RemoveField(
            model_name='inventorysource',
            name='credential',
@@ -13,6 +13,12 @@ class Migration(migrations.Migration):
    dependencies = [
        ('main', '0024_v330_create_user_session_membership'),
    ]
    run_before = [
        # As of this migration, OAuth2Application and OAuth2AccessToken are models in the main app.
        # The Grant and RefreshToken models are still in the oauth2_provider app and reference
        # the app and token models, so these must be created before the oauth2_provider models.
        ('oauth2_provider', '0001_initial')
    ]

    operations = [


@@ -20,4 +20,8 @@ class Migration(migrations.Migration):
        name='organization',
        field=models.ForeignKey(help_text='Organization containing this application.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='main.Organization'),
    ),
    migrations.AlterUniqueTogether(
        name='oauth2application',
        unique_together=set([('name', 'organization')]),
    ),
]

@@ -20,7 +20,7 @@ class Migration(migrations.Migration):
    migrations.AlterField(
        model_name='oauth2accesstoken',
        name='scope',
        field=models.TextField(blank=True, help_text="Allowed scopes, further restricts user's permissions."),
        field=models.TextField(blank=True, default=b'write', help_text="Allowed scopes, further restricts user's permissions."),
    ),
    migrations.AlterField(
        model_name='oauth2accesstoken',
@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-21 19:51
from __future__ import unicode_literals

import awx.main.fields
import awx.main.models.activity_stream
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0037_v330_remove_legacy_fact_cleanup'),
    ]

    operations = [
        migrations.AddField(
            model_name='activitystream',
            name='deleted_actor',
            field=awx.main.fields.JSONField(null=True),
        ),
    ]
awx/main/migrations/0039_v330_custom_venv_help_text.py (new file, 33 lines)
@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-23 20:17
from __future__ import unicode_literals

import awx.main.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0038_v330_add_deleted_activitystream_actor'),
    ]

    operations = [
        migrations.AlterField(
            model_name='jobtemplate',
            name='custom_virtualenv',
            field=models.CharField(blank=True, default=None, help_text='Local absolute file path containing a custom Python virtualenv to use', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='organization',
            name='custom_virtualenv',
            field=models.CharField(blank=True, default=None, help_text='Local absolute file path containing a custom Python virtualenv to use', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='project',
            name='custom_virtualenv',
            field=models.CharField(blank=True, default=None, help_text='Local absolute file path containing a custom Python virtualenv to use', max_length=100, null=True),
        ),
    ]
awx/main/migrations/0040_v330_unifiedjob_controller_node.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-25 18:58
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0039_v330_custom_venv_help_text'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='controller_node',
            field=models.TextField(blank=True, default=b'', editable=False, help_text='The instance that managed the isolated execution environment.'),
        ),
    ]
awx/main/migrations/0041_v330_update_oauth_refreshtoken.py (new file, 23 lines)
@@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-06-14 21:03
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.OAUTH2_PROVIDER_REFRESH_TOKEN_MODEL),
        ('main', '0040_v330_unifiedjob_controller_node'),
    ]

    operations = [
        migrations.AddField(
            model_name='oauth2accesstoken',
            name='source_refresh_token',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='refreshed_access_token', to=settings.OAUTH2_PROVIDER_REFRESH_TOKEN_MODEL),
        ),
    ]
awx/main/migrations/0042_v330_org_member_role_deparent.py (new file, 29 lines)
@@ -0,0 +1,29 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-02 13:47
from __future__ import unicode_literals

import awx.main.fields
from django.db import migrations
import django.db.models.deletion
from awx.main.migrations._rbac import rebuild_role_hierarchy


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0041_v330_update_oauth_refreshtoken'),
    ]

    operations = [
        migrations.AlterField(
            model_name='organization',
            name='member_role',
            field=awx.main.fields.ImplicitRoleField(editable=False, null=b'True', on_delete=django.db.models.deletion.CASCADE, parent_role=[b'admin_role'], related_name='+', to='main.Role'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='read_role',
            field=awx.main.fields.ImplicitRoleField(editable=False, null=b'True', on_delete=django.db.models.deletion.CASCADE, parent_role=[b'member_role', b'auditor_role', b'execute_role', b'project_admin_role', b'inventory_admin_role', b'workflow_admin_role', b'notification_admin_role', b'credential_admin_role', b'job_template_admin_role'], related_name='+', to='main.Role'),
        ),
        migrations.RunPython(rebuild_role_hierarchy),
    ]
awx/main/migrations/0043_v330_oauth2accesstoken_modified.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-10 14:02
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0042_v330_org_member_role_deparent'),
    ]

    operations = [
        migrations.AddField(
            model_name='oauth2accesstoken',
            name='modified',
            field=models.DateTimeField(editable=False),
        ),
    ]
@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-17 03:57
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0043_v330_oauth2accesstoken_modified'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventoryupdate',
            name='inventory',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='inventory_updates', to='main.Inventory'),
        ),
    ]
awx/main/migrations/0045_v330_instance_managed_by_policy.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-25 17:42
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0044_v330_add_inventory_update_inventory'),
    ]

    operations = [
        migrations.AddField(
            model_name='instance',
            name='managed_by_policy',
            field=models.BooleanField(default=True),
        )
    ]
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-25 21:24
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0045_v330_instance_managed_by_policy'),
    ]

    operations = [
        migrations.AlterField(
            model_name='oauth2application',
            name='authorization_grant_type',
            field=models.CharField(choices=[(b'authorization-code', 'Authorization code'), (b'implicit', 'Implicit'), (b'password', 'Resource owner password-based')], help_text='The Grant type the user must use to acquire tokens for this application.', max_length=32),
        ),
    ]
20
awx/main/migrations/0047_v330_activitystream_instance.py
Normal file
@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-25 20:19
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0046_v330_remove_client_credentials_grant'),
    ]

    operations = [
        migrations.AddField(
            model_name='activitystream',
            name='instance',
            field=models.ManyToManyField(blank=True, to='main.Instance'),
        ),
    ]
@ -180,6 +180,17 @@ def add_vault_id_field(apps, schema_editor):
    vault_credtype.save()


def remove_vault_id_field(apps, schema_editor):
    vault_credtype = CredentialType.objects.get(kind='vault')
    idx = 0
    for i, input in enumerate(vault_credtype.inputs['fields']):
        if input['id'] == 'vault_id':
            idx = i
            break
    vault_credtype.inputs['fields'].pop(idx)
    vault_credtype.save()


def create_rhv_tower_credtype(apps, schema_editor):
    CredentialType.setup_tower_managed_defaults()
@ -1,56 +1,124 @@
import logging


logger = logging.getLogger('awx.main.migrations')


def migrate_to_multi_cred(app, schema_editor):
    Job = app.get_model('main', 'Job')
    JobTemplate = app.get_model('main', 'JobTemplate')

    ct = 0
    for cls in (Job, JobTemplate):
        for j in cls.objects.iterator():
            if j.credential:
                ct += 1
                logger.debug('Migrating cred %s to %s %s multi-cred relation.', j.credential_id, cls, j.id)
                j.credentials.add(j.credential)
            if j.vault_credential:
                ct += 1
                logger.debug('Migrating cred %s to %s %s multi-cred relation.', j.vault_credential_id, cls, j.id)
                j.credentials.add(j.vault_credential)
            for cred in j.extra_credentials.all():
                ct += 1
                logger.debug('Migrating cred %s to %s %s multi-cred relation.', cred.id, cls, j.id)
                j.credentials.add(cred)
    if ct:
        logger.info('Finished migrating %s credentials to multi-cred', ct)


def migrate_back_from_multi_cred(app, schema_editor):
    Job = app.get_model('main', 'Job')
    JobTemplate = app.get_model('main', 'JobTemplate')
    CredentialType = app.get_model('main', 'CredentialType')
    vault_credtype = CredentialType.objects.get(kind='vault')
    ssh_credtype = CredentialType.objects.get(kind='ssh')

    ct = 0
    for cls in (Job, JobTemplate):
        for j in cls.objects.iterator():
            for cred in j.credentials.iterator():
                changed = False
                if cred.credential_type_id == vault_credtype.id:
                    changed = True
                    ct += 1
                    logger.debug('Reverse migrating vault cred %s for %s %s', cred.id, cls, j.id)
                    j.vault_credential = cred
                elif cred.credential_type_id == ssh_credtype.id:
                    changed = True
                    ct += 1
                    logger.debug('Reverse migrating ssh cred %s for %s %s', cred.id, cls, j.id)
                    j.credential = cred
                else:
                    changed = True
                    ct += 1
                    logger.debug('Reverse migrating cloud cred %s for %s %s', cred.id, cls, j.id)
                    j.extra_credentials.add(cred)
                if changed:
                    j.save()
    if ct:
        logger.info('Finished reverse migrating %s credentials from multi-cred', ct)


def migrate_workflow_cred(app, schema_editor):
    WorkflowJobTemplateNode = app.get_model('main', 'WorkflowJobTemplateNode')
    WorkflowJobNode = app.get_model('main', 'WorkflowJobNode')

    ct = 0
    for cls in (WorkflowJobNode, WorkflowJobTemplateNode):
        for node in cls.objects.iterator():
            if node.credential:
                node.credentials.add(j.credential)
                logger.debug('Migrating prompted credential %s for %s %s', node.credential_id, cls, node.id)
                ct += 1
                node.credentials.add(node.credential)
    if ct:
        logger.info('Finished migrating total of %s workflow prompted credentials', ct)


def migrate_workflow_cred_reverse(app, schema_editor):
    WorkflowJobTemplateNode = app.get_model('main', 'WorkflowJobTemplateNode')
    WorkflowJobNode = app.get_model('main', 'WorkflowJobNode')

    ct = 0
    for cls in (WorkflowJobNode, WorkflowJobTemplateNode):
        for node in cls.objects.iterator():
            cred = node.credentials.first()
            if cred:
                node.credential = cred
                node.save()
                logger.debug('Reverse migrating prompted credential %s for %s %s', node.credential_id, cls, node.id)
                ct += 1
                node.save(update_fields=['credential'])
    if ct:
        logger.info('Finished reverse migrating total of %s workflow prompted credentials', ct)


def migrate_inventory_source_cred(app, schema_editor):
    InventoryUpdate = app.get_model('main', 'InventoryUpdate')
    InventorySource = app.get_model('main', 'InventorySource')

    ct = 0
    for cls in (InventoryUpdate, InventorySource):
        for obj in cls.objects.iterator():
            if obj.credential:
                ct += 1
                logger.debug('Migrating credential %s for %s %s', obj.credential_id, cls, obj.id)
                obj.credentials.add(obj.credential)
    if ct:
        logger.info('Finished migrating %s inventory source credentials to multi-cred', ct)


def migrate_inventory_source_cred_reverse(app, schema_editor):
    InventoryUpdate = app.get_model('main', 'InventoryUpdate')
    InventorySource = app.get_model('main', 'InventorySource')

    ct = 0
    for cls in (InventoryUpdate, InventorySource):
        for obj in cls.objects.iterator():
            cred = obj.credentials.first()
            if cred:
                ct += 1
                logger.debug('Reverse migrating credential %s for %s %s', cred.id, cls, obj.id)
                obj.credential = cred
                obj.save()
    if ct:
        logger.info('Finished reverse migrating %s inventory source credentials from multi-cred', ct)
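
Note: the forward/reverse pairs above are data migrations meant to be attached to a schema migration via migrations.RunPython, so the credential reshuffle can be applied and rolled back alongside the schema change. A minimal sketch of that wiring; the module alias and dependency below are assumptions for illustration, not taken from this commit:

from django.db import migrations

from awx.main.migrations import _credentialtypes as credtypes  # assumed module path


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0047_v330_activitystream_instance'),  # hypothetical dependency
    ]

    operations = [
        # forward callable first, reverse callable second
        migrations.RunPython(credtypes.migrate_to_multi_cred,
                             credtypes.migrate_back_from_multi_cred),
    ]
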
@ -33,6 +33,7 @@ class ActivityStream(models.Model):
    operation = models.CharField(max_length=13, choices=OPERATION_CHOICES)
    timestamp = models.DateTimeField(auto_now_add=True)
    changes = models.TextField(blank=True)
    deleted_actor = JSONField(null=True)

    object_relationship_type = models.TextField(blank=True)
    object1 = models.TextField()
@ -65,6 +66,7 @@ class ActivityStream(models.Model):
    notification = models.ManyToManyField("Notification", blank=True)
    label = models.ManyToManyField("Label", blank=True)
    role = models.ManyToManyField("Role", blank=True)
    instance = models.ManyToManyField("Instance", blank=True)
    instance_group = models.ManyToManyField("InstanceGroup", blank=True)
    o_auth2_application = models.ManyToManyField("OAuth2Application", blank=True)
    o_auth2_access_token = models.ManyToManyField("OAuth2AccessToken", blank=True)
@ -77,6 +79,18 @@ class ActivityStream(models.Model):
        return reverse('api:activity_stream_detail', kwargs={'pk': self.pk}, request=request)

    def save(self, *args, **kwargs):
        # Store denormalized actor metadata so that we retain it for accounting
        # purposes when the User row is deleted.
        if self.actor:
            self.deleted_actor = {
                'id': self.actor_id,
                'username': self.actor.username,
                'first_name': self.actor.first_name,
                'last_name': self.actor.last_name,
            }
            if 'update_fields' in kwargs and 'deleted_actor' not in kwargs['update_fields']:
                kwargs['update_fields'].append('deleted_actor')

        # For compatibility with Django 1.4.x, attempt to handle any calls to
        # save that pass update_fields.
        try:
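
The deleted_actor column added above denormalizes actor metadata at save time, so the audit trail keeps the username even after the User row itself is deleted. The same snapshot-on-save pattern in isolation (plain-Python stand-ins, not the AWX models):

class User(object):
    def __init__(self, id, username):
        self.id, self.username = id, username


class AuditEntry(object):
    def __init__(self, actor=None):
        self.actor = actor
        self.deleted_actor = None

    def save(self):
        if self.actor is not None:
            # copy what accounting needs while the actor still exists
            self.deleted_actor = {'id': self.actor.id, 'username': self.actor.username}
        # ... persist the row here ...


entry = AuditEntry(actor=User(42, 'alice'))
entry.save()
entry.actor = None  # the User row is deleted later
assert entry.deleted_actor == {'id': 42, 'username': 'alice'}
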
@ -221,7 +221,46 @@ class PasswordFieldsModel(BaseModel):
                update_fields.append(field)


class PrimordialModel(CreatedModifiedModel):
class HasEditsMixin(BaseModel):
    """Mixin which will keep the versions of field values from last edit
    so we can tell if current model has unsaved changes.
    """

    class Meta:
        abstract = True

    @classmethod
    def _get_editable_fields(cls):
        fds = set([])
        for field in cls._meta.concrete_fields:
            if hasattr(field, 'attname'):
                if field.attname == 'id':
                    continue
                elif field.attname.endswith('ptr_id'):
                    # polymorphic fields should always be non-editable, see:
                    # https://github.com/django-polymorphic/django-polymorphic/issues/349
                    continue
                if getattr(field, 'editable', True):
                    fds.add(field.attname)
        return fds

    def _get_fields_snapshot(self, fields_set=None):
        new_values = {}
        if fields_set is None:
            fields_set = self._get_editable_fields()
        for attr, val in self.__dict__.items():
            if attr in fields_set:
                new_values[attr] = val
        return new_values

    def _values_have_edits(self, new_values):
        return any(
            new_values.get(fd_name, None) != self._prior_values_store.get(fd_name, None)
            for fd_name in new_values.keys()
        )


class PrimordialModel(HasEditsMixin, CreatedModifiedModel):
    '''
    Common model for all object types that have these standard fields
    must use a subclass CommonModel or CommonModelNameNotUnique though
@ -254,9 +293,13 @@ class PrimordialModel(CreatedModifiedModel):

    tags = TaggableManager(blank=True)

    def __init__(self, *args, **kwargs):
        r = super(PrimordialModel, self).__init__(*args, **kwargs)
        self._prior_values_store = self._get_fields_snapshot()
        return r

    def save(self, *args, **kwargs):
        update_fields = kwargs.get('update_fields', [])
        fields_are_specified = bool(update_fields)
        user = get_current_user()
        if user and not user.id:
            user = None
@ -264,15 +307,14 @@ class PrimordialModel(CreatedModifiedModel):
            self.created_by = user
            if 'created_by' not in update_fields:
                update_fields.append('created_by')
        # Update modified_by if not called with update_fields, or if any
        # editable fields are present in update_fields
        if (
                (not fields_are_specified) or
                any(getattr(self._meta.get_field(name), 'editable', True) for name in update_fields)):
        # Update modified_by if any editable fields have changed
        new_values = self._get_fields_snapshot()
        if (not self.pk and not self.modified_by) or self._values_have_edits(new_values):
            self.modified_by = user
            if 'modified_by' not in update_fields:
                update_fields.append('modified_by')
        super(PrimordialModel, self).save(*args, **kwargs)
        self._prior_values_store = new_values

    def clean_description(self):
        # Description should always be empty string, never null.
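
HasEditsMixin implements dirty checking by snapshotting editable field values at __init__ and after each save, then diffing against the snapshot on the next save; that is what lets PrimordialModel.save() bump modified_by only when something actually changed. A standalone sketch of the same idea (illustrative class, not AWX code):

class DirtyTracked(object):
    EDITABLE_FIELDS = ('name', 'description')

    def __init__(self, **fields):
        self.__dict__.update(fields)
        self._prior_values_store = self._get_fields_snapshot()

    def _get_fields_snapshot(self):
        return {f: getattr(self, f, None) for f in self.EDITABLE_FIELDS}

    def _values_have_edits(self, new_values):
        return any(new_values.get(f) != self._prior_values_store.get(f)
                   for f in new_values)

    def save(self):
        new_values = self._get_fields_snapshot()
        if self._values_have_edits(new_values):
            pass  # this is where modified_by would be updated
        self._prior_values_store = new_values  # re-baseline after save


obj = DirtyTracked(name='demo', description='')
obj.save()          # no edits detected
obj.name = 'renamed'
obj.save()          # detected as an edit
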
@ -14,7 +14,7 @@ from jinja2 import Template

# Django
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext_lazy as _, ugettext_noop
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text

@ -419,7 +419,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
        else:
            fmt_str = six.text_type('{}_{}')
            return fmt_str.format(type_alias, self.inputs.get('vault_id'))
        return str(type_alias)
        return six.text_type(type_alias)

    @staticmethod
    def unique_dict(cred_qs):
@ -623,6 +623,11 @@ class CredentialType(CommonModelNameNotUnique):
            if len(value):
                namespace[field_name] = value

        # default missing boolean fields to False
        for field in self.inputs.get('fields', []):
            if field['type'] == 'boolean' and field['id'] not in credential.inputs.keys():
                namespace[field['id']] = safe_namespace[field['id']] = False

        file_tmpls = self.injectors.get('file', {})
        # If any file templates are provided, render the files and update the
        # special `tower` template namespace so the filename can be
@ -673,46 +678,46 @@
    def ssh(cls):
        return cls(
            kind='ssh',
            name='Machine',
            name=ugettext_noop('Machine'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True,
                    'ask_at_runtime': True
                }, {
                    'id': 'ssh_key_data',
                    'label': 'SSH Private Key',
                    'label': ugettext_noop('SSH Private Key'),
                    'type': 'string',
                    'format': 'ssh_private_key',
                    'secret': True,
                    'multiline': True
                }, {
                    'id': 'ssh_key_unlock',
                    'label': 'Private Key Passphrase',
                    'label': ugettext_noop('Private Key Passphrase'),
                    'type': 'string',
                    'secret': True,
                    'ask_at_runtime': True
                }, {
                    'id': 'become_method',
                    'label': 'Privilege Escalation Method',
                    'label': ugettext_noop('Privilege Escalation Method'),
                    'type': 'become_method',
                    'help_text': ('Specify a method for "become" operations. This is '
                                  'equivalent to specifying the --become-method '
                                  'Ansible parameter.')
                    'help_text': ugettext_noop('Specify a method for "become" operations. This is '
                                               'equivalent to specifying the --become-method '
                                               'Ansible parameter.')
                }, {
                    'id': 'become_username',
                    'label': 'Privilege Escalation Username',
                    'label': ugettext_noop('Privilege Escalation Username'),
                    'type': 'string',
                }, {
                    'id': 'become_password',
                    'label': 'Privilege Escalation Password',
                    'label': ugettext_noop('Privilege Escalation Password'),
                    'type': 'string',
                    'secret': True,
                    'ask_at_runtime': True
@ -728,28 +733,28 @@ def ssh(cls):
    def scm(cls):
        return cls(
            kind='scm',
            name='Source Control',
            name=ugettext_noop('Source Control'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True
                }, {
                    'id': 'ssh_key_data',
                    'label': 'SCM Private Key',
                    'label': ugettext_noop('SCM Private Key'),
                    'type': 'string',
                    'format': 'ssh_private_key',
                    'secret': True,
                    'multiline': True
                }, {
                    'id': 'ssh_key_unlock',
                    'label': 'Private Key Passphrase',
                    'label': ugettext_noop('Private Key Passphrase'),
                    'type': 'string',
                    'secret': True
                }],
@ -764,25 +769,25 @@ def scm(cls):
    def vault(cls):
        return cls(
            kind='vault',
            name='Vault',
            name=ugettext_noop('Vault'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'vault_password',
                    'label': 'Vault Password',
                    'label': ugettext_noop('Vault Password'),
                    'type': 'string',
                    'secret': True,
                    'ask_at_runtime': True
                }, {
                    'id': 'vault_id',
                    'label': 'Vault Identifier',
                    'label': ugettext_noop('Vault Identifier'),
                    'type': 'string',
                    'format': 'vault_id',
                    'help_text': ('Specify an (optional) Vault ID. This is '
                                  'equivalent to specifying the --vault-id '
                                  'Ansible parameter for providing multiple Vault '
                                  'passwords. Note: this feature only works in '
                                  'Ansible 2.4+.')
                    'help_text': ugettext_noop('Specify an (optional) Vault ID. This is '
                                               'equivalent to specifying the --vault-id '
                                               'Ansible parameter for providing multiple Vault '
                                               'passwords. Note: this feature only works in '
                                               'Ansible 2.4+.')
                }],
                'required': ['vault_password'],
            }
@ -793,37 +798,37 @@ def vault(cls):
    def net(cls):
        return cls(
            kind='net',
            name='Network',
            name=ugettext_noop('Network'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True,
                }, {
                    'id': 'ssh_key_data',
                    'label': 'SSH Private Key',
                    'label': ugettext_noop('SSH Private Key'),
                    'type': 'string',
                    'format': 'ssh_private_key',
                    'secret': True,
                    'multiline': True
                }, {
                    'id': 'ssh_key_unlock',
                    'label': 'Private Key Passphrase',
                    'label': ugettext_noop('Private Key Passphrase'),
                    'type': 'string',
                    'secret': True,
                }, {
                    'id': 'authorize',
                    'label': 'Authorize',
                    'label': ugettext_noop('Authorize'),
                    'type': 'boolean',
                }, {
                    'id': 'authorize_password',
                    'label': 'Authorize Password',
                    'label': ugettext_noop('Authorize Password'),
                    'type': 'string',
                    'secret': True,
                }],
@ -840,27 +845,27 @@ def net(cls):
    def aws(cls):
        return cls(
            kind='cloud',
            name='Amazon Web Services',
            name=ugettext_noop('Amazon Web Services'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'username',
                    'label': 'Access Key',
                    'label': ugettext_noop('Access Key'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Secret Key',
                    'label': ugettext_noop('Secret Key'),
                    'type': 'string',
                    'secret': True,
                }, {
                    'id': 'security_token',
                    'label': 'STS Token',
                    'label': ugettext_noop('STS Token'),
                    'type': 'string',
                    'secret': True,
                    'help_text': ('Security Token Service (STS) is a web service '
                                  'that enables you to request temporary, '
                                  'limited-privilege credentials for AWS Identity '
                                  'and Access Management (IAM) users.'),
                    'help_text': ugettext_noop('Security Token Service (STS) is a web service '
                                               'that enables you to request temporary, '
                                               'limited-privilege credentials for AWS Identity '
                                               'and Access Management (IAM) users.'),
                }],
                'required': ['username', 'password']
            }
@ -871,36 +876,36 @@ def aws(cls):
    def openstack(cls):
        return cls(
            kind='cloud',
            name='OpenStack',
            name=ugettext_noop('OpenStack'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password (API Key)',
                    'label': ugettext_noop('Password (API Key)'),
                    'type': 'string',
                    'secret': True,
                }, {
                    'id': 'host',
                    'label': 'Host (Authentication URL)',
                    'label': ugettext_noop('Host (Authentication URL)'),
                    'type': 'string',
                    'help_text': ('The host to authenticate with. For example, '
                                  'https://openstack.business.com/v2.0/')
                    'help_text': ugettext_noop('The host to authenticate with. For example, '
                                               'https://openstack.business.com/v2.0/')
                }, {
                    'id': 'project',
                    'label': 'Project (Tenant Name)',
                    'label': ugettext_noop('Project (Tenant Name)'),
                    'type': 'string',
                }, {
                    'id': 'domain',
                    'label': 'Domain Name',
                    'label': ugettext_noop('Domain Name'),
                    'type': 'string',
                    'help_text': ('OpenStack domains define administrative boundaries. '
                                  'It is only needed for Keystone v3 authentication '
                                  'URLs. Refer to Ansible Tower documentation for '
                                  'common scenarios.')
                    'help_text': ugettext_noop('OpenStack domains define administrative boundaries. '
                                               'It is only needed for Keystone v3 authentication '
                                               'URLs. Refer to Ansible Tower documentation for '
                                               'common scenarios.')
                }],
                'required': ['username', 'password', 'host', 'project']
            }
@ -911,22 +916,22 @@ def openstack(cls):
    def vmware(cls):
        return cls(
            kind='cloud',
            name='VMware vCenter',
            name=ugettext_noop('VMware vCenter'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'host',
                    'label': 'VCenter Host',
                    'label': ugettext_noop('VCenter Host'),
                    'type': 'string',
                    'help_text': ('Enter the hostname or IP address that corresponds '
                                  'to your VMware vCenter.')
                    'help_text': ugettext_noop('Enter the hostname or IP address that corresponds '
                                               'to your VMware vCenter.')
                }, {
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True,
                }],
@ -939,22 +944,22 @@ def vmware(cls):
    def satellite6(cls):
        return cls(
            kind='cloud',
            name='Red Hat Satellite 6',
            name=ugettext_noop('Red Hat Satellite 6'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'host',
                    'label': 'Satellite 6 URL',
                    'label': ugettext_noop('Satellite 6 URL'),
                    'type': 'string',
                    'help_text': ('Enter the URL that corresponds to your Red Hat '
                                  'Satellite 6 server. For example, https://satellite.example.org')
                    'help_text': ugettext_noop('Enter the URL that corresponds to your Red Hat '
                                               'Satellite 6 server. For example, https://satellite.example.org')
                }, {
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True,
                }],
@ -967,23 +972,23 @@ def satellite6(cls):
    def cloudforms(cls):
        return cls(
            kind='cloud',
            name='Red Hat CloudForms',
            name=ugettext_noop('Red Hat CloudForms'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'host',
                    'label': 'CloudForms URL',
                    'label': ugettext_noop('CloudForms URL'),
                    'type': 'string',
                    'help_text': ('Enter the URL for the virtual machine that '
                                  'corresponds to your CloudForm instance. '
                                  'For example, https://cloudforms.example.org')
                    'help_text': ugettext_noop('Enter the URL for the virtual machine that '
                                               'corresponds to your CloudForm instance. '
                                               'For example, https://cloudforms.example.org')
                }, {
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True,
                }],
@ -996,32 +1001,32 @@ def cloudforms(cls):
    def gce(cls):
        return cls(
            kind='cloud',
            name='Google Compute Engine',
            name=ugettext_noop('Google Compute Engine'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'username',
                    'label': 'Service Account Email Address',
                    'label': ugettext_noop('Service Account Email Address'),
                    'type': 'string',
                    'help_text': ('The email address assigned to the Google Compute '
                                  'Engine service account.')
                    'help_text': ugettext_noop('The email address assigned to the Google Compute '
                                               'Engine service account.')
                }, {
                    'id': 'project',
                    'label': 'Project',
                    'type': 'string',
                    'help_text': ('The Project ID is the GCE assigned identification. '
                                  'It is often constructed as three words or two words '
                                  'followed by a three-digit number. Examples: project-id-000 '
                                  'and another-project-id')
                    'help_text': ugettext_noop('The Project ID is the GCE assigned identification. '
                                               'It is often constructed as three words or two words '
                                               'followed by a three-digit number. Examples: project-id-000 '
                                               'and another-project-id')
                }, {
                    'id': 'ssh_key_data',
                    'label': 'RSA Private Key',
                    'label': ugettext_noop('RSA Private Key'),
                    'type': 'string',
                    'format': 'ssh_private_key',
                    'secret': True,
                    'multiline': True,
                    'help_text': ('Paste the contents of the PEM file associated '
                                  'with the service account email.')
                    'help_text': ugettext_noop('Paste the contents of the PEM file associated '
                                               'with the service account email.')
                }],
                'required': ['username', 'ssh_key_data'],
            }
@ -1032,43 +1037,43 @@ def gce(cls):
    def azure_rm(cls):
        return cls(
            kind='cloud',
            name='Microsoft Azure Resource Manager',
            name=ugettext_noop('Microsoft Azure Resource Manager'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'subscription',
                    'label': 'Subscription ID',
                    'label': ugettext_noop('Subscription ID'),
                    'type': 'string',
                    'help_text': ('Subscription ID is an Azure construct, which is '
                                  'mapped to a username.')
                    'help_text': ugettext_noop('Subscription ID is an Azure construct, which is '
                                               'mapped to a username.')
                }, {
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True,
                }, {
                    'id': 'client',
                    'label': 'Client ID',
                    'label': ugettext_noop('Client ID'),
                    'type': 'string'
                }, {
                    'id': 'secret',
                    'label': 'Client Secret',
                    'label': ugettext_noop('Client Secret'),
                    'type': 'string',
                    'secret': True,
                }, {
                    'id': 'tenant',
                    'label': 'Tenant ID',
                    'label': ugettext_noop('Tenant ID'),
                    'type': 'string'
                }, {
                    'id': 'cloud_environment',
                    'label': 'Azure Cloud Environment',
                    'label': ugettext_noop('Azure Cloud Environment'),
                    'type': 'string',
                    'help_text': ('Environment variable AZURE_CLOUD_ENVIRONMENT when'
                                  ' using Azure GovCloud or Azure stack.')
                    'help_text': ugettext_noop('Environment variable AZURE_CLOUD_ENVIRONMENT when'
                                               ' using Azure GovCloud or Azure stack.')
                }],
                'required': ['subscription'],
            }
@ -1079,16 +1084,16 @@ def azure_rm(cls):
    def insights(cls):
        return cls(
            kind='insights',
            name='Insights',
            name=ugettext_noop('Insights'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True
                }],
@ -1107,28 +1112,28 @@ def insights(cls):
    def rhv(cls):
        return cls(
            kind='cloud',
            name='Red Hat Virtualization',
            name=ugettext_noop('Red Hat Virtualization'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'host',
                    'label': 'Host (Authentication URL)',
                    'label': ugettext_noop('Host (Authentication URL)'),
                    'type': 'string',
                    'help_text': ('The host to authenticate with.')
                    'help_text': ugettext_noop('The host to authenticate with.')
                }, {
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True,
                }, {
                    'id': 'ca_file',
                    'label': 'CA File',
                    'label': ugettext_noop('CA File'),
                    'type': 'string',
                    'help_text': ('Absolute file path to the CA file to use (optional)')
                    'help_text': ugettext_noop('Absolute file path to the CA file to use (optional)')
                }],
                'required': ['host', 'username', 'password'],
            },
@ -1159,26 +1164,26 @@ def rhv(cls):
    def tower(cls):
        return cls(
            kind='cloud',
            name='Ansible Tower',
            name=ugettext_noop('Ansible Tower'),
            managed_by_tower=True,
            inputs={
                'fields': [{
                    'id': 'host',
                    'label': 'Ansible Tower Hostname',
                    'label': ugettext_noop('Ansible Tower Hostname'),
                    'type': 'string',
                    'help_text': ('The Ansible Tower base URL to authenticate with.')
                    'help_text': ugettext_noop('The Ansible Tower base URL to authenticate with.')
                }, {
                    'id': 'username',
                    'label': 'Username',
                    'label': ugettext_noop('Username'),
                    'type': 'string'
                }, {
                    'id': 'password',
                    'label': 'Password',
                    'label': ugettext_noop('Password'),
                    'type': 'string',
                    'secret': True,
                }, {
                    'id': 'verify_ssl',
                    'label': 'Verify SSL',
                    'label': ugettext_noop('Verify SSL'),
                    'type': 'boolean',
                    'secret': False
                }],
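
The recurring change in this file is wrapping names, labels, and help text in ugettext_noop: the literal is marked for extraction into the translation catalog but stored untranslated, so a value computed once at import time can still be translated per-request when it is rendered. A minimal sketch of the distinction:

from django.utils.translation import ugettext as _, ugettext_noop

LABEL = ugettext_noop('Password')  # marked for the catalog, stored verbatim


def render_label():
    return _(LABEL)  # looked up in the active language at render time
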
@ -1,5 +1,6 @@
import datetime
import logging
from collections import defaultdict

from django.conf import settings
from django.db import models, DatabaseError
@ -39,6 +40,21 @@ def sanitize_event_keys(kwargs, valid_keys):
            kwargs[key] = Truncator(kwargs[key]).chars(1024)


def create_host_status_counts(event_data):
    host_status = {}
    host_status_keys = ['skipped', 'ok', 'changed', 'failures', 'dark']

    for key in host_status_keys:
        for host in event_data.get(key, {}):
            host_status[host] = key

    host_status_counts = defaultdict(lambda: 0)

    for value in host_status.values():
        host_status_counts[value] += 1

    return dict(host_status_counts)


class BasePlaybookEvent(CreatedModifiedModel):
    '''
@ -194,6 +210,9 @@ class BasePlaybookEvent(CreatedModifiedModel):
    def event_level(self):
        return self.LEVEL_FOR_EVENT.get(self.event, 0)

    def get_host_status_counts(self):
        return create_host_status_counts(getattr(self, 'event_data', {}))

    def get_event_display2(self):
        msg = self.get_event_display()
        if self.event == 'playbook_on_play_start':
@ -588,6 +607,9 @@ class BaseCommandEvent(CreatedModifiedModel):
        '''
        return self.event

    def get_host_status_counts(self):
        return create_host_status_counts(getattr(self, 'event_data', {}))


class AdHocCommandEvent(BaseCommandEvent):
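
create_host_status_counts reduces per-host tallies to a single status per host: keys are scanned in the order skipped, ok, changed, failures, dark, and a host that appears under several keys ends up counted under the last key scanned. A worked example, runnable against the function above:

event_data = {
    'ok': {'web1': 3, 'web2': 1},
    'changed': {'web2': 1},
    'failures': {'db1': 1},
}
print(create_host_status_counts(event_data))
# {'ok': 1, 'changed': 1, 'failures': 1}
# web2 is listed under both 'ok' and 'changed'; it is counted as
# 'changed' because 'changed' comes later in host_status_keys.
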
@ -1,8 +1,11 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

import six
import random
from decimal import Decimal

from django.core.exceptions import ValidationError
from django.db import models, connection
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
@ -16,6 +19,7 @@ from awx import __version__ as awx_application_version
from awx.api.versioning import reverse
from awx.main.managers import InstanceManager, InstanceGroupManager
from awx.main.fields import JSONField
from awx.main.models.base import BaseModel, HasEditsMixin
from awx.main.models.inventory import InventoryUpdate
from awx.main.models.jobs import Job
from awx.main.models.projects import ProjectUpdate
@ -26,7 +30,37 @@ from awx.main.models.mixins import RelatedJobsMixin

__all__ = ('Instance', 'InstanceGroup', 'JobOrigin', 'TowerScheduleState',)


class Instance(models.Model):
def validate_queuename(v):
    # celery and kombu don't play nice with unicode in queue names
    if v:
        try:
            '{}'.format(v.decode('utf-8'))
        except UnicodeEncodeError:
            raise ValidationError(_(six.text_type('{} contains unsupported characters')).format(v))


class HasPolicyEditsMixin(HasEditsMixin):

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        r = super(BaseModel, self).__init__(*args, **kwargs)
        self._prior_values_store = self._get_fields_snapshot()
        return r

    def save(self, *args, **kwargs):
        super(BaseModel, self).save(*args, **kwargs)
        self._prior_values_store = self._get_fields_snapshot()

    def has_policy_changes(self):
        if not hasattr(self, 'POLICY_FIELDS'):
            raise RuntimeError('HasPolicyEditsMixin Model needs to set POLICY_FIELDS')
        new_values = self._get_fields_snapshot(fields_set=self.POLICY_FIELDS)
        return self._values_have_edits(new_values)


class Instance(HasPolicyEditsMixin, BaseModel):
    """A model representing an AWX instance running against this database."""
    objects = InstanceManager()

@ -37,7 +71,6 @@ class Instance(models.Model):
    last_isolated_check = models.DateTimeField(
        null=True,
        editable=False,
        auto_now_add=True
    )
    version = models.CharField(max_length=24, blank=True)
    capacity = models.PositiveIntegerField(
@ -52,6 +85,9 @@ class Instance(models.Model):
    enabled = models.BooleanField(
        default=True
    )
    managed_by_policy = models.BooleanField(
        default=True
    )
    cpu = models.IntegerField(
        default=0,
        editable=False,
@ -72,6 +108,8 @@ class Instance(models.Model):
    class Meta:
        app_label = 'main'

    POLICY_FIELDS = frozenset(('managed_by_policy', 'hostname', 'capacity_adjustment'))

    def get_absolute_url(self, request=None):
        return reverse('api:instance_detail', kwargs={'pk': self.pk}, request=request)

@ -80,6 +118,10 @@ class Instance(models.Model):
        return sum(x.task_impact for x in UnifiedJob.objects.filter(execution_node=self.hostname,
                                                                    status__in=('running', 'waiting')))

    @property
    def remaining_capacity(self):
        return self.capacity - self.consumed_capacity

    @property
    def role(self):
        # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
@ -89,6 +131,10 @@ class Instance(models.Model):
    def jobs_running(self):
        return UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting',)).count()

    @property
    def jobs_total(self):
        return UnifiedJob.objects.filter(execution_node=self.hostname).count()

    def is_lost(self, ref_time=None, isolated=False):
        if ref_time is None:
            ref_time = now()
@ -100,6 +146,8 @@ class Instance(models.Model):
    def is_controller(self):
        return Instance.objects.filter(rampart_groups__controller__instances=self).exists()

    def is_isolated(self):
        return self.rampart_groups.filter(controller__isnull=False).exists()

    def refresh_capacity(self):
        cpu = get_cpu_capacity()
@ -113,9 +161,13 @@ class Instance(models.Model):
        self.save(update_fields=['capacity', 'version', 'modified', 'cpu',
                                 'memory', 'cpu_capacity', 'mem_capacity'])

    def clean_hostname(self):
        validate_queuename(self.hostname)
        return self.hostname


class InstanceGroup(models.Model, RelatedJobsMixin):
class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
    """A model representing a Queue/Group of AWX Instances."""
    objects = InstanceGroupManager()

@ -150,6 +202,10 @@ class InstanceGroup(models.Model, RelatedJobsMixin):
        help_text=_("List of exact-match Instances that will always be automatically assigned to this group")
    )

    POLICY_FIELDS = frozenset((
        'policy_instance_list', 'policy_instance_minimum', 'policy_instance_percentage', 'controller'
    ))

    def get_absolute_url(self, request=None):
        return reverse('api:instance_group_detail', kwargs={'pk': self.pk}, request=request)

@ -157,6 +213,15 @@ class InstanceGroup(models.Model, RelatedJobsMixin):
    def capacity(self):
        return sum([inst.capacity for inst in self.instances.all()])

    @property
    def jobs_running(self):
        return UnifiedJob.objects.filter(status__in=('running', 'waiting'),
                                         instance_group=self).count()

    @property
    def jobs_total(self):
        return UnifiedJob.objects.filter(instance_group=self).count()

    '''
    RelatedJobsMixin
    '''
@ -167,6 +232,37 @@ class InstanceGroup(models.Model, RelatedJobsMixin):
    class Meta:
        app_label = 'main'

    def clean_name(self):
        validate_queuename(self.name)
        return self.name

    def fit_task_to_most_remaining_capacity_instance(self, task):
        instance_most_capacity = None
        for i in self.instances.filter(capacity__gt=0).order_by('hostname'):
            if not i.enabled:
                continue
            if i.remaining_capacity >= task.task_impact and \
                    (instance_most_capacity is None or
                     i.remaining_capacity > instance_most_capacity.remaining_capacity):
                instance_most_capacity = i
        return instance_most_capacity

    def find_largest_idle_instance(self):
        largest_instance = None
        for i in self.instances.filter(capacity__gt=0).order_by('hostname'):
            if i.jobs_running == 0:
                if largest_instance is None:
                    largest_instance = i
                elif i.capacity > largest_instance.capacity:
                    largest_instance = i
        return largest_instance

    def choose_online_controller_node(self):
        return random.choice(list(self.controller
                                  .instances
                                  .filter(capacity__gt=0)
                                  .values_list('hostname', flat=True)))


class TowerScheduleState(SingletonModel):
    schedule_last_run = models.DateTimeField(auto_now_add=True)
@ -190,29 +286,31 @@ class JobOrigin(models.Model):
        app_label = 'main'


@receiver(post_save, sender=InstanceGroup)
def on_instance_group_saved(sender, instance, created=False, raw=False, **kwargs):
def schedule_policy_task():
    from awx.main.tasks import apply_cluster_membership_policies
    connection.on_commit(lambda: apply_cluster_membership_policies.apply_async())


@receiver(post_save, sender=InstanceGroup)
def on_instance_group_saved(sender, instance, created=False, raw=False, **kwargs):
    if created or instance.has_policy_changes():
        schedule_policy_task()


@receiver(post_save, sender=Instance)
def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
    if created:
        from awx.main.tasks import apply_cluster_membership_policies
        connection.on_commit(lambda: apply_cluster_membership_policies.apply_async())
    if created or instance.has_policy_changes():
        schedule_policy_task()


@receiver(post_delete, sender=InstanceGroup)
def on_instance_group_deleted(sender, instance, using, **kwargs):
    from awx.main.tasks import apply_cluster_membership_policies
    connection.on_commit(lambda: apply_cluster_membership_policies.apply_async())
    schedule_policy_task()


@receiver(post_delete, sender=Instance)
def on_instance_deleted(sender, instance, using, **kwargs):
    from awx.main.tasks import apply_cluster_membership_policies
    connection.on_commit(lambda: apply_cluster_membership_policies.apply_async())
    schedule_policy_task()


# Unfortunately, the signal can't just be connected against UnifiedJob; it
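
fit_task_to_most_remaining_capacity_instance is a best-fit scan: among enabled instances with nonzero capacity it returns the one with the most remaining capacity that can still absorb the task's impact, or None when nothing fits. The selection logic in isolation, with stand-in objects rather than the AWX models:

class StubInstance(object):
    def __init__(self, hostname, remaining_capacity, enabled=True):
        self.hostname = hostname
        self.remaining_capacity = remaining_capacity
        self.enabled = enabled


def fit_task(instances, task_impact):
    best = None
    for i in sorted(instances, key=lambda x: x.hostname):
        if not i.enabled:
            continue
        if i.remaining_capacity >= task_impact and (
                best is None or i.remaining_capacity > best.remaining_capacity):
            best = i
    return best  # None: no instance can take the task right now


nodes = [StubInstance('a', 10), StubInstance('b', 25), StubInstance('c', 40, enabled=False)]
assert fit_task(nodes, 20).hostname == 'b'  # 'c' has more room but is disabled
assert fit_task(nodes, 50) is None
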
@ -1420,7 +1420,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, RelatedJobsMix
    @classmethod
    def _get_unified_job_field_names(cls):
        return set(f.name for f in InventorySourceOptions._meta.fields) | set(
            ['name', 'description', 'schedule', 'credentials']
            ['name', 'description', 'schedule', 'credentials', 'inventory']
        )

    def save(self, *args, **kwargs):
@ -1599,6 +1599,13 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
    class Meta:
        app_label = 'main'

    inventory = models.ForeignKey(
        'Inventory',
        related_name='inventory_updates',
        null=True,
        default=None,
        on_delete=models.DO_NOTHING,
    )
    inventory_source = models.ForeignKey(
        'InventorySource',
        related_name='inventory_updates',
@ -33,7 +33,7 @@ from awx.main.models.notifications import (
    NotificationTemplate,
    JobNotificationMixin,
)
from awx.main.utils import parse_yaml_or_json
from awx.main.utils import parse_yaml_or_json, getattr_dne
from awx.main.fields import ImplicitRoleField
from awx.main.models.mixins import (
    ResourceMixin,
@ -278,7 +278,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
        allows_field='credentials'
    )
    admin_role = ImplicitRoleField(
        parent_role=['project.organization.project_admin_role', 'inventory.organization.inventory_admin_role']
        parent_role=['project.organization.job_template_admin_role', 'inventory.organization.job_template_admin_role']
    )
    execute_role = ImplicitRoleField(
        parent_role=['admin_role', 'project.organization.execute_role', 'inventory.organization.execute_role'],
@ -343,11 +343,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
        # not block a provisioning callback from creating/launching jobs.
        if callback_extra_vars is None:
            for ask_field_name in set(self.get_ask_mapping().values()):
                if ask_field_name == 'ask_credential_on_launch':
                    # if ask_credential_on_launch is True, it just means it can
                    # optionally be specified at launch time, not that it's *required*
                    # to launch
                    continue
                if getattr(self, ask_field_name):
                    prompting_needed = True
                    break
@ -402,7 +397,9 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
                if 'prompts' not in exclude_errors:
                    errors_dict[field_name] = _('Field is not configured to prompt on launch.').format(field_name=field_name)

        if 'prompts' not in exclude_errors and self.passwords_needed_to_start:
        if ('prompts' not in exclude_errors and
                (not getattr(self, 'ask_credential_on_launch', False)) and
                self.passwords_needed_to_start):
            errors_dict['passwords_needed_to_start'] = _(
                'Saved launch configurations cannot provide passwords needed to start.')

@ -772,9 +769,13 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                if not os.path.realpath(filepath).startswith(destination):
                    system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                    continue
                with codecs.open(filepath, 'w', encoding='utf-8') as f:
                    os.chmod(f.name, 0o600)
                    json.dump(host.ansible_facts, f)
                try:
                    with codecs.open(filepath, 'w', encoding='utf-8') as f:
                        os.chmod(f.name, 0o600)
                        json.dump(host.ansible_facts, f)
                except IOError:
                    system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                    continue
                # make note of the time we wrote the file so we can check if it changed later
                modification_times[filepath] = os.path.getmtime(filepath)

@ -957,24 +958,37 @@ class JobLaunchConfig(LaunchTimeConfig):
        editable=False,
    )

    def has_user_prompts(self, template):
        '''
        Returns True if any fields exist in the launch config that are
        not permissions exclusions
        (has to exist because of callback relaunch exception)
        '''
        return self._has_user_prompts(template, only_unprompted=False)

    def has_unprompted(self, template):
        '''
        returns False if the template has set ask_ fields to False after
        returns True if the template has set ask_ fields to False after
        launching with those prompts
        '''
        return self._has_user_prompts(template, only_unprompted=True)

    def _has_user_prompts(self, template, only_unprompted=True):
        prompts = self.prompts_dict()
        ask_mapping = template.get_ask_mapping()
        if template.survey_enabled and (not template.ask_variables_on_launch):
            ask_mapping.pop('extra_vars')
            provided_vars = set(prompts['extra_vars'].keys())
            provided_vars = set(prompts.get('extra_vars', {}).keys())
            survey_vars = set(
                element.get('variable') for element in
                template.survey_spec.get('spec', {}) if 'variable' in element
            )
            if provided_vars - survey_vars:
            if (provided_vars and not only_unprompted) or (provided_vars - survey_vars):
                return True
        for field_name, ask_field_name in ask_mapping.items():
            if field_name in prompts and not getattr(template, ask_field_name):
            if field_name in prompts and not (getattr(template, ask_field_name) and only_unprompted):
                if field_name == 'limit' and self.job and self.job.launch_type == 'callback':
                    continue  # exception for relaunching callbacks
                return True
        else:
            return False
@ -1019,7 +1033,8 @@ class JobHostSummary(CreatedModifiedModel):
    failed = models.BooleanField(default=False, editable=False)

    def __unicode__(self):
        hostname = self.host.name if self.host else 'N/A'
        host = getattr_dne(self, 'host')
        hostname = host.name if host else 'N/A'
        return '%s changed=%d dark=%d failures=%d ok=%d processed=%d skipped=%s' % \
            (hostname, self.changed, self.dark, self.failures, self.ok,
             self.processed, self.skipped)
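
getattr_dne, imported above, reads as a DoesNotExist-safe getattr: dereferencing a ForeignKey whose target row has been deleted can raise RelatedObjectDoesNotExist rather than return None, which is what the old self.host access could trip over. A sketch of what such a helper plausibly looks like; this is an assumption about its behavior, not the actual awx.main.utils source:

from django.core.exceptions import ObjectDoesNotExist


def getattr_dne(obj, name):
    """Return getattr(obj, name), or None if the related object is gone."""
    try:
        return getattr(obj, name)
    except ObjectDoesNotExist:  # RelatedObjectDoesNotExist subclasses this
        return None
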
@ -436,7 +436,8 @@ class CustomVirtualEnvMixin(models.Model):
        blank=True,
        null=True,
        default=None,
        max_length=100
        max_length=100,
        help_text=_('Local absolute file path containing a custom Python virtualenv to use')
    )

    def clean_custom_virtualenv(self):
@ -465,7 +466,7 @@ class RelatedJobsMixin(object):
        return self._get_related_jobs().filter(status__in=ACTIVE_STATES)

    '''
    Returns [{'id': '1', 'type': 'job'}, {'id': 2, 'type': 'project_update'}, ...]
    Returns [{'id': 1, 'type': 'job'}, {'id': 2, 'type': 'project_update'}, ...]
    '''
    def get_active_jobs(self):
        UnifiedJob = apps.get_model('main', 'UnifiedJob')
@ -474,5 +475,5 @@ class RelatedJobsMixin(object):
        if not isinstance(jobs, QuerySet):
            raise RuntimeError("Programmer error. Expected _get_active_jobs() to return a QuerySet.")

        return [dict(id=str(t[0]), type=mapping[t[1]]) for t in jobs.values_list('id', 'polymorphic_ctype_id')]
        return [dict(id=t[0], type=mapping[t[1]]) for t in jobs.values_list('id', 'polymorphic_ctype_id')]
@ -11,7 +11,9 @@ from django.conf import settings
# Django OAuth Toolkit
from oauth2_provider.models import AbstractApplication, AbstractAccessToken
from oauth2_provider.generators import generate_client_secret
from oauthlib import oauth2

from awx.main.utils import get_external_account
from awx.main.fields import OAuth2ClientSecretField


@ -25,6 +27,7 @@ class OAuth2Application(AbstractApplication):
    class Meta:
        app_label = 'main'
        verbose_name = _('application')
        unique_together = (("name", "organization"),)

    CLIENT_CONFIDENTIAL = "confidential"
    CLIENT_PUBLIC = "public"
@ -36,12 +39,10 @@ class OAuth2Application(AbstractApplication):
    GRANT_AUTHORIZATION_CODE = "authorization-code"
    GRANT_IMPLICIT = "implicit"
    GRANT_PASSWORD = "password"
    GRANT_CLIENT_CREDENTIALS = "client-credentials"
    GRANT_TYPES = (
        (GRANT_AUTHORIZATION_CODE, _("Authorization code")),
        (GRANT_IMPLICIT, _("Implicit")),
        (GRANT_PASSWORD, _("Resource owner password-based")),
        (GRANT_CLIENT_CREDENTIALS, _("Client credentials")),
    )

    description = models.TextField(
@ -109,8 +110,13 @@ class OAuth2AccessToken(AbstractAccessToken):
    )
    scope = models.TextField(
        blank=True,
        default='write',
        help_text=_('Allowed scopes, further restricts user\'s permissions. Must be a simple space-separated string with allowed scopes [\'read\', \'write\'].')
    )
    modified = models.DateTimeField(
        editable=False,
        auto_now=True
    )

    def is_valid(self, scopes=None):
        valid = super(OAuth2AccessToken, self).is_valid(scopes)
@ -119,3 +125,11 @@ class OAuth2AccessToken(AbstractAccessToken):
            self.save(update_fields=['last_used'])
        return valid

    def save(self, *args, **kwargs):
        if self.user and settings.ALLOW_OAUTH2_FOR_EXTERNAL_USERS is False:
            external_account = get_external_account(self.user)
            if external_account is not None:
                raise oauth2.AccessDeniedError(_(
                    'OAuth2 Tokens cannot be created by users associated with an external authentication provider ({})'
                ).format(external_account))
        super(OAuth2AccessToken, self).save(*args, **kwargs)
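
The new save() is a write-time guard: when ALLOW_OAUTH2_FOR_EXTERNAL_USERS is explicitly False, token creation is refused for any user whose account is backed by an external authentication provider. The same guard shape in isolation (stand-in names, not the AWX models):

class AccessDenied(Exception):
    pass


def guard_token_creation(user, allow_external, external_provider_of):
    # mirrors the check above; note the explicit `is False`, so an unset
    # (None) setting does not trigger the guard
    if user and allow_external is False:
        provider = external_provider_of(user)  # e.g. 'ldap' or None
        if provider is not None:
            raise AccessDenied('token creation blocked for {} user'.format(provider))


guard_token_creation(user=object(), allow_external=False,
                     external_provider_of=lambda u: None)  # local user: allowed
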
@ -60,16 +60,21 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
    notification_admin_role = ImplicitRoleField(
        parent_role='admin_role',
    )
    job_template_admin_role = ImplicitRoleField(
        parent_role='admin_role',
    )
    auditor_role = ImplicitRoleField(
        parent_role='singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
    )
    member_role = ImplicitRoleField(
        parent_role=['admin_role', 'execute_role', 'project_admin_role',
                     'inventory_admin_role', 'workflow_admin_role',
                     'notification_admin_role', 'credential_admin_role']
        parent_role=['admin_role']
    )
    read_role = ImplicitRoleField(
        parent_role=['member_role', 'auditor_role'],
        parent_role=['member_role', 'auditor_role',
                     'execute_role', 'project_admin_role',
                     'inventory_admin_role', 'workflow_admin_role',
                     'notification_admin_role', 'credential_admin_role',
                     'job_template_admin_role',],
    )
@ -324,13 +324,9 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
            ['name', 'description', 'schedule']
        )

    def __init__(self, *args, **kwargs):
        r = super(Project, self).__init__(*args, **kwargs)
        self._prior_values_store = self._current_sensitive_fields()
        return r

    def save(self, *args, **kwargs):
        new_instance = not bool(self.pk)
        pre_save_vals = getattr(self, '_prior_values_store', {})
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
@ -361,21 +357,13 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
            self.save(update_fields=update_fields)
        # If we just created a new project with SCM, start the initial update.
        # also update if certain fields have changed
        relevant_change = False
        new_values = self._current_sensitive_fields()
        if hasattr(self, '_prior_values_store') and self._prior_values_store != new_values:
            relevant_change = True
        self._prior_values_store = new_values
        relevant_change = any(
            pre_save_vals.get(fd_name, None) != self._prior_values_store.get(fd_name, None)
            for fd_name in self.FIELDS_TRIGGER_UPDATE
        )
        if (relevant_change or new_instance) and (not skip_update) and self.scm_type:
            self.update()

    def _current_sensitive_fields(self):
        new_values = {}
        for attr, val in self.__dict__.items():
            if attr in Project.FIELDS_TRIGGER_UPDATE:
                new_values[attr] = val
        return new_values

    def _get_current_status(self):
        if self.scm_type:
            if self.current_job and self.current_job.status:
@ -40,6 +40,7 @@ role_names = {
    'project_admin_role': _('Project Admin'),
    'inventory_admin_role': _('Inventory Admin'),
    'credential_admin_role': _('Credential Admin'),
    'job_template_admin_role': _('Job Template Admin'),
    'workflow_admin_role': _('Workflow Admin'),
    'notification_admin_role': _('Notification Admin'),
    'auditor_role': _('Auditor'),
@ -58,6 +59,7 @@ role_descriptions = {
    'project_admin_role': _('Can manage all projects of the %s'),
    'inventory_admin_role': _('Can manage all inventories of the %s'),
    'credential_admin_role': _('Can manage all credentials of the %s'),
    'job_template_admin_role': _('Can manage all job templates of the %s'),
    'workflow_admin_role': _('Can manage all workflows of the %s'),
    'notification_admin_role': _('Can manage all notifications of the %s'),
    'auditor_role': _('Can view all settings for the %s'),
@ -36,7 +36,7 @@ from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
|
||||
from awx.main.utils import (
|
||||
encrypt_dict, decrypt_field, _inventory_updates,
|
||||
copy_model_by_class, copy_m2m_relationships,
|
||||
get_type_for_model, parse_yaml_or_json
|
||||
get_type_for_model, parse_yaml_or_json, getattr_dne
|
||||
)
|
||||
from awx.main.utils import polymorphic
|
||||
from awx.main.constants import ACTIVE_STATES, CAN_CANCEL
|
||||
@ -507,7 +507,8 @@ class StdoutMaxBytesExceeded(Exception):
|
||||
self.supported = supported
|
||||
|
||||
|
||||
class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique, UnifiedJobTypeStringMixin, TaskManagerUnifiedJobMixin):
|
||||
class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique,
|
||||
UnifiedJobTypeStringMixin, TaskManagerUnifiedJobMixin):
|
||||
'''
|
||||
Concrete base class for unified job run by the task engine.
|
||||
'''
|
||||
@ -571,6 +572,12 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
editable=False,
|
||||
help_text=_("The node the job executed on."),
|
||||
)
|
||||
controller_node = models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
editable=False,
|
||||
help_text=_("The instance that managed the isolated execution environment."),
|
||||
)
|
||||
notifications = models.ManyToManyField(
|
||||
'Notification',
|
||||
editable=False,
|
||||
@ -814,7 +821,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
# Done.
|
||||
return result
|
||||
|
||||
def copy_unified_job(self, limit=None):
|
||||
def copy_unified_job(self, _eager_fields=None, **new_prompts):
|
||||
'''
|
||||
Returns saved object, including related fields.
|
||||
Create a copy of this unified job for the purpose of relaunch
|
||||
@ -824,12 +831,14 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
parent_field_name = unified_job_class._get_parent_field_name()
|
||||
fields = unified_jt_class._get_unified_job_field_names() | set([parent_field_name])
|
||||
|
||||
create_data = {"launch_type": "relaunch"}
|
||||
if limit:
|
||||
create_data["limit"] = limit
|
||||
create_data = {}
|
||||
if _eager_fields:
|
||||
create_data = _eager_fields.copy()
|
||||
create_data["launch_type"] = "relaunch"
|
||||
|
||||
prompts = self.launch_prompts()
|
||||
if self.unified_job_template and prompts:
|
||||
if self.unified_job_template and (prompts is not None):
|
||||
prompts.update(new_prompts)
|
||||
prompts['_eager_fields'] = create_data
|
||||
unified_job = self.unified_job_template.create_unified_job(**prompts)
|
||||
else:
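
The rewritten copy_unified_job drops the single-purpose limit keyword in favor of _eager_fields plus arbitrary prompt overrides, so a relaunch can both pin fields directly on the new job row and re-apply launch-time prompts. A hedged usage sketch; the prompt and field names here are illustrative assumptions:

# Sketch: relaunching, assuming `job` is a saved UnifiedJob whose template
# accepts a `limit` prompt (hypothetical for this illustration).
new_job = job.copy_unified_job(limit='webservers')

# Sketch: pinning a field on the copy itself instead of prompting for it.
new_job = job.copy_unified_job(_eager_fields={'job_type': 'check'})
# Either way, the copy is created with launch_type='relaunch'.
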
@ -1226,17 +1235,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
raise RuntimeError("Expected celery_task_id to be set on model.")
kwargs['task_id'] = self.celery_task_id
task_class = self._get_task_class()
from awx.main.models.ha import InstanceGroup
ig = InstanceGroup.objects.get(name=queue)
args = [self.pk]
if ig.controller_id:
if self.supports_isolation(): # case of jobs and ad hoc commands
isolated_instance = ig.instances.order_by('-capacity').first()
args.append(isolated_instance.hostname)
else: # proj & inv updates, system jobs run on controller
queue = ig.controller.name
kwargs['queue'] = queue
task_class().apply_async(args, opts, **kwargs)
task_class().apply_async([self.pk], opts, **kwargs)

def start(self, error_callback, success_callback, **kwargs):
'''
@ -1376,25 +1376,34 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
for name in ('awx', 'tower'):
r['{}_job_id'.format(name)] = self.pk
r['{}_job_launch_type'.format(name)] = self.launch_type
if self.created_by:
for name in ('awx', 'tower'):
r['{}_user_id'.format(name)] = self.created_by.pk
r['{}_user_name'.format(name)] = self.created_by.username
r['{}_user_email'.format(name)] = self.created_by.email
r['{}_user_first_name'.format(name)] = self.created_by.first_name
r['{}_user_last_name'.format(name)] = self.created_by.last_name
else:

created_by = getattr_dne(self, 'created_by')

if not created_by:
wj = self.get_workflow_job()
if wj:
for name in ('awx', 'tower'):
r['{}_workflow_job_id'.format(name)] = wj.pk
r['{}_workflow_job_name'.format(name)] = wj.name
if wj.created_by:
for name in ('awx', 'tower'):
r['{}_user_id'.format(name)] = wj.created_by.pk
r['{}_user_name'.format(name)] = wj.created_by.username
if self.schedule:
created_by = getattr_dne(wj, 'created_by')

schedule = getattr_dne(self, 'schedule')
if schedule:
for name in ('awx', 'tower'):
r['{}_schedule_id'.format(name)] = self.schedule.pk
r['{}_schedule_name'.format(name)] = self.schedule.name
r['{}_schedule_id'.format(name)] = schedule.pk
r['{}_schedule_name'.format(name)] = schedule.name

if created_by:
for name in ('awx', 'tower'):
r['{}_user_id'.format(name)] = created_by.pk
r['{}_user_name'.format(name)] = created_by.username
r['{}_user_email'.format(name)] = created_by.email
r['{}_user_first_name'.format(name)] = created_by.first_name
r['{}_user_last_name'.format(name)] = created_by.last_name
return r

def get_celery_queue_name(self):
return self.controller_node or self.execution_node or settings.CELERY_DEFAULT_QUEUE

def is_isolated(self):
return bool(self.controller_node)
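
The two new helpers above give the dispatcher a single fallback chain: the isolated job's controller node wins, then the execution node, then the cluster-wide default queue, and a job counts as isolated exactly when a controller node is recorded. The same logic in standalone form, with placeholder values standing in for the model fields and Django settings:

# Sketch of the fallback chain; the hostnames and the default are
# placeholders, not real configuration.
def queue_name(controller_node, execution_node, default='tower'):
    # isolated jobs route through their controller; ordinary jobs go to
    # the node they run on; anything else lands on the default queue
    return controller_node or execution_node or default

assert queue_name('', 'awx-2') == 'awx-2'
assert queue_name('ctrl-1', 'iso-1') == 'ctrl-1'
assert queue_name('', '') == 'tower'
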

@ -288,7 +288,8 @@ class WorkflowJobOptions(BaseModel):

def create_relaunch_workflow_job(self):
new_workflow_job = self.copy_unified_job()
new_workflow_job.copy_nodes_from_original(original=self)
if self.workflow_job_template is None:
new_workflow_job.copy_nodes_from_original(original=self)
return new_workflow_job


@ -370,23 +371,28 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
return workflow_job

def _accept_or_ignore_job_kwargs(self, _exclude_errors=(), **kwargs):
prompted_fields = {}
rejected_fields = {}
accepted_vars, rejected_vars, errors_dict = self.accept_or_ignore_variables(kwargs.get('extra_vars', {}))
exclude_errors = kwargs.pop('_exclude_errors', [])
prompted_data = {}
rejected_data = {}
accepted_vars, rejected_vars, errors_dict = self.accept_or_ignore_variables(
kwargs.get('extra_vars', {}),
_exclude_errors=exclude_errors,
extra_passwords=kwargs.get('survey_passwords', {}))
if accepted_vars:
prompted_fields['extra_vars'] = accepted_vars
prompted_data['extra_vars'] = accepted_vars
if rejected_vars:
rejected_fields['extra_vars'] = rejected_vars
rejected_data['extra_vars'] = rejected_vars

# WFJTs do not behave like JTs; they cannot accept inventory, credential, etc.
bad_kwargs = kwargs.copy()
bad_kwargs.pop('extra_vars', None)
bad_kwargs.pop('survey_passwords', None)
if bad_kwargs:
rejected_fields.update(bad_kwargs)
rejected_data.update(bad_kwargs)
for field in bad_kwargs:
errors_dict[field] = _('Field is not allowed for use in workflows.')

return prompted_fields, rejected_fields, errors_dict
return prompted_data, rejected_data, errors_dict

def can_start_without_user_input(self):
return not bool(self.variables_needed_to_start)

@ -1,5 +1,4 @@
from channels.routing import route
from awx.network_ui.routing import channel_routing as network_ui_routing


channel_routing = [
@ -7,6 +6,3 @@ channel_routing = [
route("websocket.disconnect", "awx.main.consumers.ws_disconnect", path=r'^/websocket/$'),
route("websocket.receive", "awx.main.consumers.ws_receive", path=r'^/websocket/$'),
]


channel_routing += network_ui_routing

@ -69,6 +69,7 @@ class WorkflowDAG(SimpleDAG):
job = obj.job

if obj.unified_job_template is None:
is_failed = True
continue
elif not job:
return False, False

@ -7,6 +7,7 @@ import logging
import uuid
import json
import six
import random
from sets import Set

# Django
@ -234,15 +235,26 @@ class TaskManager():
def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
return [{'type': j.model_to_str(), 'id': j.id} for j in job_obj.dependent_jobs.all()]

def start_task(self, task, rampart_group, dependent_tasks=[]):
def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
from awx.main.tasks import handle_work_error, handle_work_success

dependent_tasks = dependent_tasks or []

task_actual = {
'type': get_type_for_model(type(task)),
'id': task.id,
}
dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

controller_node = None
if task.supports_isolation() and rampart_group.controller_id:
try:
controller_node = rampart_group.choose_online_controller_node()
except IndexError:
logger.debug(six.text_type("No controllers available in group {} to run {}").format(
rampart_group.name, task.log_format))
return

error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies)
success_handler = handle_work_success.s(task_actual=task_actual)
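
Note the signature change from dependent_tasks=[] to dependent_tasks=None above: a mutable default is evaluated once at function definition time, so mutating it leaks state across calls. A minimal demonstration of the pitfall and the fix used here:

# The pitfall: the same list object is shared by every call.
def buggy(item, acc=[]):
    acc.append(item)
    return acc

assert buggy(1) == [1]
assert buggy(2) == [1, 2]   # surprise: state leaked from the first call

# The fix: default to None and allocate a fresh list per call.
def fixed(item, acc=None):
    acc = acc or []
    acc.append(item)
    return acc

assert fixed(1) == [1]
assert fixed(2) == [2]
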

@ -263,11 +275,21 @@ class TaskManager():
elif not task.supports_isolation() and rampart_group.controller_id:
# non-Ansible jobs on isolated instances run on controller
task.instance_group = rampart_group.controller
logger.info('Submitting isolated %s to queue %s via %s.',
task.log_format, task.instance_group_id, rampart_group.controller_id)
task.execution_node = random.choice(list(rampart_group.controller.instances.all().values_list('hostname', flat=True)))
logger.info(six.text_type('Submitting isolated {} to queue {}.').format(
task.log_format, task.instance_group.name, task.execution_node))
elif controller_node:
task.instance_group = rampart_group
task.execution_node = instance.hostname
task.controller_node = controller_node
logger.info(six.text_type('Submitting isolated {} to queue {} controlled by {}.').format(
task.log_format, task.execution_node, controller_node))
else:
task.instance_group = rampart_group
logger.info('Submitting %s to instance group %s.', task.log_format, task.instance_group_id)
if instance is not None:
task.execution_node = instance.hostname
logger.info(six.text_type('Submitting {} to <instance group, instance> <{},{}>.').format(
task.log_format, task.instance_group_id, task.execution_node))
with disable_activity_stream():
task.celery_task_id = str(uuid.uuid4())
task.save()
@ -278,11 +300,10 @@ class TaskManager():
def post_commit():
task.websocket_emit_status(task.status)
if task.status != 'failed':
if rampart_group is not None:
actual_queue=rampart_group.name
else:
actual_queue=settings.CELERY_DEFAULT_QUEUE
task.start_celery_task(opts, error_callback=error_handler, success_callback=success_handler, queue=actual_queue)
task.start_celery_task(opts,
error_callback=error_handler,
success_callback=success_handler,
queue=task.get_celery_queue_name())

connection.on_commit(post_commit)

@ -431,21 +452,37 @@ class TaskManager():
continue
preferred_instance_groups = task.preferred_instance_groups
found_acceptable_queue = False
idle_instance_that_fits = None
for rampart_group in preferred_instance_groups:
if idle_instance_that_fits is None:
idle_instance_that_fits = rampart_group.find_largest_idle_instance()
if self.get_remaining_capacity(rampart_group.name) <= 0:
logger.debug(six.text_type("Skipping group {} capacity <= 0").format(rampart_group.name))
continue
if not self.would_exceed_capacity(task, rampart_group.name):
logger.debug(six.text_type("Starting dependent {} in group {}").format(task.log_format, rampart_group.name))

execution_instance = rampart_group.fit_task_to_most_remaining_capacity_instance(task)
if execution_instance:
logger.debug(six.text_type("Starting dependent {} in group {} instance {}").format(
task.log_format, rampart_group.name, execution_instance.hostname))
elif not execution_instance and idle_instance_that_fits:
execution_instance = idle_instance_that_fits
logger.debug(six.text_type("Starting dependent {} in group {} on idle instance {}").format(
task.log_format, rampart_group.name, execution_instance.hostname))
if execution_instance:
self.graph[rampart_group.name]['graph'].add_job(task)
tasks_to_fail = filter(lambda t: t != task, dependency_tasks)
tasks_to_fail += [dependent_task]
self.start_task(task, rampart_group, tasks_to_fail)
self.start_task(task, rampart_group, tasks_to_fail, execution_instance)
found_acceptable_queue = True
break
else:
logger.debug(six.text_type("No instance available in group {} to run job {} w/ capacity requirement {}").format(
rampart_group.name, task.log_format, task.task_impact))
if not found_acceptable_queue:
logger.debug(six.text_type("Dependent {} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))

def process_pending_tasks(self, pending_tasks):
running_workflow_templates = set([wf.workflow_job_template_id for wf in self.get_running_workflow_jobs()])
for task in pending_tasks:
self.process_dependencies(task, self.generate_dependencies(task))
if self.is_job_blocked(task):
@ -453,25 +490,41 @@ class TaskManager():
continue
preferred_instance_groups = task.preferred_instance_groups
found_acceptable_queue = False
idle_instance_that_fits = None
if isinstance(task, WorkflowJob):
self.start_task(task, None, task.get_jobs_fail_chain())
if task.workflow_job_template_id in running_workflow_templates:
if not task.allow_simultaneous:
logger.debug(six.text_type("{} is blocked from running, workflow already running").format(task.log_format))
continue
else:
running_workflow_templates.add(task.workflow_job_template_id)
self.start_task(task, None, task.get_jobs_fail_chain(), None)
continue
for rampart_group in preferred_instance_groups:
if idle_instance_that_fits is None:
idle_instance_that_fits = rampart_group.find_largest_idle_instance()
remaining_capacity = self.get_remaining_capacity(rampart_group.name)
if remaining_capacity <= 0:
logger.debug(six.text_type("Skipping group {}, remaining_capacity {} <= 0").format(
rampart_group.name, remaining_capacity))
continue
if not self.would_exceed_capacity(task, rampart_group.name):
logger.debug(six.text_type("Starting {} in group {} (remaining_capacity={})").format(
task.log_format, rampart_group.name, remaining_capacity))

execution_instance = rampart_group.fit_task_to_most_remaining_capacity_instance(task)
if execution_instance:
logger.debug(six.text_type("Starting {} in group {} instance {} (remaining_capacity={})").format(
task.log_format, rampart_group.name, execution_instance.hostname, remaining_capacity))
elif not execution_instance and idle_instance_that_fits:
execution_instance = idle_instance_that_fits
logger.debug(six.text_type("Starting {} in group {} instance {} (remaining_capacity={})").format(
task.log_format, rampart_group.name, execution_instance.hostname, remaining_capacity))
if execution_instance:
self.graph[rampart_group.name]['graph'].add_job(task)
self.start_task(task, rampart_group, task.get_jobs_fail_chain())
self.start_task(task, rampart_group, task.get_jobs_fail_chain(), execution_instance)
found_acceptable_queue = True
break
else:
logger.debug(six.text_type("Not enough capacity to run {} on {} (remaining_capacity={})").format(
task.log_format, rampart_group.name, remaining_capacity))
logger.debug(six.text_type("No instance available in group {} to run job {} w/ capacity requirement {}").format(
rampart_group.name, task.log_format, task.task_impact))
if not found_acceptable_queue:
logger.debug(six.text_type("{} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))


@ -6,11 +6,13 @@ import contextlib
import logging
import threading
import json
import pkg_resources
import sys

# Django
from django.conf import settings
from django.db.models.signals import (
post_init,
pre_save,
post_save,
pre_delete,
post_delete,
@ -18,6 +20,7 @@ from django.db.models.signals import (
)
from django.dispatch import receiver
from django.contrib.auth import SESSION_KEY
from django.contrib.sessions.models import Session
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _

@ -29,10 +32,9 @@ import six

# AWX
from awx.main.models import * # noqa
from django.contrib.sessions.models import Session
from awx.api.serializers import * # noqa
from awx.main.constants import TOKEN_CENSOR
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore
from awx.main.constants import CENSOR_VALUE
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps
from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates
from awx.main.tasks import update_inventory_computed_fields
from awx.main.fields import (
@ -52,6 +54,13 @@ logger = logging.getLogger('awx.main.signals')
# when a Host-Group or Group-Group relationship is updated, or when a Job is deleted


def get_activity_stream_class():
if 'migrate' in sys.argv:
return get_current_apps().get_model('main', 'ActivityStream')
else:
return ActivityStream
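
get_activity_stream_class guards the signal handlers during manage.py migrate: while migrations run, the historical model from the app registry must be used instead of the concrete ActivityStream class, whose schema may be ahead of what the database actually has. The same pattern in generic form; the helper below is a sketch, not the AWX implementation:

# Sketch of choosing a historical model during migrations. `apps` stands in
# for a migration-state app registry (e.g. a migration's `apps` argument).
import sys

def model_for_signals(apps, concrete_model, app_label, model_name):
    if 'migrate' in sys.argv:
        # historical model: matches the schema the database actually has
        return apps.get_model(app_label, model_name)
    return concrete_model
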


def get_current_user_or_none():
u = get_current_user()
if not isinstance(u, User):
@ -200,14 +209,6 @@ def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs):
l.delete()


def set_original_organization(sender, instance, **kwargs):
'''set_original_organization is used to set the original, or
pre-save organization, so we can later determine if the organization
field is dirty.
'''
instance.__original_org_id = instance.organization_id


def save_related_job_templates(sender, instance, **kwargs):
'''save_related_job_templates loops through all of the
job templates that use an Inventory or Project that have had their
@ -217,7 +218,7 @@ def save_related_job_templates(sender, instance, **kwargs):
if sender not in (Project, Inventory):
raise ValueError('This signal callback is only intended for use with Project or Inventory')

if instance.__original_org_id != instance.organization_id:
if instance._prior_values_store.get('organization_id') != instance.organization_id:
jtq = JobTemplate.objects.filter(**{sender.__name__.lower(): instance})
for jt in jtq:
update_role_parentage_for_instance(jt)
@ -240,8 +241,6 @@ def connect_computed_field_signals():

connect_computed_field_signals()

post_init.connect(set_original_organization, sender=Project)
post_init.connect(set_original_organization, sender=Inventory)
post_save.connect(save_related_job_templates, sender=Project)
post_save.connect(save_related_job_templates, sender=Inventory)
post_save.connect(emit_job_event_detail, sender=JobEvent)
@ -391,6 +390,7 @@ model_serializer_mapping = {
Inventory: InventorySerializer,
Host: HostSerializer,
Group: GroupSerializer,
InstanceGroup: InstanceGroupSerializer,
InventorySource: InventorySourceSerializer,
CustomInventoryScript: CustomInventoryScriptSerializer,
Credential: CredentialSerializer,
@ -428,8 +428,8 @@ def activity_stream_create(sender, instance, created, **kwargs):
if 'extra_vars' in changes:
changes['extra_vars'] = instance.display_extra_vars()
if type(instance) == OAuth2AccessToken:
changes['token'] = TOKEN_CENSOR
activity_entry = ActivityStream(
changes['token'] = CENSOR_VALUE
activity_entry = get_activity_stream_class()(
operation='create',
object1=object1,
changes=json.dumps(changes),
@ -439,7 +439,7 @@ def activity_stream_create(sender, instance, created, **kwargs):
# we don't really use them anyway.
if instance._meta.model_name != 'setting': # Is not conf.Setting instance
activity_entry.save()
getattr(activity_entry, object1).add(instance)
getattr(activity_entry, object1).add(instance.pk)
else:
activity_entry.setting = conf_to_dict(instance)
activity_entry.save()
@ -463,14 +463,14 @@ def activity_stream_update(sender, instance, **kwargs):
if getattr(_type, '_deferred', False):
return
object1 = camelcase_to_underscore(instance.__class__.__name__)
activity_entry = ActivityStream(
activity_entry = get_activity_stream_class()(
operation='update',
object1=object1,
changes=json.dumps(changes),
actor=get_current_user_or_none())
if instance._meta.model_name != 'setting': # Is not conf.Setting instance
activity_entry.save()
getattr(activity_entry, object1).add(instance)
getattr(activity_entry, object1).add(instance.pk)
else:
activity_entry.setting = conf_to_dict(instance)
activity_entry.save()
@ -495,8 +495,8 @@ def activity_stream_delete(sender, instance, **kwargs):
changes = model_to_dict(instance)
object1 = camelcase_to_underscore(instance.__class__.__name__)
if type(instance) == OAuth2AccessToken:
changes['token'] = TOKEN_CENSOR
activity_entry = ActivityStream(
changes['token'] = CENSOR_VALUE
activity_entry = get_activity_stream_class()(
operation='delete',
changes=json.dumps(changes),
object1=object1,
@ -543,7 +543,7 @@ def activity_stream_associate(sender, instance, **kwargs):
continue
if isinstance(obj1, SystemJob) or isinstance(obj2_actual, SystemJob):
continue
activity_entry = ActivityStream(
activity_entry = get_activity_stream_class()(
changes=json.dumps(dict(object1=object1,
object1_pk=obj1.pk,
object2=object2,
@ -556,8 +556,8 @@ def activity_stream_associate(sender, instance, **kwargs):
object_relationship_type=obj_rel,
actor=get_current_user_or_none())
activity_entry.save()
getattr(activity_entry, object1).add(obj1)
getattr(activity_entry, object2).add(obj2_actual)
getattr(activity_entry, object1).add(obj1.pk)
getattr(activity_entry, object2).add(obj2_actual.pk)

# Record the role for RBAC changes
if 'role' in kwargs:
@ -603,6 +603,16 @@ def delete_inventory_for_org(sender, instance, **kwargs):
@receiver(post_save, sender=Session)
def save_user_session_membership(sender, **kwargs):
session = kwargs.get('instance', None)
if pkg_resources.get_distribution('channels').version >= '2':
# If you get into this code block, it means we upgraded channels, but
# didn't make the settings.SESSIONS_PER_USER feature work
raise RuntimeError(
'save_user_session_membership must be updated for channels>=2: '
'http://channels.readthedocs.io/en/latest/one-to-two.html#requirements'
)
if 'runworker' in sys.argv:
# don't track user session membership for websocket per-channel sessions
return
if not session:
return
user = session.get_decoded().get(SESSION_KEY, None)
@ -611,13 +621,15 @@ def save_user_session_membership(sender, **kwargs):
user = User.objects.get(pk=user)
if UserSessionMembership.objects.filter(user=user, session=session).exists():
return
UserSessionMembership.objects.create(user=user, session=session, created=timezone.now())
for membership in UserSessionMembership.get_memberships_over_limit(user):
UserSessionMembership(user=user, session=session, created=timezone.now()).save()
expired = UserSessionMembership.get_memberships_over_limit(user)
for membership in expired:
Session.objects.filter(session_key__in=[membership.session_id]).delete()
membership.delete()
if len(expired):
consumers.emit_channel_notification(
'control-limit_reached',
dict(group_name='control',
reason=unicode(_('limit_reached')),
session_key=membership.session.session_key)
'control-limit_reached_{}'.format(user.pk),
dict(group_name='control', reason=unicode(_('limit_reached')))
)
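
The rewritten handler enforces the per-user session limit by creating the membership row, expiring whatever get_memberships_over_limit returns, and emitting one per-user control message instead of one per expired session. Roughly, the eviction step looks like the sketch below, with a plain list standing in for the queryset (the real code also deletes the Django Session rows):

# Sketch of the over-limit eviction; dicts stand in for membership rows.
def enforce_limit(memberships, limit):
    # oldest first; keep only the newest `limit` sessions
    ordered = sorted(memberships, key=lambda m: m['created'])
    expired = ordered[:max(len(ordered) - limit, 0)]
    return expired  # caller deletes these and notifies the user once

sessions = [{'key': 'a', 'created': 1}, {'key': 'b', 'created': 2},
            {'key': 'c', 'created': 3}]
assert [m['key'] for m in enforce_limit(sessions, 2)] == ['a']
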


@ -631,3 +643,7 @@ def create_access_token_user_if_missing(sender, **kwargs):
post_save.connect(create_access_token_user_if_missing, sender=OAuth2AccessToken)


# Connect the Instance Group to Activity Stream receivers.
post_save.connect(activity_stream_create, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_create")
pre_save.connect(activity_stream_update, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_update")
pre_delete.connect(activity_stream_delete, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_delete")

@ -1,65 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

import base64
from cStringIO import StringIO

from django.core import files
from django.core.files.storage import Storage


class DatabaseStorage(Storage):
"""A class for storing uploaded files into the database, rather than
on the filesystem.
"""
def __init__(self, model):
self.model = model

def _open(self, name, mode='rb'):
try:
f = self.model.objects.get(filename=name)
except self.model.DoesNotExist:
return None
fh = StringIO(base64.b64decode(f.contents))
fh.name = name
fh.mode = mode
fh.size = f.size
return files.File(fh)

def _save(self, name, content):
try:
file_ = self.model.objects.get(filename=name)
except self.model.DoesNotExist:
file_ = self.model(filename=name)
file_.contents = base64.b64encode(content.read())
file_.save()
return name

def exists(self, name):
"""Return True if the given file already exists in the database,
or False otherwise.
"""
return bool(self.model.objects.filter(filename=name).count())

def delete(self, name):
"""Delete the file in the database, failing silently if the file
does not exist.
"""
self.model.objects.filter(filename=name).delete()

def listdir(self, path=None):
"""Return a full list of files stored in the database, ignoring
whatever may be sent to the `path` argument.
"""
filenames = [i.filename for i in self.model.order_by('filename')]
return ([], filenames)

def url(self, name):
raise NotImplementedError

def size(self, name):
"""Return the size of the given file, if it exists; raise DoesNotExist
if the file is not present.
"""
file_ = self.model.objects.get(filename=name)
return len(file_.contents)
@ -5,6 +5,7 @@
from collections import OrderedDict, namedtuple
import ConfigParser
import cStringIO
import errno
import functools
import importlib
import json
@ -28,8 +29,10 @@ except Exception:
psutil = None

# Celery
from celery import Task, shared_task, Celery
from celery.signals import celeryd_init, worker_shutdown, worker_ready, celeryd_after_setup
from kombu import Queue, Exchange
from kombu.common import Broadcast
from celery import Task, shared_task
from celery.signals import celeryd_init, worker_shutdown, celeryd_after_setup

# Django
from django.conf import settings
@ -48,7 +51,7 @@ from crum import impersonate

# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.access import access_registry
from awx.main.models import * # noqa
from awx.main.constants import ACTIVE_STATES
@ -62,7 +65,6 @@ from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field,
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.ha import register_celery_worker_queues
from awx.main.consumers import emit_channel_notification
from awx.conf import settings_registry

@ -106,8 +108,6 @@ def log_celery_failure(self, exc, task_id, args, kwargs, einfo):

@celeryd_init.connect
def celery_startup(conf=None, **kwargs):
# Re-init all schedules
# NOTE: Rework this during the Rampart work
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.info("Syncing Schedules")
for sch in Schedule.objects.all():
@ -119,6 +119,19 @@ def celery_startup(conf=None, **kwargs):
except Exception:
logger.exception(six.text_type("Failed to rebuild schedule {}.").format(sch))

# set the queues we want to bind to dynamically at startup
queues = []
me = Instance.objects.me()
for q in [me.hostname] + settings.AWX_CELERY_QUEUES_STATIC:
q = q.encode('utf-8')
queues.append(Queue(q, Exchange(q), routing_key=q))
for q in settings.AWX_CELERY_BCAST_QUEUES_STATIC:
queues.append(Broadcast(q.encode('utf-8')))
conf.CELERY_QUEUES = list(set(queues))

|
||||
cluster_node_heartbeat.apply([])
|


@worker_shutdown.connect
def inform_cluster_of_shutdown(*args, **kwargs):
@ -135,52 +148,76 @@ def inform_cluster_of_shutdown(*args, **kwargs):
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
def apply_cluster_membership_policies(self):
with advisory_lock('cluster_policy_lock', wait=True):
considered_instances = Instance.objects.all().order_by('id')
total_instances = considered_instances.count()
filtered_instances = []
all_instances = list(Instance.objects.order_by('id'))
all_groups = list(InstanceGroup.objects.all())
iso_hostnames = set([])
for ig in all_groups:
if ig.controller_id is not None:
iso_hostnames.update(ig.policy_instance_list)

considered_instances = [inst for inst in all_instances if inst.hostname not in iso_hostnames]
total_instances = len(considered_instances)
actual_groups = []
actual_instances = []
Group = namedtuple('Group', ['obj', 'instances'])
Node = namedtuple('Instance', ['obj', 'groups'])

# Process policy instance list first, these will represent manually managed instances
# that will not go through automatic policy determination
for ig in InstanceGroup.objects.all():
logger.info(six.text_type("Applying cluster membership policies to Group {}").format(ig.name))
ig.instances.clear()
# Process policy instance list first, these will represent manually managed memberships
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
for ig in all_groups:
group_actual = Group(obj=ig, instances=[])
for i in ig.policy_instance_list:
inst = Instance.objects.filter(hostname=i)
if not inst.exists():
for hostname in ig.policy_instance_list:
if hostname not in instance_hostnames_map:
continue
inst = inst[0]
inst = instance_hostnames_map[hostname]
logger.info(six.text_type("Policy List, adding Instance {} to Group {}").format(inst.hostname, ig.name))
group_actual.instances.append(inst.id)
ig.instances.add(inst)
filtered_instances.append(inst)
actual_groups.append(group_actual)
# NOTE: arguable behavior: policy-list-group is not added to
# instance's group count for consideration in minimum-policy rules

if ig.controller_id is None:
actual_groups.append(group_actual)
else:
# For isolated groups, _only_ apply the policy_instance_list
# do not add to in-memory list, so minimum rules not applied
logger.info('Committing instances {} to isolated group {}'.format(group_actual.instances, ig.name))
ig.instances.set(group_actual.instances)

# Process Instance minimum policies next, since it represents a concrete lower bound to the
# number of instances to make available to instance groups
actual_instances = [Node(obj=i, groups=[]) for i in filter(lambda x: x not in filtered_instances, considered_instances)]
logger.info("Total instances not directly associated: {}".format(total_instances))
actual_instances = [Node(obj=i, groups=[]) for i in considered_instances if i.managed_by_policy]
logger.info("Total non-isolated instances:{} available for policy: {}".format(
total_instances, len(actual_instances)))
for g in sorted(actual_groups, cmp=lambda x,y: len(x.instances) - len(y.instances)):
for i in sorted(actual_instances, cmp=lambda x,y: len(x.groups) - len(y.groups)):
if len(g.instances) >= g.obj.policy_instance_minimum:
break
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via the policy list
continue
logger.info(six.text_type("Policy minimum, adding Instance {} to Group {}").format(i.obj.hostname, g.obj.name))
g.obj.instances.add(i.obj)
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
# Finally process instance policy percentages

# Finally, process instance policy percentages
for g in sorted(actual_groups, cmp=lambda x,y: len(x.instances) - len(y.instances)):
for i in sorted(actual_instances, cmp=lambda x,y: len(x.groups) - len(y.groups)):
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via a minimum policy or policy list
continue
if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
break
logger.info(six.text_type("Policy percentage, adding Instance {} to Group {}").format(i.obj.hostname, g.obj.name))
g.instances.append(i.obj.id)
g.obj.instances.add(i.obj)
i.groups.append(g.obj.id)
handle_ha_toplogy_changes.apply([])

# On a differential basis, apply instances to non-isolated groups
with transaction.atomic():
for g in actual_groups:
logger.info('Committing instances {} to group {}'.format(g.instances, g.obj.name))
g.obj.instances.set(g.instances)
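
The rewritten policy task applies membership in three passes of decreasing priority: explicit policy_instance_list entries first (and for isolated groups, only those), then policy_instance_minimum, then policy_instance_percentage, committing each group's final membership as a single differential set() inside one transaction. The pass ordering, reduced to a skeleton with simplified dict-shaped groups:

# Skeleton of the three-pass policy application; the group/instance shapes
# are simplified stand-ins for the namedtuples used above.
def apply_policies(groups, instances):
    for g in groups:                      # pass 1: explicit policy list
        g['members'] = [i for i in instances if i in g['policy_list']]
    for g in sorted(groups, key=lambda g: len(g['members'])):
        for i in instances:               # pass 2: fill to the minimum
            if len(g['members']) >= g['minimum']:
                break
            if i not in g['members']:
                g['members'].append(i)
    for g in sorted(groups, key=lambda g: len(g['members'])):
        for i in instances:               # pass 3: fill to the percentage
            if i in g['members']:
                continue
            if 100.0 * len(g['members']) / len(instances) >= g['percentage']:
                break
            g['members'].append(i)
    return groups
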


@shared_task(exchange='tower_broadcast_all', bind=True)
@ -196,40 +233,32 @@ def handle_setting_changes(self, setting_keys):
cache.delete_many(cache_keys)


@shared_task(bind=True, exchange='tower_broadcast_all')
def handle_ha_toplogy_changes(self):
(changed, instance) = Instance.objects.get_or_register()
if changed:
logger.info(six.text_type("Registered tower node '{}'").format(instance.hostname))
logger.debug(six.text_type("Reconfigure celeryd queues task on host {}").format(self.request.hostname))
awx_app = Celery('awx')
awx_app.config_from_object('django.conf:settings')
instances, removed_queues, added_queues = register_celery_worker_queues(awx_app, self.request.hostname)
if len(removed_queues) + len(added_queues) > 0:
logger.info(six.text_type("Workers on tower node(s) '{}' removed from queues {} and added to queues {}")
.format([i.hostname for i in instances], removed_queues, added_queues))


@worker_ready.connect
def handle_ha_toplogy_worker_ready(sender, **kwargs):
logger.debug(six.text_type("Configure celeryd queues task on host {}").format(sender.hostname))
instances, removed_queues, added_queues = register_celery_worker_queues(sender.app, sender.hostname)
if len(removed_queues) + len(added_queues) > 0:
logger.info(six.text_type("Workers on tower node(s) '{}' removed from queues {} and added to queues {}")
.format([i.hostname for i in instances], removed_queues, added_queues))

# Expedite the first heartbeat run so a node comes online quickly.
cluster_node_heartbeat.apply([])
apply_cluster_membership_policies.apply([])


@celeryd_after_setup.connect
def handle_update_celery_hostname(sender, instance, **kwargs):
def auto_register_ha_instance(sender, instance, **kwargs):
#
# When celeryd starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, celeryd starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
if instance.hostname != 'celery@{}'.format(settings.CLUSTER_HOST_ID):
error = six.text_type('celery -n {} does not match settings.CLUSTER_HOST_ID={}').format(
instance.hostname, settings.CLUSTER_HOST_ID
)
logger.error(error)
raise RuntimeError(error)
(changed, tower_instance) = Instance.objects.get_or_register()
if changed:
logger.info(six.text_type("Registered tower node '{}'").format(tower_instance.hostname))
instance.hostname = 'celery@{}'.format(tower_instance.hostname)
logger.warn(six.text_type("Set hostname to {}").format(instance.hostname))


@shared_task(queue=settings.CELERY_DEFAULT_QUEUE)
@ -317,11 +346,9 @@ def cluster_node_heartbeat(self):
logger.warning(six.text_type('Rejoining the cluster as instance {}.').format(this_inst.hostname))
if this_inst.enabled:
this_inst.refresh_capacity()
handle_ha_toplogy_changes.apply_async()
elif this_inst.capacity != 0 and not this_inst.enabled:
this_inst.capacity = 0
this_inst.save(update_fields=['capacity'])
handle_ha_toplogy_changes.apply_async()
if startup_event:
return
else:
@ -375,7 +402,11 @@ def awx_isolated_heartbeat(self):
accept_before = nowtime - timedelta(seconds=(poll_interval - 10))
isolated_instance_qs = Instance.objects.filter(
rampart_groups__controller__instances__hostname=local_hostname,
)
isolated_instance_qs = isolated_instance_qs.filter(
last_isolated_check__lt=accept_before
) | isolated_instance_qs.filter(
last_isolated_check=None
)
# Fast pass of isolated instances, claiming the nodes to update
with transaction.atomic():
@ -418,6 +449,8 @@ def awx_periodic_scheduler(self):
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
logger.info(six.text_type('Spawned {} from schedule {}-{}.').format(
new_unified_job.log_format, schedule.name, schedule.pk))

if invalid_license:
new_unified_job.status = 'failed'
@ -860,14 +893,11 @@ class BaseTask(Task):
'''

@with_path_cleanup
def run(self, pk, isolated_host=None, **kwargs):
def run(self, pk, **kwargs):
'''
Run the job/task and capture its output.
'''
execution_node = settings.CLUSTER_HOST_ID
if isolated_host is not None:
execution_node = isolated_host
instance = self.update_model(pk, status='running', execution_node=execution_node,
instance = self.update_model(pk, status='running',
start_args='') # blank field to remove encrypted passwords

instance.websocket_emit_status("running")
@ -876,8 +906,9 @@ class BaseTask(Task):
extra_update_fields = {}
event_ct = 0
stdout_handle = None

try:
kwargs['isolated'] = isolated_host is not None
kwargs['isolated'] = instance.is_isolated()
self.pre_run_hook(instance, **kwargs)
if instance.cancel_flag:
instance = self.update_model(instance.pk, status='canceled')
@ -937,7 +968,7 @@ class BaseTask(Task):
credential, env, safe_env, args, safe_args, kwargs['private_data_dir']
)

if isolated_host is None:
if instance.is_isolated() is False:
stdout_handle = self.get_stdout_handle(instance)
else:
stdout_handle = isolated_manager.IsolatedManager.get_stdout_handle(
@ -953,7 +984,7 @@ class BaseTask(Task):
ssh_key_path = self.get_ssh_key_path(instance, **kwargs)
# If we're executing on an isolated host, don't bother adding the
# key to the agent in this environment
if ssh_key_path and isolated_host is None:
if ssh_key_path and instance.is_isolated() is False:
ssh_auth_sock = os.path.join(kwargs['private_data_dir'], 'ssh_auth.sock')
args = run.wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock)
safe_args = run.wrap_args_with_ssh_agent(safe_args, ssh_key_path, ssh_auth_sock)
@ -973,11 +1004,11 @@ class BaseTask(Task):
proot_cmd=getattr(settings, 'AWX_PROOT_CMD', 'bwrap'),
)
instance = self.update_model(instance.pk, output_replacements=output_replacements)
if isolated_host:
if instance.is_isolated() is True:
manager_instance = isolated_manager.IsolatedManager(
args, cwd, env, stdout_handle, ssh_key_path, **_kw
)
status, rc = manager_instance.run(instance, isolated_host,
status, rc = manager_instance.run(instance,
kwargs['private_data_dir'],
kwargs.get('proot_temp_dir'))
else:
@ -995,7 +1026,7 @@ class BaseTask(Task):
if stdout_handle:
stdout_handle.flush()
stdout_handle.close()
event_ct = getattr(stdout_handle, '_event_ct', 0)
event_ct = getattr(stdout_handle, '_counter', 0)
logger.info('%s finished running, producing %s events.',
instance.log_format, event_ct)
except Exception:
@ -1008,6 +1039,9 @@ class BaseTask(Task):
instance = self.update_model(pk)
if instance.cancel_flag:
status = 'canceled'
cancel_wait = (now() - instance.modified).seconds if instance.modified else 0
if cancel_wait > 5:
logger.warn(six.text_type('Request to cancel {} took {} seconds to complete.').format(instance.log_format, cancel_wait))

instance = self.update_model(pk, status=status, result_traceback=tb,
output_replacements=output_replacements,
@ -1341,7 +1375,7 @@ class RunJob(BaseTask):
job_request_id = '' if self.request.id is None else self.request.id
pu_ig = job.instance_group
pu_en = job.execution_node
if kwargs['isolated']:
if job.is_isolated() is True:
pu_ig = pu_ig.controller
pu_en = settings.CLUSTER_HOST_ID
local_project_sync = job.project.create_project_update(
@ -1682,29 +1716,37 @@ class RunProjectUpdate(BaseTask):
logger.error(six.text_type("I/O error({0}) while trying to open lock file [{1}]: {2}").format(e.errno, lock_path, e.strerror))
raise

try:
start_time = time.time()
fcntl.flock(self.lock_fd, fcntl.LOCK_EX)
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info(six.text_type(
'{} spent {} waiting to acquire lock for local source tree '
'for path {}.').format(instance.log_format, waiting_time, lock_path))
except IOError as e:
os.close(self.lock_fd)
logger.error(six.text_type("I/O error({0}) while trying to aquire lock on file [{1}]: {2}").format(e.errno, lock_path, e.strerror))
|
||||
raise
|
||||
start_time = time.time()
|
||||
while True:
|
||||
try:
|
||||
instance.refresh_from_db(fields=['cancel_flag'])
|
||||
if instance.cancel_flag:
|
||||
logger.info(six.text_type("ProjectUpdate({0}) was cancelled".format(instance.pk)))
|
||||
return
|
||||
fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
break
|
||||
except IOError as e:
|
||||
if e.errno not in (errno.EAGAIN, errno.EACCES):
|
||||
os.close(self.lock_fd)
|
||||
logger.error(six.text_type("I/O error({0}) while trying to aquire lock on file [{1}]: {2}").format(e.errno, lock_path, e.strerror))
raise
else:
time.sleep(1.0)
waiting_time = time.time() - start_time

if waiting_time > 1.0:
logger.info(six.text_type(
'{} spent {} waiting to acquire lock for local source tree '
'for path {}.').format(instance.log_format, waiting_time, lock_path))

def pre_run_hook(self, instance, **kwargs):
# re-create root project folder if a natural disaster has destroyed it
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
if instance.launch_type == 'sync':
self.acquire_lock(instance)
self.acquire_lock(instance)

def post_run_hook(self, instance, status, **kwargs):
if instance.launch_type == 'sync':
self.release_lock(instance)
self.release_lock(instance)
p = instance.project
if instance.job_type == 'check' and status not in ('failed', 'canceled',):
fd = open(self.revision_path, 'r')
@ -1977,8 +2019,7 @@ class RunInventoryUpdate(BaseTask):
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
# Always use the --export option for ansible-inventory
env['ANSIBLE_INVENTORY_EXPORT'] = str(True)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
plugin_name = inventory_update.get_inventory_plugin_name()
if plugin_name is not None:
env['ANSIBLE_INVENTORY_ENABLED'] = plugin_name
@ -2274,7 +2315,10 @@ class RunAdHocCommand(BaseTask):
args.extend(['-e', '@%s' % (extra_vars_path)])

args.extend(['-m', ad_hoc_command.module_name])
args.extend(['-a', sanitize_jinja(ad_hoc_command.module_args)])
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
args.extend(['-a', module_args])

if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)

@ -2372,6 +2416,7 @@ def deep_copy_model_obj(
):
logger.info(six.text_type('Deep copy {} from {} to {}.').format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
return
@ -2382,7 +2427,7 @@ def deep_copy_model_obj(
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return
with transaction.atomic(), ignore_inventory_computed_fields():
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:
sub_model = getattr(importlib.import_module(sub_obj_setup[0]),

@ -2,7 +2,7 @@ import json
import mock
import pytest

from awx.main.models import Credential, Job
from awx.main.models import Credential, CredentialType, Job
from awx.api.versioning import reverse


@ -151,6 +151,27 @@ def test_prevent_multiple_machine_creds(get, post, job_template, admin, machine_
assert 'Cannot assign multiple Machine credentials.' in resp.content


@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['scm', 'insights'])
def test_invalid_credential_type_at_launch(get, post, job_template, admin, kind):
cred_type = CredentialType.defaults[kind]()
cred_type.save()
cred = Credential(
name='Some Cred',
credential_type=cred_type,
inputs={
'username': 'bob',
'password': 'secret',
}
)
cred.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})

resp = post(url, {'credentials': [cred.pk]}, admin, expect=400)
assert 'Cannot assign a Credential of kind `{}`'.format(kind) in resp.data.get('credentials', [])
assert Job.objects.count() == 0


@pytest.mark.django_db
def test_prevent_multiple_machine_creds_at_launch(get, post, job_template, admin, machine_credential):
other_cred = Credential(credential_type=machine_credential.credential_type, name="Second",
@ -394,3 +415,43 @@ def test_inventory_source_invalid_deprecated_credential(patch, admin, ec2_source
url = reverse('api:inventory_source_detail', kwargs={'pk': ec2_source.pk})
resp = patch(url, {'credential': 999999}, admin, expect=400)
assert 'Credential 999999 does not exist' in resp.content


@pytest.mark.django_db
def test_deprecated_credential_activity_stream(patch, admin_user, machine_credential, job_template):
job_template.credentials.add(machine_credential)
starting_entries = job_template.activitystream_set.count()
# no-op patch
patch(
job_template.get_absolute_url(),
admin_user,
data={'credential': machine_credential.pk},
expect=200
)
# no-op should not produce activity stream entries
assert starting_entries == job_template.activitystream_set.count()


@pytest.mark.django_db
def test_multi_vault_preserved_on_put(get, put, admin_user, job_template, vault_credential):
'''
A PUT request will necessarily specify deprecated fields, but if the deprecated
field is a singleton while the `credentials` relation has many, that makes
it very easy to drop those credentials not specified in the PUT data
'''
vault2 = Credential.objects.create(
name='second-vault',
credential_type=vault_credential.credential_type,
inputs={'vault_password': 'foo', 'vault_id': 'foo'}
)
job_template.credentials.add(vault_credential, vault2)
assert job_template.credentials.count() == 2 # sanity check
r = get(job_template.get_absolute_url(), admin_user, expect=200)
# should be a no-op PUT request
put(
job_template.get_absolute_url(),
admin_user,
data=r.data,
expect=200
)
assert job_template.credentials.count() == 2

@ -106,4 +106,14 @@ def test_filterable_fields(options, instance, admin_user):
assert 'filterable' in filterable_info
assert filterable_info['filterable'] is True

assert 'filterable' not in non_filterable_info
assert not non_filterable_info['filterable']


@pytest.mark.django_db
def test_handle_content_type(post, admin):
''' Tower should return 415 when wrong content type is in HTTP requests '''
post(reverse('api:project_list'),
{'name': 't', 'organization': None},
admin,
content_type='text/html',
expect=415)

@ -47,3 +47,6 @@ def test_q1(inventory_structure, get, user):
query = '(name="host1" and groups__name="g1") or (name="host3" and groups__name="g2")'
evaluate_query(query, [hosts[0], hosts[2]])

# The following test verifies that the host_filter search is case-insensitive.
query = 'search="HOST1"'
evaluate_query(query, [hosts[0]])
|
||||
|
||||
@ -2,6 +2,7 @@ import pytest
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import (
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
ProjectUpdate,
|
||||
)
|
||||
@ -14,6 +15,17 @@ def tower_instance_group():
|
||||
return ig
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def instance():
|
||||
instance = Instance.objects.create(hostname='iso')
|
||||
return instance
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def non_iso_instance():
|
||||
return Instance.objects.create(hostname='iamnotanisolatedinstance')
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def instance_group(job_factory):
|
||||
ig = InstanceGroup(name="east")
|
||||
@ -22,9 +34,11 @@ def instance_group(job_factory):
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def isolated_instance_group(instance_group):
|
||||
def isolated_instance_group(instance_group, instance):
|
||||
ig = InstanceGroup(name="iso", controller=instance_group)
|
||||
ig.save()
|
||||
ig.instances.set([instance])
|
||||
ig.save()
|
||||
return ig
|
||||
|
||||
|
||||
@ -73,12 +87,12 @@ def test_delete_instance_group_jobs(delete, instance_group_jobs_successful, inst
|
||||
@pytest.mark.django_db
|
||||
def test_delete_instance_group_jobs_running(delete, instance_group_jobs_running, instance_group_jobs_successful, instance_group, admin):
|
||||
    def sort_keys(x):
        return (x['type'], x['id'])
        return (x['type'], str(x['id']))

    url = reverse("api:instance_group_detail", kwargs={'pk': instance_group.pk})
    response = delete(url, None, admin, expect=409)

    expect_transformed = [dict(id=str(j.id), type=j.model_to_str()) for j in instance_group_jobs_running]
    expect_transformed = [dict(id=j.id, type=j.model_to_str()) for j in instance_group_jobs_running]
    response_sorted = sorted(response.data['active_jobs'], key=sort_keys)
    expect_sorted = sorted(expect_transformed, key=sort_keys)

@ -113,3 +127,52 @@ def test_prevent_delete_iso_and_control_groups(delete, isolated_instance_group,
    controller_url = reverse("api:instance_group_detail", kwargs={'pk': isolated_instance_group.controller.pk})
    delete(iso_url, None, admin, expect=403)
    delete(controller_url, None, admin, expect=403)


@pytest.mark.django_db
def test_prevent_isolated_instance_added_to_non_isolated_instance_group(post, admin, instance, instance_group, isolated_instance_group):
    url = reverse("api:instance_group_instance_list", kwargs={'pk': instance_group.pk})

    assert True is instance.is_isolated()
    resp = post(url, {'associate': True, 'id': instance.id}, admin, expect=400)
    assert u"Isolated instances may not be added or removed from instances groups via the API." == resp.data['error']


@pytest.mark.django_db
def test_prevent_isolated_instance_added_to_non_isolated_instance_group_via_policy_list(patch, admin, instance, instance_group, isolated_instance_group):
    url = reverse("api:instance_group_detail", kwargs={'pk': instance_group.pk})

    assert True is instance.is_isolated()
    resp = patch(url, {'policy_instance_list': [instance.hostname]}, user=admin, expect=400)
    assert [u"Isolated instances may not be added or removed from instances groups via the API."] == resp.data['policy_instance_list']
    assert instance_group.policy_instance_list == []


@pytest.mark.django_db
def test_prevent_isolated_instance_removal_from_isolated_instance_group(post, admin, instance, instance_group, isolated_instance_group):
    url = reverse("api:instance_group_instance_list", kwargs={'pk': isolated_instance_group.pk})

    assert True is instance.is_isolated()
    resp = post(url, {'disassociate': True, 'id': instance.id}, admin, expect=400)
    assert u"Isolated instances may not be added or removed from instances groups via the API." == resp.data['error']


@pytest.mark.django_db
def test_prevent_non_isolated_instance_added_to_isolated_instance_group(
        post, admin, non_iso_instance, isolated_instance_group):
    url = reverse("api:instance_group_instance_list", kwargs={'pk': isolated_instance_group.pk})

    assert False is non_iso_instance.is_isolated()
    resp = post(url, {'associate': True, 'id': non_iso_instance.id}, admin, expect=400)
    assert u"Isolated instance group membership may not be managed via the API." == resp.data['error']


@pytest.mark.django_db
def test_prevent_non_isolated_instance_added_to_isolated_instance_group_via_policy_list(
        patch, admin, non_iso_instance, isolated_instance_group):
    url = reverse("api:instance_group_detail", kwargs={'pk': isolated_instance_group.pk})

    assert False is non_iso_instance.is_isolated()
    resp = patch(url, {'policy_instance_list': [non_iso_instance.hostname]}, user=admin, expect=400)
    assert [u"Isolated instance group membership may not be managed via the API."] == resp.data['policy_instance_list']
    assert isolated_instance_group.policy_instance_list == []

@ -450,6 +450,17 @@ class TestInsightsCredential:
              {'insights_credential': insights_credential.id}, admin_user,
              expect=200)

    def test_insights_credential_protection(self, post, patch, insights_inventory, alice, insights_credential):
        insights_inventory.organization.admin_role.members.add(alice)
        insights_inventory.admin_role.members.add(alice)
        post(reverse('api:inventory_list'), {
            "name": "test",
            "organization": insights_inventory.organization.id,
            "insights_credential": insights_credential.id
        }, alice, expect=403)
        patch(insights_inventory.get_absolute_url(),
              {'insights_credential': insights_credential.id}, alice, expect=403)

    def test_non_insights_credential(self, patch, insights_inventory, admin_user, scm_credential):
        patch(insights_inventory.get_absolute_url(),
              {'insights_credential': scm_credential.id}, admin_user,

@ -1,15 +1,23 @@
# Python
import pytest
import mock

from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from crum import impersonate

# Django rest framework
from rest_framework.exceptions import PermissionDenied

# AWX
from awx.api.versioning import reverse
from awx.api.views import RelatedJobsPreventDeleteMixin, UnifiedJobDeletionMixin

from awx.main.models import JobTemplate, User, Job
from awx.main.models import (
    JobTemplate,
    User,
    Job,
    AdHocCommand,
    ProjectUpdate,
)


@pytest.mark.django_db
@ -33,7 +41,8 @@ def test_job_relaunch_permission_denied_response(
    jt.credentials.add(machine_credential)
    jt_user = User.objects.create(username='jobtemplateuser')
    jt.execute_role.members.add(jt_user)
    job = jt.create_unified_job()
    with impersonate(jt_user):
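        # With impersonation active, the job is recorded as created by jt_user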
        job = jt.create_unified_job()

    # User capability is shown for this
    r = get(job.get_absolute_url(), jt_user, expect=200)
@ -46,6 +55,29 @@ def test_job_relaunch_permission_denied_response(
    assert 'do not have permission' in r.data['detail']


@pytest.mark.django_db
def test_job_relaunch_permission_denied_response_other_user(get, post, inventory, project, alice, bob):
    '''
    Asserts custom permission denied message corresponding to
    awx/main/tests/functional/test_rbac_job.py::TestJobRelaunchAccess::test_other_user_prompts
    '''
    jt = JobTemplate.objects.create(
        name='testjt', inventory=inventory, project=project,
        ask_credential_on_launch=True,
        ask_variables_on_launch=True)
    jt.execute_role.members.add(alice, bob)
    with impersonate(bob):
        job = jt.create_unified_job(extra_vars={'job_var': 'foo2'})

    # User capability is shown for this
    r = get(job.get_absolute_url(), alice, expect=200)
    assert r.data['summary_fields']['user_capabilities']['start']

    # Job has prompted data, launch denied w/ message
    r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, alice, expect=403)
    assert 'Job was launched with prompts provided by another user' in r.data['detail']


@pytest.mark.django_db
def test_job_relaunch_without_creds(post, inventory, project, admin_user):
    jt = JobTemplate.objects.create(
@ -75,7 +107,7 @@ def test_job_relaunch_on_failed_hosts(post, inventory, project, machine_credenti
        project=project
    )
    jt.credentials.add(machine_credential)
    job = jt.create_unified_job(_eager_fields={'status': 'failed', 'limit': 'host1,host2,host3'})
    job = jt.create_unified_job(_eager_fields={'status': 'failed'}, limit='host1,host2,host3')
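    # (the limit above is now supplied as a launch-time prompt, not as an _eager_fields value)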
    job.job_events.create(event='playbook_on_stats')
    job.job_host_summaries.create(host=h1, failed=False, ok=1, changed=0, failures=0, host_name=h1.name)
    job.job_host_summaries.create(host=h2, failed=False, ok=0, changed=1, failures=0, host_name=h2.name)
@ -133,3 +165,68 @@ def test_block_related_unprocessed_events(mocker, organization, project, delete,
    with mock.patch('awx.api.views.now', lambda: time_of_request):
        with pytest.raises(PermissionDenied):
            view.perform_destroy(organization)


@pytest.mark.django_db
def test_disallowed_http_update_methods(put, patch, post, inventory, project, admin_user):
    jt = JobTemplate.objects.create(
        name='test_disallowed_methods', inventory=inventory,
        project=project
    )
    job = jt.create_unified_job()
    post(
        url=reverse('api:job_detail', kwargs={'pk': job.pk, 'version': 'v2'}),
        data={},
        user=admin_user,
        expect=405
    )
    put(
        url=reverse('api:job_detail', kwargs={'pk': job.pk, 'version': 'v2'}),
        data={},
        user=admin_user,
        expect=405
    )
    patch(
        url=reverse('api:job_detail', kwargs={'pk': job.pk, 'version': 'v2'}),
        data={},
        user=admin_user,
        expect=405
    )


class TestControllerNode():
    @pytest.fixture
    def project_update(self, project):
        return ProjectUpdate.objects.create(project=project)

    @pytest.fixture
    def job(self):
        return JobTemplate.objects.create().create_unified_job()

    @pytest.fixture
    def adhoc(self, inventory):
        return AdHocCommand.objects.create(inventory=inventory)

    @pytest.mark.django_db
    def test_field_controller_node_exists(self, sqlite_copy_expert,
                                          admin_user, job, project_update,
                                          inventory_update, adhoc, get, system_job_factory):
        system_job = system_job_factory()

        r = get(reverse('api:unified_job_list') + '?id={}'.format(job.id), admin_user, expect=200)
        assert 'controller_node' in r.data['results'][0]

        r = get(job.get_absolute_url(), admin_user, expect=200)
        assert 'controller_node' in r.data

        r = get(reverse('api:ad_hoc_command_detail', kwargs={'pk': adhoc.pk}), admin_user, expect=200)
        assert 'controller_node' in r.data

        r = get(reverse('api:project_update_detail', kwargs={'pk': project_update.pk}), admin_user, expect=200)
        assert 'controller_node' not in r.data

        r = get(reverse('api:inventory_update_detail', kwargs={'pk': inventory_update.pk}), admin_user, expect=200)
        assert 'controller_node' not in r.data

        r = get(reverse('api:system_job_detail', kwargs={'pk': system_job.pk}), admin_user, expect=200)
        assert 'controller_node' not in r.data

@ -6,7 +6,7 @@ import pytest
# AWX
from awx.api.serializers import JobTemplateSerializer
from awx.api.versioning import reverse
from awx.main.models.jobs import Job, JobTemplate
from awx.main.models import Job, JobTemplate, CredentialType
from awx.main.migrations import _save_password_keys as save_password_keys

# Django
@ -182,6 +182,27 @@ def test_extra_credential_creation(get, post, organization_factory, job_template
    assert response.data.get('count') == 1


@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['scm', 'insights'])
def test_invalid_credential_kind_xfail(get, post, organization_factory, job_template_factory, kind):
    objs = organization_factory("org", superusers=['admin'])
    jt = job_template_factory("jt", organization=objs.organization,
                              inventory='test_inv', project='test_proj').job_template

    url = reverse('api:job_template_credentials_list', kwargs={'version': 'v2', 'pk': jt.pk})
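    # CredentialType.defaults maps each built-in credential kind to a factory for an unsaved CredentialType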
    cred_type = CredentialType.defaults[kind]()
    cred_type.save()
    response = post(url, {
        'name': 'My Cred',
        'credential_type': cred_type.pk,
        'inputs': {
            'username': 'bob',
            'password': 'secret',
        }
    }, objs.superusers.admin, expect=400)
    assert 'Cannot assign a Credential of kind `{}`.'.format(kind) in response.data.values()


@pytest.mark.django_db
def test_extra_credential_unique_type_xfail(get, post, organization_factory, job_template_factory, credentialtype_aws):
    objs = organization_factory("org", superusers=['admin'])
@ -567,16 +588,9 @@ def test_v1_launch_with_extra_credentials(get, post, organization_factory,
            credential=machine_credential.pk,
            extra_credentials=[credential.pk, net_credential.pk]
        ),
        objs.superusers.admin, expect=201
        objs.superusers.admin, expect=400
    )
    job_pk = resp.data.get('id')
    assert resp.data.get('ignored_fields').keys() == ['extra_credentials']

    resp = get(reverse('api:job_extra_credentials_list', kwargs={'pk': job_pk}), objs.superusers.admin)
    assert resp.data.get('count') == 0

    resp = get(reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk}), objs.superusers.admin)
    assert resp.data.get('count') == 0
    assert 'Field is not allowed for use with v1 API' in resp.data.get('extra_credentials')


@pytest.mark.django_db

@ -1,13 +1,17 @@
import pytest
import base64
import json

from django.db import connection
from django.test.utils import override_settings
from django.test import Client

from awx.main.utils.encryption import decrypt_value, get_encryption_key
from awx.api.versioning import reverse, drf_reverse
from awx.main.models.oauth import (OAuth2Application as Application,
                                   OAuth2AccessToken as AccessToken,
                                   )
from awx.sso.models import UserEnterpriseAuth
from oauth2_provider.models import RefreshToken


@ -29,7 +33,50 @@ def test_personal_access_token_creation(oauth_application, post, alice):


@pytest.mark.django_db
def test_oauth_application_create(admin, organization, post):
@pytest.mark.parametrize('allow_oauth, status', [(True, 201), (False, 403)])
def test_token_creation_disabled_for_external_accounts(oauth_application, post, alice, allow_oauth, status):
    UserEnterpriseAuth(user=alice, provider='radius').save()
    url = drf_reverse('api:oauth_authorization_root_view') + 'token/'

    with override_settings(RADIUS_SERVER='example.org', ALLOW_OAUTH2_FOR_EXTERNAL_USERS=allow_oauth):
        resp = post(
            url,
            data='grant_type=password&username=alice&password=alice&scope=read',
            content_type='application/x-www-form-urlencoded',
            HTTP_AUTHORIZATION='Basic ' + base64.b64encode(':'.join([
                oauth_application.client_id, oauth_application.client_secret
            ])),
            status=status
        )
    if allow_oauth:
        assert AccessToken.objects.count() == 1
    else:
        assert 'OAuth2 Tokens cannot be created by users associated with an external authentication provider' in resp.content
        assert AccessToken.objects.count() == 0


@pytest.mark.django_db
def test_pat_creation_no_default_scope(oauth_application, post, admin):
    # tests that the default scope is overridden
    url = reverse('api:o_auth2_token_list')
    response = post(url, {'description': 'test token',
                          'scope': 'read',
                          'application': oauth_application.pk,
                          }, admin)
    assert response.data['scope'] == 'read'


@pytest.mark.django_db
def test_pat_creation_no_scope(oauth_application, post, admin):
    url = reverse('api:o_auth2_token_list')
    response = post(url, {'description': 'test token',
                          'application': oauth_application.pk,
                          }, admin)
    assert response.data['scope'] == 'write'


@pytest.mark.django_db
def test_oauth2_application_create(admin, organization, post):
    response = post(
        reverse('api:o_auth2_application_list'), {
            'name': 'test app',
@ -47,7 +94,18 @@ def test_oauth_application_create(admin, organization, post):
    assert created_app.client_type == 'confidential'
    assert created_app.authorization_grant_type == 'password'
    assert created_app.organization == organization


@pytest.mark.django_db
def test_oauth2_validator(admin, oauth_application, post):
    post(
        reverse('api:o_auth2_application_list'), {
            'name': 'Write App Token',
            'application': oauth_application.pk,
            'scope': 'Write',
        }, admin, expect=400
    )


@pytest.mark.django_db
def test_oauth_application_update(oauth_application, organization, patch, admin, alice):
@ -94,7 +152,7 @@ def test_oauth_token_create(oauth_application, get, post, admin):
        reverse('api:o_auth2_application_token_list', kwargs={'pk': oauth_application.pk}),
        {'scope': 'read'}, admin, expect=201
    )
    assert 'modified' in response.data
    assert 'modified' in response.data and response.data['modified'] is not None
    assert 'updated' not in response.data
    token = AccessToken.objects.get(token=response.data['token'])
    refresh_token = RefreshToken.objects.get(token=response.data['refresh_token'])
@ -117,6 +175,27 @@ def test_oauth_token_create(oauth_application, get, post, admin):
    assert response.data['summary_fields']['tokens']['results'][0] == {
        'id': token.pk, 'scope': token.scope, 'token': '************'
    }
    # If the application is implicit grant type, no new refresh tokens should be created.
    # The following tests check for that.
    oauth_application.authorization_grant_type = 'implicit'
    oauth_application.save()
    token_count = RefreshToken.objects.count()
    response = post(
        reverse('api:o_auth2_token_list'),
        {'scope': 'read', 'application': oauth_application.pk}, admin, expect=201
    )
    assert response.data['refresh_token'] is None
    response = post(
        reverse('api:user_authorized_token_list', kwargs={'pk': admin.pk}),
        {'scope': 'read', 'application': oauth_application.pk}, admin, expect=201
    )
    assert response.data['refresh_token'] is None
    response = post(
        reverse('api:application_o_auth2_token_list', kwargs={'pk': oauth_application.pk}),
        {'scope': 'read'}, admin, expect=201
    )
    assert response.data['refresh_token'] is None
    assert token_count == RefreshToken.objects.count()


@pytest.mark.django_db
@ -146,7 +225,7 @@ def test_oauth_token_delete(oauth_application, post, delete, get, admin):
        admin, expect=204
    )
    assert AccessToken.objects.count() == 0
    assert RefreshToken.objects.count() == 0
    assert RefreshToken.objects.count() == 1
    response = get(
        reverse('api:o_auth2_application_token_list', kwargs={'pk': oauth_application.pk}),
        admin, expect=200
@ -181,3 +260,57 @@ def test_oauth_list_user_tokens(oauth_application, post, get, admin, alice):
        post(url, {'scope': 'read'}, user, expect=201)
        response = get(url, admin, expect=200)
        assert response.data['count'] == 1


@pytest.mark.django_db
def test_refresh_accesstoken(oauth_application, post, get, delete, admin):
    response = post(
        reverse('api:o_auth2_application_token_list', kwargs={'pk': oauth_application.pk}),
        {'scope': 'read'}, admin, expect=201
    )
    token = AccessToken.objects.get(token=response.data['token'])
    refresh_token = RefreshToken.objects.get(token=response.data['refresh_token'])
    assert AccessToken.objects.count() == 1
    assert RefreshToken.objects.count() == 1

    refresh_url = drf_reverse('api:oauth_authorization_root_view') + 'token/'
    response = post(
        refresh_url,
        data='grant_type=refresh_token&refresh_token=' + refresh_token.token,
        content_type='application/x-www-form-urlencoded',
        HTTP_AUTHORIZATION='Basic ' + base64.b64encode(':'.join([
            oauth_application.client_id, oauth_application.client_secret
        ]))
    )

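    # A successful refresh issues a rotated access/refresh pair and revokes the old refresh token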
    new_token = json.loads(response._container[0])['access_token']
    new_refresh_token = json.loads(response._container[0])['refresh_token']
    assert token not in AccessToken.objects.all()
    assert AccessToken.objects.get(token=new_token) != 0
    assert RefreshToken.objects.get(token=new_refresh_token) != 0
    refresh_token = RefreshToken.objects.get(token=refresh_token)
    assert refresh_token.revoked


@pytest.mark.django_db
def test_implicit_authorization(oauth_application, admin):
    oauth_application.client_type = 'confidential'
    oauth_application.authorization_grant_type = 'implicit'
    oauth_application.redirect_uris = 'http://test.com'
    oauth_application.save()
    data = {
        'response_type': 'token',
        'client_id': oauth_application.client_id,
        'client_secret': oauth_application.client_secret,
        'scope': 'read',
        'redirect_uri': 'http://test.com',
        'allow': True
    }

    request_client = Client()
    request_client.force_login(admin, 'django.contrib.auth.backends.ModelBackend')
    refresh_token_count = RefreshToken.objects.count()
    response = request_client.post(drf_reverse('api:authorize'), data)
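    # The implicit grant hands the access token straight back in the redirect URL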
    assert 'http://test.com' in response.url and 'access_token' in response.url
    # Make sure no refresh token is created for app with implicit grant type.
    assert refresh_token_count == RefreshToken.objects.count()

@ -260,12 +260,12 @@ def test_organization_delete(delete, admin, organization, organization_jobs_succ
@pytest.mark.django_db
def test_organization_delete_with_active_jobs(delete, admin, organization, organization_jobs_running):
    def sort_keys(x):
        return (x['type'], x['id'])
        return (x['type'], str(x['id']))

    url = reverse('api:organization_detail', kwargs={'pk': organization.id})
    resp = delete(url, None, user=admin, expect=409)

    expect_transformed = [dict(id=str(j.id), type=j.model_to_str()) for j in organization_jobs_running]
    expect_transformed = [dict(id=j.id, type=j.model_to_str()) for j in organization_jobs_running]
    resp_sorted = sorted(resp.data['active_jobs'], key=sort_keys)
    expect_sorted = sorted(expect_transformed, key=sort_keys)


@ -12,3 +12,28 @@ def test_admin_visible_to_orphaned_users(get, alice):
        names.add(item['name'])
    assert 'System Auditor' in names
    assert 'System Administrator' in names


@pytest.mark.django_db
@pytest.mark.parametrize('role,code', [
    ('member_role', 400),
    ('admin_role', 400),
    ('inventory_admin_role', 204)
])
@pytest.mark.parametrize('reversed', [
    True, False
])
def test_org_object_role_assigned_to_team(post, team, organization, org_admin, role, code, reversed):
    if reversed:
        url = reverse('api:role_teams_list', kwargs={'pk': getattr(organization, role).id})
        sub_id = team.id
    else:
        url = reverse('api:team_roles_list', kwargs={'pk': team.id})
        sub_id = getattr(organization, role).id

    post(
        url=url,
        data={'id': sub_id},
        user=org_admin,
        expect=code
    )

@ -2,7 +2,8 @@ import pytest

from awx.api.versioning import reverse

from awx.main.models import JobTemplate
from awx.main.models import JobTemplate, Schedule
from awx.main.utils.encryption import decrypt_value, get_encryption_key


RRULE_EXAMPLE = 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1'
@ -51,6 +52,50 @@ def test_valid_survey_answer(post, admin_user, project, inventory, survey_spec_f
             admin_user, expect=201)


@pytest.mark.django_db
def test_encrypted_survey_answer(post, patch, admin_user, project, inventory, survey_spec_factory):
    job_template = JobTemplate.objects.create(
        name='test-jt',
        project=project,
        playbook='helloworld.yml',
        inventory=inventory,
        ask_variables_on_launch=False,
        survey_enabled=True,
        survey_spec=survey_spec_factory([{'variable': 'var1', 'type': 'password'}])
    )

    # test encrypted-on-create
    url = reverse('api:job_template_schedules_list', kwargs={'pk': job_template.id})
    r = post(url, {'name': 'test sch', 'rrule': RRULE_EXAMPLE, 'extra_data': '{"var1": "foo"}'},
             admin_user, expect=201)
    assert r.data['extra_data']['var1'] == "$encrypted$"
    schedule = Schedule.objects.get(pk=r.data['id'])
    assert schedule.extra_data['var1'].startswith('$encrypted$')
    assert decrypt_value(get_encryption_key('value', pk=None), schedule.extra_data['var1']) == 'foo'
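    # The API masks the answer as $encrypted$; the stored ciphertext is reversible via decrypt_value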

    # test a no-op change
    r = patch(
        schedule.get_absolute_url(),
        data={'extra_data': {'var1': '$encrypted$'}},
        user=admin_user,
        expect=200
    )
    assert r.data['extra_data']['var1'] == '$encrypted$'
    schedule.refresh_from_db()
    assert decrypt_value(get_encryption_key('value', pk=None), schedule.extra_data['var1']) == 'foo'

    # change to a different value
    r = patch(
        schedule.get_absolute_url(),
        data={'extra_data': {'var1': 'bar'}},
        user=admin_user,
        expect=200
    )
    assert r.data['extra_data']['var1'] == '$encrypted$'
    schedule.refresh_from_db()
    assert decrypt_value(get_encryption_key('value', pk=None), schedule.extra_data['var1']) == 'bar'


@pytest.mark.django_db
@pytest.mark.parametrize('rrule, error', [
    ("", "This field may not be blank"),
@ -87,6 +132,12 @@ def test_invalid_rrules(post, admin_user, project, inventory, rrule, error):
    assert error in resp.content


@pytest.mark.django_db
def test_normal_users_can_preview_schedules(post, alice):
    url = reverse('api:schedule_rrule')
    post(url, {'rrule': get_rrule()}, alice, expect=200)


@pytest.mark.django_db
def test_utc_preview(post, admin_user):
    url = reverse('api:schedule_rrule')

54
awx/main/tests/functional/api/test_search_filter.py
Normal file
@ -0,0 +1,54 @@
# Python
import pytest
import json

# Django Rest Framework
from rest_framework.test import APIRequestFactory

# AWX
from awx.api.views import HostList
from awx.main.models import Host, Group, Inventory
from awx.api.versioning import reverse


@pytest.mark.django_db
class TestSearchFilter:
    def test_related_research_filter_relation(self, admin):
        inv = Inventory.objects.create(name="inv")
        group1 = Group.objects.create(name="g1", inventory=inv)
        group2 = Group.objects.create(name="g2", inventory=inv)
        host1 = Host.objects.create(name="host1", inventory=inv)
        host2 = Host.objects.create(name="host2", inventory=inv)
        host3 = Host.objects.create(name="host3", inventory=inv)
        host1.groups.add(group1)
        host2.groups.add(group1)
        host2.groups.add(group2)
        host3.groups.add(group2)
        host1.save()
        host2.save()
        host3.save()
        # Build requests with a request factory; request.user is set directly on each request
        factory = APIRequestFactory()
        # Actually test the endpoint.
        host_list_url = reverse('api:host_list')

        # Test if the OR relation works.
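        # (repeating the groups__search param ORs the terms; one comma-separated value ANDs them)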
        request = factory.get(host_list_url, data={'groups__search': ['g1', 'g2']})
        request.user = admin
        response = HostList.as_view()(request)
        response.render()
        result = json.loads(response.content)
        assert result['count'] == 3
        expected_hosts = ['host1', 'host2', 'host3']
        for i in result['results']:
            expected_hosts.remove(i['name'])
        assert not expected_hosts

        # Test if the AND relation works.
        request = factory.get(host_list_url, data={'groups__search': ['g1,g2']})
        request.user = admin
        response = HostList.as_view()(request)
        response.render()
        result = json.loads(response.content)
        assert result['count'] == 1
        assert result['results'][0]['name'] == 'host2'

@ -101,6 +101,42 @@ def test_ldap_settings(get, put, patch, delete, admin):
    patch(url, user=admin, data={'AUTH_LDAP_BIND_DN': u'cn=暴力膜,dc=大新闻,dc=真的粉丝'}, expect=200)


@pytest.mark.django_db
@pytest.mark.parametrize('value', [
    None, '', 'INVALID', 1, [1], ['INVALID'],
])
def test_ldap_user_flags_by_group_invalid_dn(get, patch, admin, value):
    url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'ldap'})
    patch(url, user=admin,
          data={'AUTH_LDAP_USER_FLAGS_BY_GROUP': {'is_superuser': value}},
          expect=400)


@pytest.mark.django_db
def test_ldap_user_flags_by_group_string(get, patch, admin):
    expected = 'CN=Admins,OU=Groups,DC=example,DC=com'
    url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'ldap'})
    patch(url, user=admin,
          data={'AUTH_LDAP_USER_FLAGS_BY_GROUP': {'is_superuser': expected}},
          expect=200)
    resp = get(url, user=admin)
    assert resp.data['AUTH_LDAP_USER_FLAGS_BY_GROUP']['is_superuser'] == [expected]


@pytest.mark.django_db
def test_ldap_user_flags_by_group_list(get, patch, admin):
    expected = [
        'CN=Admins,OU=Groups,DC=example,DC=com',
        'CN=Superadmins,OU=Groups,DC=example,DC=com'
    ]
    url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'ldap'})
    patch(url, user=admin,
          data={'AUTH_LDAP_USER_FLAGS_BY_GROUP': {'is_superuser': expected}},
          expect=200)
    resp = get(url, user=admin)
    assert resp.data['AUTH_LDAP_USER_FLAGS_BY_GROUP']['is_superuser'] == expected


@pytest.mark.parametrize('setting', [
    'AUTH_LDAP_USER_DN_TEMPLATE',
    'AUTH_LDAP_REQUIRE_GROUP',

@ -3,11 +3,8 @@
import base64
import json
import re
import shutil
import tempfile

from django.conf import settings
from django.db.backends.sqlite3.base import SQLiteCursorWrapper
import mock
import pytest

@ -31,28 +28,6 @@ def _mk_inventory_update():
    return iu


@pytest.fixture(scope='function')
def sqlite_copy_expert(request):
    # copy_expert is postgres-specific, and SQLite doesn't support it; mock its
    # behavior to test that it writes a file that contains stdout from events
    path = tempfile.mkdtemp(prefix='job-event-stdout')

    def write_stdout(self, sql, fd):
        # simulate postgres copy_expert support with ORM code
        parts = sql.split(' ')
        tablename = parts[parts.index('from') + 1]
        for cls in (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
                    InventoryUpdateEvent, SystemJobEvent):
            if cls._meta.db_table == tablename:
                for event in cls.objects.order_by('start_line').all():
                    fd.write(event.stdout.encode('utf-8'))

    setattr(SQLiteCursorWrapper, 'copy_expert', write_stdout)
    request.addfinalizer(lambda: shutil.rmtree(path))
    request.addfinalizer(lambda: delattr(SQLiteCursorWrapper, 'copy_expert'))
    return path


@pytest.mark.django_db
@pytest.mark.parametrize('Parent, Child, relation, view', [
    [Job, JobEvent, 'job', 'api:job_stdout'],

67
awx/main/tests/functional/commands/test_expire_sessions.py
Normal file
@ -0,0 +1,67 @@
# Python
import pytest
import string
import random

# Django
from django.utils import timezone
from django.test import Client
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.core.management.base import CommandError

# AWX
from awx.main.management.commands.expire_sessions import Command


@pytest.mark.django_db
class TestExpireSessionsCommand:
    @staticmethod
    def create_and_login_fake_users():
        # We already have Alice and Bob, so we are going to create Charlie and Dylan
        charlie = User.objects.create_user('charlie', 'charlie@email.com', 'pass')
        dylan = User.objects.create_user('dylan', 'dylan@email.com', 'word')
        client_0 = Client()
        client_1 = Client()
        client_0.force_login(charlie, backend=settings.AUTHENTICATION_BACKENDS[0])
        client_1.force_login(dylan, backend=settings.AUTHENTICATION_BACKENDS[0])
        return charlie, dylan

    @staticmethod
    def run_command(username=None):
        command_obj = Command()
        command_obj.handle(user=username)

    def test_expire_all_sessions(self):
        charlie, dylan = self.create_and_login_fake_users()
        self.run_command()
        start = timezone.now()
        sessions = Session.objects.filter(expire_date__gte=start)
        for session in sessions:
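            # Django stores the authenticated user's pk in the session payload as '_auth_user_id'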
            user_id = int(session.get_decoded().get('_auth_user_id'))
            if user_id == charlie.id or user_id == dylan.id:
                self.fail('The user should not have active sessions.')

    def test_non_existing_user(self):
        fake_username = ''
        while fake_username == '' or User.objects.filter(username=fake_username).exists():
            fake_username = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
        with pytest.raises(CommandError) as excinfo:
            self.run_command(fake_username)
        assert excinfo.value.message.strip() == 'The user does not exist.'

    def test_expire_one_user(self):
        # charlie should be logged out, but dylan should not.
        charlie, dylan = self.create_and_login_fake_users()
        self.run_command('charlie')
        start = timezone.now()
        sessions = Session.objects.filter(expire_date__gte=start)
        dylan_still_active = False
        for session in sessions:
            user_id = int(session.get_decoded().get('_auth_user_id'))
            if user_id == charlie.id:
                self.fail('Charlie should not have active sessions.')
            elif user_id == dylan.id:
                dylan_still_active = True
        assert dylan_still_active

@ -4,6 +4,8 @@ import mock
import json
import os
import six
import tempfile
import shutil
from datetime import timedelta
from six.moves import xrange

@ -14,6 +16,7 @@ from django.utils import timezone
from django.contrib.auth.models import User
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.db.backends.sqlite3.base import SQLiteCursorWrapper
from jsonbfield.fields import JSONField

# AWX
@ -44,6 +47,13 @@ from awx.main.models.notifications import (
    NotificationTemplate,
    Notification
)
from awx.main.models.events import (
    JobEvent,
    AdHocCommandEvent,
    ProjectUpdateEvent,
    InventoryUpdateEvent,
    SystemJobEvent,
)
from awx.main.models.workflow import WorkflowJobTemplate
from awx.main.models.ad_hoc_commands import AdHocCommand
from awx.main.models.oauth import OAuth2Application as Application
@ -553,7 +563,9 @@ def _request(verb):
            response.data[key] = str(value)
        except Exception:
            response.data = data_copy
        assert response.status_code == expect
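        # Include the response payload in the failure message to ease debugging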
        assert response.status_code == expect, 'Response data: {}'.format(
            getattr(response, 'data', None)
        )
        if hasattr(response, 'render'):
            response.render()
        __SWAGGER_REQUESTS__.setdefault(request.path, {})[
@ -672,7 +684,7 @@ def job_template_labels(organization, job_template):

@pytest.fixture
def workflow_job_template(organization):
    wjt = WorkflowJobTemplate(name='test-workflow_job_template')
    wjt = WorkflowJobTemplate(name='test-workflow_job_template', organization=organization)
    wjt.save()

    return wjt
@ -729,3 +741,26 @@ def oauth_application(admin):
        name='test app', user=admin, client_type='confidential',
        authorization_grant_type='password'
    )


@pytest.fixture
def sqlite_copy_expert(request):
    # copy_expert is postgres-specific, and SQLite doesn't support it; mock its
    # behavior to test that it writes a file that contains stdout from events
    path = tempfile.mkdtemp(prefix='job-event-stdout')

    def write_stdout(self, sql, fd):
        # simulate postgres copy_expert support with ORM code
        parts = sql.split(' ')
        tablename = parts[parts.index('from') + 1]
        for cls in (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
                    InventoryUpdateEvent, SystemJobEvent):
            if cls._meta.db_table == tablename:
                for event in cls.objects.order_by('start_line').all():
                    fd.write(event.stdout.encode('utf-8'))

    setattr(SQLiteCursorWrapper, 'copy_expert', write_stdout)
    request.addfinalizer(lambda: shutil.rmtree(path))
    request.addfinalizer(lambda: delattr(SQLiteCursorWrapper, 'copy_expert'))
    return path


@ -184,6 +184,32 @@ def test_annon_user_action():
    assert not entry.actor


@pytest.mark.django_db
def test_activity_stream_deleted_actor(alice, bob):
    alice.first_name = 'Alice'
    alice.last_name = 'Doe'
    alice.save()
    with impersonate(alice):
        o = Organization.objects.create(name='test organization')
    entry = o.activitystream_set.get(operation='create')
    assert entry.actor == alice

    alice.delete()
    entry = o.activitystream_set.get(operation='create')
    assert entry.actor is None
    deleted = entry.deleted_actor
    assert deleted['username'] == 'alice'
    assert deleted['first_name'] == 'Alice'
    assert deleted['last_name'] == 'Doe'

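    # Reassigning the actor should also refresh the recorded deleted_actor details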
    entry.actor = bob
    entry.save(update_fields=['actor'])
    deleted = entry.deleted_actor

    entry = ActivityStream.objects.get(id=entry.pk)
    assert entry.deleted_actor['username'] == 'bob'


@pytest.mark.django_db
def test_modified_not_allowed_field(somecloud_type):
    '''
Some files were not shown because too many files have changed in this diff.