Mirror of https://github.com/ansible/awx.git (synced 2026-02-07 20:44:45 -03:30)

Compare commits

76 Commits
| SHA1 |
|---|
| 94e5795dfc |
| c4688d6298 |
| 6763badea3 |
| 2c4ad6ef0f |
| 37f44d7214 |
| 98bbc836a6 |
| b59aff50dc |
| a70b0c1ddc |
| db72c9d5b8 |
| 4e0d19914f |
| 6f2307f50e |
| dbc2215bb6 |
| 7c08b29827 |
| 407194d320 |
| 853af295d9 |
| 4738c8333a |
| 13dcea0afd |
| bc2d339981 |
| bef9ef10bb |
| 8645fe5c57 |
| b93aa20362 |
| 4bbfc8a946 |
| 2c8eef413b |
| d5bad1a533 |
| f6c0effcb2 |
| 31a086b11a |
| d94f766fcb |
| a7113549eb |
| bfd811f408 |
| 030704a9e1 |
| c312d9bce3 |
| aadcc217eb |
| 345c1c11e9 |
| 2c3a7fafc5 |
| dbcd32a1d9 |
| d45e258a78 |
| d16b69a102 |
| 8b4efbc973 |
| 4cb061e7db |
| 31db6a1447 |
| ad9d5904d8 |
| b837d549ff |
| 9e22865d2e |
| ee3e3e1516 |
| 4a8f6e45f8 |
| 6a317cca1b |
| d67af79451 |
| fe77fda7b2 |
| f613b76baa |
| 054cbe69d7 |
| 87e9dcb6d7 |
| c8829b057e |
| a0b376a6ca |
| d675207f99 |
| 20504042c9 |
| 0e87e97820 |
| 1f154742df |
| 85fc81aab1 |
| 5cfeeb3e87 |
| a8c07b06d8 |
| 53c5feaf6b |
| 6f57aaa8f5 |
| bea74a401d |
| 54e85813c8 |
| b69ed08fe5 |
| de25408a23 |
| b17f0a188b |
| fb860d76ce |
| 451f20ce0f |
| c1dc0c7b86 |
| d65ea2a3d5 |
| 8827ae7554 |
| 4915262af1 |
| d43c91e1a5 |
| b470ca32af |
| 793777bec7 |
.github/workflows/devel_images.yml (vendored): 1 line changed
```diff
@@ -2,6 +2,7 @@
 name: Build/Push Development Images
 env:
   LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
+  DOCKER_CACHE: "--no-cache" # using the cache will not rebuild git requirements and other things
 on:
   workflow_dispatch:
   push:
```
Makefile: 57 lines changed
```diff
@@ -53,6 +53,8 @@ OTEL ?= false
 LOKI ?= false
 # If set to true docker-compose will install editable dependencies
 EDITABLE_DEPENDENCIES ?= false
+# If set to true, use tls for postgres connection
+PG_TLS ?= false
 
 VENV_BASE ?= /var/lib/awx/venv
 
@@ -61,6 +63,11 @@ DEV_DOCKER_OWNER ?= ansible
 DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
 DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
 DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
+IMAGE_KUBE_DEV=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
+IMAGE_KUBE=$(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG)
+
+# Common command to use for running ansible-playbook
+ANSIBLE_PLAYBOOK ?= ansible-playbook -e ansible_python_interpreter=$(PYTHON)
 
 RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
 
@@ -69,7 +76,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
 SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
 # These should be upgraded in the AWX and Ansible venv before attempting
 # to install the actual requirements
-VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==69.0.2 setuptools_scm[toml]==8.0.4 wheel==0.42.0
+VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==69.0.2 setuptools_scm[toml]==8.0.4 wheel==0.42.0 cython==0.29.37
 
 NAME ?= awx
 
@@ -84,6 +91,18 @@ I18N_FLAG_FILE = .i18n_built
 ## PLATFORMS defines the target platforms for the manager image be build to provide support to multiple
 PLATFORMS ?= linux/amd64,linux/arm64 # linux/ppc64le,linux/s390x
 
+# Set up cache variables for image builds, allowing to control whether cache is used or not, ex:
+# DOCKER_CACHE=--no-cache make docker-compose-build
+ifeq ($(DOCKER_CACHE),)
+DOCKER_DEVEL_CACHE_FLAG=--cache-from=$(DEVEL_IMAGE_NAME)
+DOCKER_KUBE_DEV_CACHE_FLAG=--cache-from=$(IMAGE_KUBE_DEV)
+DOCKER_KUBE_CACHE_FLAG=--cache-from=$(IMAGE_KUBE)
+else
+DOCKER_DEVEL_CACHE_FLAG=$(DOCKER_CACHE)
+DOCKER_KUBE_DEV_CACHE_FLAG=$(DOCKER_CACHE)
+DOCKER_KUBE_CACHE_FLAG=$(DOCKER_CACHE)
+endif
+
 .PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
 	develop refresh adduser migrate dbchange \
 	receiver test test_unit test_coverage coverage_html \
@@ -366,7 +385,7 @@ symlink_collection:
 	ln -s $(shell pwd)/awx_collection $(COLLECTION_INSTALL)
 
 awx_collection_build: $(shell find awx_collection -type f)
-	ansible-playbook -i localhost, awx_collection/tools/template_galaxy.yml \
+	$(ANSIBLE_PLAYBOOK) -i localhost, awx_collection/tools/template_galaxy.yml \
 		-e collection_package=$(COLLECTION_PACKAGE) \
 		-e collection_namespace=$(COLLECTION_NAMESPACE) \
 		-e collection_version=$(COLLECTION_VERSION) \
@@ -520,10 +539,10 @@ endif
 
 docker-compose-sources: .git/hooks/pre-commit
 	@if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\
-	    ansible-playbook -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \
+	    $(ANSIBLE_PLAYBOOK) -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \
 	fi;
 
-	ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
+	$(ANSIBLE_PLAYBOOK) -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
 	    -e awx_image=$(DEV_DOCKER_TAG_BASE)/awx_devel \
 	    -e awx_image_tag=$(COMPOSE_TAG) \
 	    -e receptor_image=$(RECEPTOR_IMAGE) \
@@ -542,11 +561,12 @@ docker-compose-sources: .git/hooks/pre-commit
 	    -e enable_otel=$(OTEL) \
 	    -e enable_loki=$(LOKI) \
 	    -e install_editable_dependencies=$(EDITABLE_DEPENDENCIES) \
+	    -e pg_tls=$(PG_TLS) \
 	    $(EXTRA_SOURCES_ANSIBLE_OPTS)
 
 docker-compose: awx/projects docker-compose-sources
 	ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
-	ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
+	$(ANSIBLE_PLAYBOOK) -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
 	    -e enable_vault=$(VAULT) \
 	    -e vault_tls=$(VAULT_TLS) \
 	    -e enable_ldap=$(LDAP); \
@@ -589,7 +609,7 @@ docker-compose-container-group-clean:
 .PHONY: Dockerfile.dev
 ## Generate Dockerfile.dev for awx_devel image
 Dockerfile.dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
-	ansible-playbook tools/ansible/dockerfile.yml \
+	$(ANSIBLE_PLAYBOOK) tools/ansible/dockerfile.yml \
 		-e dockerfile_name=Dockerfile.dev \
 		-e build_dev=True \
 		-e receptor_image=$(RECEPTOR_IMAGE)
@@ -600,8 +620,7 @@ docker-compose-build: Dockerfile.dev
 		-f Dockerfile.dev \
 		-t $(DEVEL_IMAGE_NAME) \
-		--build-arg BUILDKIT_INLINE_CACHE=1 \
-		--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
+		$(DOCKER_DEVEL_CACHE_FLAG) .
 
 .PHONY: docker-compose-buildx
 ## Build awx_devel image for docker compose development environment for multiple architectures
@@ -611,7 +630,7 @@ docker-compose-buildx: Dockerfile.dev
 	- docker buildx build \
 		--push \
 		--build-arg BUILDKIT_INLINE_CACHE=1 \
-		--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) \
+		$(DOCKER_DEVEL_CACHE_FLAG) \
 		--platform=$(PLATFORMS) \
 		--tag $(DEVEL_IMAGE_NAME) \
 		-f Dockerfile.dev .
@@ -664,7 +683,7 @@ version-for-buildyml:
 .PHONY: Dockerfile
 ## Generate Dockerfile for awx image
 Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
-	ansible-playbook tools/ansible/dockerfile.yml \
+	$(ANSIBLE_PLAYBOOK) tools/ansible/dockerfile.yml \
 		-e receptor_image=$(RECEPTOR_IMAGE) \
 		-e headless=$(HEADLESS)
 
@@ -674,7 +693,8 @@ awx-kube-build: Dockerfile
 		--build-arg VERSION=$(VERSION) \
 		--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
 		--build-arg HEADLESS=$(HEADLESS) \
-		-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
+		$(DOCKER_KUBE_CACHE_FLAG) \
+		-t $(IMAGE_KUBE) .
 
 ## Build multi-arch awx image for deployment on Kubernetes environment.
 awx-kube-buildx: Dockerfile
@@ -686,7 +706,8 @@ awx-kube-buildx: Dockerfile
 		--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
 		--build-arg HEADLESS=$(HEADLESS) \
 		--platform=$(PLATFORMS) \
-		--tag $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) \
+		$(DOCKER_KUBE_CACHE_FLAG) \
+		--tag $(IMAGE_KUBE) \
 		-f Dockerfile .
 	- docker buildx rm awx-kube-buildx
 
@@ -694,7 +715,7 @@ awx-kube-buildx: Dockerfile
 .PHONY: Dockerfile.kube-dev
 ## Generate Docker.kube-dev for awx_kube_devel image
 Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
-	ansible-playbook tools/ansible/dockerfile.yml \
+	$(ANSIBLE_PLAYBOOK) tools/ansible/dockerfile.yml \
 		-e dockerfile_name=Dockerfile.kube-dev \
 		-e kube_dev=True \
 		-e template_dest=_build_kube_dev \
@@ -704,8 +725,8 @@ Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
 awx-kube-dev-build: Dockerfile.kube-dev
 	DOCKER_BUILDKIT=1 docker build -f Dockerfile.kube-dev \
 		--build-arg BUILDKIT_INLINE_CACHE=1 \
-		--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-		-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
+		$(DOCKER_KUBE_DEV_CACHE_FLAG) \
+		-t $(IMAGE_KUBE_DEV) .
 
 ## Build and push multi-arch awx_kube_devel image for development on local Kubernetes environment.
 awx-kube-dev-buildx: Dockerfile.kube-dev
@@ -714,14 +735,14 @@ awx-kube-dev-buildx: Dockerfile.kube-dev
 	- docker buildx build \
 		--push \
 		--build-arg BUILDKIT_INLINE_CACHE=1 \
-		--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
+		$(DOCKER_KUBE_DEV_CACHE_FLAG) \
 		--platform=$(PLATFORMS) \
-		--tag $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
+		--tag $(IMAGE_KUBE_DEV) \
 		-f Dockerfile.kube-dev .
 	- docker buildx rm awx-kube-dev-buildx
 
 kind-dev-load: awx-kube-dev-build
-	$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
+	$(KIND_BIN) load docker-image $(IMAGE_KUBE_DEV)
 
 # Translation TASKS
 # --------------------------------------
```
```diff
@@ -33,8 +33,10 @@ from rest_framework.negotiation import DefaultContentNegotiation
 # django-ansible-base
 from ansible_base.rest_filters.rest_framework.field_lookup_backend import FieldLookupBackend
 from ansible_base.lib.utils.models import get_all_field_names
+from ansible_base.lib.utils.requests import get_remote_host
 from ansible_base.rbac.models import RoleEvaluation, RoleDefinition
 from ansible_base.rbac.permission_registry import permission_registry
+from ansible_base.jwt_consumer.common.util import validate_x_trusted_proxy_header
 
 # AWX
 from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
@@ -42,6 +44,7 @@ from awx.main.models.rbac import give_creator_permissions
 from awx.main.access import optimize_queryset
 from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
 from awx.main.utils.licensing import server_product_name
+from awx.main.utils.proxy import is_proxy_in_headers, delete_headers_starting_with_http
 from awx.main.views import ApiErrorView
 from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer
 from awx.api.versioning import URLPathVersioning
@@ -93,8 +96,9 @@ class LoggedLoginView(auth_views.LoginView):
 
     def post(self, request, *args, **kwargs):
         ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
+        ip = get_remote_host(request)  # request.META.get('REMOTE_ADDR', None)
         if request.user.is_authenticated:
-            logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None))))
+            logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, ip)))
             ret.set_cookie(
                 'userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False), samesite=getattr(settings, 'USER_COOKIE_SAMESITE', 'Lax')
             )
@@ -103,7 +107,7 @@ class LoggedLoginView(auth_views.LoginView):
             return ret
         else:
             if 'username' in self.request.POST:
-                logger.warning(smart_str(u"Login failed for user {} from {}".format(self.request.POST.get('username'), request.META.get('REMOTE_ADDR', None))))
+                logger.warning(smart_str(u"Login failed for user {} from {}".format(self.request.POST.get('username'), ip)))
             ret.status_code = 401
             return ret
 
@@ -151,22 +155,23 @@ class APIView(views.APIView):
         Store the Django REST Framework Request object as an attribute on the
         normal Django request, store time the request started.
         """
+        remote_headers = ['REMOTE_ADDR', 'REMOTE_HOST']
+
         self.time_started = time.time()
         if getattr(settings, 'SQL_DEBUG', False):
             self.queries_before = len(connection.queries)
 
+        if 'HTTP_X_TRUSTED_PROXY' in request.environ:
+            if validate_x_trusted_proxy_header(request.environ['HTTP_X_TRUSTED_PROXY']):
+                remote_headers = settings.REMOTE_HOST_HEADERS
+            else:
+                logger.warning("Request appeared to be a trusted upstream proxy but failed to provide a matching shared secret.")
+
         # If there are any custom headers in REMOTE_HOST_HEADERS, make sure
         # they respect the allowed proxy list
-        if all(
-            [
-                settings.PROXY_IP_ALLOWED_LIST,
-                request.environ.get('REMOTE_ADDR') not in settings.PROXY_IP_ALLOWED_LIST,
-                request.environ.get('REMOTE_HOST') not in settings.PROXY_IP_ALLOWED_LIST,
-            ]
-        ):
-            for custom_header in settings.REMOTE_HOST_HEADERS:
-                if custom_header.startswith('HTTP_'):
-                    request.environ.pop(custom_header, None)
+        if settings.PROXY_IP_ALLOWED_LIST:
+            if not is_proxy_in_headers(self.request, settings.PROXY_IP_ALLOWED_LIST, remote_headers):
+                delete_headers_starting_with_http(request, settings.REMOTE_HOST_HEADERS)
 
         drf_request = super(APIView, self).initialize_request(request, *args, **kwargs)
         request.drf_request = drf_request
```
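The `initialize_request` hunk above changes which request headers may be trusted before proxy headers are scrubbed. Below is a standalone sketch of that decision, assuming a WSGI-style `environ` dict; it is illustrative only and not the AWX or django-ansible-base implementation (the proxy list, header names and addresses are made up):

```python
# Illustrative sketch of the header-trust decision made in initialize_request above.
ALLOWED_PROXIES = {"10.0.0.5"}                       # stand-in for settings.PROXY_IP_ALLOWED_LIST
REMOTE_HOST_HEADERS = ["HTTP_X_FORWARDED_FOR", "REMOTE_ADDR", "REMOTE_HOST"]


def proxy_in_headers(environ, allowed, headers):
    """Return True if any value in the inspected headers belongs to an allowed proxy."""
    values = set()
    for header in headers:
        for value in environ.get(header, "").split(","):
            value = value.strip()
            if value:
                values.add(value)
    return bool(values & allowed)


def scrub_untrusted_headers(environ, trusted_proxy_header_valid):
    # Only transport-level values are trusted unless the upstream proxy proved
    # itself with the shared-secret header.
    remote_headers = REMOTE_HOST_HEADERS if trusted_proxy_header_valid else ["REMOTE_ADDR", "REMOTE_HOST"]
    if ALLOWED_PROXIES and not proxy_in_headers(environ, ALLOWED_PROXIES, remote_headers):
        for header in REMOTE_HOST_HEADERS:
            if header.startswith("HTTP_"):
                environ.pop(header, None)
    return environ


environ = {"REMOTE_ADDR": "203.0.113.7", "HTTP_X_FORWARDED_FOR": "198.51.100.2"}
print(scrub_untrusted_headers(environ, trusted_proxy_header_valid=False))
# X-Forwarded-For is dropped because 203.0.113.7 is not an allowed proxy
```

With a valid `X-Trusted-Proxy` shared secret, the forwarded headers would also be consulted and therefore survive the scrub.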
```diff
@@ -211,17 +216,21 @@ class APIView(views.APIView):
             return response
 
         if response.status_code >= 400:
+            ip = get_remote_host(request)  # request.META.get('REMOTE_ADDR', None)
             msg_data = {
                 'status_code': response.status_code,
                 'user_name': request.user,
                 'url_path': request.path,
-                'remote_addr': request.META.get('REMOTE_ADDR', None),
+                'remote_addr': ip,
             }
 
             if type(response.data) is dict:
                 msg_data['error'] = response.data.get('error', response.status_text)
             elif type(response.data) is list:
-                msg_data['error'] = ", ".join(list(map(lambda x: x.get('error', response.status_text), response.data)))
+                if len(response.data) > 0 and isinstance(response.data[0], str):
+                    msg_data['error'] = str(response.data[0])
+                else:
+                    msg_data['error'] = ", ".join(list(map(lambda x: x.get('error', response.status_text), response.data)))
             else:
                 msg_data['error'] = response.status_text
 
```
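The new `msg_data['error']` branch exists because DRF error payloads can be a dict, a list of dicts, or a list of plain strings; calling `.get()` on a string would fail. A minimal sketch of the same selection logic (illustrative, not the AWX code path):

```python
# Sketch: pick a log-friendly error summary from the possible DRF payload shapes.
def summarize_error(data, status_text="Bad Request"):
    if isinstance(data, dict):
        return data.get("error", status_text)
    if isinstance(data, list):
        if data and isinstance(data[0], str):
            return str(data[0])
        return ", ".join(item.get("error", status_text) for item in data)
    return status_text


print(summarize_error({"error": "boom"}))        # boom
print(summarize_error(["field is required"]))    # field is required (old code raised AttributeError here)
print(summarize_error([{"error": "a"}, {}]))     # a, Bad Request
```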
```diff
@@ -61,7 +61,9 @@ import pytz
 from wsgiref.util import FileWrapper
 
 # django-ansible-base
+from ansible_base.lib.utils.requests import get_remote_hosts
 from ansible_base.rbac.models import RoleEvaluation, ObjectRole
+from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType
 
 # AWX
 from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
@@ -128,6 +130,7 @@ from awx.api.views.mixin import (
 from awx.api.pagination import UnifiedJobEventPagination
+from awx.main.utils import set_environ
 
 
 logger = logging.getLogger('awx.api.views')
 
 
@@ -710,16 +713,81 @@ class AuthView(APIView):
         return Response(data)
 
 
+def immutablesharedfields(cls):
+    '''
+    Class decorator to prevent modifying shared resources when ALLOW_LOCAL_RESOURCE_MANAGEMENT setting is set to False.
+
+    Works by overriding these view methods:
+    - create
+    - delete
+    - perform_update
+    create and delete are overridden to raise a PermissionDenied exception.
+    perform_update is overridden to check if any shared fields are being modified,
+    and raise a PermissionDenied exception if so.
+    '''
+    # create instead of perform_create because some of our views
+    # override create instead of perform_create
+    if hasattr(cls, 'create'):
+        cls.original_create = cls.create
+
+        @functools.wraps(cls.create)
+        def create_wrapper(*args, **kwargs):
+            if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
+                return cls.original_create(*args, **kwargs)
+            raise PermissionDenied({'detail': _('Creation of this resource is not allowed. Create this resource via the platform ingress.')})
+
+        cls.create = create_wrapper
+
+    if hasattr(cls, 'delete'):
+        cls.original_delete = cls.delete
+
+        @functools.wraps(cls.delete)
+        def delete_wrapper(*args, **kwargs):
+            if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
+                return cls.original_delete(*args, **kwargs)
+            raise PermissionDenied({'detail': _('Deletion of this resource is not allowed. Delete this resource via the platform ingress.')})
+
+        cls.delete = delete_wrapper
+
+    if hasattr(cls, 'perform_update'):
+        cls.original_perform_update = cls.perform_update
+
+        @functools.wraps(cls.perform_update)
+        def update_wrapper(*args, **kwargs):
+            if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
+                view, serializer = args
+                instance = view.get_object()
+                if instance:
+                    if isinstance(instance, models.Organization):
+                        shared_fields = OrganizationType._declared_fields.keys()
+                    elif isinstance(instance, models.User):
+                        shared_fields = UserType._declared_fields.keys()
+                    elif isinstance(instance, models.Team):
+                        shared_fields = TeamType._declared_fields.keys()
+                    attrs = serializer.validated_data
+                    for field in shared_fields:
+                        if field in attrs and getattr(instance, field) != attrs[field]:
+                            raise PermissionDenied({field: _(f"Cannot change shared field '{field}'. Alter this field via the platform ingress.")})
+            return cls.original_perform_update(*args, **kwargs)
+
+        cls.perform_update = update_wrapper
+
+    return cls
+
+
+@immutablesharedfields
 class TeamList(ListCreateAPIView):
     model = models.Team
     serializer_class = serializers.TeamSerializer
 
 
+@immutablesharedfields
 class TeamDetail(RetrieveUpdateDestroyAPIView):
     model = models.Team
     serializer_class = serializers.TeamSerializer
 
 
+@immutablesharedfields
 class TeamUsersList(BaseUsersList):
     model = models.User
     serializer_class = serializers.UserSerializer
```
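`immutablesharedfields` is an ordinary class decorator: it stores the original view method and swaps in a `functools.wraps` wrapper that short-circuits behind a settings flag. A minimal, framework-free sketch of the same pattern (the flag and exception below are stand-ins for `settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT` and DRF's `PermissionDenied`):

```python
# Sketch of the method-wrapping pattern used by immutablesharedfields.
import functools

ALLOW_LOCAL_RESOURCE_MANAGEMENT = False  # stand-in for the Django setting


class PermissionDenied(Exception):
    pass


def immutable(cls):
    if hasattr(cls, 'create'):
        original_create = cls.create

        @functools.wraps(cls.create)
        def create_wrapper(self, *args, **kwargs):
            if ALLOW_LOCAL_RESOURCE_MANAGEMENT:
                return original_create(self, *args, **kwargs)
            raise PermissionDenied('Create this resource via the platform ingress.')

        cls.create = create_wrapper
    return cls


@immutable
class TeamView:
    def create(self, name):
        return f'created {name}'


try:
    TeamView().create('ops')
except PermissionDenied as exc:
    print(exc)  # Create this resource via the platform ingress.
```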
```diff
@@ -1101,6 +1169,7 @@ class ProjectCopy(CopyAPIView):
     copy_return_serializer_class = serializers.ProjectSerializer
 
 
+@immutablesharedfields
 class UserList(ListCreateAPIView):
     model = models.User
     serializer_class = serializers.UserSerializer
@@ -1271,7 +1340,16 @@ class UserRolesList(SubListAttachDetachAPIView):
         user = get_object_or_400(models.User, pk=self.kwargs['pk'])
         role = get_object_or_400(models.Role, pk=sub_id)
 
-        credential_content_type = ContentType.objects.get_for_model(models.Credential)
+        content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential)  # dict of {model: content_type}
+        # Prevent user to be associated with team/org when ALLOW_LOCAL_RESOURCE_MANAGEMENT is False
+        if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
+            for model in [models.Organization, models.Team]:
+                ct = content_types[model]
+                if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
+                    data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
+                    return Response(data, status=status.HTTP_403_FORBIDDEN)
+
+        credential_content_type = content_types[models.Credential]
         if role.content_type == credential_content_type:
             if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
                 data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
```
```diff
@@ -1343,6 +1421,7 @@ class UserActivityStreamList(SubListAPIView):
         return qs.filter(Q(actor=parent) | Q(user__in=[parent]))
 
 
+@immutablesharedfields
 class UserDetail(RetrieveUpdateDestroyAPIView):
     model = models.User
     serializer_class = serializers.UserSerializer
@@ -2313,6 +2392,14 @@ class JobTemplateList(ListCreateAPIView):
     serializer_class = serializers.JobTemplateSerializer
     always_allow_superuser = False
 
+    def check_permissions(self, request):
+        if request.method == 'POST':
+            can_access, messages = request.user.can_access_with_errors(self.model, 'add', request.data)
+            if not can_access:
+                self.permission_denied(request, message=messages)
+
+        super(JobTemplateList, self).check_permissions(request)
+
 
 class JobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
     model = models.JobTemplate
@@ -2692,12 +2779,7 @@ class JobTemplateCallback(GenericAPIView):
         host for the current request.
         """
         # Find the list of remote host names/IPs to check.
-        remote_hosts = set()
-        for header in settings.REMOTE_HOST_HEADERS:
-            for value in self.request.META.get(header, '').split(','):
-                value = value.strip()
-                if value:
-                    remote_hosts.add(value)
+        remote_hosts = set(get_remote_hosts(self.request))
        # Add the reverse lookup of IP addresses.
         for rh in list(remote_hosts):
             try:
```
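`get_remote_hosts(self.request)` replaces the hand-rolled loop over `REMOTE_HOST_HEADERS`; the unchanged code that follows then widens that set with reverse DNS lookups ("Add the reverse lookup of IP addresses"). A small illustrative sketch of that widening step with a made-up address (not the AWX implementation; lookup failures are simply ignored):

```python
# Sketch: expand a set of remote addresses with their reverse-DNS names.
import socket

remote_hosts = {"203.0.113.7"}            # as returned for the current request
for rh in list(remote_hosts):
    try:
        hostname, aliases, _ips = socket.gethostbyaddr(rh)
        remote_hosts.add(hostname)
        remote_hosts.update(aliases)
    except (socket.herror, socket.gaierror):
        pass                              # unresolvable addresses stay as plain IPs
print(remote_hosts)
```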
```diff
@@ -3037,6 +3119,14 @@ class WorkflowJobTemplateList(ListCreateAPIView):
     serializer_class = serializers.WorkflowJobTemplateSerializer
     always_allow_superuser = False
 
+    def check_permissions(self, request):
+        if request.method == 'POST':
+            can_access, messages = request.user.can_access_with_errors(self.model, 'add', request.data)
+            if not can_access:
+                self.permission_denied(request, message=messages)
+
+        super(WorkflowJobTemplateList, self).check_permissions(request)
+
 
 class WorkflowJobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
     model = models.WorkflowJobTemplate
@@ -4295,7 +4385,15 @@ class RoleUsersList(SubListAttachDetachAPIView):
         user = get_object_or_400(models.User, pk=sub_id)
         role = self.get_parent_object()
 
-        credential_content_type = ContentType.objects.get_for_model(models.Credential)
+        content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential)  # dict of {model: content_type}
+        if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
+            for model in [models.Organization, models.Team]:
+                ct = content_types[model]
+                if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
+                    data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
+                    return Response(data, status=status.HTTP_403_FORBIDDEN)
+
+        credential_content_type = content_types[models.Credential]
         if role.content_type == credential_content_type:
             if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
                 data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
```
```diff
@@ -53,15 +53,18 @@ from awx.api.serializers import (
     CredentialSerializer,
 )
 from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, OrganizationCountsMixin
+from awx.api.views import immutablesharedfields
 
 logger = logging.getLogger('awx.api.views.organization')
 
 
+@immutablesharedfields
 class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
     model = Organization
     serializer_class = OrganizationSerializer
 
 
+@immutablesharedfields
 class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
     model = Organization
     serializer_class = OrganizationSerializer
@@ -104,6 +107,7 @@ class OrganizationInventoriesList(SubListAPIView):
     relationship = 'inventories'
 
 
+@immutablesharedfields
 class OrganizationUsersList(BaseUsersList):
     model = User
     serializer_class = UserSerializer
@@ -112,6 +116,7 @@ class OrganizationUsersList(BaseUsersList):
     ordering = ('username',)
 
 
+@immutablesharedfields
 class OrganizationAdminsList(BaseUsersList):
     model = User
     serializer_class = UserSerializer
@@ -150,6 +155,7 @@ class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
     parent_key = 'organization'
 
 
+@immutablesharedfields
 class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
     model = Team
     serializer_class = TeamSerializer
```
```diff
@@ -598,7 +598,7 @@ class InstanceGroupAccess(BaseAccess):
     - a superuser
     - admin role on the Instance group
     I can add/delete Instance Groups:
-    - a superuser(system administrator)
+    - a superuser(system administrator), because these are not org-scoped
     I can use Instance Groups when I have:
     - use_role on the instance group
     """
@@ -627,7 +627,7 @@ class InstanceGroupAccess(BaseAccess):
     def can_delete(self, obj):
         if obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
             return False
-        return self.user.is_superuser
+        return self.user.has_obj_perm(obj, 'delete')
 
 
 class UserAccess(BaseAccess):
@@ -1387,12 +1387,11 @@ class TeamAccess(BaseAccess):
 class ExecutionEnvironmentAccess(BaseAccess):
     """
     I can see an execution environment when:
-    - I'm a superuser
-    - I'm a member of the same organization
-    - it is a global ExecutionEnvironment
+    - I can see its organization
+    - It is a global ExecutionEnvironment
     I can create/change an execution environment when:
     - I'm a superuser
-    - I'm an admin for the organization(s)
+    - I have an organization or object role that gives access
     """
 
     model = ExecutionEnvironment
@@ -1416,7 +1415,7 @@ class ExecutionEnvironmentAccess(BaseAccess):
             raise PermissionDenied
         if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
             if not self.user.has_obj_perm(obj, 'change'):
-                raise PermissionDenied
+                return False
         else:
             if self.user not in obj.organization.execution_environment_admin_role:
                 raise PermissionDenied
@@ -1424,7 +1423,7 @@ class ExecutionEnvironmentAccess(BaseAccess):
             new_org = get_object_from_data('organization', Organization, data, obj=obj)
             if not new_org or self.user not in new_org.execution_environment_admin_role:
                 return False
-        return self.check_related('organization', Organization, data, obj=obj, mandatory=True, role_field='execution_environment_admin_role')
+        return self.check_related('organization', Organization, data, obj=obj, role_field='execution_environment_admin_role')
 
     def can_delete(self, obj):
         if obj.managed:
@@ -1596,6 +1595,8 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess):
         inventory = get_value(Inventory, 'inventory')
         if inventory:
             if self.user not in inventory.use_role:
+                if self.save_messages:
+                    self.messages['inventory'] = [_('You do not have use permission on Inventory')]
                 return False
 
         if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'):
@@ -1604,11 +1605,16 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess):
         project = get_value(Project, 'project')
         # If the user has admin access to the project (as an org admin), should
         # be able to proceed without additional checks.
-        if project:
-            return self.user in project.use_role
-        else:
+        if not project:
             return False
+
+        if self.user not in project.use_role:
+            if self.save_messages:
+                self.messages['project'] = [_('You do not have use permission on Project')]
+            return False
+
+        return True
 
     @check_superuser
     def can_copy_related(self, obj):
         """
@@ -2092,11 +2098,20 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
         if not data:  # So the browseable API will work
             return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()
 
-        return bool(
-            self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True)
-            and self.check_related('inventory', Inventory, data, role_field='use_role')
-            and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
-        )
+        if not self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True):
+            if data.get('organization', None) is None:
+                self.messages['organization'] = [_('An organization is required to create a workflow job template for normal user')]
+            return False
+
+        if not self.check_related('inventory', Inventory, data, role_field='use_role'):
+            self.messages['inventory'] = [_('You do not have use_role to the inventory')]
+            return False
+
+        if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'):
+            self.messages['execution_environment'] = [_('You do not have read_role to the execution environment')]
+            return False
+
+        return True
 
     def can_copy(self, obj):
         if self.save_messages:
@@ -2628,7 +2643,7 @@ class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
 
 class NotificationTemplateAccess(BaseAccess):
     """
-    I can see/use a notification_template if I have permission to
+    Run standard logic from DAB RBAC
     """
 
     model = NotificationTemplate
@@ -2649,10 +2664,7 @@ class NotificationTemplateAccess(BaseAccess):
 
     @check_superuser
     def can_change(self, obj, data):
-        if obj.organization is None:
-            # only superusers are allowed to edit orphan notification templates
-            return False
-        return self.check_related('organization', Organization, data, obj=obj, role_field='notification_admin_role', mandatory=True)
+        return self.user.has_obj_perm(obj, 'change') and self.check_related('organization', Organization, data, obj=obj, role_field='notification_admin_role')
 
     def can_admin(self, obj, data):
         return self.can_change(obj, data)
@@ -2662,9 +2674,7 @@ class NotificationTemplateAccess(BaseAccess):
 
     @check_superuser
     def can_start(self, obj, validate_license=True):
-        if obj.organization is None:
-            return False
-        return self.user in obj.organization.notification_admin_role
+        return self.can_change(obj, None)
 
 
 class NotificationAccess(BaseAccess):
```
```diff
@@ -14,7 +14,7 @@ __all__ = [
     'STANDARD_INVENTORY_UPDATE_ENV',
 ]
 
-CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'terraform')
+CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'terraform', 'openshift_virtualization')
 PRIVILEGE_ESCALATION_METHODS = [
     ('sudo', _('Sudo')),
     ('su', _('Su')),
```
```diff
@@ -102,7 +102,8 @@ def create_listener_connection():
 
     # Apply overrides specifically for the listener connection
     for k, v in settings.LISTENER_DATABASES.get('default', {}).items():
-        conf[k] = v
+        if k != 'OPTIONS':
+            conf[k] = v
     for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
         conf['OPTIONS'][k] = v
 
```
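The guard added above matters because the override dict is merged shallowly: copying the whole `OPTIONS` value would replace the nested dict and discard keys the default connection already carries. A minimal sketch with made-up settings values:

```python
# Sketch: merge listener overrides without clobbering the nested OPTIONS dict.
conf = {"HOST": "db", "OPTIONS": {"application_name": "awx-listener", "sslmode": "prefer"}}
overrides = {"CONN_MAX_AGE": 0, "OPTIONS": {"sslmode": "require"}}

for k, v in overrides.items():
    if k != "OPTIONS":              # top-level keys are copied as-is
        conf[k] = v
for k, v in overrides.get("OPTIONS", {}).items():
    conf["OPTIONS"][k] = v          # nested keys are merged, not replaced

print(conf)
# {'HOST': 'db', 'OPTIONS': {'application_name': 'awx-listener', 'sslmode': 'require'}, 'CONN_MAX_AGE': 0}
```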
```diff
@@ -252,7 +252,7 @@ class ImplicitRoleField(models.ForeignKey):
         kwargs.setdefault('related_name', '+')
         kwargs.setdefault('null', 'True')
         kwargs.setdefault('editable', False)
-        kwargs.setdefault('on_delete', models.CASCADE)
+        kwargs.setdefault('on_delete', models.SET_NULL)
         super(ImplicitRoleField, self).__init__(*args, **kwargs)
 
     def deconstruct(self):
```
```diff
@@ -17,49 +17,49 @@ class Migration(migrations.Migration):
             model_name='organization',
             name='execute_role',
             field=awx.main.fields.ImplicitRoleField(
-                null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'
+                null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='admin_role', related_name='+', to='main.Role'
             ),
         ),
         migrations.AddField(
             model_name='organization',
             name='job_template_admin_role',
             field=awx.main.fields.ImplicitRoleField(
-                editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'
+                editable=False, null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='admin_role', related_name='+', to='main.Role'
             ),
         ),
         migrations.AddField(
             model_name='organization',
             name='credential_admin_role',
             field=awx.main.fields.ImplicitRoleField(
-                null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'
+                null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='admin_role', related_name='+', to='main.Role'
             ),
         ),
         migrations.AddField(
             model_name='organization',
             name='inventory_admin_role',
             field=awx.main.fields.ImplicitRoleField(
-                null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'
+                null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='admin_role', related_name='+', to='main.Role'
             ),
         ),
         migrations.AddField(
             model_name='organization',
             name='project_admin_role',
             field=awx.main.fields.ImplicitRoleField(
-                null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'
+                null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='admin_role', related_name='+', to='main.Role'
             ),
         ),
         migrations.AddField(
             model_name='organization',
             name='workflow_admin_role',
             field=awx.main.fields.ImplicitRoleField(
-                null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'
+                null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='admin_role', related_name='+', to='main.Role'
             ),
         ),
         migrations.AddField(
             model_name='organization',
             name='notification_admin_role',
             field=awx.main.fields.ImplicitRoleField(
-                null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'
+                null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='admin_role', related_name='+', to='main.Role'
             ),
         ),
         migrations.AlterField(
@@ -67,7 +67,7 @@ class Migration(migrations.Migration):
             name='admin_role',
             field=awx.main.fields.ImplicitRoleField(
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['singleton:system_administrator', 'organization.credential_admin_role'],
                 related_name='+',
                 to='main.Role',
@@ -77,7 +77,7 @@ class Migration(migrations.Migration):
             model_name='inventory',
             name='admin_role',
             field=awx.main.fields.ImplicitRoleField(
-                null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='organization.inventory_admin_role', related_name='+', to='main.Role'
+                null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='organization.inventory_admin_role', related_name='+', to='main.Role'
             ),
         ),
         migrations.AlterField(
@@ -85,7 +85,7 @@ class Migration(migrations.Migration):
             name='admin_role',
             field=awx.main.fields.ImplicitRoleField(
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['organization.project_admin_role', 'singleton:system_administrator'],
                 related_name='+',
                 to='main.Role',
@@ -96,7 +96,7 @@ class Migration(migrations.Migration):
             name='admin_role',
             field=awx.main.fields.ImplicitRoleField(
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['singleton:system_administrator', 'organization.workflow_admin_role'],
                 related_name='+',
                 to='main.Role',
@@ -107,7 +107,7 @@ class Migration(migrations.Migration):
             name='execute_role',
             field=awx.main.fields.ImplicitRoleField(
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['admin_role', 'organization.execute_role'],
                 related_name='+',
                 to='main.Role',
@@ -119,7 +119,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['project.organization.job_template_admin_role', 'inventory.organization.job_template_admin_role'],
                 related_name='+',
                 to='main.Role',
@@ -130,7 +130,7 @@ class Migration(migrations.Migration):
             name='execute_role',
             field=awx.main.fields.ImplicitRoleField(
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['admin_role', 'project.organization.execute_role', 'inventory.organization.execute_role'],
                 related_name='+',
                 to='main.Role',
@@ -142,7 +142,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=[
                     'admin_role',
                     'execute_role',
@@ -18,7 +18,7 @@ class Migration(migrations.Migration):
             model_name='organization',
             name='member_role',
             field=awx.main.fields.ImplicitRoleField(
-                editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role'], related_name='+', to='main.Role'
+                editable=False, null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role=['admin_role'], related_name='+', to='main.Role'
             ),
         ),
         migrations.AlterField(
@@ -27,7 +27,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=[
                     'member_role',
                     'auditor_role',
@@ -36,7 +36,7 @@ class Migration(migrations.Migration):
             model_name='organization',
             name='approval_role',
             field=awx.main.fields.ImplicitRoleField(
-                editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'
+                editable=False, null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='admin_role', related_name='+', to='main.Role'
             ),
             preserve_default='True',
         ),
@@ -46,7 +46,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['organization.approval_role', 'admin_role'],
                 related_name='+',
                 to='main.Role',
@@ -116,7 +116,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=[
                     'member_role',
                     'auditor_role',
@@ -139,7 +139,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['singleton:system_auditor', 'organization.auditor_role', 'execute_role', 'admin_role', 'approval_role'],
                 related_name='+',
                 to='main.Role',
@@ -80,7 +80,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['organization.job_template_admin_role'],
                 related_name='+',
                 to='main.Role',
@@ -92,7 +92,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['admin_role', 'organization.execute_role'],
                 related_name='+',
                 to='main.Role',
@@ -104,7 +104,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'],
                 related_name='+',
                 to='main.Role',
@@ -26,7 +26,7 @@ class Migration(migrations.Migration):
             model_name='organization',
             name='execution_environment_admin_role',
             field=awx.main.fields.ImplicitRoleField(
-                editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'
+                editable=False, null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role='admin_role', related_name='+', to='main.Role'
             ),
             preserve_default='True',
         ),
@@ -17,7 +17,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=[
                     'member_role',
                     'auditor_role',
@@ -17,7 +17,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['singleton:system_administrator'],
                 related_name='+',
                 to='main.role',
@@ -30,7 +30,7 @@ class Migration(migrations.Migration):
             field=awx.main.fields.ImplicitRoleField(
                 editable=False,
                 null='True',
-                on_delete=django.db.models.deletion.CASCADE,
+                on_delete=django.db.models.deletion.SET_NULL,
                 parent_role=['singleton:system_auditor', 'use_role', 'admin_role'],
                 related_name='+',
                 to='main.role',
@@ -41,7 +41,7 @@ class Migration(migrations.Migration):
             model_name='instancegroup',
             name='use_role',
             field=awx.main.fields.ImplicitRoleField(
-                editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role'], related_name='+', to='main.role'
+                editable=False, null='True', on_delete=django.db.models.deletion.SET_NULL, parent_role=['admin_role'], related_name='+', to='main.role'
             ),
             preserve_default='True',
         ),
```
```diff
@@ -0,0 +1,61 @@
+# Generated by Django 4.2.10 on 2024-06-12 19:59
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0193_alter_notification_notification_type_and_more'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='inventorysource',
+            name='source',
+            field=models.CharField(
+                choices=[
+                    ('file', 'File, Directory or Script'),
+                    ('constructed', 'Template additional groups and hostvars at runtime'),
+                    ('scm', 'Sourced from a Project'),
+                    ('ec2', 'Amazon EC2'),
+                    ('gce', 'Google Compute Engine'),
+                    ('azure_rm', 'Microsoft Azure Resource Manager'),
+                    ('vmware', 'VMware vCenter'),
+                    ('satellite6', 'Red Hat Satellite 6'),
+                    ('openstack', 'OpenStack'),
+                    ('rhv', 'Red Hat Virtualization'),
+                    ('controller', 'Red Hat Ansible Automation Platform'),
+                    ('insights', 'Red Hat Insights'),
+                    ('terraform', 'Terraform State'),
+                    ('openshift_virtualization', 'OpenShift Virtualization'),
+                ],
+                default=None,
+                max_length=32,
+            ),
+        ),
+        migrations.AlterField(
+            model_name='inventoryupdate',
+            name='source',
+            field=models.CharField(
+                choices=[
+                    ('file', 'File, Directory or Script'),
+                    ('constructed', 'Template additional groups and hostvars at runtime'),
+                    ('scm', 'Sourced from a Project'),
+                    ('ec2', 'Amazon EC2'),
+                    ('gce', 'Google Compute Engine'),
+                    ('azure_rm', 'Microsoft Azure Resource Manager'),
+                    ('vmware', 'VMware vCenter'),
+                    ('satellite6', 'Red Hat Satellite 6'),
+                    ('openstack', 'OpenStack'),
+                    ('rhv', 'Red Hat Virtualization'),
+                    ('controller', 'Red Hat Ansible Automation Platform'),
+                    ('insights', 'Red Hat Insights'),
+                    ('terraform', 'Terraform State'),
+                    ('openshift_virtualization', 'OpenShift Virtualization'),
+                ],
+                default=None,
+                max_length=32,
+            ),
+        ),
+    ]
```
awx/main/migrations/0195_EE_permissions.py (new file): 26 lines
```diff
@@ -0,0 +1,26 @@
+# Generated by Django 4.2.6 on 2024-06-20 15:55
+
+from django.db import migrations
+
+
+def delete_execution_environment_read_role(apps, schema_editor):
+    permission_classes = [apps.get_model('auth', 'Permission'), apps.get_model('dab_rbac', 'DABPermission')]
+    for permission_cls in permission_classes:
+        ee_read_perm = permission_cls.objects.filter(codename='view_executionenvironment').first()
+        if ee_read_perm:
+            ee_read_perm.delete()
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0194_alter_inventorysource_source_and_more'),
+    ]
+
+    operations = [
+        migrations.AlterModelOptions(
+            name='executionenvironment',
+            options={'default_permissions': ('add', 'change', 'delete'), 'ordering': ('-created',)},
+        ),
+        migrations.RunPython(delete_execution_environment_read_role, migrations.RunPython.noop),
+    ]
```
```diff
@@ -134,8 +134,7 @@ def get_permissions_for_role(role_field, children_map, apps):
 
     # more special cases for those same above special org-level roles
     if role_field.name == 'auditor_role':
-        for codename in ('view_notificationtemplate', 'view_executionenvironment'):
-            perm_list.append(Permission.objects.get(codename=codename))
+        perm_list.append(Permission.objects.get(codename='view_notificationtemplate'))
 
     return perm_list
 
@@ -278,6 +277,7 @@ def setup_managed_role_definitions(apps, schema_editor):
     to_create = {
         'object_admin': '{cls.__name__} Admin',
         'org_admin': 'Organization Admin',
+        'org_audit': 'Organization Audit',
         'org_children': 'Organization {cls.__name__} Admin',
         'special': '{cls.__name__} {action}',
     }
@@ -290,14 +290,15 @@ def setup_managed_role_definitions(apps, schema_editor):
     managed_role_definitions = []
 
     org_perms = set()
-    for cls in permission_registry._registry:
+    for cls in permission_registry.all_registered_models:
         ct = ContentType.objects.get_for_model(cls)
+        cls_name = cls._meta.model_name
         object_perms = set(Permission.objects.filter(content_type=ct))
         # Special case for InstanceGroup which has an organiation field, but is not an organization child object
-        if cls._meta.model_name != 'instancegroup':
+        if cls_name != 'instancegroup':
             org_perms.update(object_perms)
 
-        if 'object_admin' in to_create and cls != Organization:
+        if 'object_admin' in to_create and cls_name != 'organization':
             indiv_perms = object_perms.copy()
             add_perms = [perm for perm in indiv_perms if perm.codename.startswith('add_')]
             if add_perms:
@@ -310,7 +311,7 @@ def setup_managed_role_definitions(apps, schema_editor):
                 )
             )
 
-        if 'org_children' in to_create and cls != Organization:
+        if 'org_children' in to_create and (cls_name not in ('organization', 'instancegroup', 'team')):
             org_child_perms = object_perms.copy()
             org_child_perms.add(Permission.objects.get(codename='view_organization'))
 
@@ -327,7 +328,8 @@ def setup_managed_role_definitions(apps, schema_editor):
         if 'special' in to_create:
             special_perms = []
             for perm in object_perms:
-                if perm.codename.split('_')[0] not in ('add', 'change', 'update', 'delete', 'view'):
+                # Organization auditor is handled separately
+                if perm.codename.split('_')[0] not in ('add', 'change', 'delete', 'view', 'audit'):
                     special_perms.append(perm)
             for perm in special_perms:
                 action = perm.codename.split('_')[0]
@@ -353,6 +355,19 @@ def setup_managed_role_definitions(apps, schema_editor):
                 )
             )
 
+    if 'org_audit' in to_create:
+        audit_permissions = [perm for perm in org_perms if perm.codename.startswith('view_')]
+        audit_permissions.append(Permission.objects.get(codename='audit_organization'))
+        managed_role_definitions.append(
+            get_or_create_managed(
+                to_create['org_audit'].format(cls=Organization),
+                'Has permission to view all objects inside of a single organization',
+                org_ct,
+                audit_permissions,
+                RoleDefinition,
+            )
+        )
+
     unexpected_role_definitions = RoleDefinition.objects.filter(managed=True).exclude(pk__in=[rd.pk for rd in managed_role_definitions])
     for role_definition in unexpected_role_definitions:
         logger.info(f'Deleting old managed role definition {role_definition.name}, pk={role_definition.pk}')
```
```diff
@@ -176,17 +176,17 @@ pre_delete.connect(cleanup_created_modified_by, sender=User)
 
 @property
 def user_get_organizations(user):
-    return Organization.objects.filter(member_role__members=user)
+    return Organization.access_qs(user, 'member')
 
 
 @property
 def user_get_admin_of_organizations(user):
-    return Organization.objects.filter(admin_role__members=user)
+    return Organization.access_qs(user, 'change')
 
 
 @property
 def user_get_auditor_of_organizations(user):
-    return Organization.objects.filter(auditor_role__members=user)
+    return Organization.access_qs(user, 'audit')
 
 
 @property
```
```diff
@@ -21,6 +21,10 @@ from django.conf import settings
 from django.utils.encoding import force_str
 from django.utils.functional import cached_property
 from django.utils.timezone import now
+from django.contrib.auth.models import User
+
+# DRF
+from rest_framework.serializers import ValidationError as DRFValidationError
 
 # AWX
 from awx.api.versioning import reverse
@@ -41,6 +45,7 @@ from awx.main.models.rbac import (
     ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
     ROLE_SINGLETON_SYSTEM_AUDITOR,
 )
+from awx.main.models import Team, Organization
 from awx.main.utils import encrypt_field
 from . import injectors as builtin_injectors
 
@@ -315,6 +320,15 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
         else:
             raise ValueError('{} is not a dynamic input field'.format(field_name))
 
+    def validate_role_assignment(self, actor, role_definition):
+        if isinstance(actor, User):
+            if actor.is_superuser or Organization.access_qs(actor, 'change').filter(id=self.organization.id).exists():
+                return
+        if isinstance(actor, Team):
+            if actor.organization == self.organization:
+                return
+        raise DRFValidationError({'detail': _(f"You cannot grant credential access to a {actor._meta.object_name} not in the credentials' organization")})
+
 
 class CredentialType(CommonModelNameNotUnique):
     """
```
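A plain-Python sketch of the rule `validate_role_assignment` enforces: a credential role may only be handed to a user who administers the credential's organization (or a superuser), or to a team that belongs to that organization. All names below are illustrative stand-ins, not the AWX models:

```python
# Sketch: the cross-organization check behind credential role assignment.
class ValidationError(Exception):
    pass


def check_credential_assignment(credential_org, actor_kind, actor_orgs, is_superuser=False):
    # actor_orgs stands in for the organizations the actor administers (users)
    # or belongs to (teams).
    if actor_kind == "user" and (is_superuser or credential_org in actor_orgs):
        return
    if actor_kind == "team" and credential_org in actor_orgs:
        return
    raise ValidationError(f"You cannot grant credential access to a {actor_kind} not in the credentials' organization")


check_credential_assignment("Default", "team", {"Default"})            # allowed
try:
    check_credential_assignment("Default", "user", {"Engineering"})
except ValidationError as exc:
    print(exc)                                                          # rejected
```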
@@ -4,11 +4,12 @@ import datetime
|
||||
from datetime import timezone
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
import itertools
|
||||
import time
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from django.db import models, DatabaseError
|
||||
from django.db import models, DatabaseError, transaction
|
||||
from django.db.models.functions import Cast
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
@@ -605,19 +606,23 @@ class JobEvent(BasePlaybookEvent):
|
||||
def _update_host_metrics(updated_hosts_list):
|
||||
from awx.main.models import HostMetric # circular import
|
||||
|
||||
# bulk-create
|
||||
current_time = now()
|
||||
HostMetric.objects.bulk_create(
|
||||
[HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
|
||||
)
|
||||
# bulk-update
|
||||
batch_start, batch_size = 0, 1000
|
||||
while batch_start <= len(updated_hosts_list):
|
||||
batched_host_list = updated_hosts_list[batch_start : (batch_start + batch_size)]
|
||||
HostMetric.objects.filter(hostname__in=batched_host_list).update(
|
||||
last_automation=current_time, automated_counter=models.F('automated_counter') + 1, deleted=False
|
||||
)
|
||||
batch_start += batch_size
|
||||
|
||||
# FUTURE:
|
||||
# - Hand-rolled implementation of itertools.batched(), introduced in Python 3.12. Replace.
|
||||
# - Ability to do ORM upserts *may* have been introduced in Django 5.0.
|
||||
# See the entry about `create_defaults` in https://docs.djangoproject.com/en/5.0/releases/5.0/#models.
|
||||
# Hopefully this will be fully ready for batch use by 5.2 LTS.
|
||||
|
||||
args = [iter(updated_hosts_list)] * 500
|
||||
for hosts in itertools.zip_longest(*args):
|
||||
with transaction.atomic():
|
||||
HostMetric.objects.bulk_create(
|
||||
[HostMetric(hostname=hostname, last_automation=current_time) for hostname in hosts if hostname is not None], ignore_conflicts=True
|
||||
)
|
||||
HostMetric.objects.filter(hostname__in=hosts).update(
|
||||
last_automation=current_time, automated_counter=models.F('automated_counter') + 1, deleted=False
|
||||
)
|
||||
|
||||
@property
|
||||
def job_verbosity(self):
|
||||
|
||||
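The FUTURE comment above anticipates replacing the hand-rolled zip_longest batching with itertools.batched() once Python 3.12 is the floor. A minimal sketch of the equivalence, illustrative only and not part of the change (the helper name batched_compat is hypothetical):

```python
import itertools

def batched_compat(iterable, n):
    # Same trick as the hunk above: repeating one iterator n times makes
    # zip_longest pull n consecutive items per batch, padding the final
    # batch with None; the padding is dropped before yielding.
    args = [iter(iterable)] * n
    for chunk in itertools.zip_longest(*args):
        yield [item for item in chunk if item is not None]

# On Python 3.12+ this is roughly equivalent to itertools.batched(iterable, n).
hostnames = [f'host-{i}' for i in range(1201)]
assert [len(batch) for batch in batched_compat(hostnames, 500)] == [500, 500, 201]
```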
@@ -1,6 +1,8 @@
|
||||
from django.db import models
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from rest_framework.exceptions import ValidationError
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.base import CommonModel
|
||||
from awx.main.validators import validate_container_image_name
|
||||
@@ -12,6 +14,8 @@ __all__ = ['ExecutionEnvironment']
|
||||
class ExecutionEnvironment(CommonModel):
|
||||
class Meta:
|
||||
ordering = ('-created',)
|
||||
# Remove view permission, as a temporary solution, defer to organization read permission
|
||||
default_permissions = ('add', 'change', 'delete')
|
||||
|
||||
PULL_CHOICES = [
|
||||
('always', _("Always pull container before running.")),
|
||||
@@ -53,3 +57,16 @@ class ExecutionEnvironment(CommonModel):
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:execution_environment_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def validate_role_assignment(self, actor, role_definition):
|
||||
if self.managed:
|
||||
raise ValidationError({'object_id': _('Can not assign object roles to managed Execution Environments')})
|
||||
if self.organization_id is None:
|
||||
raise ValidationError({'object_id': _('Can not assign object roles to global Execution Environments')})
|
||||
|
||||
if actor._meta.model_name == 'user' and (not actor.has_obj_perm(self.organization, 'view')):
|
||||
raise ValidationError({'user': _('User must have view permission to Execution Environment organization')})
|
||||
if actor._meta.model_name == 'team':
|
||||
organization_cls = self._meta.get_field('organization').related_model
|
||||
if self.organization not in organization_cls.access_qs(actor, 'view'):
|
||||
raise ValidationError({'team': _('Team must have view permission to Execution Environment organization')})
|
||||
|
||||
@@ -933,6 +933,7 @@ class InventorySourceOptions(BaseModel):
|
||||
('controller', _('Red Hat Ansible Automation Platform')),
|
||||
('insights', _('Red Hat Insights')),
|
||||
('terraform', _('Terraform State')),
|
||||
('openshift_virtualization', _('OpenShift Virtualization')),
|
||||
]
|
||||
|
||||
# From the options of the Django management base command
|
||||
@@ -1042,7 +1043,7 @@ class InventorySourceOptions(BaseModel):
|
||||
def cloud_credential_validation(source, cred):
|
||||
if not source:
|
||||
return None
|
||||
if cred and source not in ('custom', 'scm'):
|
||||
if cred and source not in ('custom', 'scm', 'openshift_virtualization'):
|
||||
# If a credential was provided, it's important that it matches
|
||||
# the actual inventory source being used (Amazon requires Amazon
|
||||
# credentials; Rackspace requires Rackspace credentials; etc...)
|
||||
@@ -1051,12 +1052,14 @@ class InventorySourceOptions(BaseModel):
|
||||
# Allow an EC2 source to omit the credential. If Tower is running on
|
||||
# an EC2 instance with an IAM Role assigned, boto will use credentials
|
||||
# from the instance metadata instead of those explicitly provided.
|
||||
elif source in CLOUD_PROVIDERS and source != 'ec2':
|
||||
elif source in CLOUD_PROVIDERS and source not in ['ec2', 'openshift_virtualization']:
|
||||
return _('Credential is required for a cloud source.')
|
||||
elif source == 'custom' and cred and cred.credential_type.kind in ('scm', 'ssh', 'insights', 'vault'):
|
||||
return _('Credentials of type machine, source control, insights and vault are disallowed for custom inventory sources.')
|
||||
elif source == 'scm' and cred and cred.credential_type.kind in ('insights', 'vault'):
|
||||
return _('Credentials of type insights and vault are disallowed for scm inventory sources.')
|
||||
elif source == 'openshift_virtualization' and cred and cred.credential_type.kind != 'kubernetes':
|
||||
return _('Credentials of type kubernetes is required for openshift_virtualization inventory sources.')
|
||||
return None
|
||||
|
||||
def get_cloud_credential(self):
|
||||
@@ -1693,6 +1696,16 @@ class insights(PluginFileInjector):
|
||||
use_fqcn = True
|
||||
|
||||
|
||||
class openshift_virtualization(PluginFileInjector):
|
||||
plugin_name = 'kubevirt'
|
||||
base_injector = 'template'
|
||||
namespace = 'kubevirt'
|
||||
collection = 'core'
|
||||
downstream_namespace = 'redhat'
|
||||
downstream_collection = 'openshift_virtualization'
|
||||
use_fqcn = True
|
||||
|
||||
|
||||
class constructed(PluginFileInjector):
|
||||
plugin_name = 'constructed'
|
||||
namespace = 'ansible'
|
||||
|
||||
@@ -591,14 +591,20 @@ def get_role_from_object_role(object_role):
|
||||
role_name = role_name.lower()
|
||||
model_cls = apps.get_model('main', target_model_name)
|
||||
target_model_name = get_type_for_model(model_cls)
|
||||
|
||||
# exception cases completely specific to one model naming convention
|
||||
if target_model_name == 'notification_template':
|
||||
target_model_name = 'notification' # total exception
|
||||
target_model_name = 'notification'
|
||||
elif target_model_name == 'workflow_job_template':
|
||||
target_model_name = 'workflow'
|
||||
|
||||
role_name = f'{target_model_name}_admin_role'
|
||||
elif rd.name.endswith(' Admin'):
|
||||
# cases like "project-admin"
|
||||
role_name = 'admin_role'
|
||||
elif rd.name == 'Organization Audit':
|
||||
role_name = 'auditor_role'
|
||||
else:
|
||||
print(rd.name)
|
||||
model_name, role_name = rd.name.split()
|
||||
role_name = role_name.lower()
|
||||
role_name += '_role'
|
||||
|
||||
@@ -17,7 +17,7 @@ from collections import OrderedDict
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import models, connection
|
||||
from django.db import models, connection, transaction
|
||||
from django.core.exceptions import NON_FIELD_ERRORS
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.utils.timezone import now
|
||||
@@ -31,6 +31,7 @@ from rest_framework.exceptions import ParseError
|
||||
from polymorphic.models import PolymorphicModel
|
||||
|
||||
from ansible_base.lib.utils.models import prevent_search, get_type_for_model
|
||||
from ansible_base.rbac import permission_registry
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
|
||||
@@ -197,9 +198,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
||||
|
||||
@classmethod
|
||||
def _submodels_with_roles(cls):
|
||||
ujt_classes = [c for c in cls.__subclasses__() if c._meta.model_name not in ['inventorysource', 'systemjobtemplate']]
|
||||
ct_dict = ContentType.objects.get_for_models(*ujt_classes)
|
||||
return [ct.id for ct in ct_dict.values()]
|
||||
return [c for c in cls.__subclasses__() if permission_registry.is_registered(c)]
|
||||
|
||||
@classmethod
|
||||
def accessible_pk_qs(cls, accessor, role_field):
|
||||
@@ -215,8 +214,16 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
||||
|
||||
action = to_permissions[role_field]
|
||||
|
||||
# Special condition for super auditor
|
||||
role_subclasses = cls._submodels_with_roles()
|
||||
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
|
||||
all_codenames = {f'{action}_{cls._meta.model_name}' for cls in role_subclasses}
|
||||
if not (all_codenames - accessor.singleton_permissions()):
|
||||
qs = cls.objects.filter(polymorphic_ctype__in=role_cts)
|
||||
return qs.values_list('id', flat=True)
|
||||
|
||||
return (
|
||||
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__startswith=action, content_type_id__in=cls._submodels_with_roles())
|
||||
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in role_cts])
|
||||
.values_list('object_id')
|
||||
.distinct()
|
||||
)
|
||||
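To illustrate the super-auditor shortcut above: when the accessor's system-wide (singleton) permissions already cover every codename required for the role-bearing subclasses, the set difference is empty and the per-object RoleEvaluation query can be skipped. A toy example with made-up codenames:

```python
# Codenames required for action == 'view' across the registered subclasses,
# as assembled by the hunk above.
all_codenames = {'view_jobtemplate', 'view_project', 'view_workflowjobtemplate'}

# A system-wide auditor's singleton permissions would include at least these.
singleton_permissions = {'view_jobtemplate', 'view_project', 'view_workflowjobtemplate', 'view_inventory'}

# An empty difference means every subclass is already covered, so the method
# returns the unfiltered queryset instead of joining against RoleEvaluation.
assert not (all_codenames - singleton_permissions)
```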
@@ -273,7 +280,14 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
||||
if new_next_schedule:
|
||||
if new_next_schedule.pk == self.next_schedule_id and new_next_schedule.next_run == self.next_job_run:
|
||||
return # no-op, common for infrequent schedules
|
||||
self.next_schedule = new_next_schedule
|
||||
|
||||
# If in a transaction, use select_for_update to lock the next schedule row, which
|
||||
# prevents a race condition if new_next_schedule is deleted elsewhere during this transaction
|
||||
if transaction.get_autocommit():
|
||||
self.next_schedule = related_schedules.first()
|
||||
else:
|
||||
self.next_schedule = related_schedules.select_for_update().first()
|
||||
|
||||
self.next_job_run = new_next_schedule.next_run
|
||||
self.save(update_fields=['next_schedule', 'next_job_run'])
|
||||
|
||||
|
||||
@@ -138,7 +138,8 @@ class TaskBase:
|
||||
|
||||
# Lock
|
||||
with task_manager_bulk_reschedule():
|
||||
with advisory_lock(f"{self.prefix}_lock", wait=False) as acquired:
|
||||
lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000 # convert to milliseconds
|
||||
with advisory_lock(f"{self.prefix}_lock", lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired:
|
||||
with transaction.atomic():
|
||||
if acquired is False:
|
||||
logger.debug(f"Not running {self.prefix} scheduler, another task holds lock")
|
||||
|
||||
@@ -36,6 +36,9 @@ import ansible_runner.cleanup
|
||||
# dateutil
|
||||
from dateutil.parser import parse as parse_date
|
||||
|
||||
# django-ansible-base
|
||||
from ansible_base.resource_registry.tasks.sync import SyncExecutor
|
||||
|
||||
# AWX
|
||||
from awx import __version__ as awx_application_version
|
||||
from awx.main.access import access_registry
|
||||
@@ -712,7 +715,8 @@ def awx_k8s_reaper():
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def awx_periodic_scheduler():
|
||||
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
|
||||
lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000
|
||||
with advisory_lock('awx_periodic_scheduler_lock', lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired:
|
||||
if acquired is False:
|
||||
logger.debug("Not running periodic scheduler, another task holds lock")
|
||||
return
|
||||
@@ -964,3 +968,17 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, p
|
||||
permission_check_func(creater, copy_mapping.values())
|
||||
if isinstance(new_obj, Inventory):
|
||||
update_inventory_computed_fields.delay(new_obj.id)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def periodic_resource_sync():
|
||||
if not getattr(settings, 'RESOURCE_SERVER', None):
|
||||
logger.debug("Skipping periodic resource_sync, RESOURCE_SERVER not configured")
|
||||
return
|
||||
|
||||
with advisory_lock('periodic_resource_sync', wait=False) as acquired:
|
||||
if acquired is False:
|
||||
logger.debug("Not running periodic_resource_sync, another task holds lock")
|
||||
return
|
||||
|
||||
SyncExecutor().run()
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"K8S_AUTH_HOST": "https://foo.invalid",
|
||||
"K8S_AUTH_API_KEY": "fooo",
|
||||
"K8S_AUTH_VERIFY_SSL": "False"
|
||||
}
|
||||
@@ -1,22 +1,30 @@
|
||||
import pytest
|
||||
from unittest import mock
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
from django.test.utils import override_settings
|
||||
|
||||
from ansible_base.jwt_consumer.common.util import generate_x_trusted_proxy_header
|
||||
from ansible_base.lib.testing.fixtures import rsa_keypair_factory, rsa_keypair # noqa: F401; pylint: disable=unused-import
|
||||
|
||||
|
||||
class HeaderTrackingMiddleware(object):
|
||||
def __init__(self):
|
||||
self.environ = {}
|
||||
|
||||
def process_request(self, request):
|
||||
pass
|
||||
|
||||
def process_response(self, request, response):
|
||||
self.environ = request.environ
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_proxy_ip_allowed(get, patch, admin):
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'system'})
|
||||
patch(url, user=admin, data={'REMOTE_HOST_HEADERS': ['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST']})
|
||||
|
||||
class HeaderTrackingMiddleware(object):
|
||||
environ = {}
|
||||
|
||||
def process_request(self, request):
|
||||
pass
|
||||
|
||||
def process_response(self, request, response):
|
||||
self.environ = request.environ
|
||||
|
||||
# By default, `PROXY_IP_ALLOWED_LIST` is disabled, so custom `REMOTE_HOST_HEADERS`
|
||||
# should just pass through
|
||||
middleware = HeaderTrackingMiddleware()
|
||||
@@ -45,6 +53,51 @@ def test_proxy_ip_allowed(get, patch, admin):
|
||||
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestTrustedProxyAllowListIntegration:
|
||||
@pytest.fixture
|
||||
def url(self, patch, admin):
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'system'})
|
||||
patch(url, user=admin, data={'REMOTE_HOST_HEADERS': ['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST']})
|
||||
patch(url, user=admin, data={'PROXY_IP_ALLOWED_LIST': ['my.proxy.example.org']})
|
||||
return url
|
||||
|
||||
@pytest.fixture
|
||||
def middleware(self):
|
||||
return HeaderTrackingMiddleware()
|
||||
|
||||
def test_x_trusted_proxy_valid_signature(self, get, admin, rsa_keypair, url, middleware): # noqa: F811
|
||||
# Headers should NOT get deleted
|
||||
headers = {
|
||||
'HTTP_X_TRUSTED_PROXY': generate_x_trusted_proxy_header(rsa_keypair.private),
|
||||
'HTTP_X_FROM_THE_LOAD_BALANCER': 'some-actual-ip',
|
||||
}
|
||||
with mock.patch('ansible_base.jwt_consumer.common.cache.JWTCache.get_key_from_cache', lambda self: None):
|
||||
with override_settings(ANSIBLE_BASE_JWT_KEY=rsa_keypair.public, PROXY_IP_ALLOWED_LIST=[]):
|
||||
get(url, user=admin, middleware=middleware, **headers)
|
||||
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
|
||||
|
||||
def test_x_trusted_proxy_invalid_signature(self, get, admin, url, patch, middleware):
|
||||
# Headers should NOT get deleted
|
||||
headers = {
|
||||
'HTTP_X_TRUSTED_PROXY': 'DEAD-BEEF',
|
||||
'HTTP_X_FROM_THE_LOAD_BALANCER': 'some-actual-ip',
|
||||
}
|
||||
with override_settings(PROXY_IP_ALLOWED_LIST=[]):
|
||||
get(url, user=admin, middleware=middleware, **headers)
|
||||
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
|
||||
|
||||
def test_x_trusted_proxy_invalid_signature_valid_proxy(self, get, admin, url, middleware):
|
||||
# A valid explicit proxy SHOULD result in sensitive headers NOT being deleted, regardless of the trusted proxy signature results
|
||||
headers = {
|
||||
'HTTP_X_TRUSTED_PROXY': 'DEAD-BEEF',
|
||||
'REMOTE_ADDR': 'my.proxy.example.org',
|
||||
'HTTP_X_FROM_THE_LOAD_BALANCER': 'some-actual-ip',
|
||||
}
|
||||
get(url, user=admin, middleware=middleware, **headers)
|
||||
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestDeleteViews:
|
||||
def test_sublist_delete_permission_check(self, inventory_source, host, rando, delete):
|
||||
|
||||
66
awx/main/tests/functional/api/test_immutablesharedfields.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import pytest
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import Organization
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestImmutableSharedFields:
|
||||
@pytest.fixture(autouse=True)
|
||||
def configure_settings(self, settings):
|
||||
settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT = False
|
||||
|
||||
def test_create_raises_permission_denied(self, admin_user, post):
|
||||
orgA = Organization.objects.create(name='orgA')
|
||||
resp = post(
|
||||
url=reverse('api:team_list'),
|
||||
data={'name': 'teamA', 'organization': orgA.id},
|
||||
user=admin_user,
|
||||
expect=403,
|
||||
)
|
||||
assert "Creation of this resource is not allowed" in resp.data['detail']
|
||||
|
||||
def test_perform_delete_raises_permission_denied(self, admin_user, delete):
|
||||
orgA = Organization.objects.create(name='orgA')
|
||||
team = orgA.teams.create(name='teamA')
|
||||
resp = delete(
|
||||
url=reverse('api:team_detail', kwargs={'pk': team.id}),
|
||||
user=admin_user,
|
||||
expect=403,
|
||||
)
|
||||
assert "Deletion of this resource is not allowed" in resp.data['detail']
|
||||
|
||||
def test_perform_update(self, admin_user, patch):
|
||||
orgA = Organization.objects.create(name='orgA')
|
||||
team = orgA.teams.create(name='teamA')
|
||||
# allow patching non-shared fields
|
||||
patch(
|
||||
url=reverse('api:team_detail', kwargs={'pk': team.id}),
|
||||
data={"description": "can change this field"},
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
orgB = Organization.objects.create(name='orgB')
|
||||
# prevent patching shared fields
|
||||
resp = patch(url=reverse('api:team_detail', kwargs={'pk': team.id}), data={"organization": orgB.id}, user=admin_user, expect=403)
|
||||
assert "Cannot change shared field" in resp.data['organization']
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'role',
|
||||
['admin_role', 'member_role'],
|
||||
)
|
||||
@pytest.mark.parametrize('resource', ['organization', 'team'])
|
||||
def test_prevent_assigning_member_to_organization_or_team(self, admin_user, post, resource, role):
|
||||
orgA = Organization.objects.create(name='orgA')
|
||||
if resource == 'organization':
|
||||
role = getattr(orgA, role)
|
||||
elif resource == 'team':
|
||||
teamA = orgA.teams.create(name='teamA')
|
||||
role = getattr(teamA, role)
|
||||
resp = post(
|
||||
url=reverse('api:user_roles_list', kwargs={'pk': admin_user.id}),
|
||||
data={'id': role.id},
|
||||
user=admin_user,
|
||||
expect=403,
|
||||
)
|
||||
assert f"Cannot directly modify user membership to {resource}." in resp.data['msg']
|
||||
@@ -32,13 +32,6 @@ def node_type_instance():
|
||||
return fn
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def instance_group(job_factory):
|
||||
ig = InstanceGroup(name="east")
|
||||
ig.save()
|
||||
return ig
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def containerized_instance_group(instance_group, kube_credential):
|
||||
ig = InstanceGroup(name="container")
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import pytest
|
||||
from unittest import mock
|
||||
|
||||
# AWX
|
||||
from awx.api.serializers import JobTemplateSerializer
|
||||
@@ -8,10 +9,15 @@ from awx.main.migrations import _save_password_keys as save_password_keys
|
||||
|
||||
# Django
|
||||
from django.apps import apps
|
||||
from django.test.utils import override_settings
|
||||
|
||||
# DRF
|
||||
from rest_framework.exceptions import ValidationError
|
||||
|
||||
# DAB
|
||||
from ansible_base.jwt_consumer.common.util import generate_x_trusted_proxy_header
|
||||
from ansible_base.lib.testing.fixtures import rsa_keypair_factory, rsa_keypair # noqa: F401; pylint: disable=unused-import
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
@@ -369,3 +375,113 @@ def test_job_template_missing_inventory(project, inventory, admin_user, post):
|
||||
)
|
||||
assert r.status_code == 400
|
||||
assert "Cannot start automatically, an inventory is required." in str(r.data)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestJobTemplateCallbackProxyIntegration:
|
||||
"""
|
||||
Test the interaction of provision job template callback feature and:
|
||||
settings.PROXY_IP_ALLOWED_LIST
|
||||
x-trusted-proxy http header
|
||||
"""
|
||||
|
||||
@pytest.fixture
|
||||
def job_template(self, inventory, project):
|
||||
jt = JobTemplate.objects.create(name='test-jt', inventory=inventory, project=project, playbook='helloworld.yml', host_config_key='abcd')
|
||||
return jt
|
||||
|
||||
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=['my.proxy.example.org'])
|
||||
def test_host_not_found(self, job_template, admin_user, post, rsa_keypair): # noqa: F811
|
||||
job_template.inventory.hosts.create(name='foobar')
|
||||
|
||||
headers = {
|
||||
'HTTP_X_FROM_THE_LOAD_BALANCER': 'baz',
|
||||
'REMOTE_HOST': 'baz',
|
||||
'REMOTE_ADDR': 'baz',
|
||||
}
|
||||
r = post(
|
||||
url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}), data={'host_config_key': 'abcd'}, user=admin_user, expect=400, **headers
|
||||
)
|
||||
assert r.data['msg'] == 'No matching host could be found!'
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'headers, expected',
|
||||
(
|
||||
pytest.param(
|
||||
{
|
||||
'HTTP_X_FROM_THE_LOAD_BALANCER': 'foobar',
|
||||
'REMOTE_HOST': 'my.proxy.example.org',
|
||||
},
|
||||
201,
|
||||
),
|
||||
pytest.param(
|
||||
{
|
||||
'HTTP_X_FROM_THE_LOAD_BALANCER': 'foobar',
|
||||
'REMOTE_HOST': 'not-my-proxy.org',
|
||||
},
|
||||
400,
|
||||
),
|
||||
),
|
||||
)
|
||||
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=['my.proxy.example.org'])
|
||||
def test_proxy_ip_allowed_list(self, job_template, admin_user, post, headers, expected): # noqa: F811
|
||||
job_template.inventory.hosts.create(name='my.proxy.example.org')
|
||||
|
||||
post(
|
||||
url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
|
||||
data={'host_config_key': 'abcd'},
|
||||
user=admin_user,
|
||||
expect=expected,
|
||||
**headers
|
||||
)
|
||||
|
||||
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=[])
|
||||
def test_no_proxy_trust_all_headers(self, job_template, admin_user, post):
|
||||
job_template.inventory.hosts.create(name='foobar')
|
||||
|
||||
headers = {
|
||||
'HTTP_X_FROM_THE_LOAD_BALANCER': 'foobar',
|
||||
'REMOTE_ADDR': 'bar',
|
||||
'REMOTE_HOST': 'baz',
|
||||
}
|
||||
post(url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}), data={'host_config_key': 'abcd'}, user=admin_user, expect=201, **headers)
|
||||
|
||||
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=['my.proxy.example.org'])
|
||||
def test_trusted_proxy(self, job_template, admin_user, post, rsa_keypair): # noqa: F811
|
||||
job_template.inventory.hosts.create(name='foobar')
|
||||
|
||||
headers = {
|
||||
'HTTP_X_TRUSTED_PROXY': generate_x_trusted_proxy_header(rsa_keypair.private),
|
||||
'HTTP_X_FROM_THE_LOAD_BALANCER': 'foobar, my.proxy.example.org',
|
||||
}
|
||||
|
||||
with mock.patch('ansible_base.jwt_consumer.common.cache.JWTCache.get_key_from_cache', lambda self: None):
|
||||
with override_settings(ANSIBLE_BASE_JWT_KEY=rsa_keypair.public):
|
||||
post(
|
||||
url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
|
||||
data={'host_config_key': 'abcd'},
|
||||
user=admin_user,
|
||||
expect=201,
|
||||
**headers
|
||||
)
|
||||
|
||||
@override_settings(REMOTE_HOST_HEADERS=['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST'], PROXY_IP_ALLOWED_LIST=['my.proxy.example.org'])
|
||||
def test_trusted_proxy_host_not_found(self, job_template, admin_user, post, rsa_keypair): # noqa: F811
|
||||
job_template.inventory.hosts.create(name='foobar')
|
||||
|
||||
headers = {
|
||||
'HTTP_X_TRUSTED_PROXY': generate_x_trusted_proxy_header(rsa_keypair.private),
|
||||
'HTTP_X_FROM_THE_LOAD_BALANCER': 'baz, my.proxy.example.org',
|
||||
'REMOTE_ADDR': 'bar',
|
||||
'REMOTE_HOST': 'baz',
|
||||
}
|
||||
|
||||
with mock.patch('ansible_base.jwt_consumer.common.cache.JWTCache.get_key_from_cache', lambda self: None):
|
||||
with override_settings(ANSIBLE_BASE_JWT_KEY=rsa_keypair.public):
|
||||
post(
|
||||
url=reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
|
||||
data={'host_config_key': 'abcd'},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
**headers
|
||||
)
|
||||
|
||||
@@ -20,7 +20,7 @@ from awx.main.migrations._dab_rbac import setup_managed_role_definitions
|
||||
|
||||
# AWX
|
||||
from awx.main.models.projects import Project
|
||||
from awx.main.models.ha import Instance
|
||||
from awx.main.models.ha import Instance, InstanceGroup
|
||||
|
||||
from rest_framework.test import (
|
||||
APIRequestFactory,
|
||||
@@ -92,6 +92,11 @@ def deploy_jobtemplate(project, inventory, credential):
|
||||
return jt
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def execution_environment():
|
||||
return ExecutionEnvironment.objects.create(name="test-ee", description="test-ee", managed=True)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def setup_managed_roles():
|
||||
"Run the migration script to pre-create managed role definitions"
|
||||
@@ -730,6 +735,11 @@ def jt_linked(organization, project, inventory, machine_credential, credential,
|
||||
return jt
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def instance_group():
|
||||
return InstanceGroup.objects.create(name="east")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_job_template(organization):
|
||||
wjt = WorkflowJobTemplate.objects.create(name='test-workflow_job_template', organization=organization)
|
||||
|
||||
@@ -109,3 +109,17 @@ def test_team_indirect_access(get, team, admin_user, inventory):
|
||||
assert len(by_username['u1']['summary_fields']['indirect_access']) == 0
|
||||
access_entry = by_username['u1']['summary_fields']['direct_access'][0]
|
||||
assert sorted(access_entry['descendant_roles']) == sorted(['adhoc_role', 'use_role', 'update_role', 'read_role', 'admin_role'])
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_workflow_access_list(workflow_job_template, alice, bob, setup_managed_roles, get, admin_user):
|
||||
"""Basic verification that WFJT access_list is functional"""
|
||||
workflow_job_template.admin_role.members.add(alice)
|
||||
workflow_job_template.organization.workflow_admin_role.members.add(bob)
|
||||
|
||||
url = reverse('api:workflow_job_template_access_list', kwargs={'pk': workflow_job_template.pk})
|
||||
for u in (alice, bob, admin_user):
|
||||
response = get(url, user=u, expect=200)
|
||||
user_ids = [item['id'] for item in response.data['results']]
|
||||
assert alice.pk in user_ids
|
||||
assert bob.pk in user_ids
|
||||
|
||||
@@ -0,0 +1,41 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.access import InstanceGroupAccess, NotificationTemplateAccess
|
||||
|
||||
from ansible_base.rbac.models import RoleDefinition
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_instance_group_object_role_delete(rando, instance_group, setup_managed_roles):
|
||||
"""Basic functionality of IG object-level admin role function AAP-25506"""
|
||||
rd = RoleDefinition.objects.get(name='InstanceGroup Admin')
|
||||
rd.give_permission(rando, instance_group)
|
||||
access = InstanceGroupAccess(rando)
|
||||
assert access.can_delete(instance_group)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_notification_template_object_role_change(rando, notification_template, setup_managed_roles):
|
||||
"""Basic functionality of NT object-level admin role function AAP-25493"""
|
||||
rd = RoleDefinition.objects.get(name='NotificationTemplate Admin')
|
||||
rd.give_permission(rando, notification_template)
|
||||
access = NotificationTemplateAccess(rando)
|
||||
assert access.can_change(notification_template, {'name': 'new name'})
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_organization_auditor_role(rando, setup_managed_roles, organization, inventory, project, jt_linked):
|
||||
obj_list = (inventory, project, jt_linked)
|
||||
for obj in obj_list:
|
||||
assert obj.organization == organization, obj # sanity
|
||||
|
||||
assert [rando.has_obj_perm(obj, 'view') for obj in obj_list] == [False for i in range(3)], obj_list
|
||||
|
||||
rd = RoleDefinition.objects.get(name='Organization Audit')
|
||||
rd.give_permission(rando, organization)
|
||||
|
||||
codename_set = set(rd.permissions.values_list('codename', flat=True))
|
||||
assert not ({'view_inventory', 'view_jobtemplate', 'audit_organization'} - codename_set) # sanity
|
||||
|
||||
assert [obj in type(obj).access_qs(rando) for obj in obj_list] == [True for i in range(3)], obj_list
|
||||
assert [rando.has_obj_perm(obj, 'view') for obj in obj_list] == [True for i in range(3)], obj_list
|
||||
@@ -2,9 +2,11 @@ import pytest
|
||||
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.urls import reverse as django_reverse
|
||||
from django.test.utils import override_settings
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import JobTemplate, Inventory, Organization
|
||||
from awx.main.access import JobTemplateAccess, WorkflowJobTemplateAccess
|
||||
|
||||
from ansible_base.rbac.models import RoleDefinition
|
||||
|
||||
@@ -88,3 +90,63 @@ def test_assign_custom_add_role(admin_user, rando, organization, post, setup_man
|
||||
inv_id = r.data['id']
|
||||
inventory = Inventory.objects.get(id=inv_id)
|
||||
assert rando.has_obj_perm(inventory, 'change')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_jt_creation_permissions(setup_managed_roles, inventory, project, rando):
|
||||
"""This tests that if you assign someone required permissions in the new API
|
||||
using the managed roles, then that works to give permissions to create a job template"""
|
||||
inv_rd = RoleDefinition.objects.get(name='Inventory Admin')
|
||||
proj_rd = RoleDefinition.objects.get(name='Project Admin')
|
||||
# establish prior state
|
||||
access = JobTemplateAccess(rando)
|
||||
assert not access.can_add({'inventory': inventory.pk, 'project': project.pk, 'name': 'foo-jt'})
|
||||
|
||||
inv_rd.give_permission(rando, inventory)
|
||||
proj_rd.give_permission(rando, project)
|
||||
|
||||
assert access.can_add({'inventory': inventory.pk, 'project': project.pk, 'name': 'foo-jt'})
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_workflow_creation_permissions(setup_managed_roles, organization, workflow_job_template, rando):
|
||||
"""Similar to JT, assigning new roles gives creator permissions"""
|
||||
org_wf_rd = RoleDefinition.objects.get(name='Organization WorkflowJobTemplate Admin')
|
||||
assert workflow_job_template.organization == organization # sanity
|
||||
# establish prior state
|
||||
access = WorkflowJobTemplateAccess(rando)
|
||||
assert not access.can_add({'name': 'foo-flow', 'organization': organization.pk})
|
||||
org_wf_rd.give_permission(rando, organization)
|
||||
|
||||
assert access.can_add({'name': 'foo-flow', 'organization': organization.pk})
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_assign_credential_to_user_of_another_org(setup_managed_roles, credential, admin_user, rando, org_admin, organization, post):
|
||||
'''Test that a credential can only be assigned to a user in the same organization'''
|
||||
# cannot assign credential to rando, as rando is not in the same org as the credential
|
||||
rd = RoleDefinition.objects.get(name="Credential Admin")
|
||||
credential.organization = organization
|
||||
credential.save(update_fields=['organization'])
|
||||
assert credential.organization not in Organization.access_qs(rando, 'change')
|
||||
url = django_reverse('roleuserassignment-list')
|
||||
resp = post(url=url, data={"user": rando.id, "role_definition": rd.id, "object_id": credential.id}, user=admin_user, expect=400)
|
||||
assert "You cannot grant credential access to a User not in the credentials' organization" in str(resp.data)
|
||||
|
||||
# can assign credential to superuser
|
||||
rando.is_superuser = True
|
||||
rando.save()
|
||||
post(url=url, data={"user": rando.id, "role_definition": rd.id, "object_id": credential.id}, user=admin_user, expect=201)
|
||||
|
||||
# can assign credential to org_admin
|
||||
assert credential.organization in Organization.access_qs(org_admin, 'change')
|
||||
post(url=url, data={"user": org_admin.id, "role_definition": rd.id, "object_id": credential.id}, user=admin_user, expect=201)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@override_settings(ALLOW_LOCAL_RESOURCE_MANAGEMENT=False)
|
||||
def test_team_member_role_not_assignable(team, rando, post, admin_user, setup_managed_roles):
|
||||
member_rd = RoleDefinition.objects.get(name='Organization Member')
|
||||
url = django_reverse('roleuserassignment-list')
|
||||
r = post(url, data={'object_id': team.id, 'role_definition': member_rd.id, 'user': rando.id}, user=admin_user, expect=400)
|
||||
assert 'Not managed locally' in str(r.data)
|
||||
|
||||
120
awx/main/tests/functional/dab_rbac/test_external_auditor.py
Normal file
@@ -0,0 +1,120 @@
|
||||
import pytest
|
||||
|
||||
from django.apps import apps
|
||||
|
||||
from ansible_base.rbac.managed import SystemAuditor
|
||||
from ansible_base.rbac import permission_registry
|
||||
|
||||
from awx.main.access import check_user_access, get_user_queryset
|
||||
from awx.main.models import User, AdHocCommandEvent
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def ext_auditor_rd():
|
||||
info = SystemAuditor(overrides={'name': 'Alien Auditor', 'shortname': 'ext_auditor'})
|
||||
rd, _ = info.get_or_create(apps)
|
||||
return rd
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def ext_auditor(ext_auditor_rd):
|
||||
u = User.objects.create(username='external-auditor-user')
|
||||
ext_auditor_rd.give_global_permission(u)
|
||||
return u
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def obj_factory(request):
|
||||
def _rf(fixture_name):
|
||||
obj = request.getfixturevalue(fixture_name)
|
||||
|
||||
# special case to make obj organization-scoped
|
||||
if obj._meta.model_name == 'executionenvironment':
|
||||
obj.organization = request.getfixturevalue('organization')
|
||||
obj.save(update_fields=['organization'])
|
||||
|
||||
return obj
|
||||
|
||||
return _rf
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_access_qs_external_auditor(ext_auditor_rd, rando, job_template):
|
||||
ext_auditor_rd.give_global_permission(rando)
|
||||
jt_cls = apps.get_model('main', 'JobTemplate')
|
||||
ujt_cls = apps.get_model('main', 'UnifiedJobTemplate')
|
||||
assert job_template in jt_cls.access_qs(rando)
|
||||
assert job_template.id in jt_cls.access_ids_qs(rando)
|
||||
assert job_template.id in ujt_cls.accessible_pk_qs(rando, 'read_role')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('model', sorted(permission_registry.all_registered_models, key=lambda cls: cls._meta.model_name))
|
||||
class TestExternalAuditorRoleAllModels:
|
||||
def test_access_can_read_method(self, obj_factory, model, ext_auditor, rando):
|
||||
fixture_name = model._meta.verbose_name.replace(' ', '_')
|
||||
obj = obj_factory(fixture_name)
|
||||
|
||||
assert check_user_access(rando, model, 'read', obj) is False
|
||||
assert check_user_access(ext_auditor, model, 'read', obj) is True
|
||||
|
||||
def test_access_get_queryset(self, obj_factory, model, ext_auditor, rando):
|
||||
fixture_name = model._meta.verbose_name.replace(' ', '_')
|
||||
obj = obj_factory(fixture_name)
|
||||
|
||||
assert obj not in get_user_queryset(rando, model)
|
||||
assert obj in get_user_queryset(ext_auditor, model)
|
||||
|
||||
def test_global_list(self, obj_factory, model, ext_auditor, rando, get):
|
||||
fixture_name = model._meta.verbose_name.replace(' ', '_')
|
||||
obj_factory(fixture_name)
|
||||
|
||||
url = reverse(f'api:{fixture_name}_list')
|
||||
r = get(url, user=rando, expect=200)
|
||||
initial_ct = r.data['count']
|
||||
|
||||
r = get(url, user=ext_auditor, expect=200)
|
||||
assert r.data['count'] == initial_ct + 1
|
||||
|
||||
if fixture_name in ('job_template', 'workflow_job_template'):
|
||||
url = reverse('api:unified_job_template_list')
|
||||
r = get(url, user=rando, expect=200)
|
||||
initial_ct = r.data['count']
|
||||
|
||||
r = get(url, user=ext_auditor, expect=200)
|
||||
assert r.data['count'] == initial_ct + 1
|
||||
|
||||
def test_detail_view(self, obj_factory, model, ext_auditor, rando, get):
|
||||
fixture_name = model._meta.verbose_name.replace(' ', '_')
|
||||
obj = obj_factory(fixture_name)
|
||||
|
||||
url = reverse(f'api:{fixture_name}_detail', kwargs={'pk': obj.pk})
|
||||
get(url, user=rando, expect=403) # NOTE: should be 401
|
||||
get(url, user=ext_auditor, expect=200)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestExternalAuditorNonRoleModels:
|
||||
def test_ad_hoc_command_view(self, ad_hoc_command_factory, rando, ext_auditor, get):
|
||||
"""The AdHocCommandAccess class references is_system_auditor
|
||||
|
||||
this is to prove it works with other system-level view roles"""
|
||||
ad_hoc_command = ad_hoc_command_factory()
|
||||
url = reverse('api:ad_hoc_command_list')
|
||||
r = get(url, user=rando, expect=200)
|
||||
assert r.data['count'] == 0
|
||||
r = get(url, user=ext_auditor, expect=200)
|
||||
assert r.data['count'] == 1
|
||||
assert r.data['results'][0]['id'] == ad_hoc_command.id
|
||||
|
||||
event = AdHocCommandEvent.objects.create(ad_hoc_command=ad_hoc_command)
|
||||
url = reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': ad_hoc_command.id})
|
||||
r = get(url, user=rando, expect=403)
|
||||
r = get(url, user=ext_auditor, expect=200)
|
||||
assert r.data['count'] == 1
|
||||
|
||||
url = reverse('api:ad_hoc_command_event_detail', kwargs={'pk': event.id})
|
||||
r = get(url, user=rando, expect=403)
|
||||
r = get(url, user=ext_auditor, expect=200)
|
||||
assert r.data['id'] == event.id
|
||||
31
awx/main/tests/functional/dab_rbac/test_managed_roles.py
Normal file
@@ -0,0 +1,31 @@
|
||||
import pytest
|
||||
|
||||
from ansible_base.rbac.models import RoleDefinition, DABPermission
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_roles_to_not_create(setup_managed_roles):
|
||||
assert RoleDefinition.objects.filter(name='Organization Admin').count() == 1
|
||||
|
||||
SHOULD_NOT_EXIST = ('Organization Organization Admin', 'Organization Team Admin', 'Organization InstanceGroup Admin')
|
||||
|
||||
bad_rds = RoleDefinition.objects.filter(name__in=SHOULD_NOT_EXIST)
|
||||
if bad_rds.exists():
|
||||
bad_names = list(bad_rds.values_list('name', flat=True))
|
||||
raise Exception(f'Found RoleDefinitions that should not exist: {bad_names}')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_project_update_role(setup_managed_roles):
|
||||
"""Role to allow updating a project on the object-level should exist"""
|
||||
assert RoleDefinition.objects.filter(name='Project Update').count() == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_org_child_add_permission(setup_managed_roles):
|
||||
for model_name in ('Project', 'NotificationTemplate', 'WorkflowJobTemplate', 'Inventory'):
|
||||
rd = RoleDefinition.objects.get(name=f'Organization {model_name} Admin')
|
||||
assert 'add_' in str(rd.permissions.values_list('codename', flat=True)), f'The {rd.name} role definition expected to contain add_ permissions'
|
||||
|
||||
# special case for JobTemplate, anyone can create one with use permission to project/inventory
|
||||
assert not DABPermission.objects.filter(codename='add_jobtemplate').exists()
|
||||
@@ -16,7 +16,16 @@ from ansible_base.rbac.models import RoleUserAssignment, RoleDefinition
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
'role_name',
|
||||
['execution_environment_admin_role', 'project_admin_role', 'admin_role', 'auditor_role', 'read_role', 'execute_role', 'notification_admin_role'],
|
||||
[
|
||||
'execution_environment_admin_role',
|
||||
'workflow_admin_role',
|
||||
'project_admin_role',
|
||||
'admin_role',
|
||||
'auditor_role',
|
||||
'read_role',
|
||||
'execute_role',
|
||||
'notification_admin_role',
|
||||
],
|
||||
)
|
||||
def test_round_trip_roles(organization, rando, role_name, setup_managed_roles):
|
||||
"""
|
||||
@@ -26,7 +35,6 @@ def test_round_trip_roles(organization, rando, role_name, setup_managed_roles):
|
||||
"""
|
||||
getattr(organization, role_name).members.add(rando)
|
||||
assignment = RoleUserAssignment.objects.get(user=rando)
|
||||
print(assignment.role_definition.name)
|
||||
old_role = get_role_from_object_role(assignment.object_role)
|
||||
assert old_role.id == getattr(organization, role_name).id
|
||||
|
||||
@@ -141,3 +149,11 @@ def test_implicit_parents_no_assignments(organization):
|
||||
with mock.patch('awx.main.models.rbac.give_or_remove_permission') as mck:
|
||||
Team.objects.create(name='random team', organization=organization)
|
||||
mck.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_user_auditor_rel(organization, rando, setup_managed_roles):
|
||||
assert rando not in organization.auditor_role
|
||||
audit_rd = RoleDefinition.objects.get(name='Organization Audit')
|
||||
audit_rd.give_permission(rando, organization)
|
||||
assert list(rando.auditor_of_organizations) == [organization]
|
||||
|
||||
@@ -4,25 +4,19 @@ import pytest
|
||||
# CRUM
|
||||
from crum import impersonate
|
||||
|
||||
# Django
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
|
||||
# AWX
|
||||
from awx.main.models import UnifiedJobTemplate, Job, JobTemplate, WorkflowJobTemplate, WorkflowApprovalTemplate, Project, WorkflowJob, Schedule, Credential
|
||||
from awx.main.models import UnifiedJobTemplate, Job, JobTemplate, WorkflowJobTemplate, Project, WorkflowJob, Schedule, Credential
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.constants import JOB_VARIABLE_PREFIXES
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_subclass_types():
|
||||
assert set(UnifiedJobTemplate._submodels_with_roles()) == set(
|
||||
[
|
||||
ContentType.objects.get_for_model(JobTemplate).id,
|
||||
ContentType.objects.get_for_model(Project).id,
|
||||
ContentType.objects.get_for_model(WorkflowJobTemplate).id,
|
||||
ContentType.objects.get_for_model(WorkflowApprovalTemplate).id,
|
||||
]
|
||||
)
|
||||
assert set(UnifiedJobTemplate._submodels_with_roles()) == {
|
||||
JobTemplate,
|
||||
Project,
|
||||
WorkflowJobTemplate,
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -46,6 +46,8 @@ def generate_fake_var(element):
|
||||
|
||||
def credential_kind(source):
|
||||
"""Given the inventory source kind, return expected credential kind"""
|
||||
if source == 'openshift_virtualization':
|
||||
return 'kubernetes_bearer_token'
|
||||
return source.replace('ec2', 'aws')
|
||||
|
||||
|
||||
|
||||
@@ -85,3 +85,17 @@ class TestMigrationSmoke:
|
||||
|
||||
RoleUserAssignment = new_state.apps.get_model('dab_rbac', 'RoleUserAssignment')
|
||||
assert RoleUserAssignment.objects.filter(user=user.id, object_id=org.id).exists()
|
||||
|
||||
# Regression testing for bug that comes from current vs past models mismatch
|
||||
RoleDefinition = new_state.apps.get_model('dab_rbac', 'RoleDefinition')
|
||||
assert not RoleDefinition.objects.filter(name='Organization Organization Admin').exists()
|
||||
# Test special cases in managed role creation
|
||||
assert not RoleDefinition.objects.filter(name='Organization Team Admin').exists()
|
||||
assert not RoleDefinition.objects.filter(name='Organization InstanceGroup Admin').exists()
|
||||
|
||||
# Test that a removed EE model permission has been deleted
|
||||
new_state = migrator.apply_tested_migration(
|
||||
('main', '0195_EE_permissions'),
|
||||
)
|
||||
DABPermission = new_state.apps.get_model('dab_rbac', 'DABPermission')
|
||||
assert not DABPermission.objects.filter(codename='view_executionenvironment').exists()
|
||||
|
||||
107
awx/main/tests/functional/test_rbac_execution_environment.py
Normal file
@@ -0,0 +1,107 @@
|
||||
import pytest
|
||||
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
|
||||
from awx.main.access import ExecutionEnvironmentAccess
|
||||
from awx.main.models import ExecutionEnvironment, Organization
|
||||
from awx.main.models.rbac import get_role_codenames
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from django.urls import reverse as django_reverse
|
||||
|
||||
from ansible_base.rbac.models import RoleDefinition
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def ee_rd():
|
||||
return RoleDefinition.objects.create_from_permissions(
|
||||
name='EE object admin',
|
||||
permissions=['change_executionenvironment', 'delete_executionenvironment'],
|
||||
content_type=ContentType.objects.get_for_model(ExecutionEnvironment),
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def org_ee_rd():
|
||||
return RoleDefinition.objects.create_from_permissions(
|
||||
name='EE org admin',
|
||||
permissions=['add_executionenvironment', 'change_executionenvironment', 'delete_executionenvironment', 'view_organization'],
|
||||
content_type=ContentType.objects.get_for_model(Organization),
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_old_ee_role_maps_to_correct_permissions(organization):
|
||||
assert set(get_role_codenames(organization.execution_environment_admin_role)) == {
|
||||
'view_organization',
|
||||
'add_executionenvironment',
|
||||
'change_executionenvironment',
|
||||
'delete_executionenvironment',
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def org_ee(organization):
|
||||
return ExecutionEnvironment.objects.create(name='some user ee', organization=organization)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def check_user_capabilities(get, setup_managed_roles):
|
||||
def _rf(user, obj, expected):
|
||||
url = reverse('api:execution_environment_list')
|
||||
r = get(url, user=user, expect=200)
|
||||
for item in r.data['results']:
|
||||
if item['id'] == obj.pk:
|
||||
assert expected == item['summary_fields']['user_capabilities']
|
||||
break
|
||||
else:
|
||||
raise RuntimeError(f'Could not find expected object ({obj}) in EE list result: {r.data}')
|
||||
|
||||
return _rf
|
||||
|
||||
|
||||
# ___ begin tests ___
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_managed_ee_not_assignable(control_plane_execution_environment, ee_rd, rando, admin_user, post):
|
||||
url = django_reverse('roleuserassignment-list')
|
||||
r = post(url, {'role_definition': ee_rd.pk, 'user': rando.id, 'object_id': control_plane_execution_environment.pk}, user=admin_user, expect=400)
|
||||
assert 'Can not assign object roles to managed Execution Environment' in str(r.data)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_org_member_required_for_assignment(org_ee, ee_rd, rando, admin_user, post):
|
||||
url = django_reverse('roleuserassignment-list')
|
||||
r = post(url, {'role_definition': ee_rd.pk, 'user': rando.id, 'object_id': org_ee.pk}, user=admin_user, expect=400)
|
||||
assert 'User must have view permission to Execution Environment organization' in str(r.data)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_give_object_permission_to_ee(org_ee, ee_rd, org_member, check_user_capabilities):
|
||||
access = ExecutionEnvironmentAccess(org_member)
|
||||
assert access.can_read(org_ee) # by virtue of being an org member
|
||||
assert not access.can_change(org_ee, {'name': 'new'})
|
||||
check_user_capabilities(org_member, org_ee, {'edit': False, 'delete': False, 'copy': False})
|
||||
|
||||
ee_rd.give_permission(org_member, org_ee)
|
||||
assert access.can_change(org_ee, {'name': 'new'})
|
||||
|
||||
check_user_capabilities(org_member, org_ee, {'edit': True, 'delete': True, 'copy': False})
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('style', ['new', 'old'])
|
||||
def test_give_org_permission_to_ee(org_ee, organization, org_member, check_user_capabilities, style, org_ee_rd):
|
||||
access = ExecutionEnvironmentAccess(org_member)
|
||||
assert not access.can_change(org_ee, {'name': 'new'})
|
||||
check_user_capabilities(org_member, org_ee, {'edit': False, 'delete': False, 'copy': False})
|
||||
|
||||
if style == 'new':
|
||||
org_ee_rd.give_permission(org_member, organization)
|
||||
assert org_member.has_obj_perm(org_ee.organization, 'add_executionenvironment') # sanity
|
||||
else:
|
||||
organization.execution_environment_admin_role.members.add(org_member)
|
||||
|
||||
assert access.can_change(org_ee, {'name': 'new'})
|
||||
check_user_capabilities(org_member, org_ee, {'edit': True, 'delete': True, 'copy': True})
|
||||
@@ -182,8 +182,14 @@ def test_job_template_creator_access(project, organization, rando, post, setup_m
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.job_permissions
|
||||
@pytest.mark.parametrize('lacking', ['project', 'inventory'])
|
||||
def test_job_template_insufficient_creator_permissions(lacking, project, inventory, organization, rando, post):
|
||||
@pytest.mark.parametrize(
|
||||
'lacking,reason',
|
||||
[
|
||||
('project', 'You do not have use permission on Project'),
|
||||
('inventory', 'You do not have use permission on Inventory'),
|
||||
],
|
||||
)
|
||||
def test_job_template_insufficient_creator_permissions(lacking, reason, project, inventory, organization, rando, post):
|
||||
if lacking != 'project':
|
||||
project.use_role.members.add(rando)
|
||||
else:
|
||||
@@ -192,12 +198,13 @@ def test_job_template_insufficient_creator_permissions(lacking, project, invento
|
||||
inventory.use_role.members.add(rando)
|
||||
else:
|
||||
inventory.read_role.members.add(rando)
|
||||
post(
|
||||
response = post(
|
||||
url=reverse('api:job_template_list'),
|
||||
data=dict(name='newly-created-jt', inventory=inventory.id, project=project.pk, playbook='helloworld.yml'),
|
||||
user=rando,
|
||||
expect=403,
|
||||
)
|
||||
assert reason in response.data[lacking]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -99,7 +99,9 @@ def test_notification_template_access_org_user(notification_template, user):
|
||||
@pytest.mark.django_db
|
||||
def test_notificaiton_template_orphan_access_org_admin(notification_template, organization, org_admin):
|
||||
notification_template.organization = None
|
||||
notification_template.save(update_fields=['organization'])
|
||||
access = NotificationTemplateAccess(org_admin)
|
||||
assert not org_admin.has_obj_perm(notification_template, 'change')
|
||||
assert not access.can_change(notification_template, {'organization': organization.id})
|
||||
|
||||
|
||||
|
||||
@@ -35,6 +35,13 @@ class TestWorkflowJobTemplateAccess:
|
||||
assert org_member in wfjt.execute_role
|
||||
assert org_member in wfjt.read_role
|
||||
|
||||
def test_non_super_admin_no_add_without_org(self, wfjt, organization, rando):
|
||||
organization.member_role.members.add(rando)
|
||||
wfjt.admin_role.members.add(rando)
|
||||
access = WorkflowJobTemplateAccess(rando, save_messages=True)
|
||||
assert not access.can_add({'name': 'without org'})
|
||||
assert 'An organization is required to create a workflow job template for normal user' in access.messages['organization']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestWorkflowJobTemplateNodeAccess:
|
||||
|
||||
@@ -8,9 +8,22 @@ from django.db import connection
|
||||
|
||||
|
||||
@contextmanager
|
||||
def advisory_lock(*args, **kwargs):
|
||||
def advisory_lock(*args, lock_session_timeout_milliseconds=0, **kwargs):
|
||||
if connection.vendor == 'postgresql':
|
||||
cur = None
|
||||
idle_in_transaction_session_timeout = None
|
||||
idle_session_timeout = None
|
||||
if lock_session_timeout_milliseconds > 0:
|
||||
with connection.cursor() as cur:
|
||||
idle_in_transaction_session_timeout = cur.execute('SHOW idle_in_transaction_session_timeout').fetchone()[0]
|
||||
idle_session_timeout = cur.execute('SHOW idle_session_timeout').fetchone()[0]
|
||||
cur.execute(f"SET idle_in_transaction_session_timeout = {lock_session_timeout_milliseconds}")
|
||||
cur.execute(f"SET idle_session_timeout = {lock_session_timeout_milliseconds}")
|
||||
with django_pglocks_advisory_lock(*args, **kwargs) as internal_lock:
|
||||
yield internal_lock
|
||||
if lock_session_timeout_milliseconds > 0:
|
||||
with connection.cursor() as cur:
|
||||
cur.execute(f"SET idle_in_transaction_session_timeout = {idle_in_transaction_session_timeout}")
|
||||
cur.execute(f"SET idle_session_timeout = {idle_session_timeout}")
|
||||
else:
|
||||
yield True
|
||||
|
||||
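A usage sketch of the timeout-aware advisory_lock defined above, mirroring the task-manager call sites earlier in this diff; the import path and the 360-second value are assumptions for illustration, not part of the change:

```python
from awx.main.utils.pglock import advisory_lock  # assumed location of the helper above

TASK_MANAGER_LOCK_TIMEOUT = 360  # seconds; settings derive this as 300 + 60 grace


def run_scheduler_once(prefix):
    # Callers convert seconds to milliseconds before passing the value along,
    # because the Postgres idle_*_session_timeout settings are in milliseconds.
    timeout_ms = TASK_MANAGER_LOCK_TIMEOUT * 1000
    with advisory_lock(f'{prefix}_lock', lock_session_timeout_milliseconds=timeout_ms, wait=False) as acquired:
        if acquired is False:
            return False  # another node holds the lock; skip this run
        # ... scheduling work happens here; if the session idles past the
        # timeout, Postgres terminates it and the advisory lock is released.
        return True
```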
48
awx/main/utils/proxy.py
Normal file
@@ -0,0 +1,48 @@
|
||||
# Copyright (c) 2024 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
|
||||
# DRF
|
||||
from rest_framework.request import Request
|
||||
|
||||
|
||||
"""
|
||||
Note that these methods operate on request.environ. This data is from uwsgi.
|
||||
It is the source data from which request.headers (read-only) is constructed.
|
||||
"""
|
||||
|
||||
|
||||
def is_proxy_in_headers(request: Request, proxy_list: list[str], headers: list[str]) -> bool:
|
||||
"""
|
||||
Determine if the request went through at least one proxy in the list.
|
||||
Example:
|
||||
request.environ = {
|
||||
"HTTP_X_FOO": "8.8.8.8, 192.168.2.1",
|
||||
"REMOTE_ADDR": "192.168.2.1",
|
||||
"REMOTE_HOST": "foobar"
|
||||
}
|
||||
proxy_list = ["192.168.2.1"]
|
||||
headers = ["HTTP_X_FOO", "REMOTE_ADDR", "REMOTE_HOST"]
|
||||
|
||||
The above would return True since 192.168.2.1 is a value for the header HTTP_X_FOO
|
||||
|
||||
request: The DRF/Django request. request.environ dict will be used for searching for proxies
|
||||
proxy_list: A list of known and trusted proxies may be ip or hostnames
|
||||
headers: A list of keys for which to consider values that may contain a proxy
|
||||
"""
|
||||
|
||||
remote_hosts = set()
|
||||
|
||||
for header in headers:
|
||||
for value in request.environ.get(header, '').split(','):
|
||||
value = value.strip()
|
||||
if value:
|
||||
remote_hosts.add(value)
|
||||
|
||||
return bool(remote_hosts.intersection(set(proxy_list)))
|
||||
|
||||
|
||||
def delete_headers_starting_with_http(request: Request, headers: list[str]):
|
||||
for header in headers:
|
||||
if header.startswith('HTTP_'):
|
||||
request.environ.pop(header, None)
|
||||
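A short sketch of how the two helpers above compose; the request object is faked with SimpleNamespace and the settings values mirror the tests earlier in this diff:

```python
from types import SimpleNamespace

from awx.main.utils.proxy import is_proxy_in_headers, delete_headers_starting_with_http

PROXY_IP_ALLOWED_LIST = ['my.proxy.example.org']
REMOTE_HOST_HEADERS = ['HTTP_X_FROM_THE_LOAD_BALANCER', 'REMOTE_ADDR', 'REMOTE_HOST']

# Stand-in for a DRF request; the helpers only read and modify request.environ.
request = SimpleNamespace(
    environ={
        'HTTP_X_FROM_THE_LOAD_BALANCER': 'some-actual-ip, untrusted.example.net',
        'REMOTE_ADDR': '10.0.0.7',
    }
)

if PROXY_IP_ALLOWED_LIST and not is_proxy_in_headers(request, PROXY_IP_ALLOWED_LIST, REMOTE_HOST_HEADERS):
    # No trusted proxy appears in any inspected header, so drop the spoofable
    # HTTP_* headers before REMOTE_HOST_HEADERS lookups are performed.
    delete_headers_starting_with_http(request, REMOTE_HOST_HEADERS)

assert 'HTTP_X_FROM_THE_LOAD_BALANCER' not in request.environ
```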
@@ -306,7 +306,8 @@ class WebSocketRelayManager(object):
|
||||
database_conf['OPTIONS'] = deepcopy(database_conf.get('OPTIONS', {}))
|
||||
|
||||
for k, v in settings.LISTENER_DATABASES.get('default', {}).items():
|
||||
database_conf[k] = v
|
||||
if k != 'OPTIONS':
|
||||
database_conf[k] = v
|
||||
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
|
||||
database_conf['OPTIONS'][k] = v
|
||||
|
||||
|
||||
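The OPTIONS special-case above exists because a plain top-level copy would replace the deep-copied OPTIONS dict wholesale, dropping keys already present in the default database config. A toy demonstration with made-up values:

```python
from copy import deepcopy

database_conf = {'NAME': 'awx', 'OPTIONS': {'sslmode': 'prefer'}}
listener_overrides = {'HOST': 'listener-db.example.org', 'OPTIONS': {'keepalives': 1}}

merged = deepcopy(database_conf)
for k, v in listener_overrides.items():
    if k != 'OPTIONS':
        merged[k] = v
for k, v in listener_overrides.get('OPTIONS', {}).items():
    merged['OPTIONS'][k] = v

# Existing OPTIONS keys survive alongside the listener-specific ones.
assert merged['OPTIONS'] == {'sslmode': 'prefer', 'keepalives': 1}
assert merged['HOST'] == 'listener-db.example.org'
```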
@@ -262,6 +262,7 @@ START_TASK_LIMIT = 100
# We have the grace period so the task manager can bail out before the timeout.
TASK_MANAGER_TIMEOUT = 300
TASK_MANAGER_TIMEOUT_GRACE_PERIOD = 60
TASK_MANAGER_LOCK_TIMEOUT = TASK_MANAGER_TIMEOUT + TASK_MANAGER_TIMEOUT_GRACE_PERIOD

# Number of seconds _in addition to_ the task manager timeout a job can stay
# in waiting without being reaped

@@ -492,6 +493,7 @@ CELERYBEAT_SCHEDULE = {
    'cleanup_images': {'task': 'awx.main.tasks.system.cleanup_images_and_files', 'schedule': timedelta(hours=3)},
    'cleanup_host_metrics': {'task': 'awx.main.tasks.host_metrics.cleanup_host_metrics', 'schedule': timedelta(hours=3, minutes=30)},
    'host_metric_summary_monthly': {'task': 'awx.main.tasks.host_metrics.host_metric_summary_monthly', 'schedule': timedelta(hours=4)},
    'periodic_resource_sync': {'task': 'awx.main.tasks.system.periodic_resource_sync', 'schedule': timedelta(minutes=15)},
}

# Django Caching Configuration

@@ -656,6 +658,10 @@ AWX_ANSIBLE_CALLBACK_PLUGINS = ""
# Automatically remove nodes that have missed their heartbeats after some time
AWX_AUTO_DEPROVISION_INSTANCES = False

# If False, do not allow creation of resources that are shared with the platform ingress
# e.g. organizations, teams, and users
ALLOW_LOCAL_RESOURCE_MANAGEMENT = True

# Enable Pendo on the UI, possible values are 'off', 'anonymous', and 'detailed'
# Note: This setting may be overridden by database settings.
PENDO_TRACKING_STATE = "off"

@@ -778,6 +784,11 @@ INSIGHTS_EXCLUDE_EMPTY_GROUPS = False
TERRAFORM_INSTANCE_ID_VAR = 'id'
TERRAFORM_EXCLUDE_EMPTY_GROUPS = True

# ------------------------
# OpenShift Virtualization
# ------------------------
OPENSHIFT_VIRTUALIZATION_EXCLUDE_EMPTY_GROUPS = True

# ---------------------
# ----- Custom -----
# ---------------------

3004	awx/sso/conf.py
File diff suppressed because it is too large
@@ -56,6 +56,10 @@ describe('<InventorySourceAdd />', () => {
        ['satellite6', 'Red Hat Satellite 6'],
        ['openstack', 'OpenStack'],
        ['rhv', 'Red Hat Virtualization'],
        [
          'openshift_virtualization',
          'Red Hat OpenShift Virtualization',
        ],
        ['controller', 'Red Hat Ansible Automation Platform'],
      ],
    },

@@ -23,6 +23,8 @@ const ansibleDocUrls = {
    'https://docs.ansible.com/ansible/latest/collections/ansible/builtin/constructed_inventory.html',
  terraform:
    'https://github.com/ansible-collections/cloud.terraform/blob/main/docs/cloud.terraform.terraform_state_inventory.rst',
  openshift_virtualization:
    'https://kubevirt.io/kubevirt.core/latest/plugins/kubevirt.html',
};

const getInventoryHelpTextStrings = () => ({
@@ -121,7 +123,7 @@ const getInventoryHelpTextStrings = () => ({
          <br />
          {value && (
            <div>
              {t`If you want the Inventory Source to update on launch , click on Update on Launch,
              and also go to `}
              <Link to={`/projects/${value.id}/details`}> {value.name} </Link>
              {t`and click on Update Revision on Launch.`}

@@ -140,7 +142,7 @@ const getInventoryHelpTextStrings = () => ({
          <br />
          {value && (
            <div>
              {t`If you want the Inventory Source to update on launch , click on Update on Launch,
              and also go to `}
              <Link to={`/projects/${value.id}/details`}> {value.name} </Link>
              {t`and click on Update Revision on Launch`
@@ -26,6 +26,7 @@ import {
  TerraformSubForm,
  VMwareSubForm,
  VirtualizationSubForm,
  OpenShiftVirtualizationSubForm,
} from './InventorySourceSubForms';

const buildSourceChoiceOptions = (options) => {

@@ -231,6 +232,15 @@ const InventorySourceFormFields = ({
              sourceOptions={sourceOptions}
            />
          ),
          openshift_virtualization: (
            <OpenShiftVirtualizationSubForm
              autoPopulateCredential={
                !source?.id ||
                source?.source !== 'openshift_virtualization'
              }
              sourceOptions={sourceOptions}
            />
          ),
        }[sourceField.value]
      }
    </FormColumnLayout>
@@ -0,0 +1,64 @@
import React, { useCallback } from 'react';
import { useField, useFormikContext } from 'formik';

import { t } from '@lingui/macro';
import { useConfig } from 'contexts/Config';
import getDocsBaseUrl from 'util/getDocsBaseUrl';
import CredentialLookup from 'components/Lookup/CredentialLookup';
import { required } from 'util/validators';
import {
  OptionsField,
  VerbosityField,
  EnabledVarField,
  EnabledValueField,
  HostFilterField,
  SourceVarsField,
} from './SharedFields';
import getHelpText from '../Inventory.helptext';

const OpenShiftVirtualizationSubForm = ({ autoPopulateCredential }) => {
  const helpText = getHelpText();
  const { setFieldValue, setFieldTouched } = useFormikContext();
  const [credentialField, credentialMeta, credentialHelpers] =
    useField('credential');
  const config = useConfig();

  const handleCredentialUpdate = useCallback(
    (value) => {
      setFieldValue('credential', value);
      setFieldTouched('credential', true, false);
    },
    [setFieldValue, setFieldTouched]
  );

  const docsBaseUrl = getDocsBaseUrl(config);
  return (
    <>
      <CredentialLookup
        credentialTypeNamespace="kubernetes_bearer_token"
        label={t`Credential`}
        helperTextInvalid={credentialMeta.error}
        isValid={!credentialMeta.touched || !credentialMeta.error}
        onBlur={() => credentialHelpers.setTouched()}
        onChange={handleCredentialUpdate}
        value={credentialField.value}
        required
        autoPopulate={autoPopulateCredential}
        validate={required(t`Select a value for this field`)}
      />
      <VerbosityField />
      <HostFilterField />
      <EnabledVarField />
      <EnabledValueField />
      <OptionsField />
      <SourceVarsField
        popoverContent={helpText.sourceVars(
          docsBaseUrl,
          'openshift_virtualization'
        )}
      />
    </>
  );
};

export default OpenShiftVirtualizationSubForm;
@@ -0,0 +1,65 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import { Formik } from 'formik';
import { CredentialsAPI } from 'api';
import { mountWithContexts } from '../../../../../testUtils/enzymeHelpers';
import VirtualizationSubForm from './VirtualizationSubForm';

jest.mock('../../../../api');

const initialValues = {
  credential: null,
  overwrite: false,
  overwrite_vars: false,
  source_path: '',
  source_project: null,
  source_script: null,
  source_vars: '---\n',
  update_cache_timeout: 0,
  update_on_launch: true,
  verbosity: 1,
};

describe('<VirtualizationSubForm />', () => {
  let wrapper;

  beforeEach(async () => {
    CredentialsAPI.read.mockResolvedValue({
      data: { count: 0, results: [] },
    });

    await act(async () => {
      wrapper = mountWithContexts(
        <Formik initialValues={initialValues}>
          <VirtualizationSubForm />
        </Formik>
      );
    });
  });

  afterAll(() => {
    jest.clearAllMocks();
  });

  test('should render subform fields', () => {
    expect(wrapper.find('FormGroup[label="Credential"]')).toHaveLength(1);
    expect(wrapper.find('FormGroup[label="Verbosity"]')).toHaveLength(1);
    expect(wrapper.find('FormGroup[label="Update options"]')).toHaveLength(1);
    expect(
      wrapper.find('FormGroup[label="Cache timeout (seconds)"]')
    ).toHaveLength(1);
    expect(
      wrapper.find('VariablesField[label="Source variables"]')
    ).toHaveLength(1);
  });

  test('should make expected api calls', () => {
    expect(CredentialsAPI.read).toHaveBeenCalledTimes(1);
    expect(CredentialsAPI.read).toHaveBeenCalledWith({
      credential_type__namespace: 'rhv',
      order_by: 'name',
      page: 1,
      page_size: 5,
    });
  });
});
@@ -9,3 +9,4 @@ export { default as ControllerSubForm } from './ControllerSubForm';
export { default as TerraformSubForm } from './TerraformSubForm';
export { default as VMwareSubForm } from './VMwareSubForm';
export { default as VirtualizationSubForm } from './VirtualizationSubForm';
export { default as OpenShiftVirtualizationSubForm } from './OpenShiftVirtualizationSubForm';

@@ -120,7 +120,7 @@ function NotificationTemplatesList() {
        toolbarSearchColumns={[
          {
            name: t`Name`,
            key: 'name',
            key: 'name__icontains',
            isDefault: true,
          },
          {
@@ -42,7 +42,8 @@ options:
    source:
      description:
        - The source to use for this group.
      choices: [ "scm", "ec2", "gce", "azure_rm", "vmware", "satellite6", "openstack", "rhv", "controller", "insights" ]
      choices: [ "scm", "ec2", "gce", "azure_rm", "vmware", "satellite6", "openstack", "rhv", "controller", "insights", "terraform",
                 "openshift_virtualization" ]
      type: str
    source_path:
      description:

@@ -170,7 +171,22 @@ def main():
        #
        # How do we handle manual and file? The controller does not seem to be able to activate them
        #
        source=dict(choices=["scm", "ec2", "gce", "azure_rm", "vmware", "satellite6", "openstack", "rhv", "controller", "insights"]),
        source=dict(
            choices=[
                "scm",
                "ec2",
                "gce",
                "azure_rm",
                "vmware",
                "satellite6",
                "openstack",
                "rhv",
                "controller",
                "insights",
                "terraform",
                "openshift_virtualization",
            ]
        ),
        source_path=dict(),
        source_vars=dict(type='dict'),
        enabled_var=dict(),
@@ -317,7 +317,10 @@ class ApiV2(base.Base):
                if asset['natural_key']['type'] == 'project' and 'local_path' in post_data and _page['scm_type'] == post_data['scm_type']:
                    del post_data['local_path']

                _page = _page.put(post_data)
                if asset['natural_key']['type'] == 'user':
                    _page = _page.patch(**post_data)
                else:
                    _page = _page.put(post_data)
                changed = True
            except (exc.Common, AssertionError) as e:
                identifier = asset.get("name", None) or asset.get("username", None) or asset.get("hostname", None)
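A rough sketch of the intent of the branch added above, outside the awxkit page machinery; the helper name and arguments are invented for illustration:

```python
# Hypothetical helper mirroring the change above: user assets are PATCHed
# (a partial update leaves fields that are absent from the export untouched),
# while every other asset type keeps the previous PUT (full replace) behavior.
def apply_import(page, asset, post_data):
    if asset['natural_key']['type'] == 'user':
        return page.patch(**post_data)
    return page.put(post_data)
```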
@@ -300,13 +300,10 @@ Container Groups
   single: container groups
   pair: containers; instance groups

AWX supports :term:`Container Groups`, which allow you to execute jobs in AWX regardless of whether AWX is installed as a standalone, in a virtual environment, or in a container. Container groups act as a pool of resources within a virtual environment. You can create instance groups to point to an OpenShift container, which are job environments that are provisioned on-demand as a Pod that exists only for the duration of the playbook run. This is known as the ephemeral execution model and ensures a clean environment for every job run.
AWX supports :term:`Container Groups`, which allow you to execute jobs in pods on Kubernetes (k8s) or OpenShift clusters. Container groups act as a pool of resources within a virtual environment. These pods are created on-demand and only exist for the duration of the playbook run. This is known as the ephemeral execution model and ensures a clean environment for every job run.

In some cases, it is desirable to have container groups be "always-on", which is configured through the creation of an instance.

.. note::

   Container Groups upgraded from versions prior to |at| 4.0 will revert back to default and completely remove the old pod definition, clearing out all custom pod definitions in the migration.


Container groups are different from |ees| in that |ees| are container images and do not use a virtual environment. See :ref:`ug_execution_environments` in the |atu| for further detail.

@@ -335,19 +332,19 @@ To create a container group:

.. _ag_customize_pod_spec:

Customize the Pod spec
Customize the pod spec
^^^^^^^^^^^^^^^^^^^^^^^^

AWX provides a simple default Pod specification, however, you can provide a custom YAML (or JSON) document that overrides the default Pod spec. This field uses any custom fields (i.e. ``ImagePullSecrets``) that can be "serialized" as valid Pod JSON or YAML. A full list of options can be found in the `OpenShift documentation <https://docs.openshift.com/online/pro/architecture/core_concepts/pods_and_services.html>`_.
AWX provides a simple default pod specification, however, you can provide a custom YAML (or JSON) document that overrides the default pod spec. This field uses any custom fields (for example, ``ImagePullSecrets``) that can be "serialized" as valid pod JSON or YAML. A full list of options can be found in the `OpenShift documentation <https://docs.openshift.com/online/pro/architecture/core_concepts/pods_and_services.html>`_.

To customize the Pod spec, specify the namespace in the **Pod Spec Override** field by using the toggle to enable and expand the **Pod Spec Override** field and click **Save** when done.
To customize the pod spec, check the **Customize pod specification** option to enable and expand the **Custom pod spec** field where you specify the namespace and provide additional customizations as needed.

|IG - CG customize pod|

.. |IG - CG customize pod| image:: ../common/images/instance-group-customize-cg-pod.png
   :alt: Create new container group form with the option to custom the pod spec.

You may provide additional customizations, if needed. Click **Expand** to view the entire customization window.
Click **Expand** to view the entire customization window.

.. image:: ../common/images/instance-group-customize-cg-pod-expanded.png
   :alt: The expanded view for customizing the pod spec.

@@ -356,6 +353,21 @@ You may provide additional customizations, if needed. Click **Expand** to view t

The image used at job launch time is determined by which |ee| is associated with the job. If a Container Registry credential is associated with the |ee|, then AWX will attempt to make a ``ImagePullSecret`` to pull the image. If you prefer not to give the service account permission to manage secrets, you must pre-create the ``ImagePullSecret`` and specify it on the pod spec, and omit any credential from the |ee| used.

.. tip::

   In order to override DNS/host entries, use the ``hostAliases`` attribute on the pod spec. When the pod is created, these entries will be added to ``/etc/hosts`` in the container running the job.

   ::

     spec:
       hostAliases:
       - ip: "127.0.0.1"
         hostnames:
         - "foo.local"

   For more information, refer to Kubernetes' documentation on `Adding additional entries with hostAliases <https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/#adding-additional-entries-with-hostaliases>`_.


Once the container group is successfully created, the **Details** tab of the newly created container group remains, which allows you to review and edit your container group information. This is the same menu that is opened if the Edit (|edit-button|) button is clicked from the **Instance Group** link. You can also edit **Instances** and review **Jobs** associated with this instance group.

.. |edit-button| image:: ../common/images/edit-button.png

@@ -370,7 +382,7 @@ Container groups and instance groups are labeled accordingly.

.. note::

   Despite the fact that customers have custom Pod specs, upgrades may be difficult if the default ``pod_spec`` changes. Most any manifest can be applied to any namespace, with the namespace specified separately, most likely you will only need to override the namespace. Similarly, pinning a default image for different releases of the platform to different versions of the default job runner container is tricky. If the default image is specified in the Pod spec, then upgrades do not pick up the new default changes are made to the default Pod spec.
   Using a custom pod spec may cause issues on upgrades if the default ``pod_spec`` changes. Since any manifest can be applied to any namespace, with the namespace specified separately, most likely you will only need to override the namespace. Similarly, pinning a default image for different releases of the platform to different versions of the default job runner container is tricky. If the default image is specified in the pod spec, then upgrades do not pick up the new default changes that are made to the default pod spec.


Verify container group functions

@@ -411,7 +423,7 @@ You can see in the jobs detail view the container was reached successfully using
.. |Inventory with localhost ping success| image:: ../common/images/inventories-launch-adhoc-cg-test-localhost-success.png
   :alt: Jobs output view showing a successfully ran adhoc job.

If you have an OpenShift UI, you can see Pods appear and disappear as they deploy and terminate. Alternatively, you can use the CLI to perform a ``get pod`` operation on your namespace to watch these same events occurring in real-time.
If you have an OpenShift UI, you can see pods appear and disappear as they deploy and terminate. Alternatively, you can use the CLI to perform a ``get pod`` operation on your namespace to watch these same events occurring in real-time.


View container group jobs
@@ -8,6 +8,21 @@ Troubleshooting AWX
   single: troubleshooting
   single: help


Some troubleshooting tools are built into the AWX user interface that may help you address issues you might encounter. To access these tools, navigate to **Settings** and select **Troubleshooting**.

.. image:: ../common/images/settings_troubleshooting_highlighted.png

The options available are:

- **Enable or Disable tmp dir cleanup**: choose whether you want to clean up the ``tmp`` directory.
- **Debug Web Requests**: choose whether you want web requests to log messages for debugging purposes.
- **Release Receptor Work**: disables cleaning up job pods. If you disable this, the job pods will remain in your cluster indefinitely, allowing you to examine them post-run. If you are missing data there, run ``kubectl logs <job-pod-name>`` and provide the logs in an issue report.

.. image:: ../common/images/troubleshooting_options.png

Click **Edit** to modify the settings. Use the toggle to enable and disable the appropriate settings.

.. _admin_troubleshooting_extra_settings:

Error logging and extra settings

@@ -220,3 +235,4 @@ If you receive the message "Skipping: No Hosts Matched" when you are trying to r
- Make sure that if you have specified a Limit in the Job Template that it is a valid limit value and still matches something in your inventory. The Limit field takes a pattern argument, described here: http://docs.ansible.com/intro_patterns.html

Please file a support ticket if you still run into issues after checking these options.
Binary file not shown. (new image, 91 KiB)
Binary file not shown. (new image, 222 KiB)

BIN	docs/docsite/rst/common/images/troubleshooting_options.png	Normal file
Binary file not shown. (new image, 59 KiB)
@@ -1,4 +1,4 @@
.. _ug_inventories:

*******************
Inventories

@@ -482,6 +482,7 @@ Inventory updates use dynamically-generated YAML files which are parsed by their
- :ref:`ug_source_rhv`
- :ref:`ug_source_rhaap`
- :ref:`ug_source_terraform`
- :ref:`ug_source_ocpv`


Newly created configurations for inventory sources will contain the default plugin configuration values. If you want your newly created inventory sources to match the output of legacy sources, you must apply a specific set of configuration values for that source. To ensure backward compatibility, AWX uses "templates" for each of these sources to force the output of inventory plugins into the legacy format. Refer to :ref:`ir_inv_plugin_templates_reference` section of this guide for each source and their respective templates to help you migrate to the new style inventory plugin output.

@@ -1100,11 +1101,11 @@ This inventory source uses the `terraform_state <https://github.com/ansible-coll

1. To configure this type of sourced inventory, select **Terraform State** from the Source field.

2. The Create new source window expands with the required **Credential** field. Choose from an existing Terraform backend Credential. For more information, refer to :ref:`ug_credentials`.
2. The Create new source window expands with the required **Credential** field. Choose from an existing Terraform backend credential. For more information, refer to :ref:`ug_credentials_terraform`.

3. You can optionally specify the verbosity, host filter, enabled variable/value, and update options as described in the main procedure for :ref:`adding a source <ug_add_inv_common_fields>`. For Terraform, enable **Overwrite** and **Update on launch** options.

4. Use the **Source Variables** field to override variables used by the ``controller`` inventory plugin. Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two. For more information on these variables, see the `terraform_state <https://github.com/ansible-collections/cloud.terraform/blob/main/docs/cloud.terraform.terraform_state_inventory.rst>`_ file for detail.
4. Use the **Source Variables** field to override variables used by the ``terraform`` inventory plugin. Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two. For more information on these variables, see the `terraform_state <https://github.com/ansible-collections/cloud.terraform/blob/main/docs/cloud.terraform.terraform_state_inventory.rst>`_ file for detail.

The ``backend_type`` variable is required by the Terraform state inventory plugin. This should match the remote backend configured in the Terraform backend credential, here is an example for an Amazon S3 backend:

@@ -1120,6 +1121,43 @@ This inventory source uses the `terraform_state <https://github.com/ansible-coll
6. To add hosts for AWS EC2, GCE, and Azure instances, the Terraform state file in the backend must contain state for resources already deployed to EC2, GCE, or Azure. Refer to each of the Terraform providers' respective documentation to provision instances.


.. _ug_source_ocpv:

OpenShift Virtualization
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. index::
   pair: inventories; OpenShift
   pair: inventories; OCP
   pair: inventory source; OpenShift virtualization


This inventory source uses a cluster that is able to deploy OpenShift (OCP) virtualization. Configuring an OCP virtualization inventory requires a virtual machine deployed in a specific namespace and an OpenShift or Kubernetes API Bearer Token credential.

1. To configure this type of sourced inventory, select **OpenShift Virtualization** from the Source field.
2. The Create new source window expands with the required **Credential** field. Choose from an existing Kubernetes API Bearer Token credential. For more information, refer to :ref:`ug_credentials_ocp_k8s`. In this example, the ``cmv2.engineering.redhat.com`` credential is used.

3. You can optionally specify the verbosity, host filter, enabled variable/value, and update options as described in the main procedure for :ref:`adding a source <ug_add_inv_common_fields>`.

4. Use the **Source Variables** field to override variables used by the ``kubernetes`` inventory plugin. Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two. For more information on these variables, see the `kubevirt.core.kubevirt inventory source <https://kubevirt.io/kubevirt.core/main/plugins/kubevirt.html#parameters>`_ documentation for detail.

In the example below, the ``connections`` variable is used to specify access to a particular namespace in a cluster.

::

  ---
  connections:
  - namespaces:
    - hao-test


.. image:: ../common/images/inventories-create-source-ocpvirt-example.png

5. Save the configuration and click the **Sync** button to sync the inventory.


.. _ug_customscripts:

Export old inventory scripts
@@ -1,4 +1,4 @@
aiohttp>=3.8.6  # CVE-2023-47627
aiohttp>=3.9.4  # CVE-2024-30251
ansiconv==1.0.0  # UPGRADE BLOCKER: from 2013, consider replacing instead of upgrading
asciichartpy
asn1

@@ -1,6 +1,6 @@
adal==1.2.7
    # via msrestazure
aiohttp==3.9.3
aiohttp==3.9.5
    # via
    #   -r /awx_devel/requirements/requirements.in
    #   aiohttp-retry

@@ -24,10 +24,9 @@ gprof2dot
atomicwrites
flake8
yamllint
pip>=21.3  # PEP 660 – Editable installs for pyproject.toml based builds (wheel based)
pip>=21.3,<=24.0  # PEP 660 – Editable installs for pyproject.toml based builds (wheel based)

# python debuggers
debugpy
remote-pdb
sdb
@@ -4,6 +4,7 @@ awx_image: 'ghcr.io/ansible/awx_devel'
pg_port: 5432
pg_username: 'awx'
pg_database: 'awx'
pg_tls: false
control_plane_node_count: 1
minikube_container_group: false
receptor_socket_file: /var/run/awx-receptor/receptor.sock

@@ -5,6 +5,9 @@ DATABASES = {
        'NAME': "{{ pg_database }}",
        'USER': "{{ pg_username }}",
        'PASSWORD': "{{ pg_password }}",
{% if pg_tls|bool %}
        'OPTIONS': {'sslmode': 'require'},
{% endif %}
{% if enable_pgbouncer|bool %}
        'HOST': "pgbouncer",
        'PORT': "{{ pgbouncer_port }}",
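For orientation, this is roughly what the template above renders to when `pg_tls` is true and pgbouncer is disabled; the engine, host, port, and password shown here are placeholders, not values taken from the diff.

```python
# Illustrative only: an assumed rendering of the Django settings template with
# PG_TLS enabled. Host, port, password, and engine are placeholder values.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'awx',
        'USER': 'awx',
        'PASSWORD': 'example-password',
        'HOST': 'postgres',
        'PORT': '5432',
        'OPTIONS': {'sslmode': 'require'},  # added by the pg_tls branch above
    }
}
```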
@@ -237,13 +237,24 @@ services:
    image: quay.io/sclorg/postgresql-15-c9s
    container_name: tools_postgres_1
    # additional logging settings for postgres can be found https://www.postgresql.org/docs/current/runtime-config-logging.html
    command: run-postgresql -c log_destination=stderr -c log_min_messages=info -c log_min_duration_statement={{ pg_log_min_duration_statement|default(1000) }} -c max_connections={{ pg_max_connections|default(1024) }}
    command: >
      bash -c "
{% if pg_tls|bool %}
      mkdir -p /opt/app-root/src/certs
      && openssl genrsa -out /opt/app-root/src/certs/tls.key 2048
      && openssl req -new -x509 -key /opt/app-root/src/certs/tls.key -out /opt/app-root/src/certs/tls.crt -subj '/CN=postgres'
      && chmod 600 /opt/app-root/src/certs/tls.crt /opt/app-root/src/certs/tls.key &&
{% endif %}
      run-postgresql -c log_destination=stderr -c log_min_messages=info -c log_min_duration_statement={{ pg_log_min_duration_statement|default(1000) }} -c max_connections={{ pg_max_connections|default(1024) }}"
    environment:
      POSTGRESQL_USER: {{ pg_username }}
      POSTGRESQL_DATABASE: {{ pg_database }}
      POSTGRESQL_PASSWORD: {{ pg_password }}
    volumes:
      - "awx_db_15:/var/lib/pgsql/data"
{% if pg_tls|bool %}
      - "../../docker-compose/pgssl.conf:/opt/app-root/src/postgresql-cfg/pgssl.conf"
{% endif %}
    networks:
      - awx
    ports:
5	tools/docker-compose/pgssl.conf	Normal file

@@ -0,0 +1,5 @@
ssl = on
ssl_cert_file = '/opt/app-root/src/certs/tls.crt'   # server certificate
ssl_key_file = '/opt/app-root/src/certs/tls.key'    # server private key
#ssl_ca_file                                        # trusted certificate authorities
#ssl_crl_file                                       # certificates revoked by certificate authorities
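A quick sanity check (not part of the diff) for confirming the dev Postgres container actually negotiates TLS once pgssl.conf is mounted; the connection parameters below are placeholders for whatever your docker-compose inventory uses.

```python
# Illustrative only: connect with sslmode=require and ask the server whether
# this backend's connection is encrypted. Host/credentials are assumed values.
import psycopg

conninfo = "host=localhost port=5432 dbname=awx user=awx password=example sslmode=require"
with psycopg.connect(conninfo) as conn:
    with conn.cursor() as cur:
        cur.execute("SELECT ssl, version FROM pg_stat_ssl WHERE pid = pg_backend_pid()")
        print(cur.fetchone())  # e.g. (True, 'TLSv1.3') when the connection is encrypted
```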
7	tools/scripts/ig-hotfix/.gitignore	vendored	Normal file

@@ -0,0 +1,7 @@
*~
customer-backup.tar.*
*.db
*.log
*.dot
*.png
*.tar.*

36	tools/scripts/ig-hotfix/README.md	Normal file

@@ -0,0 +1,36 @@
# Hotfix for Instance Groups and Roles after backup/restore corruption #

## role_check.py ##

`awx-manage shell < role_check.py 2> role_check.log > fix.py`

This checks the roles and resources on the system, and constructs a
fix.py file that will change the linkages of the roles that it finds
are incorrect. The command line above also redirects logging output to
a file. The fix.py file (and the log file) can then be examined (and
potentially modified) before performing the actual fix.

`awx-manage shell < fix.py > fix.log 2>&1`

This performs the fix, while redirecting all output to another log
file. Ideally, this file should wind up being empty after execution
completes.

`awx-manage shell < role_check.py 2> role_check2.log > fix2.py`

Re-run the check script in order to see that there are no remaining
problems. Ideally the log file will only consist of the equal-sign
lines.


## foreignkeys.sql ##

This script uses Postgres internals to determine all of the foreign
keys that cross the boundaries established by our (old) backup/restore
logic. Users have no need to run this.


## scenarios/test*.py ##

These files were used to set up corruption similar to that caused by
faulty backup/restore, for testing purposes. Do not use.

38	tools/scripts/ig-hotfix/foreignkeys.sql	Normal file

@@ -0,0 +1,38 @@
DO $$
DECLARE
    -- add table names here when they get excluded from main / included in topology dump
    topology text[] := ARRAY['main_instance', 'main_instancegroup', 'main_instancegroup_instances'];

    -- add table names here when they are handled by the special-case mapping
    mapping text[] := ARRAY['main_organizationinstancegroupmembership', 'main_unifiedjobtemplateinstancegroupmembership', 'main_inventoryinstancegroupmembership'];
BEGIN
    CREATE TABLE tmp_fk_from AS (
        SELECT DISTINCT
            tc.table_name,
            ccu.table_name AS foreign_table_name
        FROM information_schema.table_constraints AS tc
        JOIN information_schema.constraint_column_usage AS ccu
            ON ccu.constraint_name = tc.constraint_name
        WHERE tc.constraint_type = 'FOREIGN KEY'
            AND tc.table_name = ANY (topology)
            AND NOT ccu.table_name = ANY (topology || mapping)
    );

    CREATE TABLE tmp_fk_into AS (
        SELECT DISTINCT
            tc.table_name,
            ccu.table_name AS foreign_table_name
        FROM information_schema.table_constraints AS tc
        JOIN information_schema.constraint_column_usage AS ccu
            ON ccu.constraint_name = tc.constraint_name
        WHERE tc.constraint_type = 'FOREIGN KEY'
            AND ccu.table_name = ANY (topology)
            AND NOT tc.table_name = ANY (topology || mapping)
    );
END $$;

SELECT * FROM tmp_fk_from;
SELECT * FROM tmp_fk_into;

DROP TABLE tmp_fk_from;
DROP TABLE tmp_fk_into;

204	tools/scripts/ig-hotfix/role_check.py	Normal file

@@ -0,0 +1,204 @@
from collections import defaultdict
import json
import sys

from django.contrib.contenttypes.models import ContentType
from django.db.models.fields.related_descriptors import ManyToManyDescriptor

from awx.main.fields import ImplicitRoleField
from awx.main.models.rbac import Role


team_ct = ContentType.objects.get(app_label='main', model='team')

crosslinked = defaultdict(lambda: defaultdict(dict))
crosslinked_parents = defaultdict(list)
orphaned_roles = set()


def resolve(obj, path):
    fname, _, path = path.partition('.')
    new_obj = getattr(obj, fname, None)
    if new_obj is None:
        return set()
    if not path:
        return {
            new_obj,
        }

    if isinstance(new_obj, ManyToManyDescriptor):
        return {x for o in new_obj.all() for x in resolve(o, path)}

    return resolve(new_obj, path)


for ct in ContentType.objects.order_by('id'):
    cls = ct.model_class()
    if cls is None:
        sys.stderr.write(f"{ct!r} does not have a corresponding model class in the codebase. Skipping.\n")
        continue
    if not any(isinstance(f, ImplicitRoleField) for f in cls._meta.fields):
        continue
    for obj in cls.objects.all():
        for f in cls._meta.fields:
            if not isinstance(f, ImplicitRoleField):
                continue
            r_id = getattr(obj, f'{f.name}_id', None)
            try:
                r = getattr(obj, f.name, None)
            except Role.DoesNotExist:
                sys.stderr.write(f"{cls} id={obj.id} {f.name} points to Role id={r_id}, which is not in the database.\n")
                crosslinked[ct.id][obj.id][f'{f.name}_id'] = None
                continue
            if not r:
                sys.stderr.write(f"{cls} id={obj.id} {f.name} does not have a Role object\n")
                crosslinked[ct.id][obj.id][f'{f.name}_id'] = None
                continue
            if r.content_object != obj:
                sys.stderr.write(
                    f"{cls.__name__} id={obj.id} {f.name} is pointing to a Role that is assigned to a different object: role.id={r.id} {r.content_type!r} {r.object_id} {r.role_field}\n"
                )
                crosslinked[ct.id][obj.id][f'{f.name}_id'] = None
                continue


sys.stderr.write('===================================\n')
for r in Role.objects.exclude(role_field__startswith='system_').order_by('id'):

    # The ancestor list should be a superset of both parents and implicit_parents.
    # Also, parents should be a superset of implicit_parents.
    parents = set(r.parents.values_list('id', flat=True))
    ancestors = set(r.ancestors.values_list('id', flat=True))
    implicit = set(json.loads(r.implicit_parents))

    if not implicit:
        sys.stderr.write(f"Role id={r.id} has no implicit_parents\n")
    if not parents <= ancestors:
        sys.stderr.write(f"Role id={r.id} has parents that are not in the ancestor list: {parents - ancestors}\n")
        crosslinked[r.content_type_id][r.object_id][f'{r.role_field}_id'] = r.id
    if not implicit <= parents:
        sys.stderr.write(f"Role id={r.id} has implicit_parents that are not in the parents list: {implicit - parents}\n")
        crosslinked[r.content_type_id][r.object_id][f'{r.role_field}_id'] = r.id
    if not implicit <= ancestors:
        sys.stderr.write(f"Role id={r.id} has implicit_parents that are not in the ancestor list: {implicit - ancestors}\n")
        crosslinked[r.content_type_id][r.object_id][f'{r.role_field}_id'] = r.id

    # Check that the Role's generic foreign key points to a legitimate object
    if not r.content_object:
        sys.stderr.write(f"Role id={r.id} is missing a valid content_object: {r.content_type!r} {r.object_id} {r.role_field}\n")
        orphaned_roles.add(r.id)
        continue

    # Check the resource's role field parents for consistency with Role.parents.all().
    f = r.content_object._meta.get_field(r.role_field)
    f_parent = (
        set(f.parent_role)
        if isinstance(f.parent_role, list)
        else {
            f.parent_role,
        }
    )
    dotted = {x for p in f_parent if '.' in p for x in resolve(r.content_object, p)}
    plus = set()
    for p in r.parents.all():
        if p.singleton_name:
            if f'singleton:{p.singleton_name}' not in f_parent:
                plus.add(p)
        elif p.content_type == team_ct:
            # Team has been granted this role; probably legitimate.
            if p.role_field in ('admin_role', 'member_role'):
                continue
        elif (p.content_type, p.object_id) == (r.content_type, r.object_id):
            if p.role_field not in f_parent:
                plus.add(p)
        elif p in dotted:
            continue
        else:
            plus.add(p)

    if plus:
        plus_repr = [f"{x.content_type!r} {x.object_id} {x.role_field}" for x in plus]
        sys.stderr.write(f"Role id={r.id} has cross-linked parents: {plus_repr}\n")
        crosslinked_parents[r.id].extend(x.id for x in plus)

    try:
        rev = getattr(r.content_object, r.role_field, None)
    except Role.DoesNotExist:
        sys.stderr.write(f"Role id={r.id} {r.content_type!r} {r.object_id} {r.role_field} points at an object with a broken role.\n")
        crosslinked[r.content_type_id][r.object_id][f'{r.role_field}_id'] = r.id
        continue
    if rev is None or r.id != rev.id:
        if rev and (r.content_type_id, r.object_id, r.role_field) == (rev.content_type_id, rev.object_id, rev.role_field):
            sys.stderr.write(
                f"Role id={r.id} {r.content_type!r} {r.object_id} {r.role_field} is an orphaned duplicate of Role id={rev.id}, which is actually being used by the assigned resource\n"
            )
            orphaned_roles.add(r.id)
        elif not rev:
            sys.stderr.write(f"Role id={r.id} {r.content_type!r} {r.object_id} {r.role_field} is pointing to an object currently using no role\n")
            crosslinked[r.content_type_id][r.object_id][f'{r.role_field}_id'] = r.id
        else:
            sys.stderr.write(
                f"Role id={r.id} {r.content_type!r} {r.object_id} {r.role_field} is pointing to an object using a different role: id={rev.id} {rev.content_type!r} {rev.object_id} {rev.role_field}\n"
            )
            crosslinked[r.content_type_id][r.object_id][f'{r.role_field}_id'] = r.id
        continue


sys.stderr.write('===================================\n')


print(
    f"""\
from collections import Counter

from django.contrib.contenttypes.models import ContentType

from awx.main.fields import ImplicitRoleField
from awx.main.models.rbac import Role


delete_counts = Counter()
update_counts = Counter()

"""
)


print("# Resource objects that are pointing to the wrong Role. Some of these")
print("# do not have corresponding Roles anywhere, so delete the foreign key.")
print("# For those, new Roles will be constructed upon save.\n")
print("queue = set()\n")
for ct, objs in crosslinked.items():
    print(f"cls = ContentType.objects.get(id={ct}).model_class()\n")
    for obj, kv in objs.items():
        print(f"c = cls.objects.filter(id={obj}).update(**{kv!r})")
        print("update_counts.update({cls._meta.label: c})")
        print(f"queue.add((cls, {obj}))")

print("\n# Role objects that are assigned to objects that do not exist")
for r in orphaned_roles:
    print(f"c = Role.objects.filter(id={r}).update(object_id=None)")
    print("update_counts.update({'main.Role': c})")
    print(f"_, c = Role.objects.filter(id={r}).delete()")
    print("delete_counts.update(c)")

print('\n\n')
for child, parents in crosslinked_parents.items():
    print(f"r = Role.objects.get(id={child})")
    print(f"r.parents.remove(*Role.objects.filter(id__in={parents!r}))")
    print(f"queue.add((r.content_object.__class__, r.object_id))")

print('\n\n')
print('print("Objects deleted:", dict(delete_counts.most_common()))')
print('print("Objects updated:", dict(update_counts.most_common()))')

print("\n\nfor cls, obj_id in queue:")
print("    role_fields = [f for f in cls._meta.fields if isinstance(f, ImplicitRoleField)]")
print("    obj = cls.objects.get(id=obj_id)")
print("    for f in role_fields:")
print("        r = getattr(obj, f.name, None)")
print("        if r is not None:")
print("            print(f'updating implicit parents on Role {r.id}')")
print("            r.implicit_parents = '[]'")
print("            r.save()")
print("    obj.save()")
19	tools/scripts/ig-hotfix/scenarios/test.py	Normal file

@@ -0,0 +1,19 @@
from django.db import connection
from awx.main.models import InstanceGroup

InstanceGroup.objects.filter(name__in=('green', 'yellow', 'red')).delete()

green = InstanceGroup.objects.create(name='green')
red = InstanceGroup.objects.create(name='red')
yellow = InstanceGroup.objects.create(name='yellow')

for ig in InstanceGroup.objects.all():
    print((ig.id, ig.name, ig.use_role_id))

with connection.cursor() as cursor:
    cursor.execute("UPDATE main_instancegroup SET use_role_id = NULL WHERE name = 'red'")
    cursor.execute(f"UPDATE main_instancegroup SET use_role_id = {green.use_role_id} WHERE name = 'yellow'")

print("=====================================")
for ig in InstanceGroup.objects.all():
    print((ig.id, ig.name, ig.use_role_id))

20	tools/scripts/ig-hotfix/scenarios/test2.py	Normal file

@@ -0,0 +1,20 @@
from django.db import connection
from awx.main.models import InstanceGroup

InstanceGroup.objects.filter(name__in=('green', 'yellow', 'red')).delete()

green = InstanceGroup.objects.create(name='green')
red = InstanceGroup.objects.create(name='red')
yellow = InstanceGroup.objects.create(name='yellow')

for ig in InstanceGroup.objects.all():
    print((ig.id, ig.name, ig.use_role_id))

with connection.cursor() as cursor:
    cursor.execute(f"UPDATE main_rbac_roles SET object_id = NULL WHERE id = {red.use_role_id}")
    cursor.execute("UPDATE main_instancegroup SET use_role_id = NULL WHERE name = 'red'")
    cursor.execute(f"UPDATE main_instancegroup SET use_role_id = {green.use_role_id} WHERE name = 'yellow'")

print("=====================================")
for ig in InstanceGroup.objects.all():
    print((ig.id, ig.name, ig.use_role_id))

30	tools/scripts/ig-hotfix/scenarios/test3.py	Normal file

@@ -0,0 +1,30 @@
from django.db import connection
from awx.main.models import InstanceGroup

InstanceGroup.objects.filter(name__in=('green', 'yellow', 'red', 'blue')).delete()

green = InstanceGroup.objects.create(name='green')
red = InstanceGroup.objects.create(name='red')
yellow = InstanceGroup.objects.create(name='yellow')
blue = InstanceGroup.objects.create(name='blue')

for ig in InstanceGroup.objects.all():
    print((ig.id, ig.name, ig.use_role_id))

with connection.cursor() as cursor:
    cursor.execute("ALTER TABLE main_instancegroup DROP CONSTRAINT main_instancegroup_use_role_id_48ea7ecc_fk_main_rbac_roles_id")

    cursor.execute(f"UPDATE main_rbac_roles SET object_id = NULL WHERE id = {red.use_role_id}")
    cursor.execute(f"DELETE FROM main_rbac_roles_parents WHERE from_role_id = {blue.use_role_id} OR to_role_id = {blue.use_role_id}")
    cursor.execute(f"DELETE FROM main_rbac_role_ancestors WHERE ancestor_id = {blue.use_role_id} OR descendent_id = {blue.use_role_id}")
    cursor.execute(f"DELETE FROM main_rbac_roles WHERE id = {blue.use_role_id}")
    cursor.execute("UPDATE main_instancegroup SET use_role_id = NULL WHERE name = 'red'")
    cursor.execute(f"UPDATE main_instancegroup SET use_role_id = {green.use_role_id} WHERE name = 'yellow'")

    cursor.execute(
        "ALTER TABLE main_instancegroup ADD CONSTRAINT main_instancegroup_use_role_id_48ea7ecc_fk_main_rbac_roles_id FOREIGN KEY (use_role_id) REFERENCES public.main_rbac_roles(id) DEFERRABLE INITIALLY DEFERRED NOT VALID"
    )

print("=====================================")
for ig in InstanceGroup.objects.all():
    print((ig.id, ig.name, ig.use_role_id))

26	tools/scripts/ig-hotfix/scenarios/test4.py	Normal file

@@ -0,0 +1,26 @@
from django.db import connection
from awx.main.models import InstanceGroup

InstanceGroup.objects.filter(name__in=('green', 'yellow', 'red')).delete()

green = InstanceGroup.objects.create(name='green')
red = InstanceGroup.objects.create(name='red')
yellow = InstanceGroup.objects.create(name='yellow')

for ig in InstanceGroup.objects.all():
    print((ig.id, ig.name, ig.use_role_id))

with connection.cursor() as cursor:
    cursor.execute("UPDATE main_instancegroup SET use_role_id = NULL WHERE name = 'red'")
    cursor.execute(f"UPDATE main_instancegroup SET use_role_id = {green.use_role_id} WHERE name = 'yellow'")

green.refresh_from_db()
red.refresh_from_db()
yellow.refresh_from_db()
green.save()
red.save()
yellow.save()

print("=====================================")
for ig in InstanceGroup.objects.all():
    print((ig.id, ig.name, ig.use_role_id))
@@ -25,6 +25,7 @@ SOSREPORT_CONTROLLER_COMMANDS = [
    "ls -ll /var/run/awx-receptor",  # list contents of directory where receptor socket should be
    "ls -ll /etc/receptor",
    "receptorctl --socket /var/run/awx-receptor/receptor.sock status",  # Get information about the status of the mesh
    "receptorctl --socket /var/run/awx-receptor/receptor.sock work list",  # Get list of receptor work units
    "umask -p",  # check current umask
]