Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 03:24:50 -03:30)
Compare commits
63 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 2529fdcfd7 |  |
|  | 19dff9c2d1 |  |
|  | 2a6cf032f8 |  |
|  | 6119b33a50 |  |
|  | aacf9653c5 |  |
|  | 325f5250db |  |
|  | b14518c1e5 |  |
|  | 6440e3cb55 |  |
|  | b5f6aac3aa |  |
|  | 6e5e1c8fff |  |
|  | bf42c63c12 |  |
|  | df24cb692b |  |
|  | 0d825a744b |  |
|  | 5e48bf091b |  |
|  | 1294cec92c |  |
|  | dae12ee1b8 |  |
|  | b091f6cf79 |  |
|  | fe564c5fad |  |
|  | eb3bc84461 |  |
|  | 6aa2997dce |  |
|  | dd00bbba42 |  |
|  | fe6bac6d9e |  |
|  | 87abbd4b10 |  |
|  | fb04e5d9f6 |  |
|  | 478e2cb28d |  |
|  | 2ac304d289 |  |
|  | 3e5851f3af |  |
|  | adb1b12074 |  |
|  | 8fae20c48a |  |
|  | ec364cc60e |  |
|  | 1cfd51764e |  |
|  | 0b8fedfd04 |  |
|  | 72a8173462 |  |
|  | 873b1fbe07 |  |
|  | 1f36e84b45 |  |
|  | 8c4bff2b86 |  |
|  | 14f636af84 |  |
|  | 0057c8daf6 |  |
|  | d8a28b3c06 |  |
|  | 40c2b700fe |  |
|  | 71d548f9e5 |  |
|  | dd98963f86 |  |
|  | 4b467dfd8d |  |
|  | 456b56778e |  |
|  | 5b3cb20f92 |  |
|  | d7086a3c88 |  |
|  | 21e7ab078c |  |
|  | 946ca0b3b8 |  |
|  | b831dbd608 |  |
|  | 943e455f9d |  |
|  | 53bc88abe2 |  |
|  | 3b4d95633e |  |
|  | 93c329d9d5 |  |
|  | f4c53aaf22 |  |
|  | 333ef76cbd |  |
|  | fc0b58fd04 |  |
|  | bef0a8b23a |  |
|  | a5f33456b6 |  |
|  | 21fb395912 |  |
|  | 44255f378d |  |
|  | 71a6d48612 |  |
|  | b7e5f5d1e1 |  |
|  | b6b167627c |  |
.github/actions/run_awx_devel/action.yml (vendored, 12 changed lines)

@@ -43,10 +43,14 @@ runs:
   - name: Update default AWX password
     shell: bash
     run: |
-      while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
-      do
-        echo "Waiting for AWX..."
-        sleep 5
+      SECONDS=0
+      while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]; do
+        if [[ $SECONDS -gt 600 ]]; then
+          echo "Timing out, AWX never came up"
+          exit 1
+        fi
+        echo "Waiting for AWX..."
+        sleep 5
      done
      echo "AWX is up, updating the password..."
      docker exec -i tools_awx_1 sh <<-EOSH
.github/dependabot.yml (vendored, new file, 10 added lines)

@@ -0,0 +1,10 @@
+version: 2
+updates:
+  - package-ecosystem: "pip"
+    directory: "docs/docsite/"
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 2
+    labels:
+      - "docs"
+      - "dependencies"
.github/triage_replies.md (vendored, 4 changed lines)

@@ -7,8 +7,8 @@

 ## PRs/Issues

-### Visit our mailing list
-- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information for ways to connect with us.
+### Visit the Forum or Matrix
+- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on either the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) or the [Ansible Community Forum](https://forum.ansible.com/tag/awx)?

 ### Denied Submission
.github/workflows/ci.yml (vendored, 8 changed lines)

@@ -11,6 +11,7 @@ jobs:
   common-tests:
     name: ${{ matrix.tests.name }}
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     permissions:
       packages: write
       contents: read
@@ -20,6 +21,8 @@
       tests:
         - name: api-test
           command: /start_tests.sh
+        - name: api-migrations
+          command: /start_tests.sh test_migrations
         - name: api-lint
           command: /var/lib/awx/venv/awx/bin/tox -e linters
         - name: api-swagger
@@ -47,6 +50,7 @@

   dev-env:
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     steps:
       - uses: actions/checkout@v3

@@ -61,6 +65,7 @@

   awx-operator:
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     steps:
       - name: Checkout awx
         uses: actions/checkout@v3
@@ -110,6 +115,7 @@
   collection-sanity:
     name: awx_collection sanity
     runs-on: ubuntu-latest
+    timeout-minutes: 30
     strategy:
       fail-fast: false
     steps:
@@ -129,6 +135,7 @@
   collection-integration:
     name: awx_collection integration
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
@@ -180,6 +187,7 @@
   collection-integration-coverage-combine:
     name: combine awx_collection integration coverage
     runs-on: ubuntu-latest
+    timeout-minutes: 10
     needs:
       - collection-integration
     strategy:
.github/workflows/devel_images.yml (vendored, 1 changed line)

@@ -12,6 +12,7 @@ jobs:
   push:
     if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     permissions:
       packages: write
       contents: read
.github/workflows/docs.yml (vendored, 1 changed line)

@@ -6,6 +6,7 @@ jobs:
   docsite-build:
     name: docsite test build
     runs-on: ubuntu-latest
+    timeout-minutes: 30
     steps:
       - uses: actions/checkout@v3
@@ -9,6 +9,7 @@ on:
 jobs:
   push:
     runs-on: ubuntu-latest
+    timeout-minutes: 20
     permissions:
       packages: write
       contents: read
.github/workflows/label_issue.yml (vendored, 2 changed lines)

@@ -13,6 +13,7 @@ permissions:
 jobs:
   triage:
     runs-on: ubuntu-latest
+    timeout-minutes: 20
     name: Label Issue

     steps:
@@ -26,6 +27,7 @@

   community:
     runs-on: ubuntu-latest
+    timeout-minutes: 20
     name: Label Issue - Community
     steps:
       - uses: actions/checkout@v3
.github/workflows/label_pr.yml (vendored, 2 changed lines)

@@ -14,6 +14,7 @@ permissions:
 jobs:
   triage:
     runs-on: ubuntu-latest
+    timeout-minutes: 20
     name: Label PR

     steps:
@@ -25,6 +26,7 @@

   community:
     runs-on: ubuntu-latest
+    timeout-minutes: 20
     name: Label PR - Community
     steps:
       - uses: actions/checkout@v3
.github/workflows/pr_body_check.yml (vendored, 1 changed line)

@@ -10,6 +10,7 @@ jobs:
     if: github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')
     name: Scan PR description for semantic versioning keywords
     runs-on: ubuntu-latest
+    timeout-minutes: 20
     permissions:
       packages: write
       contents: read
.github/workflows/promote.yml (vendored, 5 changed lines)

@@ -13,8 +13,9 @@ permissions:

 jobs:
   promote:
-    if: endsWith(github.repository, '/awx')
+    if: endsWith(github.repository, '/awx')
     runs-on: ubuntu-latest
+    timeout-minutes: 90
     steps:
       - name: Checkout awx
         uses: actions/checkout@v3
@@ -46,7 +47,7 @@
       COLLECTION_TEMPLATE_VERSION: true
     run: |
       make build_collection
-      if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
+      if [ "$(curl -L --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
        echo "Galaxy release already done"; \
      else \
        ansible-galaxy collection publish \
.github/workflows/stage.yml (vendored, 1 changed line)

@@ -23,6 +23,7 @@ jobs:
   stage:
     if: endsWith(github.repository, '/awx')
     runs-on: ubuntu-latest
+    timeout-minutes: 90
     permissions:
       packages: write
       contents: write
.github/workflows/update_dependabot_prs.yml (vendored, 1 changed line)

@@ -9,6 +9,7 @@ jobs:
     name: Update Dependabot Prs
     if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui')
     runs-on: ubuntu-latest
+    timeout-minutes: 20

     steps:
       - name: Checkout branch
.github/workflows/upload_schema.yml (vendored, 1 changed line)

@@ -13,6 +13,7 @@ on:
 jobs:
   push:
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     permissions:
       packages: write
       contents: read
@@ -10,6 +10,7 @@ build:
       3.11
   commands:
     - pip install --user tox
-    - python3 -m tox -e docs
+    - python3 -m tox -e docs --notest -v
+    - python3 -m tox -e docs --skip-pkg-install -q
    - mkdir -p _readthedocs/html/
    - mv docs/docsite/build/html/* _readthedocs/html/
@@ -22,7 +22,7 @@ recursive-exclude awx/settings local_settings.py*
 include tools/scripts/request_tower_configuration.sh
 include tools/scripts/request_tower_configuration.ps1
 include tools/scripts/automation-controller-service
-include tools/scripts/failure-event-handler
+include tools/scripts/rsyslog-4xx-recovery
 include tools/scripts/awx-python
 include awx/playbooks/library/mkfifo.py
 include tools/sosreport/*
Makefile (14 changed lines)

@@ -43,6 +43,8 @@ PROMETHEUS ?= false
 GRAFANA ?= false
 # If set to true docker-compose will also start a hashicorp vault instance
 VAULT ?= false
+# If set to true docker-compose will also start a hashicorp vault instance with TLS enabled
+VAULT_TLS ?= false
 # If set to true docker-compose will also start a tacacs+ instance
 TACACS ?= false

@@ -61,7 +63,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
 SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
 # These should be upgraded in the AWX and Ansible venv before attempting
 # to install the actual requirements
-VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
+VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==8.0.4 wheel==0.38.4

 NAME ?= awx

@@ -324,6 +326,12 @@ test:
 	cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
 	awx-manage check_migrations --dry-run --check -n 'missing_migration_file'

+test_migrations:
+	if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/awx/bin/activate; \
+	fi; \
+	PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test $(PYTEST_ARGS) $(TEST_DIRS)
+
 ## Runs AWX_DOCKER_CMD inside a new docker container.
 docker-runner:
 	docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
@@ -522,13 +530,15 @@ docker-compose-sources: .git/hooks/pre-commit
 		-e enable_prometheus=$(PROMETHEUS) \
 		-e enable_grafana=$(GRAFANA) \
 		-e enable_vault=$(VAULT) \
+		-e vault_tls=$(VAULT_TLS) \
 		-e enable_tacacs=$(TACACS) \
 		$(EXTRA_SOURCES_ANSIBLE_OPTS)

 docker-compose: awx/projects docker-compose-sources
 	ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
 	ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
-		-e enable_vault=$(VAULT);
+		-e enable_vault=$(VAULT) \
+		-e vault_tls=$(VAULT_TLS);
 	$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans

 docker-compose-credential-plugins: awx/projects docker-compose-sources
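With these flags, a TLS-enabled Vault container can be started alongside the development environment, e.g. `make docker-compose VAULT=true VAULT_TLS=true` (invocation inferred from the variables above; adjust to your own workflow).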
@@ -1,450 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

# Python
import re
import json
from functools import reduce

# Django
from django.core.exceptions import FieldError, ValidationError, FieldDoesNotExist
from django.db import models
from django.db.models import Q, CharField, IntegerField, BooleanField, TextField, JSONField
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
from django.db.models.functions import Cast
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _

# Django REST Framework
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.filters import BaseFilterBackend

# AWX
from awx.main.utils import get_type_for_model, to_python_boolean
from awx.main.utils.db import get_all_field_names


class TypeFilterBackend(BaseFilterBackend):
    """
    Filter on type field now returned with all objects.
    """

    def filter_queryset(self, request, queryset, view):
        try:
            types = None
            for key, value in request.query_params.items():
                if key == 'type':
                    if ',' in value:
                        types = value.split(',')
                    else:
                        types = (value,)
            if types:
                types_map = {}
                for ct in ContentType.objects.filter(Q(app_label='main') | Q(app_label='auth', model='user')):
                    ct_model = ct.model_class()
                    if not ct_model:
                        continue
                    ct_type = get_type_for_model(ct_model)
                    types_map[ct_type] = ct.pk
                model = queryset.model
                model_type = get_type_for_model(model)
                if 'polymorphic_ctype' in get_all_field_names(model):
                    types_pks = set([v for k, v in types_map.items() if k in types])
                    queryset = queryset.filter(polymorphic_ctype_id__in=types_pks)
                elif model_type in types:
                    queryset = queryset
                else:
                    queryset = queryset.none()
            return queryset
        except FieldError as e:
            # Return a 400 for invalid field names.
            raise ParseError(*e.args)


def get_fields_from_path(model, path):
    """
    Given a Django ORM lookup path (possibly over multiple models)
    Returns the fields in the line, and also the revised lookup path
    ex., given
        model=Organization
        path='project__timeout'
    returns tuple of fields traversed as well and a corrected path,
    for special cases we do substitutions
        ([<IntegerField for timeout>], 'project__timeout')
    """
    # Store of all the fields used to detect repeats
    field_list = []
    new_parts = []
    for name in path.split('__'):
        if model is None:
            raise ParseError(_('No related model for field {}.').format(name))
        # HACK: Make project and inventory source filtering by old field names work for backwards compatibility.
        if model._meta.object_name in ('Project', 'InventorySource'):
            name = {'current_update': 'current_job', 'last_update': 'last_job', 'last_update_failed': 'last_job_failed', 'last_updated': 'last_job_run'}.get(
                name, name
            )

        if name == 'type' and 'polymorphic_ctype' in get_all_field_names(model):
            name = 'polymorphic_ctype'
            new_parts.append('polymorphic_ctype__model')
        else:
            new_parts.append(name)

        if name in getattr(model, 'PASSWORD_FIELDS', ()):
            raise PermissionDenied(_('Filtering on password fields is not allowed.'))
        elif name == 'pk':
            field = model._meta.pk
        else:
            name_alt = name.replace("_", "")
            if name_alt in model._meta.fields_map.keys():
                field = model._meta.fields_map[name_alt]
                new_parts.pop()
                new_parts.append(name_alt)
            else:
                field = model._meta.get_field(name)
            if isinstance(field, ForeignObjectRel) and getattr(field.field, '__prevent_search__', False):
                raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
            elif getattr(field, '__prevent_search__', False):
                raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
        if field in field_list:
            # Field traversed twice, could create infinite JOINs, DoSing Tower
            raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name))
        field_list.append(field)
        model = getattr(field, 'related_model', None)

    return field_list, '__'.join(new_parts)


def get_field_from_path(model, path):
    """
    Given a Django ORM lookup path (possibly over multiple models)
    Returns the last field in the line, and the revised lookup path
    ex.
        (<IntegerField for timeout>, 'project__timeout')
    """
    field_list, new_path = get_fields_from_path(model, path)
    return (field_list[-1], new_path)


class FieldLookupBackend(BaseFilterBackend):
    """
    Filter using field lookups provided via query string parameters.
    """

    RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate', 'limit')

    SUPPORTED_LOOKUPS = (
        'exact',
        'iexact',
        'contains',
        'icontains',
        'startswith',
        'istartswith',
        'endswith',
        'iendswith',
        'regex',
        'iregex',
        'gt',
        'gte',
        'lt',
        'lte',
        'in',
        'isnull',
        'search',
    )

    # A list of fields that we know can be filtered on without the possibility
    # of introducing duplicates
    NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField)

    def get_fields_from_lookup(self, model, lookup):
        if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
            path, suffix = lookup.rsplit('__', 1)
        else:
            path = lookup
            suffix = 'exact'

        if not path:
            raise ParseError(_('Query string field name not provided.'))

        # FIXME: Could build up a list of models used across relationships, use
        # those lookups combined with request.user.get_queryset(Model) to make
        # sure user cannot query using objects he could not view.
        field_list, new_path = get_fields_from_path(model, path)

        new_lookup = new_path
        new_lookup = '__'.join([new_path, suffix])
        return field_list, new_lookup

    def get_field_from_lookup(self, model, lookup):
        '''Method to match return type of single field, if needed.'''
        field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
        return (field_list[-1], new_lookup)

    def to_python_related(self, value):
        value = force_str(value)
        if value.lower() in ('none', 'null'):
            return None
        else:
            return int(value)

    def value_to_python_for_field(self, field, value):
        if isinstance(field, models.BooleanField):
            return to_python_boolean(value)
        elif isinstance(field, (ForeignObjectRel, ManyToManyField, GenericForeignKey, ForeignKey)):
            try:
                return self.to_python_related(value)
            except ValueError:
                raise ParseError(_('Invalid {field_name} id: {field_id}').format(field_name=getattr(field, 'name', 'related field'), field_id=value))
        else:
            return field.to_python(value)

    def value_to_python(self, model, lookup, value):
        try:
            lookup.encode("ascii")
        except UnicodeEncodeError:
            raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup)

        field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
        field = field_list[-1]

        needs_distinct = not all(isinstance(f, self.NO_DUPLICATES_ALLOW_LIST) for f in field_list)

        # Type names are stored without underscores internally, but are presented
        # and serialized over the API containing underscores so we remove `_`
        # for polymorphic_ctype__model lookups.
        if new_lookup.startswith('polymorphic_ctype__model'):
            value = value.replace('_', '')
        elif new_lookup.endswith('__isnull'):
            value = to_python_boolean(value)
        elif new_lookup.endswith('__in'):
            items = []
            if not value:
                raise ValueError('cannot provide empty value for __in')
            for item in value.split(','):
                items.append(self.value_to_python_for_field(field, item))
            value = items
        elif new_lookup.endswith('__regex') or new_lookup.endswith('__iregex'):
            try:
                re.compile(value)
            except re.error as e:
                raise ValueError(e.args[0])
        elif new_lookup.endswith('__iexact'):
            if not isinstance(field, (CharField, TextField)):
                raise ValueError(f'{field.name} is not a text field and cannot be filtered by case-insensitive search')
        elif new_lookup.endswith('__search'):
            related_model = getattr(field, 'related_model', None)
            if not related_model:
                raise ValueError('%s is not searchable' % new_lookup[:-8])
            new_lookups = []
            for rm_field in related_model._meta.fields:
                if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
                    new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
            return value, new_lookups, needs_distinct
        else:
            if isinstance(field, JSONField):
                new_lookup = new_lookup.replace(field.name, f'{field.name}_as_txt')
            value = self.value_to_python_for_field(field, value)
        return value, new_lookup, needs_distinct

    def filter_queryset(self, request, queryset, view):
        try:
            # Apply filters specified via query_params. Each entry in the lists
            # below is (negate, field, value).
            and_filters = []
            or_filters = []
            chain_filters = []
            role_filters = []
            search_filters = {}
            needs_distinct = False
            # Can only have two values: 'AND', 'OR'
            # If 'AND' is used, an item must satisfy all conditions to show up in the results.
            # If 'OR' is used, an item just needs to satisfy one condition to appear in results.
            search_filter_relation = 'OR'
            for key, values in request.query_params.lists():
                if key in self.RESERVED_NAMES:
                    continue

                # HACK: make `created` available via API for the Django User ORM model
                # so it keep compatibility with other objects which exposes the `created` attr.
                if queryset.model._meta.object_name == 'User' and key.startswith('created'):
                    key = key.replace('created', 'date_joined')

                # HACK: Make job event filtering by host name mostly work even
                # when not capturing job event hosts M2M.
                if queryset.model._meta.object_name == 'JobEvent' and key.startswith('hosts__name'):
                    key = key.replace('hosts__name', 'or__host__name')
                    or_filters.append((False, 'host__name__isnull', True))

                # Custom __int filter suffix (internal use only).
                q_int = False
                if key.endswith('__int'):
                    key = key[:-5]
                    q_int = True

                # RBAC filtering
                if key == 'role_level':
                    role_filters.append(values[0])
                    continue

                # Search across related objects.
                if key.endswith('__search'):
                    if values and ',' in values[0]:
                        search_filter_relation = 'AND'
                        values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
                    for value in values:
                        search_value, new_keys, _ = self.value_to_python(queryset.model, key, force_str(value))
                        assert isinstance(new_keys, list)
                        search_filters[search_value] = new_keys
                    # by definition, search *only* joins across relations,
                    # so it _always_ needs a .distinct()
                    needs_distinct = True
                    continue

                # Custom chain__ and or__ filters, mutually exclusive (both can
                # precede not__).
                q_chain = False
                q_or = False
                if key.startswith('chain__'):
                    key = key[7:]
                    q_chain = True
                elif key.startswith('or__'):
                    key = key[4:]
                    q_or = True

                # Custom not__ filter prefix.
                q_not = False
                if key.startswith('not__'):
                    key = key[5:]
                    q_not = True

                # Convert value(s) to python and add to the appropriate list.
                for value in values:
                    if q_int:
                        value = int(value)
                    value, new_key, distinct = self.value_to_python(queryset.model, key, value)
                    if distinct:
                        needs_distinct = True
                    if '_as_txt' in new_key:
                        fname = next(item for item in new_key.split('__') if item.endswith('_as_txt'))
                        queryset = queryset.annotate(**{fname: Cast(fname[:-7], output_field=TextField())})
                    if q_chain:
                        chain_filters.append((q_not, new_key, value))
                    elif q_or:
                        or_filters.append((q_not, new_key, value))
                    else:
                        and_filters.append((q_not, new_key, value))

            # Now build Q objects for database query filter.
            if and_filters or or_filters or chain_filters or role_filters or search_filters:
                args = []
                for n, k, v in and_filters:
                    if n:
                        args.append(~Q(**{k: v}))
                    else:
                        args.append(Q(**{k: v}))
                for role_name in role_filters:
                    if not hasattr(queryset.model, 'accessible_pk_qs'):
                        raise ParseError(_('Cannot apply role_level filter to this list because its model does not use roles for access control.'))
                    args.append(Q(pk__in=queryset.model.accessible_pk_qs(request.user, role_name)))
                if or_filters:
                    q = Q()
                    for n, k, v in or_filters:
                        if n:
                            q |= ~Q(**{k: v})
                        else:
                            q |= Q(**{k: v})
                    args.append(q)
                if search_filters and search_filter_relation == 'OR':
                    q = Q()
                    for term, constrains in search_filters.items():
                        for constrain in constrains:
                            q |= Q(**{constrain: term})
                    args.append(q)
                elif search_filters and search_filter_relation == 'AND':
                    for term, constrains in search_filters.items():
                        q_chain = Q()
                        for constrain in constrains:
                            q_chain |= Q(**{constrain: term})
                        queryset = queryset.filter(q_chain)
                for n, k, v in chain_filters:
                    if n:
                        q = ~Q(**{k: v})
                    else:
                        q = Q(**{k: v})
                    queryset = queryset.filter(q)
                queryset = queryset.filter(*args)
                if needs_distinct:
                    queryset = queryset.distinct()
            return queryset
        except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
            raise ParseError(e.args[0])
        except ValidationError as e:
            raise ParseError(json.dumps(e.messages, ensure_ascii=False))


class OrderByBackend(BaseFilterBackend):
    """
    Filter to apply ordering based on query string parameters.
    """

    def filter_queryset(self, request, queryset, view):
        try:
            order_by = None
            for key, value in request.query_params.items():
                if key in ('order', 'order_by'):
                    order_by = value
                    if ',' in value:
                        order_by = value.split(',')
                    else:
                        order_by = (value,)
            default_order_by = self.get_default_ordering(view)
            # glue the order by and default order by together so that the default is the backup option
            order_by = list(order_by or []) + list(default_order_by or [])
            if order_by:
                order_by = self._validate_ordering_fields(queryset.model, order_by)
                # Special handling of the type field for ordering. In this
                # case, we're not sorting exactly on the type field, but
                # given the limited number of views with multiple types,
                # sorting on polymorphic_ctype.model is effectively the same.
                new_order_by = []
                if 'polymorphic_ctype' in get_all_field_names(queryset.model):
                    for field in order_by:
                        if field == 'type':
                            new_order_by.append('polymorphic_ctype__model')
                        elif field == '-type':
                            new_order_by.append('-polymorphic_ctype__model')
                        else:
                            new_order_by.append(field)
                else:
                    for field in order_by:
                        if field not in ('type', '-type'):
                            new_order_by.append(field)
                queryset = queryset.order_by(*new_order_by)
            return queryset
        except FieldError as e:
            # Return a 400 for invalid field names.
            raise ParseError(*e.args)

    def get_default_ordering(self, view):
        ordering = getattr(view, 'ordering', None)
        if isinstance(ordering, str):
            return (ordering,)
        return ordering

    def _validate_ordering_fields(self, model, order_by):
        for field_name in order_by:
            # strip off the negation prefix `-` if it exists
            prefix = ''
            path = field_name
            if field_name[0] == '-':
                prefix = field_name[0]
                path = field_name[1:]
            try:
                field, new_path = get_field_from_path(model, path)
                new_path = '{}{}'.format(prefix, new_path)
            except (FieldError, FieldDoesNotExist) as e:
                raise ParseError(e.args[0])
            yield new_path
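For orientation, the module deleted above implements AWX's query-string filter grammar, now supplied by ansible_base (as the import changes in the next hunk show). Below is a minimal sketch of that grammar against a hypothetical AWX API host; the host, token, and field names are illustrative, but the `or__`/`not__` prefixes, the `__icontains`/`__gt` lookup suffixes, and the `order_by` parameter come directly from the code above.

    import requests

    # Hypothetical host and token, for illustration only.
    AWX_URL = "https://awx.example.com"
    HEADERS = {"Authorization": "Bearer <token>"}

    # FieldLookupBackend: Django-style lookups as query parameters, with
    # or__ / not__ / chain__ prefixes stripped before the field path is resolved.
    params = {
        "name__icontains": "web",        # substring match on name
        "not__status": "failed",         # negated filter
        "or__created__gt": "2023-01-01", # OR'd with any other or__ filters
    }
    jobs = requests.get(f"{AWX_URL}/api/v2/jobs/", params=params, headers=HEADERS)

    # OrderByBackend: comma-separated fields, '-' prefix for descending, and
    # 'type' transparently mapped to polymorphic_ctype__model where applicable.
    ordered = requests.get(
        f"{AWX_URL}/api/v2/unified_jobs/",
        params={"order_by": "-finished,type"},
        headers=HEADERS,
    )
    print(jobs.status_code, ordered.status_code)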
@@ -30,12 +30,13 @@ from rest_framework.permissions import IsAuthenticated
 from rest_framework.renderers import StaticHTMLRenderer
 from rest_framework.negotiation import DefaultContentNegotiation

+from ansible_base.filters.rest_framework.field_lookup_backend import FieldLookupBackend
+from ansible_base.utils.models import get_all_field_names
+
 # AWX
-from awx.api.filters import FieldLookupBackend
 from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
 from awx.main.access import optimize_queryset
 from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
-from awx.main.utils.db import get_all_field_names
 from awx.main.utils.licensing import server_product_name
 from awx.main.views import ApiErrorView
 from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer
@@ -43,6 +43,8 @@ from rest_framework.utils.serializer_helpers import ReturnList
 # Django-Polymorphic
 from polymorphic.models import PolymorphicModel

+from ansible_base.utils.models import get_type_for_model
+
 # AWX
 from awx.main.access import get_user_capabilities
 from awx.main.constants import ACTIVE_STATES, CENSOR_VALUE
@@ -99,10 +101,9 @@ from awx.main.models import (
     CLOUD_INVENTORY_SOURCES,
 )
 from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
-from awx.main.models.rbac import get_roles_on_resource, role_summary_fields_generator
+from awx.main.models.rbac import role_summary_fields_generator, RoleAncestorEntry
 from awx.main.fields import ImplicitRoleField
 from awx.main.utils import (
-    get_type_for_model,
     get_model_for_type,
     camelcase_to_underscore,
     getattrd,
@@ -2201,6 +2202,99 @@ class BulkHostCreateSerializer(serializers.Serializer):
         return return_data


+class BulkHostDeleteSerializer(serializers.Serializer):
+    hosts = serializers.ListField(
+        allow_empty=False,
+        max_length=100000,
+        write_only=True,
+        help_text=_('List of hosts ids to be deleted, e.g. [105, 130, 131, 200]'),
+    )
+
+    class Meta:
+        model = Host
+        fields = ('hosts',)
+
+    def validate(self, attrs):
+        request = self.context.get('request', None)
+        max_hosts = settings.BULK_HOST_MAX_DELETE
+        # Validating the number of hosts to be deleted
+        if len(attrs['hosts']) > max_hosts:
+            raise serializers.ValidationError(
+                {
+                    "ERROR": 'Number of hosts exceeds system setting BULK_HOST_MAX_DELETE',
+                    "BULK_HOST_MAX_DELETE": max_hosts,
+                    "Hosts_count": len(attrs['hosts']),
+                }
+            )
+
+        # Getting list of all host objects, filtered by the list of the hosts to delete
+        attrs['host_qs'] = Host.objects.get_queryset().filter(pk__in=attrs['hosts']).only('id', 'inventory_id', 'name')
+
+        # Converting the queryset data in a dict. to reduce the number of queries when
+        # manipulating the data
+        attrs['hosts_data'] = attrs['host_qs'].values()
+
+        if len(attrs['host_qs']) == 0:
+            error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in attrs['hosts']}
+            raise serializers.ValidationError({'hosts': error_hosts})
+
+        if len(attrs['host_qs']) < len(attrs['hosts']):
+            hosts_exists = [host['id'] for host in attrs['hosts_data']]
+            failed_hosts = list(set(attrs['hosts']).difference(hosts_exists))
+            error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in failed_hosts}
+            raise serializers.ValidationError({'hosts': error_hosts})
+
+        # Getting all inventories that the hosts can be in
+        inv_list = list(set([host['inventory_id'] for host in attrs['hosts_data']]))
+
+        # Checking that the user have permission to all inventories
+        errors = dict()
+        for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list):
+            if request and not request.user.is_superuser:
+                if request.user not in inv.admin_role:
+                    errors[inv.name] = "Lack permissions to delete hosts from this inventory."
+        if errors != {}:
+            raise PermissionDenied({"inventories": errors})
+
+        # check the inventory type only if the user have permission to it.
+        errors = dict()
+        for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list):
+            if inv.kind != '':
+                errors[inv.name] = "Hosts can only be deleted from manual inventories."
+        if errors != {}:
+            raise serializers.ValidationError({"inventories": errors})
+        attrs['inventories'] = inv_list
+        return attrs
+
+    def delete(self, validated_data):
+        result = {"hosts": dict()}
+        changes = {'deleted_hosts': dict()}
+        for inventory in validated_data['inventories']:
+            changes['deleted_hosts'][inventory] = list()
+
+        for host in validated_data['hosts_data']:
+            result["hosts"][host["id"]] = f"The host {host['name']} was deleted"
+            changes['deleted_hosts'][host["inventory_id"]].append({"host_id": host["id"], "host_name": host["name"]})
+
+        try:
+            validated_data['host_qs'].delete()
+        except Exception as e:
+            raise serializers.ValidationError({"detail": _(f"cannot delete hosts, host deletion error {e}")})
+
+        request = self.context.get('request', None)
+
+        for inventory in validated_data['inventories']:
+            activity_entry = ActivityStream.objects.create(
+                operation='update',
+                object1='inventory',
+                changes=json.dumps(changes['deleted_hosts'][inventory]),
+                actor=request.user,
+            )
+            activity_entry.inventory.add(inventory)
+
+        return result
+
+
 class GroupTreeSerializer(GroupSerializer):
     children = serializers.SerializerMethodField()

@@ -2664,6 +2758,17 @@ class ResourceAccessListElementSerializer(UserSerializer):
         if 'summary_fields' not in ret:
             ret['summary_fields'] = {}

+        team_content_type = ContentType.objects.get_for_model(Team)
+        content_type = ContentType.objects.get_for_model(obj)
+
+        def get_roles_on_resource(parent_role):
+            "Returns a string list of the roles a parent_role has for current obj."
+            return list(
+                RoleAncestorEntry.objects.filter(ancestor=parent_role, content_type_id=content_type.id, object_id=obj.id)
+                .values_list('role_field', flat=True)
+                .distinct()
+            )
+
         def format_role_perm(role):
             role_dict = {'id': role.id, 'name': role.name, 'description': role.description}
             try:
@@ -2679,7 +2784,7 @@ class ResourceAccessListElementSerializer(UserSerializer):
             else:
                 # Singleton roles should not be managed from this view, as per copy/edit rework spec
                 role_dict['user_capabilities'] = {'unattach': False}
-            return {'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
+            return {'role': role_dict, 'descendant_roles': get_roles_on_resource(role)}

         def format_team_role_perm(naive_team_role, permissive_role_ids):
             ret = []
@@ -2705,12 +2810,9 @@ class ResourceAccessListElementSerializer(UserSerializer):
             else:
                 # Singleton roles should not be managed from this view, as per copy/edit rework spec
                 role_dict['user_capabilities'] = {'unattach': False}
-            ret.append({'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
+            ret.append({'role': role_dict, 'descendant_roles': get_roles_on_resource(team_role)})
             return ret

-        team_content_type = ContentType.objects.get_for_model(Team)
-        content_type = ContentType.objects.get_for_model(obj)
-
         direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
         all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
awx/api/templates/api/bulk_host_delete_view.md (new file, 22 added lines)

@@ -0,0 +1,22 @@
+# Bulk Host Delete
+
+This endpoint allows the client to delete multiple hosts from inventories.
+They may do this by providing a list of host IDs to be deleted.
+
+Example:
+
+    {
+        "hosts": [1, 2, 3, 4, 5]
+    }
+
+Return data:
+
+    {
+        "hosts": {
+            "1": "The host a1 was deleted",
+            "2": "The host a2 was deleted",
+            "3": "The host a3 was deleted",
+            "4": "The host a4 was deleted",
+            "5": "The host a5 was deleted",
+        }
+    }
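A hedged sketch of calling the new endpoint; the URL path and the 201 response follow from the URL patterns and BulkHostDeleteView added later in this changeset, while the host and token are placeholders.

    import requests

    # Placeholders; substitute a real controller URL and OAuth2/session token.
    resp = requests.post(
        "https://awx.example.com/api/v2/bulk/host_delete/",
        json={"hosts": [105, 130, 131]},
        headers={"Authorization": "Bearer <token>"},
    )
    # BulkHostDeleteView returns 201 with a per-host message on success,
    # or 400 with per-host errors from the serializer's validate().
    print(resp.status_code)
    print(resp.json())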
@@ -1,4 +1,4 @@
 ---
 collections:
   - name: ansible.receptor
-    version: 2.0.0
+    version: 2.0.2
@@ -36,6 +36,7 @@ from awx.api.views import (
 from awx.api.views.bulk import (
     BulkView,
     BulkHostCreateView,
+    BulkHostDeleteView,
     BulkJobLaunchView,
 )

@@ -152,6 +153,7 @@ v2_urls = [
     re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
     re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
     re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
+    re_path(r'^bulk/host_delete/$', BulkHostDeleteView.as_view(), name='bulk_host_delete'),
     re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
 ]
@@ -128,6 +128,10 @@ logger = logging.getLogger('awx.api.views')


 def unpartitioned_event_horizon(cls):
+    with connection.cursor() as cursor:
+        cursor.execute(f"SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE table_name = '_unpartitioned_{cls._meta.db_table}';")
+        if not cursor.fetchone():
+            return 0
     with connection.cursor() as cursor:
         try:
             cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
@@ -738,8 +742,8 @@ class TeamActivityStreamList(SubListAPIView):
         qs = self.request.user.get_queryset(self.model)
         return qs.filter(
             Q(team=parent)
-            | Q(project__in=models.Project.accessible_objects(parent, 'read_role'))
-            | Q(credential__in=models.Credential.accessible_objects(parent, 'read_role'))
+            | Q(project__in=models.Project.accessible_objects(parent.member_role, 'read_role'))
+            | Q(credential__in=models.Credential.accessible_objects(parent.member_role, 'read_role'))
         )

@@ -1393,7 +1397,7 @@ class OrganizationCredentialList(SubListCreateAPIView):
         self.check_parent_access(organization)

         user_visible = models.Credential.accessible_objects(self.request.user, 'read_role').all()
-        org_set = models.Credential.accessible_objects(organization.admin_role, 'read_role').all()
+        org_set = models.Credential.objects.filter(organization=organization)

         if self.request.user.is_superuser or self.request.user.is_system_auditor:
             return org_set
@@ -34,6 +34,7 @@ class BulkView(APIView):
         '''List top level resources'''
         data = OrderedDict()
         data['host_create'] = reverse('api:bulk_host_create', request=request)
+        data['host_delete'] = reverse('api:bulk_host_delete', request=request)
         data['job_launch'] = reverse('api:bulk_job_launch', request=request)
         return Response(data)

@@ -72,3 +73,20 @@ class BulkHostCreateView(GenericAPIView):
             result = serializer.create(serializer.validated_data)
             return Response(result, status=status.HTTP_201_CREATED)
         return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
+
+
+class BulkHostDeleteView(GenericAPIView):
+    permission_classes = [IsAuthenticated]
+    model = Host
+    serializer_class = serializers.BulkHostDeleteSerializer
+    allowed_methods = ['GET', 'POST', 'OPTIONS']
+
+    def get(self, request):
+        return Response({"detail": "Bulk delete hosts with this endpoint"}, status=status.HTTP_200_OK)
+
+    def post(self, request):
+        serializer = serializers.BulkHostDeleteSerializer(data=request.data, context={'request': request})
+        if serializer.is_valid():
+            result = serializer.delete(serializer.validated_data)
+            return Response(result, status=status.HTTP_201_CREATED)
+        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@@ -7,8 +7,10 @@ import json
 # Django
 from django.db import models

+from ansible_base.utils.models import prevent_search
+
 # AWX
-from awx.main.models.base import CreatedModifiedModel, prevent_search
+from awx.main.models.base import CreatedModifiedModel
 from awx.main.utils import encrypt_field
 from awx.conf import settings_registry
@@ -20,11 +20,12 @@ from rest_framework.exceptions import ParseError, PermissionDenied
 # Django OAuth Toolkit
 from awx.main.models.oauth import OAuth2Application, OAuth2AccessToken

+from ansible_base.utils.validation import to_python_boolean
+
 # AWX
 from awx.main.utils import (
     get_object_or_400,
     get_pk_from_dict,
-    to_python_boolean,
     get_licenser,
 )
 from awx.main.models import (
@@ -79,7 +80,6 @@ __all__ = [
     'get_user_queryset',
     'check_user_access',
     'check_user_access_with_errors',
-    'user_accessible_objects',
     'consumer_access',
 ]

@@ -136,10 +136,6 @@ def register_access(model_class, access_class):
     access_registry[model_class] = access_class


-def user_accessible_objects(user, role_name):
-    return ResourceMixin._accessible_objects(User, user, role_name)
-
-
 def get_user_queryset(user, model_class):
     """
     Return a queryset for the given model_class containing only the instances
@@ -694,16 +694,18 @@ register(
     category_slug='logging',
 )
 register(
-    'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
+    'LOG_AGGREGATOR_ACTION_QUEUE_SIZE',
     field_class=fields.IntegerField,
-    default=1,
+    default=131072,
     min_value=1,
-    label=_('Maximum disk persistence for external log aggregation (in GB)'),
+    label=_('Maximum number of messages that can be stored in the log action queue'),
     help_text=_(
-        'Amount of data to store (in gigabytes) during an outage of '
-        'the external log aggregator (defaults to 1). '
-        'Equivalent to the rsyslogd queue.maxdiskspace setting for main_queue. '
-        'Notably, this is used for the rsyslogd main queue (for input messages).'
+        'Defines how large the rsyslog action queue can grow in number of messages '
+        'stored. This can have an impact on memory utilization. When the queue '
+        'reaches 75% of this number, the queue will start writing to disk '
+        '(queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and '
+        'DEBUG messages will start to be discarded (queue.discardMark with '
+        'queue.discardSeverity=5).'
     ),
     category=_('Logging'),
     category_slug='logging',
@@ -718,8 +720,7 @@ register(
         'Amount of data to store (in gigabytes) if an rsyslog action takes time '
         'to process an incoming message (defaults to 1). '
         'Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). '
-        'Like LOG_AGGREGATOR_MAX_DISK_USAGE_GB, it stores files in the directory specified '
-        'by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
+        'It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
     ),
     category=_('Logging'),
     category_slug='logging',
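To make the watermarks in the new help text concrete, here is a small sketch of the thresholds implied by the default; the 75%/90% figures come from the help text above, while the exact rounding rsyslog applies is an assumption.

    # Thresholds implied by the LOG_AGGREGATOR_ACTION_QUEUE_SIZE default.
    queue_size = 131072
    high_watermark = int(queue_size * 0.75)  # ~98304 messages: queue starts spilling to disk
    discard_mark = int(queue_size * 0.90)    # ~117964 messages: NOTICE/INFO/DEBUG start being discarded
    print(high_watermark, discard_mark)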
@@ -826,6 +827,16 @@ register(
     category_slug='bulk',
 )

+register(
+    'BULK_HOST_MAX_DELETE',
+    field_class=fields.IntegerField,
+    default=250,
+    label=_('Max number of hosts to allow to be deleted in a single bulk action'),
+    help_text=_('Max number of hosts to allow to be deleted in a single bulk action'),
+    category=_('Bulk Actions'),
+    category_slug='bulk',
+)
+
 register(
     'UI_NEXT',
     field_class=fields.BooleanField,
@@ -2,29 +2,29 @@ from .plugin import CredentialPlugin

 from django.conf import settings
 from django.utils.translation import gettext_lazy as _

-try:
-    from delinea.secrets.vault import SecretsVault
-except ImportError:
-    from thycotic.secrets.vault import SecretsVault
+from delinea.secrets.vault import PasswordGrantAuthorizer, SecretsVault
+from base64 import b64decode

 dsv_inputs = {
     'fields': [
         {
             'id': 'tenant',
             'label': _('Tenant'),
-            'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
+            'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretsvaultcloud.com'),
             'type': 'string',
         },
         {
             'id': 'tld',
             'label': _('Top-level Domain (TLD)'),
-            'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
-            'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
+            'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretsvaultcloud.com'),
+            'choices': ['ca', 'com', 'com.au', 'eu'],
             'default': 'com',
         },
-        {'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
+        {
+            'id': 'client_id',
+            'label': _('Client ID'),
+            'type': 'string',
+        },
         {
             'id': 'client_secret',
             'label': _('Client Secret'),
@@ -45,8 +45,16 @@ dsv_inputs = {
             'help_text': _('The field to extract from the secret'),
             'type': 'string',
         },
+        {
+            'id': 'secret_decoding',
+            'label': _('Should the secret be base64 decoded?'),
+            'help_text': _('Specify whether the secret should be base64 decoded, typically used for storing files, such as SSH keys'),
+            'choices': ['No Decoding', 'Decode Base64'],
+            'type': 'string',
+            'default': 'No Decoding',
+        },
     ],
-    'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'],
+    'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field', 'secret_decoding'],
 }

 if settings.DEBUG:
@@ -55,12 +63,32 @@ if settings.DEBUG:
             'id': 'url_template',
             'label': _('URL template'),
             'type': 'string',
-            'default': 'https://{}.secretsvaultcloud.{}/v1',
+            'default': 'https://{}.secretsvaultcloud.{}',
         }
     )

-dsv_plugin = CredentialPlugin(
-    'Thycotic DevOps Secrets Vault',
-    dsv_inputs,
-    lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']],  # fmt: skip
-)
+
+def dsv_backend(**kwargs):
+    tenant_name = kwargs['tenant']
+    tenant_tld = kwargs.get('tld', 'com')
+    tenant_url_template = kwargs.get('url_template', 'https://{}.secretsvaultcloud.{}')
+    client_id = kwargs['client_id']
+    client_secret = kwargs['client_secret']
+    secret_path = kwargs['path']
+    secret_field = kwargs['secret_field']
+    # providing a default value to remain backward compatible for secrets that have not specified this option
+    secret_decoding = kwargs.get('secret_decoding', 'No Decoding')
+
+    tenant_url = tenant_url_template.format(tenant_name, tenant_tld.strip("."))
+
+    authorizer = PasswordGrantAuthorizer(tenant_url, client_id, client_secret)
+    dsv_secret = SecretsVault(tenant_url, authorizer).get_secret(secret_path)
+
+    # files can be uploaded base64 decoded to DSV and thus decoding it only, when asked for
+    if secret_decoding == 'Decode Base64':
+        return b64decode(dsv_secret['data'][secret_field]).decode()
+
+    return dsv_secret['data'][secret_field]
+
+
+dsv_plugin = CredentialPlugin(name='Thycotic DevOps Secrets Vault', inputs=dsv_inputs, backend=dsv_backend)
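The new `secret_decoding` option exists because binary material such as SSH keys is typically stored base64-encoded in DSV; selecting 'Decode Base64' makes `dsv_backend` return the decoded text, while the default 'No Decoding' keeps existing credentials working unchanged. A small illustration of the round trip (not plugin code; the key content is a placeholder):

    from base64 import b64decode, b64encode

    # A key stored base64-encoded in DSV decodes back to its original text
    # only when 'Decode Base64' is selected for the credential.
    stored = b64encode(b"-----BEGIN OPENSSH PRIVATE KEY-----\n...").decode()
    assert b64decode(stored).decode().startswith("-----BEGIN OPENSSH PRIVATE KEY-----")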
@@ -41,6 +41,34 @@ base_inputs = {
             'secret': True,
             'help_text': _('The Secret ID for AppRole Authentication'),
         },
+        {
+            'id': 'client_cert_public',
+            'label': _('Client Certificate'),
+            'type': 'string',
+            'multiline': True,
+            'help_text': _(
+                'The PEM-encoded client certificate used for TLS client authentication.'
+                ' This should include the certificate and any intermediate certififcates.'
+            ),
+        },
+        {
+            'id': 'client_cert_private',
+            'label': _('Client Certificate Key'),
+            'type': 'string',
+            'multiline': True,
+            'secret': True,
+            'help_text': _('The certificate private key used for TLS client authentication.'),
+        },
+        {
+            'id': 'client_cert_role',
+            'label': _('TLS Authentication Role'),
+            'type': 'string',
+            'multiline': False,
+            'help_text': _(
+                'The role configured in Hashicorp Vault for TLS client authentication.'
+                ' If not provided, Hashicorp Vault may assign roles based on the certificate used.'
+            ),
+        },
         {
             'id': 'namespace',
             'label': _('Namespace name (Vault Enterprise only)'),
@@ -164,8 +192,10 @@ def handle_auth(**kwargs):
         token = method_auth(**kwargs, auth_param=approle_auth(**kwargs))
     elif kwargs.get('kubernetes_role'):
         token = method_auth(**kwargs, auth_param=kubernetes_auth(**kwargs))
+    elif kwargs.get('client_cert_public') and kwargs.get('client_cert_private'):
+        token = method_auth(**kwargs, auth_param=client_cert_auth(**kwargs))
     else:
-        raise Exception('Either token or AppRole/Kubernetes authentication parameters must be set')
+        raise Exception('Either a token or AppRole, Kubernetes, or TLS authentication parameters must be set')

     return token

@@ -181,6 +211,10 @@ def kubernetes_auth(**kwargs):
     return {'role': kwargs['kubernetes_role'], 'jwt': jwt}


+def client_cert_auth(**kwargs):
+    return {'name': kwargs.get('client_cert_role')}
+
+
 def method_auth(**kwargs):
     # get auth method specific params
     request_kwargs = {'json': kwargs['auth_param'], 'timeout': 30}
@@ -193,13 +227,22 @@ def method_auth(**kwargs):
     cacert = kwargs.get('cacert', None)

     sess = requests.Session()
+
     # Namespace support
     if kwargs.get('namespace'):
         sess.headers['X-Vault-Namespace'] = kwargs['namespace']
     request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
     with CertFiles(cacert) as cert:
         request_kwargs['verify'] = cert
-        resp = sess.post(request_url, **request_kwargs)
+        # TLS client certificate support
+        if kwargs.get('client_cert_public') and kwargs.get('client_cert_private'):
+            # Add client cert to requests Session before making call
+            with CertFiles(kwargs['client_cert_public'], key=kwargs['client_cert_private']) as client_cert:
+                sess.cert = client_cert
+                resp = sess.post(request_url, **request_kwargs)
+        else:
+            # Make call without client certificate
+            resp = sess.post(request_url, **request_kwargs)
     resp.raise_for_status()
     token = resp.json()['auth']['client_token']
     return token
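A sketch of the credential inputs that would exercise the new TLS path in handle_auth(); the field ids match base_inputs above, while the URL and PEM contents are placeholders.

    # Illustrative kwargs only; handle_auth() would route these through
    # client_cert_auth() and method_auth() per the branches above.
    vault_kwargs = {
        'url': 'https://vault.example.com',  # placeholder Vault address
        'client_cert_public': '-----BEGIN CERTIFICATE-----\n...',
        'client_cert_private': '-----BEGIN PRIVATE KEY-----\n...',
        'client_cert_role': 'awx-tls',       # optional; Vault can infer roles from the cert
        'cacert': None,
    }
    # token = handle_auth(**vault_kwargs)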
@@ -37,8 +37,11 @@ class Control(object):
     def running(self, *args, **kwargs):
         return self.control_with_reply('running', *args, **kwargs)

-    def cancel(self, task_ids, *args, **kwargs):
-        return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
+    def cancel(self, task_ids, with_reply=True):
+        if with_reply:
+            return self.control_with_reply('cancel', extra_data={'task_ids': task_ids})
+        else:
+            self.control({'control': 'cancel', 'task_ids': task_ids, 'reply_to': None}, extra_data={'task_ids': task_ids})

     def schedule(self, *args, **kwargs):
         return self.control_with_reply('schedule', *args, **kwargs)
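A brief sketch of the reworked cancel() contract; the module path and the 'dispatcher' queue name are assumed from context.

    from awx.main.dispatch.control import Control  # module path assumed

    ctl = Control('dispatcher')
    ctl.cancel(['<task-uuid>'])                    # with_reply=True (default): waits for confirmation
    ctl.cancel(['<task-uuid>'], with_reply=False)  # fire-and-forget; no reply queue is set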
@@ -89,8 +89,9 @@ class AWXConsumerBase(object):
             if task_ids and not msg:
                 logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')

-            with pg_bus_conn() as conn:
-                conn.notify(reply_queue, json.dumps(msg))
+            if reply_queue is not None:
+                with pg_bus_conn() as conn:
+                    conn.notify(reply_queue, json.dumps(msg))
         elif control == 'reload':
             for worker in self.pool.workers:
                 worker.quit()
@@ -9,6 +9,7 @@ from django.conf import settings
 # AWX
 import awx.main.fields
 from awx.main.models import Host
+from ._sqlite_helper import dbawaremigrations


 def replaces():
@@ -131,9 +132,11 @@
                 help_text='If enabled, Tower will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.',
             ),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             sql="CREATE INDEX host_ansible_facts_default_gin ON {} USING gin(ansible_facts jsonb_path_ops);".format(Host._meta.db_table),
             reverse_sql='DROP INDEX host_ansible_facts_default_gin;',
+            sqlite_sql=dbawaremigrations.RunSQL.noop,
+            sqlite_reverse_sql=dbawaremigrations.RunSQL.noop,
         ),
         # SCM file-based inventories
         migrations.AddField(
@@ -3,24 +3,27 @@ from __future__ import unicode_literals

 from django.db import migrations

+from ._sqlite_helper import dbawaremigrations
+
+tables_to_drop = [
+    'celery_taskmeta',
+    'celery_tasksetmeta',
+    'djcelery_crontabschedule',
+    'djcelery_intervalschedule',
+    'djcelery_periodictask',
+    'djcelery_periodictasks',
+    'djcelery_taskstate',
+    'djcelery_workerstate',
+    'djkombu_message',
+    'djkombu_queue',
+]
+postgres_sql = ([("DROP TABLE IF EXISTS {} CASCADE;".format(table))] for table in tables_to_drop)
+sqlite_sql = ([("DROP TABLE IF EXISTS {};".format(table))] for table in tables_to_drop)
+

 class Migration(migrations.Migration):
     dependencies = [
         ('main', '0049_v330_validate_instance_capacity_adjustment'),
     ]

-    operations = [
-        migrations.RunSQL([("DROP TABLE IF EXISTS {} CASCADE;".format(table))])
-        for table in (
-            'celery_taskmeta',
-            'celery_tasksetmeta',
-            'djcelery_crontabschedule',
-            'djcelery_intervalschedule',
-            'djcelery_periodictask',
-            'djcelery_periodictasks',
-            'djcelery_taskstate',
-            'djcelery_workerstate',
-            'djkombu_message',
-            'djkombu_queue',
-        )
-    ]
+    operations = [dbawaremigrations.RunSQL(p, sqlite_sql=s) for p, s in zip(postgres_sql, sqlite_sql)]

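One subtlety in the rewrite above: `postgres_sql` and `sqlite_sql` are generator expressions, so they are consumed exactly once, paired off element by element by `zip` when the `operations` list comprehension runs at import time. A small sketch of the pairing, with the table list shortened:

    tables = ['celery_taskmeta', 'djkombu_queue']
    pg = ([f"DROP TABLE IF EXISTS {t} CASCADE;"] for t in tables)
    lite = ([f"DROP TABLE IF EXISTS {t};"] for t in tables)
    pairs = list(zip(pg, lite))
    # pairs[0] == (['DROP TABLE IF EXISTS celery_taskmeta CASCADE;'],
    #              ['DROP TABLE IF EXISTS celery_taskmeta;'])
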
@@ -2,6 +2,8 @@

 from django.db import migrations, models, connection

+from ._sqlite_helper import dbawaremigrations
+

 def migrate_event_data(apps, schema_editor):
     # see: https://github.com/ansible/awx/issues/6010
@@ -24,6 +26,11 @@ def migrate_event_data(apps, schema_editor):
             cursor.execute(f'ALTER TABLE {tblname} ALTER COLUMN id TYPE bigint USING id::bigint;')


+def migrate_event_data_sqlite(apps, schema_editor):
+    # TODO: cmeyers fill this in
+    return
+
+
 class FakeAlterField(migrations.AlterField):
     def database_forwards(self, *args):
         # this is intentionally left blank, because we're
@@ -37,7 +44,7 @@ class Migration(migrations.Migration):
     ]

     operations = [
-        migrations.RunPython(migrate_event_data),
+        dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
         FakeAlterField(
             model_name='adhoccommandevent',
             name='id',

@@ -1,5 +1,7 @@
 from django.db import migrations, models, connection

+from ._sqlite_helper import dbawaremigrations
+

 def migrate_event_data(apps, schema_editor):
     # see: https://github.com/ansible/awx/issues/9039
@@ -59,6 +61,10 @@ def migrate_event_data(apps, schema_editor):
         cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_idx')


+def migrate_event_data_sqlite(apps, schema_editor):
+    return None
+
+
 class FakeAddField(migrations.AddField):
     def database_forwards(self, *args):
         # this is intentionally left blank, because we're
@@ -72,7 +78,7 @@ class Migration(migrations.Migration):
     ]

     operations = [
-        migrations.RunPython(migrate_event_data),
+        dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
         FakeAddField(
             model_name='jobevent',
             name='job_created',

@@ -3,6 +3,8 @@
 import awx.main.models.notifications
 from django.db import migrations, models

+from ._sqlite_helper import dbawaremigrations
+

 class Migration(migrations.Migration):
     dependencies = [
@@ -104,11 +106,12 @@ class Migration(migrations.Migration):
             name='deleted_actor',
             field=models.JSONField(null=True),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_activitystream RENAME setting TO setting_old;
             ALTER TABLE main_activitystream ALTER COLUMN setting_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_activitystream RENAME setting TO setting_old",
             state_operations=[
                 migrations.RemoveField(
                     model_name='activitystream',
@@ -121,11 +124,12 @@ class Migration(migrations.Migration):
             name='setting',
             field=models.JSONField(blank=True, default=dict),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_job RENAME survey_passwords TO survey_passwords_old;
             ALTER TABLE main_job ALTER COLUMN survey_passwords_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_job RENAME survey_passwords TO survey_passwords_old",
             state_operations=[
                 migrations.RemoveField(
                     model_name='job',
@@ -138,11 +142,12 @@ class Migration(migrations.Migration):
             name='survey_passwords',
             field=models.JSONField(blank=True, default=dict, editable=False),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_joblaunchconfig RENAME char_prompts TO char_prompts_old;
             ALTER TABLE main_joblaunchconfig ALTER COLUMN char_prompts_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_joblaunchconfig RENAME char_prompts TO char_prompts_old",
             state_operations=[
                 migrations.RemoveField(
                     model_name='joblaunchconfig',
@@ -155,11 +160,12 @@ class Migration(migrations.Migration):
             name='char_prompts',
             field=models.JSONField(blank=True, default=dict),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_joblaunchconfig RENAME survey_passwords TO survey_passwords_old;
             ALTER TABLE main_joblaunchconfig ALTER COLUMN survey_passwords_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_joblaunchconfig RENAME survey_passwords TO survey_passwords_old;",
             state_operations=[
                 migrations.RemoveField(
                     model_name='joblaunchconfig',
@@ -172,11 +178,12 @@ class Migration(migrations.Migration):
             name='survey_passwords',
             field=models.JSONField(blank=True, default=dict, editable=False),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_notification RENAME body TO body_old;
             ALTER TABLE main_notification ALTER COLUMN body_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_notification RENAME body TO body_old",
             state_operations=[
                 migrations.RemoveField(
                     model_name='notification',
@@ -189,11 +196,12 @@ class Migration(migrations.Migration):
             name='body',
             field=models.JSONField(blank=True, default=dict),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_unifiedjob RENAME job_env TO job_env_old;
             ALTER TABLE main_unifiedjob ALTER COLUMN job_env_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_unifiedjob RENAME job_env TO job_env_old",
             state_operations=[
                 migrations.RemoveField(
                     model_name='unifiedjob',
@@ -206,11 +214,12 @@ class Migration(migrations.Migration):
             name='job_env',
             field=models.JSONField(blank=True, default=dict, editable=False),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_workflowjob RENAME char_prompts TO char_prompts_old;
             ALTER TABLE main_workflowjob ALTER COLUMN char_prompts_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_workflowjob RENAME char_prompts TO char_prompts_old",
             state_operations=[
                 migrations.RemoveField(
                     model_name='workflowjob',
@@ -223,11 +232,12 @@ class Migration(migrations.Migration):
             name='char_prompts',
             field=models.JSONField(blank=True, default=dict),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_workflowjob RENAME survey_passwords TO survey_passwords_old;
             ALTER TABLE main_workflowjob ALTER COLUMN survey_passwords_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_workflowjob RENAME survey_passwords TO survey_passwords_old",
             state_operations=[
                 migrations.RemoveField(
                     model_name='workflowjob',
@@ -240,11 +250,12 @@ class Migration(migrations.Migration):
             name='survey_passwords',
             field=models.JSONField(blank=True, default=dict, editable=False),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_workflowjobnode RENAME char_prompts TO char_prompts_old;
             ALTER TABLE main_workflowjobnode ALTER COLUMN char_prompts_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_workflowjobnode RENAME char_prompts TO char_prompts_old",
             state_operations=[
                 migrations.RemoveField(
                     model_name='workflowjobnode',
@@ -257,11 +268,12 @@ class Migration(migrations.Migration):
             name='char_prompts',
             field=models.JSONField(blank=True, default=dict),
         ),
-        migrations.RunSQL(
+        dbawaremigrations.RunSQL(
             """
             ALTER TABLE main_workflowjobnode RENAME survey_passwords TO survey_passwords_old;
             ALTER TABLE main_workflowjobnode ALTER COLUMN survey_passwords_old DROP NOT NULL;
             """,
+            sqlite_sql="ALTER TABLE main_workflowjobnode RENAME survey_passwords TO survey_passwords_old",
             state_operations=[
                 migrations.RemoveField(
                     model_name='workflowjobnode',

@@ -3,6 +3,8 @@ from __future__ import unicode_literals

 from django.db import migrations

+from ._sqlite_helper import dbawaremigrations
+

 def delete_taggit_contenttypes(apps, schema_editor):
     ContentType = apps.get_model('contenttypes', 'ContentType')
@@ -20,8 +22,8 @@ class Migration(migrations.Migration):
     ]

     operations = [
-        migrations.RunSQL("DROP TABLE IF EXISTS taggit_tag CASCADE;"),
-        migrations.RunSQL("DROP TABLE IF EXISTS taggit_taggeditem CASCADE;"),
+        dbawaremigrations.RunSQL("DROP TABLE IF EXISTS taggit_tag CASCADE;", sqlite_sql="DROP TABLE IF EXISTS taggit_tag;"),
+        dbawaremigrations.RunSQL("DROP TABLE IF EXISTS taggit_taggeditem CASCADE;", sqlite_sql="DROP TABLE IF EXISTS taggit_taggeditem;"),
         migrations.RunPython(delete_taggit_contenttypes),
         migrations.RunPython(delete_taggit_migration_records),
     ]

awx/main/migrations/_sqlite_helper.py (new file, 61 lines)
@@ -0,0 +1,61 @@
+from django.db import migrations
+
+
+class RunSQL(migrations.operations.special.RunSQL):
+    """
+    Bit of a hack here. Django actually wants this decision made in the router
+    and we can pass **hints.
+    """
+
+    def __init__(self, *args, **kwargs):
+        if 'sqlite_sql' not in kwargs:
+            raise ValueError("sqlite_sql parameter required")
+        sqlite_sql = kwargs.pop('sqlite_sql')
+
+        self.sqlite_sql = sqlite_sql
+        self.sqlite_reverse_sql = kwargs.pop('sqlite_reverse_sql', None)
+        super().__init__(*args, **kwargs)
+
+    def database_forwards(self, app_label, schema_editor, from_state, to_state):
+        if not schema_editor.connection.vendor.startswith('postgres'):
+            self.sql = self.sqlite_sql or migrations.RunSQL.noop
+        super().database_forwards(app_label, schema_editor, from_state, to_state)
+
+    def database_backwards(self, app_label, schema_editor, from_state, to_state):
+        if not schema_editor.connection.vendor.startswith('postgres'):
+            self.reverse_sql = self.sqlite_reverse_sql or migrations.RunSQL.noop
+        super().database_backwards(app_label, schema_editor, from_state, to_state)
+
+
+class RunPython(migrations.operations.special.RunPython):
+    """
+    Bit of a hack here. Django actually wants this decision made in the router
+    and we can pass **hints.
+    """
+
+    def __init__(self, *args, **kwargs):
+        if 'sqlite_code' not in kwargs:
+            raise ValueError("sqlite_code parameter required")
+        sqlite_code = kwargs.pop('sqlite_code')
+
+        self.sqlite_code = sqlite_code
+        self.sqlite_reverse_code = kwargs.pop('sqlite_reverse_code', None)
+        super().__init__(*args, **kwargs)
+
+    def database_forwards(self, app_label, schema_editor, from_state, to_state):
+        if not schema_editor.connection.vendor.startswith('postgres'):
+            self.code = self.sqlite_code or migrations.RunPython.noop
+        super().database_forwards(app_label, schema_editor, from_state, to_state)
+
+    def database_backwards(self, app_label, schema_editor, from_state, to_state):
+        if not schema_editor.connection.vendor.startswith('postgres'):
+            self.reverse_code = self.sqlite_reverse_code or migrations.RunPython.noop
+        super().database_backwards(app_label, schema_editor, from_state, to_state)
+
+
+class _sqlitemigrations:
+    RunPython = RunPython
+    RunSQL = RunSQL
+
+
+dbawaremigrations = _sqlitemigrations()

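To make the new helper concrete, here is a hypothetical migration consuming it, modeled on the real call sites earlier in this diff (the dependency, index name, and SQL are illustrative):

    from django.db import migrations

    from ._sqlite_helper import dbawaremigrations


    class Migration(migrations.Migration):
        dependencies = [('main', '0001_initial')]

        operations = [
            dbawaremigrations.RunSQL(
                # postgres-only index syntax; skipped entirely on sqlite
                "CREATE INDEX host_facts_gin ON main_host USING gin(ansible_facts);",
                reverse_sql="DROP INDEX host_facts_gin;",
                sqlite_sql=dbawaremigrations.RunSQL.noop,
                sqlite_reverse_sql=dbawaremigrations.RunSQL.noop,
            ),
        ]
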
@@ -6,8 +6,10 @@ from django.conf import settings  # noqa
 from django.db import connection
 from django.db.models.signals import pre_delete  # noqa

+from ansible_base.utils.models import prevent_search
+
 # AWX
-from awx.main.models.base import BaseModel, PrimordialModel, prevent_search, accepts_json, CLOUD_INVENTORY_SOURCES, VERBOSITY_CHOICES  # noqa
+from awx.main.models.base import BaseModel, PrimordialModel, accepts_json, CLOUD_INVENTORY_SOURCES, VERBOSITY_CHOICES  # noqa
 from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate, StdoutMaxBytesExceeded  # noqa
 from awx.main.models.organization import Organization, Profile, Team, UserSessionMembership  # noqa
 from awx.main.models.credential import Credential, CredentialType, CredentialInputSource, ManagedCredentialType, build_safe_env  # noqa
@@ -57,7 +59,6 @@ from awx.main.models.ha import (  # noqa
 from awx.main.models.rbac import (  # noqa
     Role,
     batch_role_ancestor_rebuilding,
-    get_roles_on_resource,
     role_summary_fields_generator,
     ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
     ROLE_SINGLETON_SYSTEM_AUDITOR,
@@ -91,13 +92,12 @@ from oauth2_provider.models import Grant, RefreshToken  # noqa -- needed django-

 # Add custom methods to User model for permissions checks.
 from django.contrib.auth.models import User  # noqa
-from awx.main.access import get_user_queryset, check_user_access, check_user_access_with_errors, user_accessible_objects  # noqa
+from awx.main.access import get_user_queryset, check_user_access, check_user_access_with_errors  # noqa


 User.add_to_class('get_queryset', get_user_queryset)
 User.add_to_class('can_access', check_user_access)
 User.add_to_class('can_access_with_errors', check_user_access_with_errors)
-User.add_to_class('accessible_objects', user_accessible_objects)


 def convert_jsonfields():

@@ -12,9 +12,11 @@ from django.utils.text import Truncator
 from django.utils.translation import gettext_lazy as _
 from django.core.exceptions import ValidationError

+from ansible_base.utils.models import prevent_search
+
 # AWX
 from awx.api.versioning import reverse
-from awx.main.models.base import prevent_search, AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
+from awx.main.models.base import AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
 from awx.main.models.events import AdHocCommandEvent, UnpartitionedAdHocCommandEvent
 from awx.main.models.unified_jobs import UnifiedJob
 from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate

@@ -15,7 +15,6 @@ from awx.main.utils import encrypt_field, parse_yaml_or_json
 from awx.main.constants import CLOUD_PROVIDERS

 __all__ = [
-    'prevent_search',
     'VarsDictProperty',
     'BaseModel',
     'CreatedModifiedModel',
@@ -384,23 +383,6 @@ class NotificationFieldsModel(BaseModel):
     notification_templates_started = models.ManyToManyField("NotificationTemplate", blank=True, related_name='%(class)s_notification_templates_for_started')


-def prevent_search(relation):
-    """
-    Used to mark a model field or relation as "restricted from filtering"
-    e.g.,
-
-        class AuthToken(BaseModel):
-            user = prevent_search(models.ForeignKey(...))
-            sensitive_data = prevent_search(models.CharField(...))
-
-    The flag set by this function is used by
-    `awx.api.filters.FieldLookupBackend` to block fields and relations that
-    should not be searchable/filterable via search query params
-    """
-    setattr(relation, '__prevent_search__', True)
-    return relation
-
-
 def accepts_json(relation):
     """
     Used to mark a model field as allowing JSON e.g,. JobTemplate.extra_vars

@@ -17,6 +17,8 @@ from django.db.models import Sum, Q
 import redis
 from solo.models import SingletonModel

+from ansible_base.utils.models import prevent_search
+
 # AWX
 from awx import __version__ as awx_application_version
 from awx.main.utils import is_testing
@@ -24,7 +26,7 @@ from awx.api.versioning import reverse
 from awx.main.fields import ImplicitRoleField
 from awx.main.managers import InstanceManager, UUID_DEFAULT
 from awx.main.constants import JOB_FOLDER_PREFIX
-from awx.main.models.base import BaseModel, HasEditsMixin, prevent_search
+from awx.main.models.base import BaseModel, HasEditsMixin
 from awx.main.models.rbac import (
     ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
     ROLE_SINGLETON_SYSTEM_AUDITOR,

@@ -25,6 +25,8 @@ from django.db.models import Q
 # REST Framework
 from rest_framework.exceptions import ParseError

+from ansible_base.utils.models import prevent_search
+
 # AWX
 from awx.api.versioning import reverse
 from awx.main.constants import CLOUD_PROVIDERS
@@ -35,7 +37,7 @@ from awx.main.fields import (
     OrderedManyToManyField,
 )
 from awx.main.managers import HostManager, HostMetricActiveManager
-from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
+from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, accepts_json
 from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
 from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
 from awx.main.models.mixins import (

@@ -20,13 +20,14 @@ from django.core.exceptions import FieldDoesNotExist
 # REST Framework
 from rest_framework.exceptions import ParseError

+from ansible_base.utils.models import prevent_search
+
 # AWX
 from awx.api.versioning import reverse
 from awx.main.constants import HOST_FACTS_FIELDS
 from awx.main.models.base import (
     BaseModel,
     CreatedModifiedModel,
-    prevent_search,
     accepts_json,
     JOB_TYPE_CHOICES,
     NEW_JOB_TYPE_CHOICES,

@@ -9,7 +9,6 @@ import requests
 # Django
 from django.apps import apps
 from django.conf import settings
-from django.contrib.auth.models import User  # noqa
 from django.contrib.contenttypes.models import ContentType
 from django.core.exceptions import ValidationError
 from django.db import models
@@ -17,9 +16,10 @@ from django.db.models.query import QuerySet
 from django.utils.crypto import get_random_string
 from django.utils.translation import gettext_lazy as _

+from ansible_base.utils.models import prevent_search
+
 # AWX
-from awx.main.models.base import prevent_search
-from awx.main.models.rbac import Role, RoleAncestorEntry, get_roles_on_resource
+from awx.main.models.rbac import Role, RoleAncestorEntry
 from awx.main.utils import parse_yaml_or_json, get_custom_venv_choices, get_licenser, polymorphic
 from awx.main.utils.execution_environments import get_default_execution_environment
 from awx.main.utils.encryption import decrypt_value, get_encryption_key, is_encrypted
@@ -54,10 +54,7 @@ class ResourceMixin(models.Model):
         Use instead of `MyModel.objects` when you want to only consider
         resources that a user has specific permissions for. For example:
         MyModel.accessible_objects(user, 'read_role').filter(name__istartswith='bar');
-        NOTE: This should only be used for list type things. If you have a
-        specific resource you want to check permissions on, it is more
-        performant to resolve the resource in question then call
-        `myresource.get_permissions(user)`.
+        NOTE: This should only be used for list type things.
         """
         return ResourceMixin._accessible_objects(cls, accessor, role_field)

@@ -67,13 +64,12 @@ class ResourceMixin(models.Model):

     @staticmethod
     def _accessible_pk_qs(cls, accessor, role_field, content_types=None):
-        if type(accessor) == User:
+        if accessor._meta.model_name == 'user':
             ancestor_roles = accessor.roles.all()
         elif type(accessor) == Role:
             ancestor_roles = [accessor]
         else:
-            accessor_type = ContentType.objects.get_for_model(accessor)
-            ancestor_roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id)
+            raise RuntimeError(f'Role filters only valid for users and ancestor role, received {accessor}')

         if content_types is None:
             ct_kwarg = dict(content_type_id=ContentType.objects.get_for_model(cls).id)
@@ -86,15 +82,6 @@ class ResourceMixin(models.Model):
     def _accessible_objects(cls, accessor, role_field):
         return cls.objects.filter(pk__in=ResourceMixin._accessible_pk_qs(cls, accessor, role_field))

-    def get_permissions(self, accessor):
-        """
-        Returns a string list of the roles a accessor has for a given resource.
-        An accessor can be either a User, Role, or an arbitrary resource that
-        contains one or more Roles associated with it.
-        """
-
-        return get_roles_on_resource(self, accessor)
-

 class SurveyJobTemplateMixin(models.Model):
     class Meta:

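A note on the identity checks in this hunk: replacing `type(accessor) == User` with `accessor._meta.model_name == 'user'` lets the module drop its direct import of `django.contrib.auth.models.User` (removed at the top of the same hunk) and keys off Django model metadata instead. A standalone sketch of the pattern, with an illustrative function name:

    def is_user_instance(obj):
        # Every Django model instance carries _meta; 'user' is the lowercased
        # model name of django.contrib.auth's User model.
        return getattr(obj, '_meta', None) is not None and obj._meta.model_name == 'user'
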
@@ -15,9 +15,11 @@ from django.utils.encoding import smart_str, force_str
 from jinja2 import sandbox, ChainableUndefined
 from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError

+from ansible_base.utils.models import prevent_search
+
 # AWX
 from awx.api.versioning import reverse
-from awx.main.models.base import CommonModelNameNotUnique, CreatedModifiedModel, prevent_search
+from awx.main.models.base import CommonModelNameNotUnique, CreatedModifiedModel
 from awx.main.utils import encrypt_field, decrypt_field, set_environ
 from awx.main.notifications.email_backend import CustomEmailBackend
 from awx.main.notifications.slack_backend import SlackBackend

@@ -15,12 +15,10 @@ from django.utils.translation import gettext_lazy as _

 # AWX
 from awx.api.versioning import reverse
-from django.contrib.auth.models import User  # noqa

 __all__ = [
     'Role',
     'batch_role_ancestor_rebuilding',
-    'get_roles_on_resource',
     'ROLE_SINGLETON_SYSTEM_ADMINISTRATOR',
     'ROLE_SINGLETON_SYSTEM_AUDITOR',
     'role_summary_fields_generator',
@@ -170,16 +168,10 @@ class Role(models.Model):
         return reverse('api:role_detail', kwargs={'pk': self.pk}, request=request)

     def __contains__(self, accessor):
-        if type(accessor) == User:
+        if accessor._meta.model_name == 'user':
             return self.ancestors.filter(members=accessor).exists()
         elif accessor.__class__.__name__ == 'Team':
             return self.ancestors.filter(pk=accessor.member_role.id).exists()
-        elif type(accessor) == Role:
-            return self.ancestors.filter(pk=accessor.pk).exists()
         else:
-            accessor_type = ContentType.objects.get_for_model(accessor)
-            roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id)
-            return self.ancestors.filter(pk__in=roles).exists()
+            raise RuntimeError(f'Role evaluations only valid for users, received {accessor}')

     @property
     def name(self):
@@ -460,31 +452,6 @@ class RoleAncestorEntry(models.Model):
     object_id = models.PositiveIntegerField(null=False)


-def get_roles_on_resource(resource, accessor):
-    """
-    Returns a string list of the roles a accessor has for a given resource.
-    An accessor can be either a User, Role, or an arbitrary resource that
-    contains one or more Roles associated with it.
-    """
-
-    if type(accessor) == User:
-        roles = accessor.roles.all()
-    elif type(accessor) == Role:
-        roles = [accessor]
-    else:
-        accessor_type = ContentType.objects.get_for_model(accessor)
-        roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id)
-
-    return [
-        role_field
-        for role_field in RoleAncestorEntry.objects.filter(
-            ancestor__in=roles, content_type_id=ContentType.objects.get_for_model(resource).id, object_id=resource.id
-        )
-        .values_list('role_field', flat=True)
-        .distinct()
-    ]
-
-
 def role_summary_fields_generator(content_object, role_field):
     global role_descriptions
     global role_names

@@ -30,8 +30,10 @@ from rest_framework.exceptions import ParseError
 # Django-Polymorphic
 from polymorphic.models import PolymorphicModel

+from ansible_base.utils.models import prevent_search, get_type_for_model
+
 # AWX
-from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search
+from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
 from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.control import Control as ControlDispatcher
 from awx.main.registrar import activity_stream_registrar
@@ -42,7 +44,6 @@ from awx.main.utils.common import (
     _inventory_updates,
     copy_model_by_class,
     copy_m2m_relationships,
-    get_type_for_model,
     parse_yaml_or_json,
     getattr_dne,
     ScheduleDependencyManager,
@@ -1439,6 +1440,11 @@ class UnifiedJob(
         if not self.celery_task_id:
             return
         canceled = []
+        if not connection.get_autocommit():
+            # this condition is purpose-written for the task manager, when it cancels jobs in workflows
+            ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
+            return True  # task manager itself needs to act under assumption that cancel was received
+
         try:
             # Use control and reply mechanism to cancel and obtain confirmation
             timeout = 5
@@ -23,9 +23,11 @@ from crum import get_current_user
 from jinja2 import sandbox
 from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError

+from ansible_base.utils.models import prevent_search
+
 # AWX
 from awx.api.versioning import reverse
-from awx.main.models import prevent_search, accepts_json, UnifiedJobTemplate, UnifiedJob
+from awx.main.models import accepts_json, UnifiedJobTemplate, UnifiedJob
 from awx.main.models.notifications import NotificationTemplate, JobNotificationMixin
 from awx.main.models.base import CreatedModifiedModel, VarsDictProperty
 from awx.main.models.rbac import ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR

@@ -39,11 +39,15 @@ class TwilioBackend(AWXBaseEmailBackend, CustomNotificationBase):
             logger.error(smart_str(_("Exception connecting to Twilio: {}").format(e)))

         for m in messages:
-            try:
-                connection.messages.create(to=m.to, from_=m.from_email, body=m.subject)
-                sent_messages += 1
-            except Exception as e:
-                logger.error(smart_str(_("Exception sending messages: {}").format(e)))
-                if not self.fail_silently:
-                    raise
+            failed = False
+            for dest in m.to:
+                try:
+                    logger.debug(smart_str(_("FROM: {} / TO: {}").format(m.from_email, dest)))
+                    connection.messages.create(to=dest, from_=m.from_email, body=m.subject)
+                    sent_messages += 1
+                except Exception as e:
+                    logger.error(smart_str(_("Exception sending messages: {}").format(e)))
+                    failed = True
+            if not self.fail_silently and failed:
+                raise
         return sent_messages

@@ -17,6 +17,8 @@ from django.utils.timezone import now as tz_now
 from django.conf import settings
 from django.contrib.contenttypes.models import ContentType

+from ansible_base.utils.models import get_type_for_model
+
 # AWX
 from awx.main.dispatch.reaper import reap_job
 from awx.main.models import (
@@ -34,7 +36,6 @@ from awx.main.models import (
 from awx.main.scheduler.dag_workflow import WorkflowDAG
 from awx.main.utils.pglock import advisory_lock
 from awx.main.utils import (
-    get_type_for_model,
     ScheduleTaskManager,
     ScheduleWorkflowManager,
 )
@@ -270,6 +271,9 @@ class WorkflowManager(TaskBase):
                     job.status = 'failed'
                    job.save(update_fields=['status', 'job_explanation'])
                    job.websocket_emit_status('failed')
+                   # NOTE: sending notification templates here is slightly worse performance
+                   # this is not yet optimized in the same way as for the TaskManager
+                   job.send_notification_templates('failed')
                    ScheduleWorkflowManager().schedule()

 # TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
@@ -430,6 +434,25 @@ class TaskManager(TaskBase):
         self.tm_models = TaskManagerModels()
         self.controlplane_ig = self.tm_models.instance_groups.controlplane_ig

+    def process_job_dep_failures(self, task):
+        """If job depends on a job that has failed, mark as failed and handle misc stuff."""
+        for dep in task.dependent_jobs.all():
+            # if we detect a failed or error dependency, go ahead and fail this task.
+            if dep.status in ("error", "failed"):
+                task.status = 'failed'
+                logger.warning(f'Previous task failed task: {task.id} dep: {dep.id} task manager')
+                task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
+                    get_type_for_model(type(dep)),
+                    dep.name,
+                    dep.id,
+                )
+                task.save(update_fields=['status', 'job_explanation'])
+                task.websocket_emit_status('failed')
+                self.pre_start_failed.append(task.id)
+                return True
+
+        return False
+
     def job_blocked_by(self, task):
         # TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
         # in the old task manager this was handled as a method on each task object outside of the graph and
@@ -441,20 +464,6 @@ class TaskManager(TaskBase):
         for dep in task.dependent_jobs.all():
             if dep.status in ACTIVE_STATES:
                 return dep
-            # if we detect a failed or error dependency, go ahead and fail this
-            # task. The errback on the dependency takes some time to trigger,
-            # and we don't want the task to enter running state if its
-            # dependency has failed or errored.
-            elif dep.status in ("error", "failed"):
-                task.status = 'failed'
-                task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
-                    get_type_for_model(type(dep)),
-                    dep.name,
-                    dep.id,
-                )
-                task.save(update_fields=['status', 'job_explanation'])
-                task.websocket_emit_status('failed')
-                return dep
-
         return None

@@ -474,7 +483,6 @@ class TaskManager(TaskBase):
         if self.start_task_limit == 0:
             # schedule another run immediately after this task manager
             ScheduleTaskManager().schedule()
-        from awx.main.tasks.system import handle_work_error, handle_work_success

         task.status = 'waiting'

@@ -485,7 +493,7 @@ class TaskManager(TaskBase):
             task.job_explanation += ' '
             task.job_explanation += 'Task failed pre-start check.'
             task.save()
-            # TODO: run error handler to fail sub-tasks and send notifications
+            self.pre_start_failed.append(task.id)
         else:
             if type(task) is WorkflowJob:
                 task.status = 'running'
@@ -507,19 +515,16 @@ class TaskManager(TaskBase):
             # apply_async does a NOTIFY to the channel dispatcher is listening to
             # postgres will treat this as part of the transaction, which is what we want
             if task.status != 'failed' and type(task) is not WorkflowJob:
-                task_actual = {'type': get_type_for_model(type(task)), 'id': task.id}
                 task_cls = task._get_task_class()
                 task_cls.apply_async(
                     [task.pk],
                     opts,
                     queue=task.get_queue_name(),
                     uuid=task.celery_task_id,
-                    callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
-                    errbacks=[{'task': handle_work_error.name, 'kwargs': {'task_actual': task_actual}}],
                 )

-            # In exception cases, like a job failing pre-start checks, we send the websocket status message
-            # for jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
+            # In exception cases, like a job failing pre-start checks, we send the websocket status message.
+            # For jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
             if task.status != 'waiting':
                 task.websocket_emit_status(task.status)  # adds to on_commit
@@ -540,6 +545,11 @@ class TaskManager(TaskBase):
             if self.timed_out():
                 logger.warning("Task manager has reached time out while processing pending jobs, exiting loop early")
                 break
+
+            has_failed = self.process_job_dep_failures(task)
+            if has_failed:
+                continue
+
             blocked_by = self.job_blocked_by(task)
             if blocked_by:
                 self.subsystem_metrics.inc(f"{self.prefix}_tasks_blocked", 1)
@@ -653,6 +663,11 @@ class TaskManager(TaskBase):
                 reap_job(j, 'failed')

     def process_tasks(self):
+        # maintain a list of jobs that went to an early failure state,
+        # meaning the dispatcher never got these jobs,
+        # that means we have to handle notifications for those
+        self.pre_start_failed = []
+
         running_tasks = [t for t in self.all_tasks if t.status in ['waiting', 'running']]
         self.process_running_tasks(running_tasks)
         self.subsystem_metrics.inc(f"{self.prefix}_running_processed", len(running_tasks))
@@ -662,6 +677,11 @@ class TaskManager(TaskBase):
         self.process_pending_tasks(pending_tasks)
         self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(pending_tasks))

+        if self.pre_start_failed:
+            from awx.main.tasks.system import handle_failure_notifications
+
+            handle_failure_notifications.delay(self.pre_start_failed)
+
     def timeout_approval_node(self, task):
         if self.timed_out():
             logger.warning("Task manager has reached time out while processing approval nodes, exiting loop early")

@@ -74,6 +74,8 @@ from awx.main.utils.common import (
     extract_ansible_vars,
     get_awx_version,
     create_partition,
+    ScheduleWorkflowManager,
+    ScheduleTaskManager,
 )
 from awx.conf.license import get_license
 from awx.main.utils.handlers import SpecialInventoryHandler
@@ -450,6 +452,12 @@ class BaseTask(object):
             instance.ansible_version = ansible_version_info
             instance.save(update_fields=['ansible_version'])

+        # Run task manager appropriately for speculative dependencies
+        if instance.unifiedjob_blocked_jobs.exists():
+            ScheduleTaskManager().schedule()
+        if instance.spawned_by_workflow:
+            ScheduleWorkflowManager().schedule()
+
     def should_use_fact_cache(self):
         return False

@@ -16,7 +16,9 @@ class SignalExit(Exception):
 class SignalState:
     def reset(self):
         self.sigterm_flag = False
-        self.is_active = False
+        self.sigint_flag = False
+
+        self.is_active = False  # for nested context managers
         self.original_sigterm = None
         self.original_sigint = None
         self.raise_exception = False
@@ -24,23 +26,36 @@ class SignalState:
     def __init__(self):
         self.reset()

-    def set_flag(self, *args):
-        """Method to pass into the python signal.signal method to receive signals"""
-        self.sigterm_flag = True
+    def raise_if_needed(self):
         if self.raise_exception:
             self.raise_exception = False  # so it is not raised a second time in error handling
             raise SignalExit()

+    def set_sigterm_flag(self, *args):
+        self.sigterm_flag = True
+        self.raise_if_needed()
+
+    def set_sigint_flag(self, *args):
+        self.sigint_flag = True
+        self.raise_if_needed()
+
     def connect_signals(self):
         self.original_sigterm = signal.getsignal(signal.SIGTERM)
         self.original_sigint = signal.getsignal(signal.SIGINT)
-        signal.signal(signal.SIGTERM, self.set_flag)
-        signal.signal(signal.SIGINT, self.set_flag)
+        signal.signal(signal.SIGTERM, self.set_sigterm_flag)
+        signal.signal(signal.SIGINT, self.set_sigint_flag)
         self.is_active = True

     def restore_signals(self):
         signal.signal(signal.SIGTERM, self.original_sigterm)
         signal.signal(signal.SIGINT, self.original_sigint)
+        # if we got a signal while context manager was active, call parent methods.
+        if self.sigterm_flag:
+            if callable(self.original_sigterm):
+                self.original_sigterm()
+        if self.sigint_flag:
+            if callable(self.original_sigint):
+                self.original_sigint()
         self.reset()

@@ -48,7 +63,7 @@ signal_state = SignalState()


 def signal_callback():
-    return signal_state.sigterm_flag
+    return bool(signal_state.sigterm_flag or signal_state.sigint_flag)


 def with_signal_handling(f):

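The reworked `SignalState` tracks SIGTERM and SIGINT in separate flags and chains to whatever handlers were installed before it. A standalone sketch of the flag pattern that `signal_callback()` exposes (names simplified, not AWX code):

    import signal

    flags = {'sigterm': False, 'sigint': False}

    def _on_sigterm(*args):
        flags['sigterm'] = True

    def _on_sigint(*args):
        flags['sigint'] = True

    signal.signal(signal.SIGTERM, _on_sigterm)
    signal.signal(signal.SIGINT, _on_int := _on_sigint)

    def should_stop():
        # mirrors the new signal_callback(): either signal requests shutdown
        return flags['sigterm'] or flags['sigint']
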
@@ -53,13 +53,7 @@ from awx.main.models import (
 from awx.main.constants import ACTIVE_STATES
 from awx.main.dispatch.publish import task
 from awx.main.dispatch import get_task_queuename, reaper
-from awx.main.utils.common import (
-    get_type_for_model,
-    ignore_inventory_computed_fields,
-    ignore_inventory_group_removal,
-    ScheduleWorkflowManager,
-    ScheduleTaskManager,
-)
+from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal

 from awx.main.utils.reload import stop_local_services
 from awx.main.utils.pglock import advisory_lock

@@ -765,63 +759,19 @@ def awx_periodic_scheduler():
             emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))


-def schedule_manager_success_or_error(instance):
-    if instance.unifiedjob_blocked_jobs.exists():
-        ScheduleTaskManager().schedule()
-    if instance.spawned_by_workflow:
-        ScheduleWorkflowManager().schedule()
-
-
 @task(queue=get_task_queuename)
-def handle_work_success(task_actual):
-    try:
-        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
-    except ObjectDoesNotExist:
-        logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
-        return
-    if not instance:
-        return
-    schedule_manager_success_or_error(instance)
-
-
-@task(queue=get_task_queuename)
-def handle_work_error(task_actual):
-    try:
-        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
-    except ObjectDoesNotExist:
-        logger.warning('Missing {} `{}` in error callback.'.format(task_actual['type'], task_actual['id']))
-        return
-    if not instance:
-        return
-
-    subtasks = instance.get_jobs_fail_chain()  # reverse of dependent_jobs mostly
-    logger.debug(f'Executing error task id {task_actual["id"]}, subtasks: {[subtask.id for subtask in subtasks]}')
-
-    deps_of_deps = {}
-
-    for subtask in subtasks:
-        if subtask.celery_task_id != instance.celery_task_id and not subtask.cancel_flag and not subtask.status in ('successful', 'failed'):
-            # If there are multiple in the dependency chain, A->B->C, and this was called for A, blame B for clarity
-            blame_job = deps_of_deps.get(subtask.id, instance)
-            subtask.status = 'failed'
-            subtask.failed = True
-            if not subtask.job_explanation:
-                subtask.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
-                    get_type_for_model(type(blame_job)),
-                    blame_job.name,
-                    blame_job.id,
-                )
-            subtask.save()
-            subtask.websocket_emit_status("failed")
-
-            for sub_subtask in subtask.get_jobs_fail_chain():
-                deps_of_deps[sub_subtask.id] = subtask
-
-    # We only send 1 job complete message since all the job completion message
-    # handling does is trigger the scheduler. If we extend the functionality of
-    # what the job complete message handler does then we may want to send a
-    # completion event for each job here.
-    schedule_manager_success_or_error(instance)
+def handle_failure_notifications(task_ids):
+    """A task-ified version of the method that sends notifications."""
+    found_task_ids = set()
+    for instance in UnifiedJob.objects.filter(id__in=task_ids):
+        found_task_ids.add(instance.id)
+        try:
+            instance.send_notification_templates('failed')
+        except Exception:
+            logger.exception(f'Error preparing notifications for task {instance.id}')
+    deleted_tasks = set(task_ids) - found_task_ids
+    if deleted_tasks:
+        logger.warning(f'Could not send notifications for {deleted_tasks} because they were not found in the database')


 @task(queue=get_task_queuename)

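With the success/error dispatcher callbacks removed, failure notifications for jobs that never reached the dispatcher are handed off as a background task instead. A hedged sketch of the hand-off, mirroring the task-manager hunk earlier in this diff (the ids are illustrative):

    from awx.main.tasks.system import handle_failure_notifications

    # UnifiedJob pks that failed pre-start checks this scheduling cycle
    pre_start_failed = [42, 43]
    handle_failure_notifications.delay(pre_start_failed)  # enqueue via the dispatcher
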
@@ -2,12 +2,12 @@ from unittest import mock
 import pytest
 import json

+from ansible_base.utils.models import get_type_for_model
+
 from awx.api.versioning import reverse
 from awx.main.models.jobs import JobTemplate, Job
 from awx.main.models.activity_stream import ActivityStream
 from awx.main.access import JobTemplateAccess
-from awx.main.utils.common import get_type_for_model


 @pytest.fixture

@@ -309,3 +309,139 @@ def test_bulk_job_set_all_prompt(job_template, organization, inventory, project,
     assert node[0].limit == 'kansas'
     assert node[0].skip_tags == 'foobar'
     assert node[0].job_tags == 'untagged'
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize('num_hosts, num_queries', [(1, 70), (10, 150), (25, 250)])
+def test_bulk_host_delete_num_queries(organization, inventory, post, get, user, num_hosts, num_queries, django_assert_max_num_queries):
+    '''
+    If I am a...
+        org admin
+        inventory admin at org level
+        admin of a particular inventory
+        superuser
+
+    Bulk Host delete should take under a certain number of queries
+    '''
+    users_list = setup_admin_users_list(organization, inventory, user)
+    for u in users_list:
+        hosts = [{'name': str(uuid4())} for i in range(num_hosts)]
+        with django_assert_max_num_queries(num_queries):
+            bulk_host_create_response = post(reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': hosts}, u, expect=201).data
+            assert len(bulk_host_create_response['hosts']) == len(hosts), f"unexpected number of hosts created for user {u}"
+            hosts_ids_created = get_inventory_hosts(get, inventory.id, u)
+            bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, u, expect=201).data
+            assert len(bulk_host_delete_response['hosts'].keys()) == len(hosts), f"unexpected number of hosts deleted for user {u}"
+
+
+@pytest.mark.django_db
+def test_bulk_host_delete_rbac(organization, inventory, post, get, user):
+    '''
+    If I am a...
+        org admin
+        inventory admin at org level
+        admin of a particular inventory
+    ... I can bulk delete hosts
+
+    Everyone else cannot
+    '''
+    admin_users_list = setup_admin_users_list(organization, inventory, user)
+    users_list = setup_none_admin_uses_list(organization, inventory, user)
+
+    for indx, u in enumerate(admin_users_list):
+        bulk_host_create_response = post(
+            reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar-{indx}'}]}, u, expect=201
+        ).data
+        assert len(bulk_host_create_response['hosts']) == 1, f"unexpected number of hosts created for user {u}"
+        assert Host.objects.filter(inventory__id=inventory.id)[0].name == f'foobar-{indx}'
+        hosts_ids_created = get_inventory_hosts(get, inventory.id, u)
+        bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, u, expect=201).data
+        assert len(bulk_host_delete_response['hosts'].keys()) == 1, f"unexpected number of hosts deleted by user {u}"
+
+    for indx, create_u in enumerate(admin_users_list):
+        bulk_host_create_response = post(
+            reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar2-{indx}'}]}, create_u, expect=201
+        ).data
+        print(bulk_host_create_response)
+        assert bulk_host_create_response['hosts'][0]['name'] == f'foobar2-{indx}'
+        hosts_ids_created = get_inventory_hosts(get, inventory.id, create_u)
+        print(f"Try to delete {hosts_ids_created}")
+        for delete_u in users_list:
+            bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, delete_u, expect=403).data
+            assert "Lack permissions to delete hosts from this inventory." in bulk_host_delete_response['inventories'].values()
+
+
+@pytest.mark.django_db
+def test_bulk_host_delete_from_multiple_inv(organization, inventory, post, get, user):
+    '''
+    If I am inventory admin at org level
+
+    Bulk Host delete should be enabled only on my inventory
+    '''
+    num_hosts = 10
+    inventory.organization = organization
+
+    # Create second inventory
+    inv2 = organization.inventories.create(name="second-test-inv")
+    inv2.organization = organization
+    admin2_user = user('inventory2_admin', False)
+    inv2.admin_role.members.add(admin2_user)
+
+    admin_user = user('inventory_admin', False)
+    inventory.admin_role.members.add(admin_user)
+
+    organization.member_role.members.add(admin_user)
+    organization.member_role.members.add(admin2_user)
+
+    hosts = [{'name': str(uuid4())} for i in range(num_hosts)]
+    hosts2 = [{'name': str(uuid4())} for i in range(num_hosts)]
+
+    # create hosts in each of the inventories
+    bulk_host_create_response = post(reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': hosts}, admin_user, expect=201).data
+    assert len(bulk_host_create_response['hosts']) == len(hosts), f"unexpected number of hosts created for user {admin_user}"
+
+    bulk_host_create_response2 = post(reverse('api:bulk_host_create'), {'inventory': inv2.id, 'hosts': hosts2}, admin2_user, expect=201).data
+    assert len(bulk_host_create_response2['hosts']) == len(hosts), f"unexpected number of hosts created for user {admin2_user}"
+
+    # get all hosts ids - from both inventories
+    hosts_ids_created = get_inventory_hosts(get, inventory.id, admin_user)
+    hosts_ids_created += get_inventory_hosts(get, inv2.id, admin2_user)
+
+    expected_error = "Lack permissions to delete hosts from this inventory."
+    # try to delete ALL hosts with admin user of inventory 1.
+    for inv_name, invadmin in zip([inv2.name, inventory.name], [admin_user, admin2_user]):
+        bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, invadmin, expect=403).data
+        result_message = bulk_host_delete_response['inventories'][inv_name]
+        assert result_message == expected_error, f"deleted hosts without permission by user {invadmin}"
+
+
+def setup_admin_users_list(organization, inventory, user):
+    inventory.organization = organization
+    inventory_admin = user('inventory_admin', False)
+    org_admin = user('org_admin', False)
+    org_inv_admin = user('org_admin', False)
+    superuser = user('admin', True)
+    for u in [org_admin, org_inv_admin, inventory_admin]:
+        organization.member_role.members.add(u)
+    organization.admin_role.members.add(org_admin)
+    organization.inventory_admin_role.members.add(org_inv_admin)
+    inventory.admin_role.members.add(inventory_admin)
+    return [inventory_admin, org_inv_admin, superuser, org_admin]
+
+
+def setup_none_admin_uses_list(organization, inventory, user):
+    inventory.organization = organization
+    auditor = user('auditor', False)
+    member = user('member', False)
+    use_inv_member = user('member', False)
+    for u in [auditor, member, use_inv_member]:
+        organization.member_role.members.add(u)
+    inventory.use_role.members.add(use_inv_member)
+    organization.auditor_role.members.add(auditor)
+    return [auditor, member, use_inv_member]
+
+
+def get_inventory_hosts(get, inv_id, use_user):
+    data = get(reverse('api:inventory_hosts_list', kwargs={'pk': inv_id}), use_user, expect=200).data
+    results = [host['id'] for host in data['results']]
+    return results

@@ -40,6 +40,26 @@ def test_hashivault_kubernetes_auth():
     assert res == expected_res


+def test_hashivault_client_cert_auth_explicit_role():
+    kwargs = {
+        'client_cert_role': 'test-cert-1',
+    }
+    expected_res = {
+        'name': 'test-cert-1',
+    }
+    res = hashivault.client_cert_auth(**kwargs)
+    assert res == expected_res
+
+
+def test_hashivault_client_cert_auth_no_role():
+    kwargs = {}
+    expected_res = {
+        'name': None,
+    }
+    res = hashivault.client_cert_auth(**kwargs)
+    assert res == expected_res
+
+
 def test_hashivault_handle_auth_token():
     kwargs = {
         'token': 'the_token',
@@ -73,6 +93,22 @@ def test_hashivault_handle_auth_kubernetes():
     assert token == 'the_token'


+def test_hashivault_handle_auth_client_cert():
+    kwargs = {
+        'client_cert_public': "foo",
+        'client_cert_private': "bar",
+        'client_cert_role': 'test-cert-1',
+    }
+    auth_params = {
+        'name': 'test-cert-1',
+    }
+    with mock.patch.object(hashivault, 'method_auth') as method_mock:
+        method_mock.return_value = 'the_token'
+        token = hashivault.handle_auth(**kwargs)
+        method_mock.assert_called_with(**kwargs, auth_param=auth_params)
+        assert token == 'the_token'
+
+
 def test_hashivault_handle_auth_not_enough_args():
     with pytest.raises(Exception):
         hashivault.handle_auth()

awx/main/tests/functional/test_migrations.py (new file, 44 lines)
@@ -0,0 +1,44 @@
+import pytest
+
+from django_test_migrations.plan import all_migrations, nodes_to_tuples
+
+"""
+Most tests that live in here can probably be deleted at some point. They are mainly
+for a developer. When AWX versions that users upgrade from falls out of support that
+is when migration tests can be deleted. This is also a good time to squash. Squashing
+will likely mess with the tests that live here.
+
+The smoke test should be kept in here. The smoke test ensures that our migrations
+continue to work when sqlite is the backing database (vs. the default DB of postgres).
+"""
+
+
+@pytest.mark.django_db
+class TestMigrationSmoke:
+    def test_happy_path(self, migrator):
+        """
+        This smoke test runs all the migrations.
+
+        Example of how to use django-test-migration to invoke particular migration(s)
+        while weaving in object creation and assertions.
+
+        Note that this is more than just an example. It is a smoke test because it runs ALL
+        the migrations. Our "normal" unit tests subvert the migrations running because it is slow.
+        """
+        migration_nodes = all_migrations('default')
+        migration_tuples = nodes_to_tuples(migration_nodes)
+        final_migration = migration_tuples[-1]
+
+        migrator.apply_initial_migration(('main', None))
+        # I just picked a newish migration at the time of writing this.
+        # If someone from the future finds themselves here because they are squashing migrations
+        # it is fine to change the 0180_... below to some other newish migration
+        intermediate_state = migrator.apply_tested_migration(('main', '0180_add_hostmetric_fields'))
+
+        Instance = intermediate_state.apps.get_model('main', 'Instance')
+        # Create any old object in the database
+        Instance.objects.create(hostname='foobar', node_type='control')
+
+        final_state = migrator.apply_tested_migration(final_migration)
+        Instance = final_state.apps.get_model('main', 'Instance')
+        assert Instance.objects.filter(hostname='foobar').count() == 1

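The `migrator` fixture above comes from the django-test-migrations pytest plugin. A hedged sketch of a more targeted use of the same API, running a single (hypothetical) data migration and asserting on its effect:

    import pytest


    @pytest.mark.django_db
    def test_single_data_migration(migrator):
        old_state = migrator.apply_initial_migration(('main', '0001_initial'))
        Host = old_state.apps.get_model('main', 'Host')  # historical model, not the live one
        # ... create rows against the old schema here ...

        new_state = migrator.apply_tested_migration(('main', '0002_example_data_migration'))
        Host = new_state.apps.get_model('main', 'Host')
        assert Host.objects.count() == 0  # illustrative assertion
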
@@ -208,6 +208,6 @@ def test_auto_parenting():
 @pytest.mark.django_db
 def test_update_parents_keeps_teams(team, project):
     project.update_role.parents.add(team.member_role)
-    assert team.member_role in project.update_role  # test prep sanity check
+    assert list(Project.accessible_objects(team.member_role, 'update_role')) == [project]  # test prep sanity check
     update_role_parentage_for_instance(project)
-    assert team.member_role in project.update_role  # actual assertion
+    assert list(Project.accessible_objects(team.member_role, 'update_role')) == [project]  # actual assertion

@@ -92,7 +92,7 @@ def test_team_accessible_by(team, user, project):
     u = user('team_member', False)

     team.member_role.children.add(project.use_role)
-    assert team in project.read_role
+    assert list(Project.accessible_objects(team.member_role, 'read_role')) == [project]
     assert u not in project.read_role

     team.member_role.members.add(u)
@@ -104,7 +104,7 @@ def test_team_accessible_objects(team, user, project):
     u = user('team_member', False)

     team.member_role.children.add(project.use_role)
-    assert len(Project.accessible_objects(team, 'read_role')) == 1
+    assert len(Project.accessible_objects(team.member_role, 'read_role')) == 1
     assert not Project.accessible_objects(u, 'read_role')

     team.member_role.members.add(u)

@@ -122,25 +122,6 @@ def test_team_org_resource_role(ext_auth, organization, rando, org_admin, team):
     ] == [True for i in range(2)]


-@pytest.mark.django_db
-def test_user_accessible_objects(user, organization):
-    """
-    We cannot directly use accessible_objects for User model because
-    both editing and read permissions are obligated to complex business logic
-    """
-    admin = user('admin', False)
-    u = user('john', False)
-    access = UserAccess(admin)
-    assert access.get_queryset().count() == 1  # can only see himself
-
-    organization.member_role.members.add(u)
-    organization.member_role.members.add(admin)
-    assert access.get_queryset().count() == 2
-
-    organization.member_role.members.remove(u)
-    assert access.get_queryset().count() == 1
-
-
 @pytest.mark.django_db
 def test_org_admin_create_sys_auditor(org_admin):
     access = UserAccess(org_admin)

@@ -5,8 +5,8 @@ import tempfile
import shutil

from awx.main.tasks.jobs import RunJob
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files, handle_work_error
from awx.main.models import Instance, Job, InventoryUpdate, ProjectUpdate
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files
from awx.main.models import Instance, Job


@pytest.fixture
@@ -73,17 +73,3 @@ def test_does_not_run_reaped_job(mocker, mock_me):
job.refresh_from_db()
assert job.status == 'failed'
mock_run.assert_not_called()


@pytest.mark.django_db
def test_handle_work_error_nested(project, inventory_source):
pu = ProjectUpdate.objects.create(status='failed', project=project, celery_task_id='1234')
iu = InventoryUpdate.objects.create(status='pending', inventory_source=inventory_source, source='scm')
job = Job.objects.create(status='pending')
iu.dependent_jobs.add(pu)
job.dependent_jobs.add(pu, iu)
handle_work_error({'type': 'project_update', 'id': pu.id})
iu.refresh_from_db()
job.refresh_from_db()
assert iu.job_explanation == f'Previous Task Failed: {{"job_type": "project_update", "job_name": "", "job_id": "{pu.id}"}}'
assert job.job_explanation == f'Previous Task Failed: {{"job_type": "inventory_update", "job_name": "", "job_id": "{iu.id}"}}'

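The test above pins down the failure fan-out contract: when a task fails, every job listing it in dependent_jobs is failed with a job_explanation naming the broken task, and the failure cascades hop by hop. A rough single-hop sketch of that contract (not AWX's actual handler):

import json


def propagate_failure(failed_type, failed_id, dependents, failed_name=''):
    # Every dependent job inherits an explanation pointing at the task that broke.
    explanation = 'Previous Task Failed: ' + json.dumps(
        {'job_type': failed_type, 'job_name': failed_name, 'job_id': str(failed_id)}
    )
    for dep in dependents:
        dep.status = 'failed'
        dep.job_explanation = explanation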
@@ -3,15 +3,13 @@
import pytest

# Django
from django.core.exceptions import FieldDoesNotExist
from rest_framework.exceptions import PermissionDenied

from rest_framework.exceptions import PermissionDenied, ParseError
from ansible_base.filters.rest_framework.field_lookup_backend import FieldLookupBackend

from awx.api.filters import FieldLookupBackend, OrderByBackend, get_field_from_path
from awx.main.models import (
AdHocCommand,
ActivityStream,
Credential,
Job,
JobTemplate,
SystemJob,
@@ -20,88 +18,11 @@ from awx.main.models import (
WorkflowJob,
WorkflowJobTemplate,
WorkflowJobOptions,
InventorySource,
JobEvent,
)
from awx.main.models.oauth import OAuth2Application
from awx.main.models.jobs import JobOptions


def test_related():
field_lookup = FieldLookupBackend()
lookup = '__'.join(['inventory', 'organization', 'pk'])
field, new_lookup = field_lookup.get_field_from_lookup(InventorySource, lookup)
print(field)
print(new_lookup)


def test_invalid_filter_key():
field_lookup = FieldLookupBackend()
# FieldDoesNotExist is caught and converted to ParseError by filter_queryset
with pytest.raises(FieldDoesNotExist) as excinfo:
field_lookup.value_to_python(JobEvent, 'event_data.task_action', 'foo')
assert 'has no field named' in str(excinfo)


def test_invalid_field_hop():
with pytest.raises(ParseError) as excinfo:
get_field_from_path(Credential, 'organization__description__user')
assert 'No related model for' in str(excinfo)


def test_invalid_order_by_key():
field_order_by = OrderByBackend()
with pytest.raises(ParseError) as excinfo:
[f for f in field_order_by._validate_ordering_fields(JobEvent, ('event_data.task_action',))]
assert 'has no field named' in str(excinfo)


@pytest.mark.parametrize(u"empty_value", [u'', ''])
def test_empty_in(empty_value):
field_lookup = FieldLookupBackend()
with pytest.raises(ValueError) as excinfo:
field_lookup.value_to_python(JobTemplate, 'project__name__in', empty_value)
assert 'empty value for __in' in str(excinfo.value)


@pytest.mark.parametrize(u"valid_value", [u'foo', u'foo,'])
def test_valid_in(valid_value):
field_lookup = FieldLookupBackend()
value, new_lookup, _ = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value)
assert 'foo' in value


def test_invalid_field():
invalid_field = u"ヽヾ"
field_lookup = FieldLookupBackend()
with pytest.raises(ValueError) as excinfo:
field_lookup.value_to_python(WorkflowJobTemplate, invalid_field, 'foo')
assert 'is not an allowed field name. Must be ascii encodable.' in str(excinfo.value)


def test_valid_iexact():
field_lookup = FieldLookupBackend()
value, new_lookup, _ = field_lookup.value_to_python(JobTemplate, 'project__name__iexact', 'foo')
assert 'foo' in value


def test_invalid_iexact():
field_lookup = FieldLookupBackend()
with pytest.raises(ValueError) as excinfo:
field_lookup.value_to_python(Job, 'id__iexact', '1')
assert 'is not a text field and cannot be filtered by case-insensitive search' in str(excinfo.value)


@pytest.mark.parametrize('lookup_suffix', ['', 'contains', 'startswith', 'in'])
@pytest.mark.parametrize('password_field', Credential.PASSWORD_FIELDS)
def test_filter_on_password_field(password_field, lookup_suffix):
field_lookup = FieldLookupBackend()
lookup = '__'.join(filter(None, [password_field, lookup_suffix]))
with pytest.raises(PermissionDenied) as excinfo:
field, new_lookup = field_lookup.get_field_from_lookup(Credential, lookup)
assert 'not allowed' in str(excinfo.value)


@pytest.mark.parametrize(
'model, query',
[
@@ -128,10 +49,3 @@ def test_filter_sensitive_fields_and_relations(model, query):
with pytest.raises(PermissionDenied) as excinfo:
field, new_lookup = field_lookup.get_field_from_lookup(model, query)
assert 'not allowed' in str(excinfo.value)


def test_looping_filters_prohibited():
field_lookup = FieldLookupBackend()
with pytest.raises(ParseError) as loop_exc:
field_lookup.get_field_from_lookup(Job, 'job_events__job__job_events')
assert 'job_events' in str(loop_exc.value)

@@ -47,7 +47,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
'action(type="omhttp" server="logs-01.loggly.com" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/")', # noqa
'action(type="omhttp" server="logs-01.loggly.com" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/")', # noqa
]
),
),
@@ -61,7 +61,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
'action(type="omfwd" target="localhost" port="9000" protocol="udp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")', # noqa
'action(type="omfwd" target="localhost" port="9000" protocol="udp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5")', # noqa
]
),
),
@@ -75,7 +75,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
'action(type="omfwd" target="localhost" port="9000" protocol="tcp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")', # noqa
'action(type="omfwd" target="localhost" port="9000" protocol="tcp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5")', # noqa
]
),
),
@@ -89,7 +89,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
'action(type="omhttp" server="yoursplunk" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
'action(type="omhttp" server="yoursplunk" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
]
),
),
@@ -103,7 +103,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
'action(type="omhttp" server="yoursplunk" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
'action(type="omhttp" server="yoursplunk" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
]
),
),
@@ -117,7 +117,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
]
),
),
@@ -131,7 +131,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
]
),
),
@@ -145,7 +145,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
]
),
),
@@ -159,7 +159,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
]
),
),
@@ -173,7 +173,7 @@ data_loggly = {
'\n'.join(
[
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
'action(type="omhttp" server="endpoint5.collection.us2.sumologic.com" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==")', # noqa
'action(type="omhttp" server="endpoint5.collection.us2.sumologic.com" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==")', # noqa
]
),
),

@@ -1,8 +1,43 @@
import signal
import functools

from awx.main.tasks.signals import signal_state, signal_callback, with_signal_handling


def pytest_sigint():
pytest_sigint.called_count += 1


def pytest_sigterm():
pytest_sigterm.called_count += 1


def tmp_signals_for_test(func):
"""
When our internal signal handlers run, they call the original signal
handlers once their own work is finished.
Normally this would crash the test runners, because those original
handlers shut down the process.
So this is a decorator that safely swaps in do-nothing signal handlers
for the duration of the test so that the test run does not crash.
"""

@functools.wraps(func)
def wrapper():
original_sigterm = signal.getsignal(signal.SIGTERM)
original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGTERM, pytest_sigterm)
signal.signal(signal.SIGINT, pytest_sigint)
pytest_sigterm.called_count = 0
pytest_sigint.called_count = 0
func()
signal.signal(signal.SIGTERM, original_sigterm)
signal.signal(signal.SIGINT, original_sigint)

return wrapper


@tmp_signals_for_test
def test_outer_inner_signal_handling():
"""
Even if the flag is set in the outer context, its value should persist in the inner context
@@ -15,17 +50,22 @@ def test_outer_inner_signal_handling():
@with_signal_handling
def f1():
assert signal_callback() is False
signal_state.set_flag()
signal_state.set_sigterm_flag()
assert signal_callback()
f2()

original_sigterm = signal.getsignal(signal.SIGTERM)
assert signal_callback() is False
assert pytest_sigterm.called_count == 0
assert pytest_sigint.called_count == 0
f1()
assert signal_callback() is False
assert signal.getsignal(signal.SIGTERM) is original_sigterm
assert pytest_sigterm.called_count == 1
assert pytest_sigint.called_count == 0


@tmp_signals_for_test
def test_inner_outer_signal_handling():
"""
Even if the flag is set in the inner context, its value should persist in the outer context
@@ -34,7 +74,7 @@ def test_inner_outer_signal_handling():
@with_signal_handling
def f2():
assert signal_callback() is False
signal_state.set_flag()
signal_state.set_sigint_flag()
assert signal_callback()

@with_signal_handling
@@ -45,6 +85,10 @@ def test_inner_outer_signal_handling():

original_sigterm = signal.getsignal(signal.SIGTERM)
assert signal_callback() is False
assert pytest_sigterm.called_count == 0
assert pytest_sigint.called_count == 0
f1()
assert signal_callback() is False
assert signal.getsignal(signal.SIGTERM) is original_sigterm
assert pytest_sigterm.called_count == 0
assert pytest_sigint.called_count == 1
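A condensed sketch of the behavior these tests pin down, assuming a simplified signal_state; the real implementation in awx.main.tasks.signals also installs and restores the process signal handlers, which is why the tests assert on signal.getsignal afterwards:

import functools


class _SignalState:
    def __init__(self):
        self.sigterm_flag = False
        self.sigint_flag = False
        self.level = 0  # depth of nested @with_signal_handling frames

    def set_sigterm_flag(self):
        self.sigterm_flag = True

    def set_sigint_flag(self):
        self.sigint_flag = True


signal_state = _SignalState()


def signal_callback():
    return signal_state.sigterm_flag or signal_state.sigint_flag


def with_signal_handling(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        signal_state.level += 1
        try:
            return func(*args, **kwargs)  # a flag set at any depth stays visible at every depth
        finally:
            signal_state.level -= 1
            if signal_state.level == 0:  # outermost frame: reset for the next task
                signal_state.sigterm_flag = signal_state.sigint_flag = False
    return wrapper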
@@ -143,13 +143,6 @@ def test_send_notifications_job_id(mocker):
assert UnifiedJob.objects.get.called_with(id=1)


def test_work_success_callback_missing_job():
task_data = {'type': 'project_update', 'id': 9999}
with mock.patch('django.db.models.query.QuerySet.get') as get_mock:
get_mock.side_effect = ProjectUpdate.DoesNotExist()
assert system.handle_work_success(task_data) is None


@mock.patch('awx.main.models.UnifiedJob.objects.get')
@mock.patch('awx.main.models.Notification.objects.filter')
def test_send_notifications_list(mock_notifications_filter, mock_job_get, mocker):

@@ -12,6 +12,8 @@ from unittest import mock

from rest_framework.exceptions import ParseError

from ansible_base.utils.models import get_type_for_model

from awx.main.utils import common
from awx.api.validators import HostnameRegexValidator

@@ -106,7 +108,7 @@ TEST_MODELS = [
# Cases relied on for scheduler dependent jobs list
@pytest.mark.parametrize('model,name', TEST_MODELS)
def test_get_type_for_model(model, name):
assert common.get_type_for_model(model) == name
assert get_type_for_model(model) == name


def test_get_model_for_invalid_type():

@@ -68,7 +68,9 @@ class mockHost:

@mock.patch('awx.main.utils.filters.get_model', return_value=mockHost())
class TestSmartFilterQueryFromString:
@mock.patch('awx.api.filters.get_fields_from_path', lambda model, path: ([model], path))  # disable field filtering, because a__b isn't a real Host field
@mock.patch(
'ansible_base.filters.rest_framework.field_lookup_backend.get_fields_from_path', lambda model, path: ([model], path)
)  # disable field filtering, because a__b isn't a real Host field
@pytest.mark.parametrize(
"filter_string,q_expected",
[

@@ -52,12 +52,10 @@ __all__ = [
'get_awx_http_client_headers',
'get_awx_version',
'update_scm_url',
'get_type_for_model',
'get_model_for_type',
'copy_model_by_class',
'copy_m2m_relationships',
'prefetch_page_capabilities',
'to_python_boolean',
'datetime_hook',
'ignore_inventory_computed_fields',
'ignore_inventory_group_removal',
@@ -110,18 +108,6 @@ def get_object_or_400(klass, *args, **kwargs):
raise ParseError(*e.args)


def to_python_boolean(value, allow_none=False):
value = str(value)
if value.lower() in ('true', '1', 't'):
return True
elif value.lower() in ('false', '0', 'f'):
return False
elif allow_none and value.lower() in ('none', 'null'):
return None
else:
raise ValueError(_(u'Unable to convert "%s" to boolean') % value)

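For reference, grounded directly in the branches above, the accepted spellings are case-insensitive:

assert to_python_boolean('True') is True
assert to_python_boolean('0') is False
assert to_python_boolean('null', allow_none=True) is None
# anything else, e.g. to_python_boolean('maybe'), raises ValueError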
def datetime_hook(d):
new_d = {}
for key, value in d.items():
@@ -569,14 +555,6 @@ def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))


def get_type_for_model(model):
"""
Return type name for a given model class.
"""
opts = model._meta.concrete_model._meta
return camelcase_to_underscore(opts.object_name)


def get_model_for_type(type_name):
"""
Return model class for a given type name.

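Because the name is derived mechanically via camelcase_to_underscore on the concrete model's object_name, the mapping is predictable; the TEST_MODELS cases earlier in this diff assert exactly this:

from awx.main.models import JobTemplate, WorkflowJob

assert get_type_for_model(JobTemplate) == 'job_template'
assert get_type_for_model(WorkflowJob) == 'workflow_job'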
@@ -1,27 +1,10 @@
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.

from itertools import chain

from awx.settings.application_name import set_application_name
from django.conf import settings


def get_all_field_names(model):
# Implements compatibility with _meta.get_all_field_names
# See: https://docs.djangoproject.com/en/1.11/ref/models/meta/#migrating-from-the-old-api
return list(
set(
chain.from_iterable(
(field.name, field.attname) if hasattr(field, 'attname') else (field.name,)
for field in model._meta.get_fields()
# For complete backwards compatibility, you may want to exclude
# GenericForeignKey from the results.
if not (field.many_to_one and field.related_model is None)
)
)
)


def set_connection_name(function):
set_application_name(settings.DATABASES, settings.CLUSTER_HOST_ID, function=function)

@@ -17,11 +17,26 @@ def construct_rsyslog_conf_template(settings=settings):
port = getattr(settings, 'LOG_AGGREGATOR_PORT', '')
protocol = getattr(settings, 'LOG_AGGREGATOR_PROTOCOL', '')
timeout = getattr(settings, 'LOG_AGGREGATOR_TCP_TIMEOUT', 5)
max_disk_space_main_queue = getattr(settings, 'LOG_AGGREGATOR_MAX_DISK_USAGE_GB', 1)
action_queue_size = getattr(settings, 'LOG_AGGREGATOR_ACTION_QUEUE_SIZE', 131072)
max_disk_space_action_queue = getattr(settings, 'LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB', 1)
spool_directory = getattr(settings, 'LOG_AGGREGATOR_MAX_DISK_USAGE_PATH', '/var/lib/awx').rstrip('/')
error_log_file = getattr(settings, 'LOG_AGGREGATOR_RSYSLOGD_ERROR_LOG_FILE', '')

queue_options = [
f'queue.spoolDirectory="{spool_directory}"',
'queue.filename="awx-external-logger-action-queue"',
f'queue.maxDiskSpace="{max_disk_space_action_queue}g"',  # overall disk space for all queue files
'queue.maxFileSize="100m"',  # individual file size
'queue.type="LinkedList"',
'queue.saveOnShutdown="on"',
'queue.syncqueuefiles="on"',  # (f)sync when checkpoint occurs
'queue.checkpointInterval="1000"',  # Update disk queue every 1000 messages
f'queue.size="{action_queue_size}"',  # max number of messages in queue
f'queue.highwaterMark="{int(action_queue_size * 0.75)}"',  # 75% of queue.size
f'queue.discardMark="{int(action_queue_size * 0.9)}"',  # 90% of queue.size
'queue.discardSeverity="5"',  # Only discard notice, info, debug if we must discard anything
]

if not os.access(spool_directory, os.W_OK):
spool_directory = '/var/lib/awx'

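The watermark comments are easy to verify by hand: with the default LOG_AGGREGATOR_ACTION_QUEUE_SIZE of 131072 messages, the derived rsyslog thresholds match the values baked into the test fixtures earlier in this diff:

action_queue_size = 131072
assert int(action_queue_size * 0.75) == 98304   # queue.highwaterMark: start spooling to disk
assert int(action_queue_size * 0.9) == 117964   # queue.discardMark: start dropping severity >= 5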
@@ -33,7 +48,6 @@ def construct_rsyslog_conf_template(settings=settings):
'$WorkDirectory /var/lib/awx/rsyslog',
f'$MaxMessageSize {max_bytes}',
'$IncludeConfig /var/lib/awx/rsyslog/conf.d/*.conf',
f'main_queue(queue.spoolDirectory="{spool_directory}" queue.maxdiskspace="{max_disk_space_main_queue}g" queue.type="Disk" queue.filename="awx-external-logger-backlog")', # noqa
'module(load="imuxsock" SysSock.Use="off")',
'input(type="imuxsock" Socket="' + settings.LOGGING['handlers']['external_logger']['address'] + '" unlink="on" RateLimit.Burst="0")',
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
@@ -79,12 +93,7 @@ def construct_rsyslog_conf_template(settings=settings):
'action.resumeRetryCount="-1"',
'template="awx"',
f'action.resumeInterval="{timeout}"',
f'queue.spoolDirectory="{spool_directory}"',
'queue.filename="awx-external-logger-action-queue"',
f'queue.maxdiskspace="{max_disk_space_action_queue}g"',
'queue.type="LinkedList"',
'queue.saveOnShutdown="on"',
]
] + queue_options
if error_log_file:
params.append(f'errorfile="{error_log_file}"')
if parsed.path:
@@ -112,9 +121,18 @@ def construct_rsyslog_conf_template(settings=settings):
params = ' '.join(params)
parts.extend(['module(load="omhttp")', f'action({params})'])
elif protocol and host and port:
parts.append(
f'action(type="omfwd" target="{host}" port="{port}" protocol="{protocol}" action.resumeRetryCount="-1" action.resumeInterval="{timeout}" template="awx")' # noqa
)
params = [
'type="omfwd"',
f'target="{host}"',
f'port="{port}"',
f'protocol="{protocol}"',
'action.resumeRetryCount="-1"',
f'action.resumeInterval="{timeout}"',
'template="awx"',
] + queue_options
params = ' '.join(params)
parts.append(f'action({params})')

else:
parts.append('action(type="omfile" file="/dev/null")')  # rsyslog needs *at least* one valid action to start
tmpl = '\n'.join(parts)

@@ -161,7 +161,7 @@ class SmartFilter(object):
else:
# detect loops and restrict access to sensitive fields
# this import is intentional here to avoid a circular import
from awx.api.filters import FieldLookupBackend
from ansible_base.filters.rest_framework.field_lookup_backend import FieldLookupBackend

FieldLookupBackend().get_field_from_lookup(Host, k)
kwargs[k] = v

@@ -199,6 +199,8 @@ class Licenser(object):
license['support_level'] = attr.get('value')
elif attr.get('name') == 'usage':
license['usage'] = attr.get('value')
elif attr.get('name') == 'ph_product_name' and attr.get('value') == 'RHEL Developer':
license['license_type'] = 'developer'

if not license:
logger.error("No valid subscriptions found in manifest")
@@ -322,7 +324,9 @@ class Licenser(object):
def generate_license_options_from_entitlements(self, json):
from dateutil.parser import parse

ValidSub = collections.namedtuple('ValidSub', 'sku name support_level end_date trial quantity pool_id satellite subscription_id account_number usage')
ValidSub = collections.namedtuple(
'ValidSub', 'sku name support_level end_date trial developer_license quantity pool_id satellite subscription_id account_number usage'
)
valid_subs = []
for sub in json:
satellite = sub.get('satellite')
@@ -350,6 +354,7 @@ class Licenser(object):

sku = sub['productId']
trial = sku.startswith('S')  # i.e., SER/SVC
developer_license = False
support_level = ''
usage = ''
pool_id = sub['id']
@@ -364,9 +369,24 @@ class Licenser(object):
support_level = attr.get('value')
elif attr.get('name') == 'usage':
usage = attr.get('value')
elif attr.get('name') == 'ph_product_name' and attr.get('value') == 'RHEL Developer':
developer_license = True

valid_subs.append(
ValidSub(sku, sub['productName'], support_level, end_date, trial, quantity, pool_id, satellite, subscription_id, account_number, usage)
ValidSub(
sku,
sub['productName'],
support_level,
end_date,
trial,
developer_license,
quantity,
pool_id,
satellite,
subscription_id,
account_number,
usage,
)
)

if valid_subs:
@@ -381,6 +401,8 @@ class Licenser(object):
if sub.trial:
license._attrs['trial'] = True
license._attrs['license_type'] = 'trial'
if sub.developer_license:
license._attrs['license_type'] = 'developer'
license._attrs['instance_count'] = min(MAX_INSTANCES, license._attrs['instance_count'])
human_instances = license._attrs['instance_count']
if human_instances == MAX_INSTANCES:

@@ -3,6 +3,8 @@ import logging
import asyncio
from typing import Dict

import ipaddress

import aiohttp
from aiohttp import client_exceptions
import aioredis
@@ -71,7 +73,16 @@ class WebsocketRelayConnection:
if not self.channel_layer:
self.channel_layer = get_channel_layer()

uri = f"{self.protocol}://{self.remote_host}:{self.remote_port}/websocket/relay/"
# figure out whether what we have is an IP address; IPv6 addresses must have brackets added for the uri
uri_hostname = self.remote_host
try:
# Throws ValueError if self.remote_host is a hostname like example.com, not an IPv4 or IPv6 ip address
if isinstance(ipaddress.ip_address(uri_hostname), ipaddress.IPv6Address):
uri_hostname = f"[{uri_hostname}]"
except ValueError:
pass

uri = f"{self.protocol}://{uri_hostname}:{self.remote_port}/websocket/relay/"
timeout = aiohttp.ClientTimeout(total=10)

secret_val = WebsocketSecretAuthHelper.construct_secret()
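The bracket rule in isolation, as a small self-contained sketch; RFC 3986 requires IPv6 literals to be bracketed inside a URI authority, while IPv4 literals and DNS names pass through unchanged:

import ipaddress

def uri_host(host: str) -> str:
    try:
        if isinstance(ipaddress.ip_address(host), ipaddress.IPv6Address):
            return f'[{host}]'
    except ValueError:
        pass  # not an IP literal, e.g. example.com
    return host

assert uri_host('2001:db8::1') == '[2001:db8::1]'
assert uri_host('10.0.0.1') == '10.0.0.1'
assert uri_host('example.com') == 'example.com'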
@@ -216,7 +227,8 @@ class WebSocketRelayManager(object):
continue
try:
if not notif.payload or notif.channel != "web_ws_heartbeat":
return
logger.warning(f"Unexpected channel or missing payload. {notif.channel}, {notif.payload}")
continue

try:
payload = json.loads(notif.payload)
@@ -224,13 +236,15 @@ class WebSocketRelayManager(object):
logmsg = "Failed to decode message from pg_notify channel `web_ws_heartbeat`"
if logger.isEnabledFor(logging.DEBUG):
logmsg = "{} {}".format(logmsg, payload)
logger.warning(logmsg)
return
logger.warning(logmsg)
continue

# Skip if the message comes from the same host we are running on
# In this case, we'll be sharing a redis, no need to relay.
if payload.get("hostname") == self.local_hostname:
return
hostname = payload.get("hostname")
logger.debug(f"Received a heartbeat request for {hostname}. Skipping as we use redis for local host.")
continue

action = payload.get("action")

@@ -239,7 +253,7 @@ class WebSocketRelayManager(object):
ip = payload.get("ip") or hostname  # fall back to hostname if ip isn't supplied
if ip is None:
logger.warning(f"Received invalid {action} ws_heartbeat, missing hostname and ip: {payload}")
return
continue
logger.debug(f"Web host {hostname} ({ip}) {action} heartbeat received.")

if action == "online":

@@ -11,6 +11,7 @@ from datetime import timedelta

# python-ldap
import ldap
from split_settings.tools import include


DEBUG = True
@@ -131,6 +132,9 @@ BULK_JOB_MAX_LAUNCH = 100
# Maximum number of hosts that can be created in 1 bulk host create
BULK_HOST_MAX_CREATE = 100

# Maximum number of hosts that can be deleted in 1 bulk host delete
BULK_HOST_MAX_DELETE = 250

SITE_ID = 1

# Make this unique, and don't share it with anybody.
@@ -336,6 +340,7 @@ INSTALLED_APPS = [
'awx.ui',
'awx.sso',
'solo',
'ansible_base',
]

INTERNAL_IPS = ('127.0.0.1',)
@@ -350,12 +355,6 @@ REST_FRAMEWORK = {
'awx.api.authentication.LoggedBasicAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': ('awx.api.permissions.ModelAccessPermission',),
'DEFAULT_FILTER_BACKENDS': (
'awx.api.filters.TypeFilterBackend',
'awx.api.filters.FieldLookupBackend',
'rest_framework.filters.SearchFilter',
'awx.api.filters.OrderByBackend',
),
'DEFAULT_PARSER_CLASSES': ('awx.api.parsers.JSONParser',),
'DEFAULT_RENDERER_CLASSES': ('awx.api.renderers.DefaultJSONRenderer', 'awx.api.renderers.BrowsableAPIRenderer'),
'DEFAULT_METADATA_CLASS': 'awx.api.metadata.Metadata',
@@ -796,7 +795,7 @@ LOG_AGGREGATOR_ENABLED = False
LOG_AGGREGATOR_TCP_TIMEOUT = 5
LOG_AGGREGATOR_VERIFY_CERT = True
LOG_AGGREGATOR_LEVEL = 'INFO'
LOG_AGGREGATOR_MAX_DISK_USAGE_GB = 1  # Main queue
LOG_AGGREGATOR_ACTION_QUEUE_SIZE = 131072
LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB = 1  # Action queue
LOG_AGGREGATOR_MAX_DISK_USAGE_PATH = '/var/lib/awx'
LOG_AGGREGATOR_RSYSLOGD_DEBUG = False
@@ -1063,3 +1062,12 @@ CLEANUP_HOST_METRICS_HARD_THRESHOLD = 36  # months
# Host metric summary monthly task - last time of run
HOST_METRIC_SUMMARY_TASK_LAST_TS = None
HOST_METRIC_SUMMARY_TASK_INTERVAL = 7  # days


# django-ansible-base
ANSIBLE_BASE_FEATURES = {'AUTHENTICATION': False, 'SWAGGER': False, 'FILTERING': True}

from ansible_base import settings  # noqa: E402

settings_file = os.path.join(os.path.dirname(settings.__file__), 'dynamic_settings.py')
include(settings_file)

@@ -1341,7 +1341,6 @@ register(
'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT',
field_class=fields.CharField,
allow_blank=True,
required=True,
validators=[validate_certificate],
label=_('SAML Service Provider Public Certificate'),
help_text=_('Create a keypair to use as a service provider (SP) and include the certificate content here.'),
@@ -1353,7 +1352,6 @@ register(
'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY',
field_class=fields.CharField,
allow_blank=True,
required=True,
validators=[validate_private_key],
label=_('SAML Service Provider Private Key'),
help_text=_('Create a keypair to use as a service provider (SP) and include the private key content here.'),
@@ -1365,7 +1363,6 @@ register(
register(
'SOCIAL_AUTH_SAML_ORG_INFO',
field_class=SAMLOrgInfoField,
required=True,
label=_('SAML Service Provider Organization Info'),
help_text=_('Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax.'),
category=_('SAML'),
@@ -1379,7 +1376,6 @@ register(
'SOCIAL_AUTH_SAML_TECHNICAL_CONTACT',
field_class=SAMLContactField,
allow_blank=True,
required=True,
label=_('SAML Service Provider Technical Contact'),
help_text=_('Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax.'),
category=_('SAML'),
@@ -1391,7 +1387,6 @@ register(
'SOCIAL_AUTH_SAML_SUPPORT_CONTACT',
field_class=SAMLContactField,
allow_blank=True,
required=True,
label=_('SAML Service Provider Support Contact'),
help_text=_('Provide the name and email address of the support contact for your service provider. Refer to the documentation for example syntax.'),
category=_('SAML'),

@@ -20,6 +20,7 @@ import UnsupportedScheduleForm from './UnsupportedScheduleForm';
import parseRuleObj, { UnsupportedRRuleError } from './parseRuleObj';
import buildRuleObj from './buildRuleObj';
import buildRuleSet from './buildRuleSet';
import mergeArraysByCredentialType from './mergeArraysByCredentialType';

const NUM_DAYS_PER_FREQUENCY = {
week: 7,
@@ -350,6 +351,12 @@ function ScheduleForm({
startDate: currentDate,
startTime: time,
timezone: schedule.timezone || now.zoneName,
credentials: mergeArraysByCredentialType(
resourceDefaultCredentials,
credentials
),
labels: originalLabels.current,
instance_groups: originalInstanceGroups.current,
};

if (hasDaysToKeepField) {

@@ -0,0 +1,18 @@
export default function mergeArraysByCredentialType(
defaultCredentials = [],
overrides = []
) {
const mergedArray = [...defaultCredentials];

overrides.forEach((override) => {
const index = mergedArray.findIndex(
(defaultCred) => defaultCred.credential_type === override.credential_type
);
if (index !== -1) {
mergedArray.splice(index, 1);
}
mergedArray.push(override);
});

return mergedArray;
}
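The merge rule, restated in Python for clarity (a sketch, not part of the codebase): defaults are kept unless an override shares their credential_type, and overrides always win:

def merge_by_credential_type(defaults, overrides):
    merged = list(defaults)
    for override in overrides:
        # drop any default of the same type, then append the override at the end
        merged = [c for c in merged if c['credential_type'] != override['credential_type']]
        merged.append(override)
    return merged

assert merge_by_credential_type(
    [{'id': 1, 'credential_type': 2}, {'id': 3, 'credential_type': 4}],
    [{'id': 7, 'credential_type': 2}],
) == [{'id': 3, 'credential_type': 4}, {'id': 7, 'credential_type': 2}]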
@@ -29,7 +29,7 @@ SettingsAPI.readCategory.mockResolvedValue({
LOG_AGGREGATOR_TCP_TIMEOUT: 5,
LOG_AGGREGATOR_VERIFY_CERT: true,
LOG_AGGREGATOR_LEVEL: 'INFO',
LOG_AGGREGATOR_MAX_DISK_USAGE_GB: 1,
LOG_AGGREGATOR_ACTION_QUEUE_SIZE: 131072,
LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB: 1,
LOG_AGGREGATOR_MAX_DISK_USAGE_PATH: '/var/lib/awx',
LOG_AGGREGATOR_RSYSLOGD_DEBUG: false,

@@ -119,7 +119,7 @@ function LoggingEdit() {
...logging.LOG_AGGREGATOR_ENABLED,
help_text: (
<>
{logging.LOG_AGGREGATOR_ENABLED.help_text}
{logging.LOG_AGGREGATOR_ENABLED?.help_text}
{!formik.values.LOG_AGGREGATOR_ENABLED &&
(!formik.values.LOG_AGGREGATOR_HOST ||
!formik.values.LOG_AGGREGATOR_TYPE) && (

@@ -31,7 +31,7 @@ const mockSettings = {
LOG_AGGREGATOR_TCP_TIMEOUT: 123,
LOG_AGGREGATOR_VERIFY_CERT: true,
LOG_AGGREGATOR_LEVEL: 'ERROR',
LOG_AGGREGATOR_MAX_DISK_USAGE_GB: 1,
LOG_AGGREGATOR_ACTION_QUEUE_SIZE: 131072,
LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB: 1,
LOG_AGGREGATOR_MAX_DISK_USAGE_PATH: '/var/lib/awx',
LOG_AGGREGATOR_RSYSLOGD_DEBUG: false,

@@ -659,21 +659,21 @@
]
]
},
"LOG_AGGREGATOR_MAX_DISK_USAGE_GB": {
"LOG_AGGREGATOR_ACTION_QUEUE_SIZE": {
"type": "integer",
"required": false,
"label": "Maximum disk persistence for external log aggregation (in GB)",
"help_text": "Amount of data to store (in gigabytes) during an outage of the external log aggregator (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting for main_queue. Notably, this is used for the rsyslogd main queue (for input messages).",
"label": "Maximum number of messages that can be stored in the log action queue",
"help_text": "Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5).",
"min_value": 1,
"category": "Logging",
"category_slug": "logging",
"default": 1
"default": 131072
},
"LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB": {
"type": "integer",
"required": false,
"label": "Maximum disk persistence for rsyslogd action queuing (in GB)",
"help_text": "Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). Like LOG_AGGREGATOR_MAX_DISK_USAGE_GB, it stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.",
"help_text": "Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.",
"min_value": 1,
"category": "Logging",
"category_slug": "logging",
@@ -5016,10 +5016,10 @@
]
]
},
"LOG_AGGREGATOR_MAX_DISK_USAGE_GB": {
"LOG_AGGREGATOR_ACTION_QUEUE_SIZE": {
"type": "integer",
"label": "Maximum disk persistence for external log aggregation (in GB)",
"help_text": "Amount of data to store (in gigabytes) during an outage of the external log aggregator (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting for main_queue. Notably, this is used for the rsyslogd main queue (for input messages).",
"label": "Maximum number of messages that can be stored in the log action queue",
"help_text": "Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5).",
"min_value": 1,
"category": "Logging",
"category_slug": "logging",
@@ -5028,7 +5028,7 @@
"LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB": {
"type": "integer",
"label": "Maximum disk persistence for rsyslogd action queuing (in GB)",
"help_text": "Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). Like LOG_AGGREGATOR_MAX_DISK_USAGE_GB, it stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.",
"help_text": "Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.",
"min_value": 1,
"category": "Logging",
"category_slug": "logging",

@@ -70,7 +70,7 @@
"LOG_AGGREGATOR_TCP_TIMEOUT": 5,
"LOG_AGGREGATOR_VERIFY_CERT": true,
"LOG_AGGREGATOR_LEVEL": "INFO",
"LOG_AGGREGATOR_MAX_DISK_USAGE_GB": 1,
"LOG_AGGREGATOR_ACTION_QUEUE_SIZE": 131072,
"LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB": 1,
"LOG_AGGREGATOR_MAX_DISK_USAGE_PATH": "/var/lib/awx",
"LOG_AGGREGATOR_RSYSLOGD_DEBUG": false,
@@ -548,4 +548,4 @@
"adj_list": []
}
}
}
}

@@ -15,7 +15,7 @@
"LOG_AGGREGATOR_TCP_TIMEOUT": 5,
"LOG_AGGREGATOR_VERIFY_CERT": true,
"LOG_AGGREGATOR_LEVEL": "INFO",
"LOG_AGGREGATOR_MAX_DISK_USAGE_GB": 1,
"LOG_AGGREGATOR_ACTION_QUEUE_SIZE": 131072,
"LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB": 1,
"LOG_AGGREGATOR_MAX_DISK_USAGE_PATH": "/var/lib/awx",
"LOG_AGGREGATOR_RSYSLOGD_DEBUG": false,

@@ -8,6 +8,7 @@ action_groups:
- application
- bulk_job_launch
- bulk_host_create
- bulk_host_delete
- controller_meta
- credential_input_source
- credential

@@ -980,6 +980,15 @@ class ControllerAPIModule(ControllerModule):
def create_or_update_if_needed(
self, existing_item, new_item, endpoint=None, item_type='unknown', on_create=None, on_update=None, auto_exit=True, associations=None
):
# Remove boolean parameters that the caller never supplied
# this is needed so that boolean fields will not get a False value when not provided
for key in list(new_item.keys()):
if key in self.argument_spec:
param_spec = self.argument_spec[key]
if 'type' in param_spec and param_spec['type'] == 'bool':
if new_item[key] is None:
new_item.pop(key)

if existing_item:
return self.update_if_needed(existing_item, new_item, on_update=on_update, auto_exit=auto_exit, associations=associations)
else:

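The boolean-stripping loop in isolation (a sketch with a hypothetical argument_spec): a bool the caller never set arrives as None and must be dropped, or the API would treat it as an explicit false:

argument_spec = {'enabled': {'type': 'bool'}, 'description': {'type': 'str'}}
new_item = {'enabled': None, 'description': None}

for key in list(new_item.keys()):
    param_spec = argument_spec.get(key, {})
    if param_spec.get('type') == 'bool' and new_item[key] is None:
        new_item.pop(key)

assert new_item == {'description': None}  # only unset booleans are removed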
@@ -118,6 +118,7 @@ status:
'''

from ..module_utils.controller_api import ControllerAPIModule
import json


def main():
@@ -161,7 +162,11 @@ def main():
}
for arg in ['job_type', 'limit', 'forks', 'verbosity', 'extra_vars', 'become_enabled', 'diff_mode']:
if module.params.get(arg):
post_data[arg] = module.params.get(arg)
# extra_vars can receive a dict or a string; if it is a dict, convert it to a string
if arg == 'extra_vars' and type(module.params.get(arg)) is not str:
post_data[arg] = json.dumps(module.params.get(arg))
else:
post_data[arg] = module.params.get(arg)

# Attempt to look up the related items the user specified (these will fail the module if not found)
post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)

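The extra_vars branch reduces to one rule: the launch endpoint wants a string, while the module accepts either a dict or an already-encoded string. A sketch:

import json

def encode_extra_vars(value):
    # Pass strings through untouched; JSON-encode anything else (normally a dict).
    return value if isinstance(value, str) else json.dumps(value)

assert encode_extra_vars('{"a": 1}') == '{"a": 1}'
assert encode_extra_vars({'a': 1}) == '{"a": 1}'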
awx_collection/plugins/modules/bulk_host_delete.py (new file, 65 lines)
@@ -0,0 +1,65 @@
#!/usr/bin/python
# coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}

DOCUMENTATION = '''
---
module: bulk_host_delete
author: "Avi Layani (@Avilir)"
short_description: Bulk host delete in Automation Platform Controller
description:
- Single-request bulk host deletion in Automation Platform Controller.
- Provides a way to delete many hosts at once from inventories in Controller.
options:
hosts:
description:
- List of host IDs to delete from the inventory.
required: True
type: list
elements: int
extends_documentation_fragment: awx.awx.auth
'''

EXAMPLES = '''
- name: Bulk host delete
bulk_host_delete:
hosts:
- 1
- 2
'''

from ..module_utils.controller_api import ControllerAPIModule


def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
hosts=dict(required=True, type='list', elements='int'),
)

# Create a module for ourselves
module = ControllerAPIModule(argument_spec=argument_spec)

# Extract our parameters
hosts = module.params.get('hosts')

# Delete the hosts
result = module.post_endpoint("bulk/host_delete", data={"hosts": hosts})

if result['status_code'] != 201:
module.fail_json(msg="Failed to delete hosts, see response for details", response=result)

module.json_output['changed'] = True

module.exit_json(**module.json_output)


if __name__ == '__main__':
main()
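Outside Ansible, the same operation is a single POST against the endpoint this module wraps (mapped to /api/v2/bulk/host_delete/ later in this diff). A hedged sketch with the requests library; the controller URL and token are placeholders:

import requests

resp = requests.post(
    'https://awx.example.com/api/v2/bulk/host_delete/',  # placeholder controller URL
    headers={'Authorization': 'Bearer YOUR_OAUTH_TOKEN'},  # placeholder token
    json={'hosts': [1, 2]},
)
# The module treats anything other than 201 as failure.
assert resp.status_code == 201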
@@ -58,6 +58,7 @@ options:
Insights, Machine, Microsoft Azure Key Vault, Microsoft Azure Resource Manager, Network, OpenShift or Kubernetes API
Bearer Token, OpenStack, Red Hat Ansible Automation Platform, Red Hat Satellite 6, Red Hat Virtualization, Source Control,
Thycotic DevOps Secrets Vault, Thycotic Secret Server, Vault, VMware vCenter, or a custom credential type
required: True
type: str
inputs:
description:
@@ -214,7 +215,7 @@ def main():
copy_from=dict(),
description=dict(),
organization=dict(),
credential_type=dict(),
credential_type=dict(required=True),
inputs=dict(type='dict', no_log=True),
update_secrets=dict(type='bool', default=True, no_log=False),
user=dict(),

@@ -115,7 +115,7 @@ EXAMPLES = '''
- name: Export a job template named "My Template" and all Credentials
export:
job_templates: "My Template"
credential: 'all'
credentials: 'all'

- name: Export a list of inventories
export:

@@ -18,30 +18,55 @@ import pytest
from ansible.module_utils.six import raise_from

from awx.main.tests.functional.conftest import _request
from awx.main.models import Organization, Project, Inventory, JobTemplate, Credential, CredentialType, ExecutionEnvironment, UnifiedJob
from awx.main.tests.functional.conftest import credentialtype_scm, credentialtype_ssh  # noqa: F401; pylint: disable=unused-variable
from awx.main.models import (
Organization,
Project,
Inventory,
JobTemplate,
Credential,
CredentialType,
ExecutionEnvironment,
UnifiedJob,
WorkflowJobTemplate,
NotificationTemplate,
Schedule,
)

from django.db import transaction

try:
import tower_cli  # noqa

HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False

try:
# Because awxkit is a directory at the root of this repo and we are using python3, import awxkit will work even if it is not installed.
# However, awxkit will not contain api, which causes a stack failure down on line 170 when we try to mock it.
# So here we are importing awxkit.api to prevent that. Then you only get an error on tests for awxkit functionality.
import awxkit.api  # noqa

HAS_AWX_KIT = True
except ImportError:
HAS_AWX_KIT = False

HAS_TOWER_CLI = False
HAS_AWX_KIT = False
logger = logging.getLogger('awx.main.tests')


@pytest.fixture(autouse=True)
def awxkit_path_set(monkeypatch):
"""Monkey patch sys.path, insert awxkit source code so that
the package does not need to be installed.
"""
base_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'awxkit'))
monkeypatch.syspath_prepend(base_folder)


@pytest.fixture(autouse=True)
def import_awxkit():
global HAS_TOWER_CLI
global HAS_AWX_KIT
try:
import tower_cli  # noqa
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False

try:
import awxkit  # noqa
HAS_AWX_KIT = True
except ImportError:
HAS_AWX_KIT = False

def sanitize_dict(din):
"""Sanitize Django response data to purge it of internal types
so it may be used to cast a requests response object
@@ -123,7 +148,7 @@ def run_module(request, collection_import):
sanitize_dict(py_data)
resp._content = bytes(json.dumps(django_response.data), encoding='utf8')
resp.status_code = django_response.status_code
resp.headers = {'X-API-Product-Name': 'AWX', 'X-API-Product-Version': '0.0.1-devel'}
resp.headers = dict(django_response.headers)

if request.config.getoption('verbose') > 0:
logger.info('%s %s by %s, code:%s', method, '/api/' + url.split('/api/')[1], request_user.username, resp.status_code)
@@ -236,10 +261,8 @@ def job_template(project, inventory):
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def machine_credential(organization):
|
||||
ssh_type = CredentialType.defaults['ssh']()
|
||||
ssh_type.save()
|
||||
return Credential.objects.create(credential_type=ssh_type, name='machine-cred', inputs={'username': 'test_user', 'password': 'pas4word'})
|
||||
def machine_credential(credentialtype_ssh, organization): # noqa: F811
|
||||
return Credential.objects.create(credential_type=credentialtype_ssh, name='machine-cred', inputs={'username': 'test_user', 'password': 'pas4word'})
|
||||
|
||||
|
||||
@pytest.fixture
@@ -253,9 +276,7 @@ def vault_credential(organization):
def kube_credential():
    ct = CredentialType.defaults['kubernetes_bearer_token']()
    ct.save()
    return Credential.objects.create(
        credential_type=ct, name='kube-cred', inputs={'host': 'my.cluster', 'bearer_token': 'my-token', 'verify_ssl': False}
    )
    return Credential.objects.create(credential_type=ct, name='kube-cred', inputs={'host': 'my.cluster', 'bearer_token': 'my-token', 'verify_ssl': False})

@pytest.fixture
@@ -288,3 +309,42 @@ def mock_has_unpartitioned_events():
    # We mock this out to circumvent the migration query.
    with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture:
        yield _fixture


@pytest.fixture
def workflow_job_template(organization, inventory):
    return WorkflowJobTemplate.objects.create(name='test-workflow_job_template', organization=organization, inventory=inventory)


@pytest.fixture
def notification_template(organization):
    return NotificationTemplate.objects.create(
        name='test-notification_template',
        organization=organization,
        notification_type="webhook",
        notification_configuration=dict(
            url="http://localhost",
            username="",
            password="",
            headers={
                "Test": "Header",
            },
        ),
    )


@pytest.fixture
def scm_credential(credentialtype_scm, organization):  # noqa: F811
    return Credential.objects.create(
        credential_type=credentialtype_scm, name='scm-cred', inputs={'username': 'optimus', 'password': 'prime'}, organization=organization
    )


@pytest.fixture
def rrule():
    return 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1'


@pytest.fixture
def schedule(job_template, rrule):
    return Schedule.objects.create(unified_job_template=job_template, name='test-sched', rrule=rrule)
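The rrule fixture uses the RFC 5545 recurrence syntax that AWX schedules store: a DTSTART property plus an RRULE property, space-separated. As a quick sketch of what that string expands to, assuming python-dateutil for parsing (rrulestr wants one property per line, so the space is turned back into a newline):

from dateutil.rrule import rrulestr

spec = 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1'
rule = rrulestr(spec.replace(' ', '\n'))  # rrulestr expects one property per line
print(list(rule))  # one occurrence: 2015-11-17 05:00:00+00:00, because COUNT=1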
@@ -45,3 +45,28 @@ def test_bulk_host_create(run_module, admin_user, inventory):
    resp_hosts = inventory.hosts.all().values_list('name', flat=True)
    for h in hosts:
        assert h['name'] in resp_hosts


@pytest.mark.django_db
def test_bulk_host_delete(run_module, admin_user, inventory):
    hosts = [dict(name="127.0.0.1"), dict(name="foo.dns.org")]
    result = run_module(
        'bulk_host_create',
        {
            'inventory': inventory.name,
            'hosts': hosts,
        },
        admin_user,
    )
    assert not result.get('failed', False), result.get('msg', result)
    assert result.get('changed'), result
    resp_hosts_ids = list(inventory.hosts.all().values_list('id', flat=True))
    result = run_module(
        'bulk_host_delete',
        {
            'hosts': resp_hosts_ids,
        },
        admin_user,
    )
    assert not result.get('failed', False), result.get('msg', result)
    assert result.get('changed'), result

@@ -50,6 +50,7 @@ no_endpoint_for_module = [
extra_endpoints = {
    'bulk_job_launch': '/api/v2/bulk/job_launch/',
    'bulk_host_create': '/api/v2/bulk/host_create/',
    'bulk_host_delete': '/api/v2/bulk/host_delete/',
}

# Global module parameters we can ignore
154
awx_collection/test/awx/test_export.py
Normal file
@@ -0,0 +1,154 @@
from __future__ import absolute_import, division, print_function

__metaclass__ = type

import pytest

from awx.main.models.execution_environments import ExecutionEnvironment
from awx.main.models.jobs import JobTemplate

from awx.main.tests.functional.conftest import user, system_auditor  # noqa: F401; pylint: disable=unused-import


ASSETS = set([
    "users",
    "organizations",
    "teams",
    "credential_types",
    "credentials",
    "notification_templates",
    "projects",
    "inventory",
    "inventory_sources",
    "job_templates",
    "workflow_job_templates",
    "execution_environments",
    "applications",
    "schedules",
])


@pytest.fixture
def job_template(project, inventory, organization, machine_credential):
    jt = JobTemplate.objects.create(name='test-jt', project=project, inventory=inventory, organization=organization, playbook='helloworld.yml')
    jt.credentials.add(machine_credential)
    jt.save()
    return jt


@pytest.fixture
def execution_environment(organization):
    return ExecutionEnvironment.objects.create(name="test-ee", description="test-ee", managed=False, organization=organization)


def find_by(result, name, key, value):
    for c in result[name]:
        if c[key] == value:
            return c
    values = [c.get(key, None) for c in result[name]]
    raise ValueError(f"Failed to find assets['{name}'][{key}] = '{value}'; valid values are {values}")
@pytest.mark.django_db
def test_export(run_module, admin_user):
    """
    There should be nothing to export EXCEPT the admin user.
    """
    result = run_module('export', dict(all=True), admin_user)
    assert not result.get('failed', False), result.get('msg', result)
    assets = result['assets']

    assert set(result['assets'].keys()) == ASSETS

    u = find_by(assets, 'users', 'username', 'admin')
    assert u['is_superuser'] is True

    all_assets_except_users = {k: v for k, v in assets.items() if k != 'users'}

    for k, v in all_assets_except_users.items():
        assert v == [], f"Expected resource {k} to be empty. Instead it is {v}"

@pytest.mark.django_db
def test_export_simple(
    run_module,
    organization,
    project,
    inventory,
    job_template,
    scm_credential,
    machine_credential,
    workflow_job_template,
    execution_environment,
    notification_template,
    rrule,
    schedule,
    admin_user,
):
    """
    TODO: Ensure there aren't _more_ results in each resource than we expect
    """
    result = run_module('export', dict(all=True), admin_user)
    assert not result.get('failed', False), result.get('msg', result)
    assets = result['assets']

    u = find_by(assets, 'users', 'username', 'admin')
    assert u['is_superuser'] is True

    find_by(assets, 'organizations', 'name', 'Default')

    r = find_by(assets, 'credentials', 'name', 'scm-cred')
    assert r['credential_type']['kind'] == 'scm'
    assert r['credential_type']['name'] == 'Source Control'

    r = find_by(assets, 'credentials', 'name', 'machine-cred')
    assert r['credential_type']['kind'] == 'ssh'
    assert r['credential_type']['name'] == 'Machine'

    r = find_by(assets, 'job_templates', 'name', 'test-jt')
    assert r['natural_key']['organization']['name'] == 'Default'
    assert r['inventory']['name'] == 'test-inv'
    assert r['project']['name'] == 'test-proj'

    find_by(r['related'], 'credentials', 'name', 'machine-cred')

    r = find_by(assets, 'inventory', 'name', 'test-inv')
    assert r['organization']['name'] == 'Default'

    r = find_by(assets, 'projects', 'name', 'test-proj')
    assert r['organization']['name'] == 'Default'

    r = find_by(assets, 'workflow_job_templates', 'name', 'test-workflow_job_template')
    assert r['natural_key']['organization']['name'] == 'Default'
    assert r['inventory']['name'] == 'test-inv'

    r = find_by(assets, 'execution_environments', 'name', 'test-ee')
    assert r['organization']['name'] == 'Default'

    r = find_by(assets, 'schedules', 'name', 'test-sched')
    assert r['rrule'] == rrule

    r = find_by(assets, 'notification_templates', 'name', 'test-notification_template')
    assert r['organization']['name'] == 'Default'
    assert r['notification_configuration']['url'] == 'http://localhost'

@pytest.mark.django_db
def test_export_system_auditor(run_module, schedule, system_auditor):  # noqa: F811
    """
    This test illustrates the deficiency of export when run as a non-root user (i.e. system auditor).
    The OPTIONS endpoint does NOT return POST for a system auditor. This is bad for the export code
    because it relies on crawling the OPTIONS POST response to determine the fields to export.
    """
    result = run_module('export', dict(all=True), system_auditor)
    assert result.get('failed', False), result.get('msg', result)

    assert 'Failed to export assets substring not found' in result['msg'], (
        'If you found this error then you have probably fixed a feature! The export code attempts to ascertain the POST fields from the `description` field,'
        ' but both the API side and the client inference code are lacking.'
    )

    # r = result['assets']['schedules'][0]
    # assert r['natural_key']['name'] == 'test-sched'

    # assert 'rrule' not in r, 'If you found this error then you have probably fixed a feature! We WANT rrule to be found in the export schedule payload.'
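The crawl the docstring describes can be pictured with a short sketch (illustrative names only, not awxkit's actual API): a DRF-style OPTIONS response advertises writable fields under actions.POST, and that key is simply absent for read-only roles such as the system auditor.

import requests

def exportable_fields(session: requests.Session, options_url: str) -> list:
    """Return writable field names advertised by a DRF-style OPTIONS response."""
    opts = session.options(options_url).json()
    post_fields = opts.get('actions', {}).get('POST')
    if post_fields is None:
        # Read-only roles get no POST section -- the failure mode tested above.
        raise RuntimeError('OPTIONS response has no POST action; cannot infer fields')
    return sorted(post_fields)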
@@ -72,6 +72,21 @@
      - "result is changed"
      - "result.status == 'successful'"

- name: Launch an Ad Hoc Command with extra_vars
  ad_hoc_command:
    inventory: "Demo Inventory"
    credential: "{{ ssh_cred_name }}"
    module_name: "ping"
    extra_vars:
      var1: "test var"
    wait: true
  register: result

- assert:
    that:
      - "result is changed"
      - "result.status == 'successful'"

- name: Launch an Ad Hoc Command with Execution Environment specified
  ad_hoc_command:
    inventory: "Demo Inventory"

@@ -0,0 +1,80 @@
---
- name: "Generate a random string for test"
  set_fact:
    test_id: "{{ lookup('password', '/dev/null chars=ascii_letters length=16') }}"
  when: "test_id is not defined"

- name: "Generate a unique name"
  set_fact:
    bulk_inv_name: "AWX-Collection-tests-bulk_host_create-{{ test_id }}"

- name: "Get our collection package"
  controller_meta:
  register: "controller_meta"

- name: "Generate the name of our plugin"
  set_fact:
    plugin_name: "{{ controller_meta.prefix }}.controller_api"

- name: "Create an inventory"
  inventory:
    name: "{{ bulk_inv_name }}"
    organization: "Default"
    state: "present"
  register: "inventory_result"

- name: "Bulk Host Create"
  bulk_host_create:
    hosts:
      - name: "123.456.789.123"
        description: "myhost1"
        variables:
          food: "carrot"
          color: "orange"
      - name: "example.dns.gg"
        description: "myhost2"
        enabled: "false"
    inventory: "{{ bulk_inv_name }}"
  register: "result"

- assert:
    that:
      - "result is not failed"

- name: "Get our collection package"
  controller_meta:
  register: "controller_meta"

- name: "Generate the name of our plugin"
  set_fact:
    plugin_name: "{{ controller_meta.prefix }}.controller_api"

- name: "Setting the inventory hosts endpoint"
  set_fact:
    endpoint: "inventories/{{ inventory_result.id }}/hosts/"

- name: "Get hosts information from inventory"
  set_fact:
    hosts_created: "{{ query(plugin_name, endpoint, return_objects=True) }}"
    host_id_list: []

- name: "Extract host IDs from hosts information"
  set_fact:
    host_id_list: "{{ host_id_list + [item.id] }}"
  loop: "{{ hosts_created }}"

- name: "Bulk Host Delete"
  bulk_host_delete:
    hosts: "{{ host_id_list }}"
  register: "result"

- assert:
    that:
      - "result is not failed"

# cleanup
- name: "Delete inventory"
  inventory:
    name: "{{ bulk_inv_name }}"
    organization: "Default"
    state: "absent"
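For orientation, the two modules above drive the bulk endpoints registered in extra_endpoints earlier in this diff. A rough sketch of the same flow over plain HTTP (the dev-environment URL and credentials are assumptions, and the shape of the create response is inferred rather than documented here):

import requests

BASE = 'https://localhost:8043'  # assumption: local AWX dev environment
AUTH = ('admin', 'password')     # assumption: dev credentials

created = requests.post(
    f'{BASE}/api/v2/bulk/host_create/',
    auth=AUTH,
    verify=False,
    json={'inventory': 1, 'hosts': [{'name': 'example.dns.gg', 'description': 'myhost2'}]},
).json()

# Inferred: the create response lists the new hosts with their IDs.
requests.post(
    f'{BASE}/api/v2/bulk/host_delete/',
    auth=AUTH,
    verify=False,
    json={'hosts': [h['id'] for h in created.get('hosts', [])]},
)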
@@ -60,7 +60,7 @@
      - result['job_info']['skip_tags'] == "skipbaz"
      - result['job_info']['limit'] == "localhost"
      - result['job_info']['job_tags'] == "Hello World"
      - result['job_info']['inventory'] == {{ inventory_id }}
      - result['job_info']['inventory'] == inventory_id | int
      - "result['job_info']['extra_vars'] == '{\"animal\": \"bear\", \"food\": \"carrot\"}'"

# cleanup

@@ -71,6 +71,19 @@
    that:
      - "result is changed"

- name: Delete a credential without credential_type
  credential:
    name: "{{ ssh_cred_name1 }}"
    organization: Default
    state: absent
  register: result
  ignore_errors: true

- assert:
    that:
      - "result is failed"


- name: Create an Org-specific credential with an ID with exists
  credential:
    name: "{{ ssh_cred_name1 }}"

@@ -42,6 +42,16 @@
    that:
      - "result is not changed"

- name: Modify the host as a no-op
  host:
    name: "{{ host_name }}"
    inventory: "{{ inv_name }}"
  register: result

- assert:
    that:
      - "result is not changed"

- name: Delete a Host
  host:
    name: "{{ host_name }}"
@@ -68,6 +78,15 @@
    that:
      - "result is changed"

- name: Use lookup to check that host was enabled
  ansible.builtin.set_fact:
    host_enabled_test: "{{ lookup('awx.awx.controller_api', 'hosts/' ~ result.id ~ '/').enabled }}"

- name: Newly created host should have API default value for enabled
  assert:
    that:
      - host_enabled_test

- name: Delete a Host
  host:
    name: "{{ result.id }}"

Some files were not shown because too many files have changed in this diff.