Mirror of https://github.com/ansible/awx.git

commit fc66bb1e42

Merge branch 'release_3.1.0' into stable

* release_3.1.0: (2069 commits)
  Updating changelog for 3.1 release
  Switch job_type to check from sync when detecting delete_on_update
  use Unicode apostrophes - not single quotes - for French i18n strings
  pin appdirs==1.4.2
  only cancel deps if we can cancel the inv update
  fixing module_name check and adding support for the debug module
  cancel jobs dependent on inv update
  update tests
  CSS tweaks to workflow results panels
  like inventory updates, check if project update deps already processed
  Revert "Merge pull request #5553 from chrismeyersfsu/fix-waiting_blocked"
  Add awx/ui/client/languages to .gitignore
  Delete awx/ui/client/languages/*.json
  refactor based on review
  Add missing permission check.
  Make current_groups a set to easily avoid duplicates, update asgi-amqp requirement
  avoid duplicated related search fields
  Fix workflow audit items
  fixing module name, json blob, and stdout-for-yum-module on host event
  just like we fail running tasks fail waiting tasks
  ...

.dockerignore | 2 (new file)
@@ -0,0 +1,2 @@
.git
awx/ui/node_modules

.gitignore | 15
@@ -3,7 +3,7 @@
.tags1

# Tower
awx/settings/local_settings.py*
awx/settings/local_*.py*
awx/*.sqlite3
awx/*.sqlite3_*
awx/job_status
@@ -22,6 +22,7 @@ tower/tower_warnings.log
celerybeat-schedule
awx/ui/static
awx/ui/build_test
awx/ui/client/languages

# Tower setup playbook testing
setup/test/roles/postgresql
@@ -31,6 +32,7 @@ setup/test/roles/postgresql
__pycache__
/build
/deb-build
/reprepro
/rpm-build
/tar-build
/setup-bundle-build
@@ -44,14 +46,15 @@ __pycache__
/bower.json
/package.json
/testem.yml
/coverage
**/coverage
/.istanbul.yml
node_modules/**
**/node_modules/**
/tmp
npm-debug.log

# UI build debugging
/DEBUG
# UI build flag files
awx/ui/.deps_built
awx/ui/.release_built

# Testing
.cache
@@ -104,8 +107,10 @@ reports
*.log.[0-9]
*.results
local/
*.mo

# AWX python libs populated by requirements.txt
awx/lib/.deps_built
awx/lib/site-packages
venv/*
use_dev_supervisor.txt

.jshintrc | 41 (deleted)
@@ -1,41 +0,0 @@
{
    "browser": true,
    "jquery": true,
    "esnext": true,
    "globalstrict": true,
    "curly": true,
    "immed": true,
    "latedef": "nofunc",
    "noarg": true,
    "nonew": true,
    "maxerr": 10000,
    "notypeof": true,
    "globals": {
        "beforeEach": false,
        "inject": false,
        "module": false,
        "angular":false,
        "alert":false,
        "$AnsibleConfig":true,
        "$basePath":true,
        "jsyaml":false,
        "_":false,
        "d3":false,
        "Donut3D":false,
        "nv":false,
        "it": false,
        "xit": false,
        "expect": false,
        "context": false,
        "describe": false,
        "moment": false
    },
    "strict": false,
    "quotmark": false,
    "trailing": true,
    "undef": true,
    "unused": true,
    "eqeqeq": true,
    "indent": 4,
    "newcap": false
}

.travis.yml | 31 (new file)
@@ -0,0 +1,31 @@
sudo: false
language: python
python:
- '2.7'
env:
- TOXENV=api-lint
- TOXENV=api
- TOXENV=ui-lint
- TOXENV=ui
install:
- pip install tox
script:
- tox
# after_success:
#   - TOXENV=coveralls tox
addons:
  apt:
    packages:
    - swig
    - libxmlsec1-dev
    - postgresql-9.5
    - libssl-dev
cache:
  pip: true
  directories:
  - node_modules
  - .tox
services:
- mongodb
# Enable when we stop using sqlite for API tests
# - postgresql

COPYING | 20
@@ -1,5 +1,19 @@
The Ansible Tower Software is a commercial software licensed to you pursuant to the Ansible Software Subscription and Services Agreement (“EULA”) located at www.ansible.com/subscription-agreement and an annual Order/Agreement with Ansible, Inc.
ANSIBLE TOWER BY RED HAT END USER LICENSE AGREEMENT

The Ansible Tower Software is free for use up to ten (10) Nodes, any additional Nodes shall be purchased.
This end user license agreement (“EULA”) governs the use of the Ansible Tower software and any related updates, upgrades, versions, appearance, structure and organization (the “Ansible Tower Software”), regardless of the delivery mechanism.

Ansible and Ansible Tower are registered Trademarks of Ansible, Inc.
1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. and its affiliates (“Red Hat”) grant to you (“You”) a non-transferable, non-exclusive, worldwide, non-sublicensable, limited, revocable license to use the Ansible Tower Software for the term of the associated Red Hat Software Subscription(s) and in a quantity equal to the number of Red Hat Software Subscriptions purchased from Red Hat for the Ansible Tower Software (“License”), each as set forth on the applicable Red Hat ordering document. You acquire only the right to use the Ansible Tower Software and do not acquire any rights of ownership. Red Hat reserves all rights to the Ansible Tower Software not expressly granted to You. This License grant pertains solely to Your use of the Ansible Tower Software and is not intended to limit Your rights under, or grant You rights that supersede, the license terms of any software packages which may be made available with the Ansible Tower Software that are subject to an open source software license.

2. Intellectual Property Rights. Title to the Ansible Tower Software and each component, copy and modification, including all derivative works whether made by Red Hat, You or on Red Hat's behalf, including those made at Your suggestion and all associated intellectual property rights, are and shall remain the sole and exclusive property of Red Hat and/or it licensors. The License does not authorize You (nor may You allow any third party, specifically non-employees of Yours) to: (a) copy, distribute, reproduce, use or allow third party access to the Ansible Tower Software except as expressly authorized hereunder; (b) decompile, disassemble, reverse engineer, translate, modify, convert or apply any procedure or process to the Ansible Tower Software in order to ascertain, derive, and/or appropriate for any reason or purpose, including the Ansible Tower Software source code or source listings or any trade secret information or process contained in the Ansible Tower Software (except as permitted under applicable law); (c) execute or incorporate other software (except for approved software as appears in the Ansible Tower Software documentation or specifically approved by Red Hat in writing) into Ansible Tower Software, or create a derivative work of any part of the Ansible Tower Software; (d) remove any trademarks, trade names or titles, copyrights legends or any other proprietary marking on the Ansible Tower Software; (e) disclose the results of any benchmarking of the Ansible Tower Software (whether or not obtained with Red Hat’s assistance) to any third party; (f) attempt to circumvent any user limits or other license, timing or use restrictions that are built into, defined or agreed upon, regarding the Ansible Tower Software. You are hereby notified that the Ansible Tower Software may contain time-out devices, counter devices, and/or other devices intended to ensure the limits of the License will not be exceeded (“Limiting Devices”). If the Ansible Tower Software contains Limiting Devices, Red Hat will provide You materials necessary to use the Ansible Tower Software to the extent permitted. You may not tamper with or otherwise take any action to defeat or circumvent a Limiting Device or other control measure, including but not limited to, resetting the unit amount or using false host identification number for the purpose of extending any term of the License.

3. Evaluation Licenses. Unless You have purchased Ansible Tower Software Subscriptions from Red Hat or an authorized reseller under the terms of a commercial agreement with Red Hat, all use of the Ansible Tower Software shall be limited to testing purposes and not for production use (“Evaluation”). Unless otherwise agreed by Red Hat, Evaluation of the Ansible Tower Software shall be limited to an evaluation environment and the Ansible Tower Software shall not be used to manage any systems or virtual machines on networks being used in the operation of Your business or any other non-evaluation purpose. Unless otherwise agreed by Red Hat, You shall limit all Evaluation use to a single 30 day evaluation period and shall not download or otherwise obtain additional copies of the Ansible Tower Software or license keys for Evaluation.

4. Limited Warranty. Except as specifically stated in this Section 4, to the maximum extent permitted under applicable law, the Ansible Tower Software and the components are provided and licensed “as is” without warranty of any kind, expressed or implied, including the implied warranties of merchantability, non-infringement or fitness for a particular purpose. Red Hat warrants solely to You that the media on which the Ansible Tower Software may be furnished will be free from defects in materials and manufacture under normal use for a period of thirty (30) days from the date of delivery to You. Red Hat does not warrant that the functions contained in the Ansible Tower Software will meet Your requirements or that the operation of the Ansible Tower Software will be entirely error free, appear precisely as described in the accompanying documentation, or comply with regulatory requirements.

5. Limitation of Remedies and Liability. To the maximum extent permitted by applicable law, Your exclusive remedy under this EULA is to return any defective media within thirty (30) days of delivery along with a copy of Your payment receipt and Red Hat, at its option, will replace it or refund the money paid by You for the media. To the maximum extent permitted under applicable law, neither Red Hat nor any Red Hat authorized distributor will be liable to You for any incidental or consequential damages, including lost profits or lost savings arising out of the use or inability to use the Ansible Tower Software or any component, even if Red Hat or the authorized distributor has been advised of the possibility of such damages. In no event shall Red Hat's liability or an authorized distributor’s liability exceed the amount that You paid to Red Hat for the Ansible Tower Software during the twelve months preceding the first event giving rise to liability.

6. Export Control. In accordance with the laws of the United States and other countries, You represent and warrant that You: (a) understand that the Ansible Tower Software and its components may be subject to export controls under the U.S. Commerce Department’s Export Administration Regulations (“EAR”); (b) are not located in any country listed in Country Group E:1 in Supplement No. 1 to part 740 of the EAR; (c) will not export, re-export, or transfer the Ansible Tower Software to any prohibited destination or to any end user who has been prohibited from participating in US export transactions by any federal agency of the US government; (d) will not use or transfer the Ansible Tower Software for use in connection with the design, development or production of nuclear, chemical or biological weapons, or rocket systems, space launch vehicles, or sounding rockets or unmanned air vehicle systems; (e) understand and agree that if you are in the United States and you export or transfer the Ansible Tower Software to eligible end users, you will, to the extent required by EAR Section 740.17 obtain a license for such export or transfer and will submit semi-annual reports to the Commerce Department’s Bureau of Industry and Security, which include the name and address (including country) of each transferee; and (f) understand that countries including the United States may restrict the import, use, or export of encryption products (which may include the Ansible Tower Software) and agree that you shall be solely responsible for compliance with any such import, use, or export restrictions.

7. General. If any provision of this EULA is held to be unenforceable, that shall not affect the enforceability of the remaining provisions. This agreement shall be governed by the laws of the State of New York and of the United States, without regard to any conflict of laws provisions. The rights and obligations of the parties to this EULA shall not be governed by the United Nations Convention on the International Sale of Goods.

Copyright © 2015 Red Hat, Inc. All rights reserved. "Red Hat" and “Ansible Tower” are registered trademarks of Red Hat, Inc. All other trademarks are the property of their respective owners.

MANIFEST.in
@@ -6,6 +6,7 @@ recursive-include awx/ui/templates *.html
recursive-include awx/ui/static *
recursive-include awx/playbooks *.yml
recursive-include awx/lib/site-packages *
recursive-include awx/plugins *.ps1
recursive-include requirements *.txt
recursive-include config *
recursive-include docs/licenses *
@@ -17,6 +18,7 @@ recursive-exclude awx/settings local_settings.py*
include tools/scripts/request_tower_configuration.sh
include tools/scripts/request_tower_configuration.ps1
include tools/scripts/ansible-tower-service
include tools/scripts/failure-event-handler
include tools/scripts/tower-python
include tools/sosreport/*
include COPYING

Makefile | 458
@@ -4,23 +4,21 @@ SITELIB=$(shell $(PYTHON) -c "from distutils.sysconfig import get_python_lib; pr
OFFICIAL ?= no
PACKER ?= packer
PACKER_BUILD_OPTS ?= -var 'official=$(OFFICIAL)' -var 'aw_repo_url=$(AW_REPO_URL)'
GRUNT ?= $(shell [ -t 0 ] && echo "grunt" || echo "grunt --no-color")
TESTEM ?= ./node_modules/.bin/testem
BROCCOLI_BIN ?= ./node_modules/.bin/broccoli
MOCHA_BIN ?= ./node_modules/.bin/_mocha
ISTANBUL_BIN ?= ./node_modules/.bin/istanbul
BROWSER_SYNC_BIN ?= ./node_modules/.bin/browser-sync
NODE ?= node
NPM_BIN ?= npm
DEPS_SCRIPT ?= packaging/bundle/deps.py
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)

VENV_BASE ?= /tower_devel/venv
GCLOUD_AUTH ?= $(shell gcloud auth print-access-token)
# NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH)

COMPOSE_HOST ?= $(shell hostname)

VENV_BASE ?= /venv
SCL_PREFIX ?=
CELERY_SCHEDULE_FILE ?= /celerybeat-schedule

CLIENT_TEST_DIR ?= build_test

# Python packages to install only from source (not from binary wheels)
# Comma separated list
SRC_ONLY_PKGS ?= cffi,pycparser
@@ -47,7 +45,7 @@ ifeq ($(OFFICIAL),yes)
AW_REPO_URL ?= http://releases.ansible.com/ansible-tower
else
RELEASE ?= $(BUILD)
AW_REPO_URL ?= http://jenkins.testing.ansible.com/ansible-tower_nightlies_RTYUIOPOIUYTYU/$(GIT_BRANCH)
AW_REPO_URL ?= http://jenkins.testing.ansible.com/ansible-tower_nightlies_f8b8c5588b2505970227a7b0900ef69040ad5a00/$(GIT_BRANCH)
endif

# Allow AMI license customization
@@ -81,7 +79,7 @@ SETUP_TAR_CHECKSUM=$(NAME)-setup-CHECKSUM

# DEB build parameters
DEBUILD_BIN ?= debuild
DEBUILD_OPTS = --source-option="-I"
DEBUILD_OPTS =
DPUT_BIN ?= dput
DPUT_OPTS ?= -c .dput.cf -u
REPREPRO_BIN ?= reprepro
@@ -170,20 +168,22 @@ ifeq ($(DISTRO),ubuntu)
SETUP_INSTALL_ARGS += --install-layout=deb
endif

# UI flag files
UI_DEPS_FLAG_FILE = awx/ui/.deps_built
UI_RELEASE_FLAG_FILE = awx/ui/.release_built

.DEFAULT_GOAL := build

.PHONY: clean clean-tmp clean-venv rebase push requirements requirements_dev \
	requirements_jenkins \
	develop refresh adduser migrate dbchange dbshell runserver celeryd \
	receiver test test_unit test_coverage coverage_html test_jenkins dev_build \
	release_build release_clean sdist rpmtar mock-rpm mock-srpm rpm-sign \
	build-ui sync-ui test-ui build-ui-for-coverage test-ui-for-coverage \
	build-ui-for-browser-tests test-ui-debug jshint ngdocs \
	websocket-proxy browser-sync browser-sync-reload brocolli-watcher \
	devjs minjs testjs_ci \
	deb deb-src debian debsign pbuilder reprepro setup_tarball \
	vagrant-virtualbox virtualbox-centos-7 virtualbox-centos-6 \
	vagrant-vmware clean-bundle setup_bundle_tarball
	virtualbox-ovf virtualbox-centos-7 virtualbox-centos-6 \
	clean-bundle setup_bundle_tarball \
	ui-docker-machine ui-docker ui-release ui-devel \
	ui-test ui-deps ui-test-ci ui-test-saucelabs jlaska


# Remove setup build files
clean-tar:
@@ -197,11 +197,6 @@ clean-rpm:
clean-deb:
	rm -rf deb-build reprepro

# Remove grunt build files
clean-grunt:
	rm -f package.json Gruntfile.js Brocfile.js bower.json
	rm -rf node_modules

# Remove packer artifacts
clean-packer:
	rm -rf packer_cache
@@ -218,13 +213,10 @@ clean-bundle:

# remove ui build artifacts
clean-ui:
	rm -rf DEBUG

clean-static:
	rm -rf awx/ui/static/

clean-build-test:
	rm -rf awx/ui/build_test/
	rm -rf awx/ui/node_modules/
	rm -f $(UI_DEPS_FLAG_FILE)
	rm -f $(UI_RELEASE_FLAG_FILE)

clean-tmp:
	rm -rf tmp/
@@ -233,18 +225,23 @@ clean-venv:
	rm -rf venv/

# Remove temporary build files, compiled Python files.
clean: clean-rpm clean-deb clean-grunt clean-ui clean-static clean-build-test clean-tar clean-packer clean-bundle clean-venv
clean: clean-rpm clean-deb clean-ui clean-tar clean-packer clean-bundle
	rm -rf awx/public
	rm -rf awx/lib/site-packages
	rm -rf awx/lib/.deps_built
	rm -rf dist/*
	rm -rf awx/job_status
	rm -rf awx/job_output
	rm -rf reports
	rm -f awx/awx_test.sqlite3
	rm -rf tmp
	mkdir tmp
	rm -rf build $(NAME)-$(VERSION) *.egg-info
	find . -type f -regex ".*\.py[co]$$" -delete
	find . -type d -name "__pycache__" -delete

# convenience target to assert environment variables are defined
guard-%:
	@if [ "${${*}}" == "" ]; then \
	@if [ "$${$*}" = "" ]; then \
	    echo "The required environment variable '$*' is not set"; \
	    exit 1; \
	fi
@@ -267,7 +264,7 @@ virtualenv_ansible:
	if [ ! -d "$(VENV_BASE)/ansible" ]; then \
	    virtualenv --system-site-packages --setuptools $(VENV_BASE)/ansible && \
	    $(VENV_BASE)/ansible/bin/pip install -I setuptools==23.0.0 && \
	    $(VENV_BASE)/ansible/bin/pip install -I pip==8.1.1; \
	    $(VENV_BASE)/ansible/bin/pip install -I pip==8.1.2; \
	fi; \
	fi

@@ -279,48 +276,43 @@ virtualenv_tower:
	if [ ! -d "$(VENV_BASE)/tower" ]; then \
	    virtualenv --system-site-packages --setuptools $(VENV_BASE)/tower && \
	    $(VENV_BASE)/tower/bin/pip install -I setuptools==23.0.0 && \
	    $(VENV_BASE)/tower/bin/pip install -I pip==8.1.1; \
	    $(VENV_BASE)/tower/bin/pip install -I pip==8.1.2; \
	fi; \
	fi

requirements_ansible: virtualenv_ansible
	if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/ansible/bin/activate; \
	    $(VENV_BASE)/ansible/bin/pip install --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ;\
	    $(VENV_BASE)/ansible/bin/pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ;\
	    $(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt; \
	else \
	    pip install --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ; \
	    pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ; \
	    pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt; \
	fi

# Install third-party requirements needed for Tower's environment.
requirements_tower: virtualenv_tower
	if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	    $(VENV_BASE)/tower/bin/pip install --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ;\
	    $(VENV_BASE)/tower/bin/pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ;\
	    $(VENV_BASE)/tower/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt; \
	else \
	    pip install --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ; \
	    pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ; \
	    pip uninstall --yes -r requirements/requirements_tower_uninstall.txt; \
	fi

requirements_tower_dev:
	if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	    $(VENV_BASE)/tower/bin/pip install -r requirements/requirements_dev.txt; \
	    $(VENV_BASE)/tower/bin/pip uninstall --yes -r requirements/requirements_dev_uninstall.txt; \
	fi

# Install third-party requirements needed for running unittests in jenkins
requirements_jenkins:
	if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	    $(VENV_BASE)/tower/bin/pip install -Ir requirements/requirements_jenkins.txt; \
	else \
	    pip install -Ir requirements/requirements_jenkins..txt; \
	fi && \
	$(NPM_BIN) install csslint jshint

requirements: requirements_ansible requirements_tower

requirements_dev: requirements requirements_tower_dev

requirements_test: requirements requirements_jenkins
requirements_test: requirements

# "Install" ansible-tower package in development mode.
develop:
@@ -328,8 +320,8 @@ develop:
	    pip uninstall -y awx; \
	    $(PYTHON) setup.py develop; \
	else \
	    sudo pip uninstall -y awx; \
	    sudo $(PYTHON) setup.py develop; \
	    pip uninstall -y awx; \
	    $(PYTHON) setup.py develop; \
	fi

version_file:
@@ -341,7 +333,7 @@ init:
	if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	tower-manage register_instance --primary --hostname=127.0.0.1; \
	tower-manage register_instance --hostname=$(COMPOSE_HOST); \

# Refresh development environment after pulling new code.
refresh: clean requirements_dev version_file develop migrate
@@ -366,15 +358,17 @@ dbshell:
	sudo -u postgres psql -d awx-dev

server_noattach:
	tmux new-session -d -s tower 'exec make runserver'
	tmux new-session -d -s tower 'exec make uwsgi'
	tmux rename-window 'Tower'
	tmux select-window -t tower:0
	tmux split-window -v 'exec make celeryd'
	tmux split-window -h 'exec make taskmanager'
	tmux new-window 'exec make receiver'
	tmux new-window 'exec make daphne'
	tmux select-window -t tower:1
	tmux rename-window 'WebSockets'
	tmux split-window -h 'exec make runworker'
	tmux new-window 'exec make receiver'
	tmux select-window -t tower:2
	tmux rename-window 'Extra Services'
	tmux split-window -v 'exec make socketservice'
	tmux split-window -h 'exec make factcacher'

server: server_noattach
@@ -384,6 +378,12 @@ server: server_noattach
servercc: server_noattach
	tmux -2 -CC attach-session -t tower

supervisor:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	supervisord --configuration /supervisor.conf --pidfile=/tmp/supervisor_pid

# Alternate approach to tmux to run all development tasks specified in
# Procfile. https://youtu.be/OPMgaibszjk
honcho:
@@ -392,6 +392,36 @@ honcho:
	fi; \
	honcho start

flower:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	$(PYTHON) manage.py celery flower --address=0.0.0.0 --port=5555 --broker=amqp://guest:guest@$(RABBITMQ_HOST):5672//

collectstatic:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	mkdir -p awx/public/static && $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1

uwsgi: collectstatic
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	uwsgi -b 32768 --socket :8050 --module=awx.wsgi:application --home=/venv/tower --chdir=/tower_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --master-fifo=/awxfifo --lazy-apps

daphne:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	daphne -b 0.0.0.0 -p 8051 awx.asgi:channel_layer

runworker:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	$(PYTHON) manage.py runworker --only-channels websocket.*

# Run the built-in development webserver (by default on http://localhost:8013).
runserver:
	@if [ "$(VENV_BASE)" ]; then \
@@ -404,7 +434,8 @@ celeryd:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	$(PYTHON) manage.py celeryd -l DEBUG -B --autoscale=20,2 -Ofair --schedule=$(CELERY_SCHEDULE_FILE)
	$(PYTHON) manage.py celeryd -l DEBUG -B --autoreload --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default,scheduler,broadcast_all,$(COMPOSE_HOST) -n celery@$(COMPOSE_HOST)
	#$(PYTHON) manage.py celery multi show projects jobs default -l DEBUG -Q:projects projects -Q:jobs jobs -Q:default default -c:projects 1 -c:jobs 3 -c:default 3 -Ofair -B --schedule=$(CELERY_SCHEDULE_FILE)

# Run to start the zeromq callback receiver
receiver:
@@ -413,12 +444,6 @@ receiver:
	fi; \
	$(PYTHON) manage.py run_callback_receiver

taskmanager:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	$(PYTHON) manage.py run_task_system

socketservice:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
@@ -431,6 +456,9 @@ factcacher:
	fi; \
	$(PYTHON) manage.py run_fact_cache_receiver

nginx:
	nginx -g "daemon off;"

reports:
	mkdir -p $@

@@ -438,7 +466,10 @@ pep8: reports
	@(set -o pipefail && $@ | tee reports/$@.report)

flake8: reports
	@$@ --output-file=reports/$@.report
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	(set -o pipefail && $@ | tee reports/$@.report)

pyflakes: reports
	@(set -o pipefail && $@ | tee reports/$@.report)
@@ -448,7 +479,7 @@ pylint: reports

check: flake8 pep8 # pyflakes pylint

TEST_DIRS=awx/main/tests
TEST_DIRS ?= awx/main/tests awx/conf/tests awx/sso/tests
# Run all API unit tests.
test:
	@if [ "$(VENV_BASE)" ]; then \
@@ -460,7 +491,7 @@ test_unit:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	py.test awx/main/tests/unit
	py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit

# Run all API unit tests with coverage enabled.
test_coverage:
@@ -481,173 +512,107 @@ test_tox:
# Alias existing make target so old versions run against Jekins the same way
test_jenkins : test_coverage

# Make fake data
DATA_GEN_PRESET = ""
bulk_data:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	$(PYTHON) tools/data_generators/rbac_dummy_data_generator.py --preset=$(DATA_GEN_PRESET)

# l10n TASKS
# --------------------------------------

# check for UI po files
HAVE_PO := $(shell ls awx/ui/po/*.po 2>/dev/null)
check-po:
ifdef HAVE_PO
	# Should be 'Language: zh-CN' but not 'Language: zh_CN' in zh_CN.po
	for po in awx/ui/po/*.po ; do \
	    echo $$po; \
	    mo="awx/ui/po/`basename $$po .po`.mo"; \
	    msgfmt --check --verbose $$po -o $$mo; \
	    if test "$$?" -ne 0 ; then \
	        exit -1; \
	    fi; \
	    rm $$mo; \
	    name=`echo "$$po" | grep '-'`; \
	    if test "x$$name" != x ; then \
	        right_name=`echo $$language | sed -e 's/-/_/'`; \
	        echo "ERROR: WRONG $$name CORRECTION: $$right_name"; \
	        exit -1; \
	    fi; \
	    language=`grep '^"Language:' "$$po" | grep '_'`; \
	    if test "x$$language" != x ; then \
	        right_language=`echo $$language | sed -e 's/_/-/'`; \
	        echo "ERROR: WRONG $$language CORRECTION: $$right_language in $$po"; \
	        exit -1; \
	    fi; \
	done;
else
	@echo No PO files
endif

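Note: the check-po target above enforces that a po file named with an underscore (zh_CN.po) declares its header language with a hyphen ('Language: zh-CN'). A minimal Python sketch of the same naming rule; the path is illustrative only:

    # Sketch of the check-po naming rule (illustrative, not part of the Makefile).
    def expected_header_language(po_path):
        # 'awx/ui/po/zh_CN.po' -> 'zh_CN' -> 'zh-CN'
        basename = po_path.rsplit('/', 1)[-1]
        locale = basename[:-len('.po')]
        return locale.replace('_', '-')

    assert expected_header_language('awx/ui/po/zh_CN.po') == 'zh-CN'
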
# generate UI .pot
pot: $(UI_DEPS_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui run pot

# generate django .pot .po
LANG = "en-us"
messages:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/tower/bin/activate; \
	fi; \
	$(PYTHON) manage.py makemessages -l $(LANG) --keep-pot

# generate l10n .json .mo
languages: $(UI_DEPS_FLAG_FILE) check-po
	$(NPM_BIN) --prefix awx/ui run languages
	$(PYTHON) tools/scripts/compilemessages.py

# End l10n TASKS
# --------------------------------------

# UI TASKS
# --------------------------------------

# begin targets that pull ui files from packaging to the root of the app
Gruntfile.js: packaging/node/Gruntfile.js
	cp $< $@
ui-deps: $(UI_DEPS_FLAG_FILE)

Brocfile.js: packaging/node/Brocfile.js
	cp $< $@
$(UI_DEPS_FLAG_FILE):
	$(NPM_BIN) --unsafe-perm --prefix awx/ui install awx/ui
	touch $(UI_DEPS_FLAG_FILE)

bower.json: packaging/node/bower.json
	cp $< $@
ui-docker-machine: $(UI_DEPS_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui run ui-docker-machine -- $(MAKEFLAGS)

package.json: packaging/node/package.template
	sed -e 's#%NAME%#$(NAME)#;s#%VERSION%#$(VERSION)#;s#%GIT_REMOTE_URL%#$(GIT_REMOTE_URL)#;' $< > $@
# Native docker. Builds UI and raises BrowserSync & filesystem polling.
ui-docker: $(UI_DEPS_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui run ui-docker -- $(MAKEFLAGS)

testem.yml: packaging/node/testem.yml
	cp $< $@
# Builds UI with development UI without raising browser-sync or filesystem polling.
ui-devel: $(UI_DEPS_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui run build-devel -- $(MAKEFLAGS)

.istanbul.yml: packaging/node/.istanbul.yml
	cp $< $@
# end targets that pull ui files from packaging to the root of the app
ui-release: $(UI_RELEASE_FLAG_FILE)

# update package.json and install npm dependencies
node_modules: package.json
	$(NPM_BIN) install
	touch $@
$(UI_RELEASE_FLAG_FILE): languages $(UI_DEPS_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui run build-release
	touch $(UI_RELEASE_FLAG_FILE)

# helper tasks to run broccoli build process at awx/ui/<destination_dir>,
# to build the ui, use the build-ui target instead:
# UI_FLAGS=<flags as seen in Brocfile.js and
# packaging/node/tower-app.js>: additional parameters to pass broccoli
# for building
awx/ui/static: node_modules clean-ui clean-static Brocfile.js bower.json
	$(BROCCOLI_BIN) build awx/ui/static -- $(UI_FLAGS)
ui-test: $(UI_DEPS_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui run test

awx/ui/build_test: node_modules clean-ui clean-build-test Brocfile.js bower.json
	$(BROCCOLI_BIN) build awx/ui/build_test -- $(UI_FLAGS)
ui-test-ci: $(UI_DEPS_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui run test:ci

# build the ui to awx/ui/static:
# defaults to standard dev build (concatenated, non-minified, sourcemaps, no
# tests)
# PROD=true: standard prod build (concatenated, minified, no sourcemaps,
# compressed, no tests)
# EXTRA_UI_FLAGS=<flags as seen in Brocfile.js and
# packaging/node/tower-app.js>: additional parameters to pass broccoli
# for building
PROD ?= false
testjs_ci:
	echo "Update UI unittests later" #ui-test-ci

# TODO: Remove after 2.4 (alias for devjs/minjs)
devjs: build-ui
minjs: build-ui
ifeq ($(MAKECMDGOALS),minjs)
PROD = true
endif
jshint:
	$(NPM_BIN) run --prefix awx/ui jshint

ifeq ($(PROD),true)
UI_FLAGS=--silent --compress --no-docs --no-debug --no-sourcemaps \
	$(EXTRA_UI_FLAGS)
else
UI_FLAGS=$(EXTRA_UI_FLAGS)
endif

build-ui: awx/ui/static

# launch watcher to continuously build the ui to awx/ui/static and run tests
# after changes are made:
# WATCHER_FLAGS: options to be utilized by broccoli timepiece
# UI_FLAGS=<flags as seen in Brocfile.js and
# packaging/node/tower-app.js>: additional parameters to pass broccoli
# for building
# DOCKER_MACHINE_NAME=<name of docker-machine tower is running on>: when
# passed, not only will brocolli rebuild, but browser-sync will proxy
# proxy tower and refresh the ui when a change is made.
DOCKER_MACHINE_NAME ?= none
ifeq ($(DOCKER_MACHINE_NAME),none)
sync-ui: node_modules clean-tmp brocolli-watcher
else
sync-ui: node_modules clean-tmp
	tmux new-session -d -s ui_sync 'exec make brocolli-watcher'
	tmux rename-window 'UI Sync'
	tmux select-window -t ui_sync:0
	tmux split-window -v 'exec make browser-sync'
	tmux split-window -h 'exec make websocket-proxy'
	tmux select-layout main-vertical
	tmux attach-session -t ui_sync
endif

websocket-proxy:
	docker-machine ssh $(DOCKER_MACHINE_NAME) -L 8080:localhost:8080

browser-sync:
	$(BROWSER_SYNC_BIN) start --proxy $(shell docker-machine ip $(DOCKER_MACHINE_NAME)):8013 --ws

browser-sync-reload:
	$(BROWSER_SYNC_BIN) reload

brocolli-watcher: Brocfile.js testem.yml
	$(NODE) tools/ui/timepiece.js awx/ui/static $(WATCHER_FLAGS) -- $(UI_FLAGS)

# run ui unit-tests:
# defaults to a useful dev testing run. Builds the ui to awx/ui/build_test
# and runs mocha (node.js) tests with istanbul coverage (and an html
# coverage report)
# UI_TESTS_TO_RUN=<file>-test.js: Set this to only run a specific test file
# CI=true: Builds the ui to awx/ui/build_test
# and runs mocha (node.js) tests with istanbul coverage (and a cobertura
# coverage report). Also builds the ui to awx/ui/static and runs the
# testem (phantomjs) tests. Outputs these to XUNIT format to be consumed
# and displayed in jenkins
# DEBUG=true: Builds the ui to awx/ui/static and runs testem tests in Chrome
# so you can breakpoint the tests and underlying code to figure out why
# tests are failing.
# TESTEM_DEBUG_BROWSER: the browser to run tests in, default to Chrome

# TODO: deprecated past 2.4
testjs_ci: test-ui # w var UI_TEST_MODE=CI

UI_TEST_MODE ?= DEV
ifeq ($(UI_TEST_MODE),CI)
# ci testing run
# this used to be testjs_ci, sort-of
REPORTER = xunit
test-ui: .istanbul.yml build-ui-for-coverage test-ui-for-coverage
else
ifeq ($(UI_TEST_MODE),DEV_DEBUG)
# debug (breakpoint) dev testing run
test-ui: build-ui-for-browser-tests test-ui-debug
else
# default dev testing run
test-ui: .istanbul.yml build-ui-for-coverage test-ui-for-coverage
endif
endif

# helper tasks to test ui, don't call directly
build-ui-for-coverage: UI_FLAGS=--node-tests --no-concat --no-styles
build-ui-for-coverage: awx/ui/build_test

REPORTER ?= standard
UI_TESTS_TO_RUN ?= all
ifeq ($(REPORTER), xunit)
test-ui-for-coverage:
	XUNIT_FILE=reports/test-results-ui.xml NODE_PATH=awx/ui/build_test $(ISTANBUL_BIN) cover --include-all-sources $(MOCHA_BIN) -- --full-trace --reporter xunit-file $(shell find awx/ui/build_test -name '*-test.js'); cp coverage/ui-coverage-report.xml reports/coverage-report-ui.xml
else
ifeq ($(UI_TESTS_TO_RUN), all)
test-ui-for-coverage:
	NODE_PATH=awx/ui/build_test $(ISTANBUL_BIN) cover --include-all-sources $(MOCHA_BIN) -- --full-trace $(shell find awx/ui/build_test -name '*-test.js')
else
test-ui-for-coverage:
	NODE_PATH=awx/ui/build_test $(ISTANBUL_BIN) cover $(MOCHA_BIN) -- --full-trace $(shell find awx/ui/build_test -name '$(UI_TESTS_TO_RUN)')
endif
endif

build-ui-for-browser-tests: UI_FLAGS=--no-styles --no-compress --browser-tests --no-node-tests
build-ui-for-browser-tests: awx/ui/static

TESTEM_DEBUG_BROWSER ?= Chrome
test-ui-debug:
	PATH=./node_modules/.bin:$(PATH) $(TESTEM) --file testem.yml -l $(TESTEM_DEBUG_BROWSER)

# lint .js files
jshint: node_modules Gruntfile.js
	$(GRUNT) $@

# generate ui docs
ngdocs: build-ui Gruntfile.js
	$(GRUNT) $@
ui-test-saucelabs: $(UI_DEPS_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui run test:saucelabs

# END UI TASKS
# --------------------------------------
@@ -688,7 +653,7 @@ release_clean:
	-(rm *.tar)
	-(rm -rf ($RELEASE))

dist/$(SDIST_TAR_FILE): minjs
dist/$(SDIST_TAR_FILE): ui-release
	BUILD="$(BUILD)" $(PYTHON) setup.py sdist

sdist: dist/$(SDIST_TAR_FILE)
@@ -730,7 +695,8 @@ rpm-build:

rpm-build/$(SDIST_TAR_FILE): rpm-build dist/$(SDIST_TAR_FILE)
	cp packaging/rpm/$(NAME).spec rpm-build/
	cp packaging/rpm/$(NAME).te rpm-build/
	cp packaging/rpm/tower.te rpm-build/
	cp packaging/rpm/tower.fc rpm-build/
	cp packaging/rpm/$(NAME).sysconfig rpm-build/
	cp packaging/remove_tower_source.py rpm-build/
	cp packaging/bytecompile.sh rpm-build/
@@ -800,7 +766,9 @@ debian: deb-build/$(DEB_TAR_NAME)
endif

deb-build/$(DEB_NVR).dsc: deb-build/$(DEB_TAR_NAME)
	cd deb-build/$(DEB_TAR_NAME) && $(DEBUILD) -S
	cd deb-build/$(DEB_TAR_NAME) && \
	    cp debian/control.$(DEB_DIST) debian/control && \
	    $(DEBUILD) -S

deb-src: deb-build/$(DEB_NVR).dsc
	@echo "#############################################"
@@ -890,28 +858,46 @@ install:
	export SCL_PREFIX HTTPD_SCL_PREFIX
	$(PYTHON) setup.py install $(SETUP_INSTALL_ARGS)

# Docker Compose Development environment
docker-compose:
	docker-compose -f tools/docker-compose.yml up --no-recreate
docker-auth:
	docker login -e 1234@5678.com -u oauth2accesstoken -p "$(GCLOUD_AUTH)" https://gcr.io

docker-compose-test:
	cd tools && docker-compose run --rm --service-ports tower /bin/bash
# Docker Compose Development environment
docker-compose: docker-auth
	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose.yml up --no-recreate tower

docker-compose-cluster: docker-auth
	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose-cluster.yml up

docker-compose-test: docker-auth
	cd tools && TAG=$(COMPOSE_TAG) docker-compose run --rm --service-ports tower /bin/bash

docker-compose-build:
	docker build -t ansible/tower_devel -f tools/docker-compose/Dockerfile .
	docker tag ansible/tower_devel gcr.io/ansible-tower-engineering/tower_devel:$(COMPOSE_TAG)
	#docker push gcr.io/ansible-tower-engineering/tower_devel:$(COMPOSE_TAG)

MACHINE?=default
docker-clean:
	rm -f awx/lib/.deps_built
	eval $$(docker-machine env $(MACHINE))
	docker stop $$(docker ps -a -q)
	-docker rm $$(docker ps -f name=tools_tower -a -q)
	-docker rmi tools_tower
	$(foreach container_id,$(shell docker ps -f name=tools_tower -aq),docker stop $(container_id); docker rm -f $(container_id);)
	-docker images | grep "tower_devel" | awk '{print $$1 ":" $$2}' | xargs docker rmi

docker-refresh: docker-clean docker-compose

mongo-debug-ui:
	docker run -it --rm --name mongo-express --link tools_mongo_1:mongo -e ME_CONFIG_OPTIONS_EDITORTHEME=ambiance -e ME_CONFIG_BASICAUTH_USERNAME=admin -e ME_CONFIG_BASICAUTH_PASSWORD=password -p 8081:8081 knickers/mongo-express
# Docker Development Environment with Elastic Stack Connected
docker-compose-elk: docker-auth
	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

mongo-container:
	docker run -it --link tools_mongo_1:mongo --rm mongo sh -c 'exec mongo "$MONGO_PORT_27017_TCP_ADDR:$MONGO_PORT_27017_TCP_PORT/system_tracking_dev"'
docker-compose-cluster-elk: docker-auth
	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

clean-elk:
	docker stop tools_kibana_1
	docker stop tools_logstash_1
	docker stop tools_elasticsearch_1
	docker rm tools_logstash_1
	docker rm tools_elasticsearch_1
	docker rm tools_kibana_1

psql-container:
	docker run -it --link tools_postgres_1:postgres --rm postgres:9.4.1 sh -c 'exec psql -h "$$POSTGRES_PORT_5432_TCP_ADDR" -p "$$POSTGRES_PORT_5432_TCP_PORT" -U postgres'
	docker run -it --net tools_default --rm postgres:9.4.1 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'

Procfile | 8
@@ -1,6 +1,8 @@
runserver: make runserver
nginx: make nginx
runworker: make runworker
daphne: make daphne
celeryd: make celeryd
taskmanager: make taskmanager
receiver: make receiver
socketservice: make socketservice
factcacher: make factcacher
flower: make flower
uwsgi: make uwsgi

awx/__init__.py
@@ -5,7 +5,7 @@ import os
import sys
import warnings

__version__ = '3.0.3'
__version__ = '3.1.0'

__all__ = ['__version__']

@@ -17,6 +17,7 @@ try:
except ImportError: # pragma: no cover
    MODE = 'production'


def find_commands(management_dir):
    # Modified version of function from django/core/management/__init__.py.
    command_dir = os.path.join(management_dir, 'commands')
@@ -33,6 +34,7 @@ def find_commands(management_dir):
            pass
    return commands


def prepare_env():
    # Update the default settings environment variable based on current mode.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings.%s' % MODE)

awx/api/authentication.py
@@ -6,8 +6,10 @@ import urllib
import logging

# Django
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework import authentication
@@ -16,10 +18,10 @@ from rest_framework import HTTP_HEADER_ENCODING

# AWX
from awx.main.models import UnifiedJob, AuthToken
from awx.main.conf import tower_settings

logger = logging.getLogger('awx.api.authentication')


class TokenAuthentication(authentication.TokenAuthentication):
    '''
    Custom token authentication using tokens that expire and are associated
@@ -62,10 +64,10 @@ class TokenAuthentication(authentication.TokenAuthentication):
            return None

        if len(auth) == 1:
            msg = 'Invalid token header. No credentials provided.'
            msg = _('Invalid token header. No credentials provided.')
            raise exceptions.AuthenticationFailed(msg)
        elif len(auth) > 2:
            msg = 'Invalid token header. Token string should not contain spaces.'
            msg = _('Invalid token header. Token string should not contain spaces.')
            raise exceptions.AuthenticationFailed(msg)

        return self.authenticate_credentials(auth[1])
@@ -93,14 +95,14 @@ class TokenAuthentication(authentication.TokenAuthentication):

        # Token invalidated due to session limit config being reduced
        # Session limit reached invalidation will also take place on authentication
        if tower_settings.AUTH_TOKEN_PER_USER != -1:
        if settings.AUTH_TOKEN_PER_USER != -1:
            if not token.in_valid_tokens(now=now):
                token.invalidate(reason='limit_reached')
                raise exceptions.AuthenticationFailed(AuthToken.reason_long('limit_reached'))

        # If the user is inactive, then return an error.
        if not token.user.is_active:
            raise exceptions.AuthenticationFailed('User inactive or deleted')
            raise exceptions.AuthenticationFailed(_('User inactive or deleted'))

        # Refresh the token.
        # The token is extended from "right now" + configurable setting amount.
@@ -123,12 +125,19 @@ class TokenGetAuthentication(TokenAuthentication):
class LoggedBasicAuthentication(authentication.BasicAuthentication):

    def authenticate(self, request):
        if not settings.AUTH_BASIC_ENABLED:
            return
        ret = super(LoggedBasicAuthentication, self).authenticate(request)
        if ret:
            username = ret[0].username if ret[0] else '<none>'
            logger.debug(smart_text(u"User {} performed a {} to {} through the API".format(username, request.method, request.path)))
        return ret

    def authenticate_header(self, request):
        if not settings.AUTH_BASIC_ENABLED:
            return
        return super(LoggedBasicAuthentication, self).authenticate_header(request)


class TaskAuthentication(authentication.BaseAuthentication):
    '''
@@ -149,7 +158,7 @@ class TaskAuthentication(authentication.BaseAuthentication):
            return None
        token = unified_job.task_auth_token
        if auth[1] != token:
            raise exceptions.AuthenticationFailed('Invalid task token')
            raise exceptions.AuthenticationFailed(_('Invalid task token'))
        return (None, token)

    def authenticate_header(self, request):
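Note: with the new AUTH_BASIC_ENABLED gate above, HTTP Basic authentication can be switched off globally. A minimal client-side sketch, assuming a reachable Tower API; the URL and credentials are placeholders, not values from this commit:

    # Hedged sketch: exercising LoggedBasicAuthentication from a client.
    import requests

    resp = requests.get(
        'https://tower.example.com/api/v1/me/',   # placeholder URL
        auth=('admin', 'password'),               # placeholder Basic credentials
        verify=False,
    )
    # With AUTH_BASIC_ENABLED true, authenticate() runs and the server logs
    # "User admin performed a GET to /api/v1/me/ through the API".
    # With it false, authenticate() returns None and the other configured
    # authentication classes (e.g. token auth) are tried instead.
    print(resp.status_code)
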

awx/api/conf.py | 35 (new file)
@@ -0,0 +1,35 @@
# Django
from django.utils.translation import ugettext_lazy as _

# Tower
from awx.conf import fields, register


register(
    'AUTH_TOKEN_EXPIRATION',
    field_class=fields.IntegerField,
    min_value=60,
    label=_('Idle Time Force Log Out'),
    help_text=_('Number of seconds that a user is inactive before they will need to login again.'),
    category=_('Authentication'),
    category_slug='authentication',
)

register(
    'AUTH_TOKEN_PER_USER',
    field_class=fields.IntegerField,
    min_value=-1,
    label=_('Maximum number of simultaneous logins'),
    help_text=_('Maximum number of simultaneous logins a user may have. To disable enter -1.'),
    category=_('Authentication'),
    category_slug='authentication',
)

register(
    'AUTH_BASIC_ENABLED',
    field_class=fields.BooleanField,
    label=_('Enable HTTP Basic Auth'),
    help_text=_('Enable HTTP Basic Auth for the API Browser.'),
    category=_('Authentication'),
    category_slug='authentication',
)
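Note: each register() call above exposes a setting through Django's settings object, which is how the authentication module in this commit consumes it (settings.AUTH_TOKEN_PER_USER, settings.AUTH_BASIC_ENABLED). A small sketch of reading one of these settings; the helper function is illustrative, not part of the commit:

    # Hedged sketch: consuming a setting registered in awx/api/conf.py.
    from django.conf import settings

    def sessions_are_capped():
        # -1 disables the per-user session cap, per the help_text above.
        return settings.AUTH_TOKEN_PER_USER != -1
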

awx/api/filters.py
@@ -9,9 +9,11 @@ from django.core.exceptions import FieldError, ValidationError
from django.db import models
from django.db.models import Q
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.exceptions import ParseError, PermissionDenied
@@ -19,6 +21,8 @@ from rest_framework.filters import BaseFilterBackend

# Ansible Tower
from awx.main.utils import get_type_for_model, to_python_boolean
from awx.main.models.rbac import RoleAncestorEntry


class MongoFilterBackend(BaseFilterBackend):

@@ -26,6 +30,7 @@ class MongoFilterBackend(BaseFilterBackend):
    def filter_queryset(self, request, queryset, view):
        return queryset


class TypeFilterBackend(BaseFilterBackend):
    '''
    Filter on type field now returned with all objects.
@@ -62,6 +67,7 @@ class TypeFilterBackend(BaseFilterBackend):
            # Return a 400 for invalid field names.
            raise ParseError(*e.args)


class FieldLookupBackend(BaseFilterBackend):
    '''
    Filter using field lookups provided via query string parameters.
@@ -73,7 +79,7 @@ class FieldLookupBackend(BaseFilterBackend):
    SUPPORTED_LOOKUPS = ('exact', 'iexact', 'contains', 'icontains',
                         'startswith', 'istartswith', 'endswith', 'iendswith',
                         'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'in',
                         'isnull')
                         'isnull', 'search')

    def get_field_from_lookup(self, model, lookup):
        field = None
@@ -84,8 +90,8 @@ class FieldLookupBackend(BaseFilterBackend):
        # those lookups combined with request.user.get_queryset(Model) to make
        # sure user cannot query using objects he could not view.
        new_parts = []
        for n, name in enumerate(parts[:-1]):

        for name in parts[:-1]:
            # HACK: Make project and inventory source filtering by old field names work for backwards compatibility.
            if model._meta.object_name in ('Project', 'InventorySource'):
                name = {
@@ -95,15 +101,28 @@ class FieldLookupBackend(BaseFilterBackend):
                    'last_updated': 'last_job_run',
                }.get(name, name)

            new_parts.append(name)
            if name == 'type' and 'polymorphic_ctype' in model._meta.get_all_field_names():
                name = 'polymorphic_ctype'
                new_parts.append('polymorphic_ctype__model')
            else:
                new_parts.append(name)

            if name in getattr(model, 'PASSWORD_FIELDS', ()):
                raise PermissionDenied('Filtering on password fields is not allowed.')
                raise PermissionDenied(_('Filtering on password fields is not allowed.'))
            elif name == 'pk':
                field = model._meta.pk
            else:
                field = model._meta.get_field_by_name(name)[0]
                name_alt = name.replace("_", "")
                if name_alt in model._meta.fields_map.keys():
                    field = model._meta.fields_map[name_alt]
                    new_parts.pop()
                    new_parts.append(name_alt)
                else:
                    field = model._meta.get_field_by_name(name)[0]
            if isinstance(field, ForeignObjectRel) and getattr(field.field, '__prevent_search__', False):
                raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
            elif getattr(field, '__prevent_search__', False):
                raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
            model = getattr(field, 'related_model', None) or field.model

        if parts:
@@ -123,14 +142,20 @@ class FieldLookupBackend(BaseFilterBackend):
            return to_python_boolean(value, allow_none=True)
        elif isinstance(field, models.BooleanField):
            return to_python_boolean(value)
        elif isinstance(field, ForeignObjectRel):
        elif isinstance(field, (ForeignObjectRel, ManyToManyField, GenericForeignKey, ForeignKey)):
            return self.to_python_related(value)
        else:
            return field.to_python(value)

    def value_to_python(self, model, lookup, value):
        field, new_lookup = self.get_field_from_lookup(model, lookup)
        if new_lookup.endswith('__isnull'):

        # Type names are stored without underscores internally, but are presented and
        # and serialized over the API containing underscores so we remove `_`
        # for polymorphic_ctype__model lookups.
        if new_lookup.startswith('polymorphic_ctype__model'):
            value = value.replace('_','')
        elif new_lookup.endswith('__isnull'):
            value = to_python_boolean(value)
        elif new_lookup.endswith('__in'):
            items = []
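Note: the two polymorphic_ctype hunks above cooperate so that a query such as ?type=job_template is rewritten to a polymorphic_ctype__model lookup against the underscore-free name Django stores. An illustrative sketch of the value rewrite; 'job_template' is an example type name:

    # Hedged sketch mirroring the value rewrite in value_to_python() above.
    def normalize_type_value(lookup, value):
        if lookup.startswith('polymorphic_ctype__model'):
            return value.replace('_', '')
        return value

    # '?type=job_template' ends up matching polymorphic_ctype__model='jobtemplate'
    assert normalize_type_value('polymorphic_ctype__model', 'job_template') == 'jobtemplate'
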
@@ -144,6 +169,15 @@ class FieldLookupBackend(BaseFilterBackend):
                re.compile(value)
            except re.error as e:
                raise ValueError(e.args[0])
        elif new_lookup.endswith('__search'):
            related_model = getattr(field, 'related_model', None)
            if not related_model:
                raise ValueError('%s is not searchable' % new_lookup[:-8])
            new_lookups = []
            for rm_field in related_model._meta.fields:
                if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description'):
                    new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
            return value, new_lookups
        else:
            value = self.value_to_python_for_field(field, value)
        return value, new_lookup
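Note: per the __search branch above, a related-field search fans out into __icontains lookups over the name-like fields that actually exist on the related model. A hedged illustration; 'created_by' is an example related field, and only the listed names present on that model are kept:

    # Hedged sketch: the expansion performed for '?created_by__search=smith'.
    searchable = ('username', 'first_name', 'last_name', 'email', 'name', 'description')
    expanded = ['created_by__{}__icontains'.format(f) for f in searchable]
    # The backend OR-s the surviving lookups together in filter_queryset, e.g.
    # Q(created_by__username__icontains='smith') | Q(created_by__email__icontains='smith') | ...
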
@@ -155,6 +189,8 @@ class FieldLookupBackend(BaseFilterBackend):
            and_filters = []
            or_filters = []
            chain_filters = []
            role_filters = []
            search_filters = []
            for key, values in request.query_params.lists():
                if key in self.RESERVED_NAMES:
                    continue
@@ -171,6 +207,21 @@ class FieldLookupBackend(BaseFilterBackend):
                    key = key[:-5]
                    q_int = True

                # RBAC filtering
                if key == 'role_level':
                    role_filters.append(values[0])
                    continue

                # Search across related objects.
                if key.endswith('__search'):
                    for value in values:
                        for search_term in force_text(value).replace(',', ' ').split():
                            search_value, new_keys = self.value_to_python(queryset.model, key, search_term)
                            assert isinstance(new_keys, list)
                            for new_key in new_keys:
                                search_filters.append((new_key, search_value))
                    continue

                # Custom chain__ and or__ filters, mutually exclusive (both can
                # precede not__).
                q_chain = False
@@ -201,13 +252,21 @@ class FieldLookupBackend(BaseFilterBackend):
                    and_filters.append((q_not, new_key, value))

            # Now build Q objects for database query filter.
            if and_filters or or_filters or chain_filters:
            if and_filters or or_filters or chain_filters or role_filters or search_filters:
                args = []
                for n, k, v in and_filters:
                    if n:
                        args.append(~Q(**{k:v}))
                    else:
                        args.append(Q(**{k:v}))
                for role_name in role_filters:
                    args.append(
                        Q(pk__in=RoleAncestorEntry.objects.filter(
                            ancestor__in=request.user.roles.all(),
                            content_type_id=ContentType.objects.get_for_model(queryset.model).id,
                            role_field=role_name
                        ).values_list('object_id').distinct())
                    )
                if or_filters:
                    q = Q()
                    for n,k,v in or_filters:
@@ -216,6 +275,11 @@ class FieldLookupBackend(BaseFilterBackend):
                        else:
                            q |= Q(**{k:v})
                    args.append(q)
                if search_filters:
                    q = Q()
                    for k,v in search_filters:
                        q |= Q(**{k:v})
                    args.append(q)
                for n,k,v in chain_filters:
                    if n:
                        q = ~Q(**{k:v})
@@ -224,11 +288,12 @@ class FieldLookupBackend(BaseFilterBackend):
                    queryset = queryset.filter(q)
                queryset = queryset.filter(*args).distinct()
            return queryset
        except (FieldError, FieldDoesNotExist, ValueError) as e:
        except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
            raise ParseError(e.args[0])
        except ValidationError as e:
            raise ParseError(e.messages)


class OrderByBackend(BaseFilterBackend):
    '''
    Filter to apply ordering based on query string parameters.
@ -9,12 +9,14 @@ import time
# Django
from django.conf import settings
from django.db import connection
from django.db.models.fields import FieldDoesNotExist
from django.http import QueryDict
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.authentication import get_authorization_header
@ -25,6 +27,7 @@ from rest_framework import status
from rest_framework import views

# AWX
from awx.api.filters import FieldLookupBackend
from awx.main.models import * # noqa
from awx.main.utils import * # noqa
from awx.api.serializers import ResourceAccessListElementSerializer
@ -40,6 +43,8 @@ __all__ = ['APIView', 'GenericAPIView', 'ListAPIView', 'SimpleListAPIView',
'DeleteLastUnattachLabelMixin',]

logger = logging.getLogger('awx.api.generics')
analytics_logger = logging.getLogger('awx.analytics.performance')


def get_view_name(cls, suffix=None):
'''
@ -58,6 +63,7 @@ def get_view_name(cls, suffix=None):
return ('%s %s' % (name, suffix)) if suffix else name
return views.get_view_name(cls, suffix=None)


def get_view_description(cls, html=False):
'''
Wrapper around REST framework get_view_description() to support
@ -77,6 +83,7 @@ def get_view_description(cls, html=False):
desc = '<div class="description">%s</div>' % desc
return mark_safe(desc)


class APIView(views.APIView):

def initialize_request(self, request, *args, **kwargs):
@ -104,6 +111,7 @@ class APIView(views.APIView):
logger.warn(status_msg)
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
time_started = getattr(self, 'time_started', None)
response['X-API-Node'] = settings.CLUSTER_HOST_ID
if time_started:
time_elapsed = time.time() - self.time_started
response['X-API-Time'] = '%0.3fs' % time_elapsed
@ -112,6 +120,8 @@ class APIView(views.APIView):
q_times = [float(q['time']) for q in connection.queries[queries_before:]]
response['X-API-Query-Count'] = len(q_times)
response['X-API-Query-Time'] = '%0.3fs' % sum(q_times)

analytics_logger.info("api response", extra=dict(python_objects=dict(request=request, response=response)))
return response

def get_authenticate_header(self, request):
@ -150,6 +160,8 @@ class APIView(views.APIView):
'new_in_230': getattr(self, 'new_in_230', False),
'new_in_240': getattr(self, 'new_in_240', False),
'new_in_300': getattr(self, 'new_in_300', False),
'new_in_310': getattr(self, 'new_in_310', False),
'deprecated': getattr(self, 'deprecated', False),
}

def get_description(self, html=False):
@ -224,17 +236,26 @@ class GenericAPIView(generics.GenericAPIView, APIView):
d['settings'] = settings
return d


class SimpleListAPIView(generics.ListAPIView, GenericAPIView):

def get_queryset(self):
return self.request.user.get_queryset(self.model)


class ListAPIView(generics.ListAPIView, GenericAPIView):
# Base class for a read-only list view.

def get_queryset(self):
return self.request.user.get_queryset(self.model)

def paginate_queryset(self, queryset):
page = super(ListAPIView, self).paginate_queryset(queryset)
# Queries RBAC info & stores into list objects
if hasattr(self, 'capabilities_prefetch') and page is not None:
cache_list_capabilities(page, self.capabilities_prefetch, self.model, self.request.user)
return page

def get_description_context(self):
opts = self.model._meta
if 'username' in opts.get_all_field_names():
@ -252,14 +273,61 @@ class ListAPIView(generics.ListAPIView, GenericAPIView):
fields = []
for field in self.model._meta.fields:
if field.name in ('username', 'first_name', 'last_name', 'email',
'name', 'description', 'email'):
'name', 'description'):
fields.append(field.name)
return fields

@property
def related_search_fields(self):
def skip_related_name(name):
return (
name is None or name.endswith('_role') or name.startswith('_') or
name.startswith('deprecated_') or name.endswith('_set') or
name == 'polymorphic_ctype')

fields = set([])
for field in self.model._meta.fields:
if skip_related_name(field.name):
continue
if getattr(field, 'related_model', None):
fields.add('{}__search'.format(field.name))
for rel in self.model._meta.related_objects:
name = rel.related_model._meta.verbose_name.replace(" ", "_")
if skip_related_name(name):
continue
fields.add('{}__search'.format(name))
m2m_rel = []
m2m_rel += self.model._meta.local_many_to_many
if issubclass(self.model, UnifiedJobTemplate) and self.model != UnifiedJobTemplate:
m2m_rel += UnifiedJobTemplate._meta.local_many_to_many
if issubclass(self.model, UnifiedJob) and self.model != UnifiedJob:
m2m_rel += UnifiedJob._meta.local_many_to_many
for relationship in m2m_rel:
if skip_related_name(relationship.name):
continue
if relationship.related_model._meta.app_label != 'main':
continue
fields.add('{}__search'.format(relationship.name))
fields = list(fields)

allowed_fields = []
for field in fields:
try:
FieldLookupBackend().get_field_from_lookup(self.model, field)
except PermissionDenied:
pass
except FieldDoesNotExist:
allowed_fields.append(field)
else:
allowed_fields.append(field)
return allowed_fields


class ListCreateAPIView(ListAPIView, generics.ListCreateAPIView):
# Base class for a list view that allows creating new objects.
pass


class ParentMixin(object):

def get_parent_object(self):
@ -278,7 +346,8 @@ class ParentMixin(object):
if not self.request.user.can_access(*args):
raise PermissionDenied()

class SubListAPIView(ListAPIView, ParentMixin):

class SubListAPIView(ParentMixin, ListAPIView):
# Base class for a read-only sublist view.

# Subclasses should define at least:
@ -305,6 +374,7 @@ class SubListAPIView(ListAPIView, ParentMixin):
sublist_qs = getattrd(parent, self.relationship).distinct()
return qs & sublist_qs


class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
# Base class for a sublist view that allows for creating subobjects
# associated with the parent object.
@ -357,10 +427,14 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
headers = {'Location': obj.get_absolute_url()}
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)


class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
# Base class for a sublist view that allows for creating subobjects and
# attaching/detaching them from the parent.

def is_valid_relation(self, parent, sub, created=False):
return None

def get_description_context(self):
d = super(SubListCreateAttachDetachAPIView, self).get_description_context()
d.update({
@ -397,6 +471,13 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
skip_sub_obj_read_check=created):
raise PermissionDenied()

# Verify that the relationship to be added is valid.
attach_errors = self.is_valid_relation(parent, sub, created=created)
if attach_errors is not None:
if created:
sub.delete()
return Response(attach_errors, status=status.HTTP_400_BAD_REQUEST)

# Attach the object to the collection.
if sub not in relationship.all():
relationship.add(sub)
@ -413,7 +494,7 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
sub_id = request.data.get('id', None)
res = None
if not sub_id:
data = dict(msg='"id" is required to disassociate')
data = dict(msg=_('"id" is required to disassociate'))
res = Response(data, status=status.HTTP_400_BAD_REQUEST)
return (sub_id, res)

@ -449,12 +530,13 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
else:
return self.attach(request, *args, **kwargs)

'''
Models for which you want the last instance to be deleted from the database
when the last disassociate is called should inherit from this class. Further,
the model should implement is_detached()
'''

class DeleteLastUnattachLabelMixin(object):
'''
Models for which you want the last instance to be deleted from the database
when the last disassociate is called should inherit from this class. Further,
the model should implement is_detached()
'''
def unattach(self, request, *args, **kwargs):
(sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request)
if res:
@ -469,12 +551,15 @@ class DeleteLastUnattachLabelMixin(object):

return res

class SubDetailAPIView(generics.RetrieveAPIView, GenericAPIView, ParentMixin):

class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView):
pass


class RetrieveAPIView(generics.RetrieveAPIView, GenericAPIView):
pass


class RetrieveUpdateAPIView(RetrieveAPIView, generics.RetrieveUpdateAPIView):

def update(self, request, *args, **kwargs):
@ -489,6 +574,7 @@ class RetrieveUpdateAPIView(RetrieveAPIView, generics.RetrieveUpdateAPIView):
''' scrub any fields the user cannot/should not put/patch, based on user context. This runs after read-only serialization filtering '''
pass


class RetrieveDestroyAPIView(RetrieveAPIView, generics.RetrieveDestroyAPIView):

def destroy(self, request, *args, **kwargs):
@ -499,21 +585,21 @@ class RetrieveDestroyAPIView(RetrieveAPIView, generics.RetrieveDestroyAPIView):
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)


class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, RetrieveDestroyAPIView):
pass


class DestroyAPIView(GenericAPIView, generics.DestroyAPIView):
pass


class ResourceAccessList(ListAPIView):
class ResourceAccessList(ParentMixin, ListAPIView):

serializer_class = ResourceAccessListElementSerializer

def get_queryset(self):
self.object_id = self.kwargs['pk']
resource_model = getattr(self, 'resource_model')
obj = get_object_or_404(resource_model, pk=self.object_id)
obj = self.get_parent_object()

content_type = ContentType.objects.get_for_model(obj)
roles = set(Role.objects.filter(content_type=content_type, object_id=obj.id))

@ -1,49 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

from rest_framework.exceptions import APIException

from awx.main.task_engine import TaskSerializer


class LicenseForbids(APIException):
status_code = 402
default_detail = 'Your Tower license does not allow that.'


def get_license(show_key=False, bypass_database=False):
"""Return a dictionary representing the license currently in
place on this Tower instance.
"""
license_reader = TaskSerializer()
if bypass_database:
return license_reader.from_file(show_key=show_key)
return license_reader.from_database(show_key=show_key)


def feature_enabled(name, bypass_database=False):
"""Return True if the requested feature is enabled, False otherwise.
If the feature does not exist, raise KeyError.
"""
license = get_license(bypass_database=bypass_database)

# Sanity check: If there is no license, the feature is considered
# to be off.
if 'features' not in license:
return False

# Return the correct feature flag.
return license['features'].get(name, False)

def feature_exists(name):
"""Return True if the requested feature is enabled, False otherwise.
If the feature does not exist, raise KeyError.
"""
license = get_license()

# Sanity check: If there is no license, the feature is considered
# to be off.
if 'features' not in license:
return False

return name in license['features']
@ -1,59 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

import sys

from optparse import make_option
from django.core.management.base import BaseCommand
from awx.main.ha import is_ha_environment
from awx.main.task_engine import TaskSerializer


class Command(BaseCommand):
"""Return a exit status of 0 if MongoDB should be active, and an
|
||||
exit status of 1 otherwise.
|
||||

This script is intended to be used by bash and init scripts to
conditionally start MongoDB, so its focus is on being bash-friendly.
"""

def __init__(self):
super(Command, self).__init__()
BaseCommand.option_list += (make_option('--local',
dest='local',
default=False,
action="store_true",
help="Only check if mongo should be running locally"),)

def handle(self, *args, **kwargs):
# Get the license data.
license_reader = TaskSerializer()
license_data = license_reader.from_database()

# Does the license have features, at all?
# If there is no license yet, then all features are clearly off.
if 'features' not in license_data:
print('No license available.')
sys.exit(2)

# Does the license contain the system tracking feature?
# If and only if it does, MongoDB should run.
system_tracking = license_data['features']['system_tracking']

# Okay, do we need MongoDB to be turned on?
# This is a silly variable assignment right now, but I expect the
# rules here will grow more complicated over time.
uses_mongo = system_tracking # noqa

if is_ha_environment() and kwargs['local'] and uses_mongo:
print("HA Configuration detected. Database should be remote")
uses_mongo = False

# If we do not need Mongo, return a non-zero exit status.
if not uses_mongo:
print('MongoDB NOT required')
sys.exit(1)

# We do need Mongo, return zero.
print('MongoDB required')
sys.exit(0)
@ -7,12 +7,13 @@ from collections import OrderedDict
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.utils.encoding import force_text, smart_text
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework import exceptions
from rest_framework import metadata
from rest_framework import serializers
from rest_framework.relations import RelatedField
from rest_framework.relations import RelatedField, ManyRelatedField
from rest_framework.request import clone_request

# Ansible Tower
@ -29,7 +30,9 @@ class Metadata(metadata.SimpleMetadata):
text_attrs = [
'read_only', 'label', 'help_text',
'min_length', 'max_length',
'min_value', 'max_value'
'min_value', 'max_value',
'category', 'category_slug',
'defined_in_file'
]

for attr in text_attrs:
@ -37,29 +40,37 @@ class Metadata(metadata.SimpleMetadata):
if value is not None and value != '':
field_info[attr] = force_text(value, strings_only=True)

placeholder = getattr(field, 'placeholder', serializers.empty)
if placeholder is not serializers.empty:
field_info['placeholder'] = placeholder

# Update help text for common fields.
serializer = getattr(field, 'parent', None)
if serializer:
field_help_text = {
'id': 'Database ID for this {}.',
'name': 'Name of this {}.',
'description': 'Optional description of this {}.',
'type': 'Data type for this {}.',
'url': 'URL for this {}.',
'related': 'Data structure with URLs of related resources.',
'summary_fields': 'Data structure with name/description for related resources.',
'created': 'Timestamp when this {} was created.',
'modified': 'Timestamp when this {} was last modified.',
'id': _('Database ID for this {}.'),
'name': _('Name of this {}.'),
'description': _('Optional description of this {}.'),
'type': _('Data type for this {}.'),
'url': _('URL for this {}.'),
'related': _('Data structure with URLs of related resources.'),
'summary_fields': _('Data structure with name/description for related resources.'),
'created': _('Timestamp when this {} was created.'),
'modified': _('Timestamp when this {} was last modified.'),
}
if field.field_name in field_help_text:
opts = serializer.Meta.model._meta.concrete_model._meta
verbose_name = smart_text(opts.verbose_name)
field_info['help_text'] = field_help_text[field.field_name].format(verbose_name)
if hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'):
opts = serializer.Meta.model._meta.concrete_model._meta
verbose_name = smart_text(opts.verbose_name)
field_info['help_text'] = field_help_text[field.field_name].format(verbose_name)

# Indicate if a field has a default value.
# FIXME: Still isn't showing all default values?
try:
field_info['default'] = field.get_default()
default = field.get_default()
if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost':
default = '{}://{}'.format(self.request.scheme, self.request.get_host())
field_info['default'] = default
except serializers.SkipField:
pass

@ -68,7 +79,7 @@ class Metadata(metadata.SimpleMetadata):
elif getattr(field, 'fields', None):
field_info['children'] = self.get_serializer_info(field)

if hasattr(field, 'choices') and not isinstance(field, RelatedField):
if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'):
field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()]

# Indicate if a field is write-only.
@ -112,19 +123,20 @@ class Metadata(metadata.SimpleMetadata):
actions = {}
for method in {'GET', 'PUT', 'POST'} & set(view.allowed_methods):
view.request = clone_request(request, method)
obj = None
try:
# Test global permissions
if hasattr(view, 'check_permissions'):
view.check_permissions(view.request)
# Test object permissions
if method == 'PUT' and hasattr(view, 'get_object'):
view.get_object()
obj = view.get_object()
except (exceptions.APIException, PermissionDenied, Http404):
continue
else:
# If user has appropriate permissions for the view, include
# appropriate metadata about the fields that should be supplied.
serializer = view.get_serializer()
serializer = view.get_serializer(instance=obj)
actions[method] = self.get_serializer_info(serializer)
finally:
view.request = request
@ -140,27 +152,34 @@ class Metadata(metadata.SimpleMetadata):
# For GET method, remove meta attributes that aren't relevant
# when reading a field and remove write-only fields.
if method == 'GET':
meta.pop('required', None)
meta.pop('read_only', None)
meta.pop('default', None)
meta.pop('min_length', None)
meta.pop('max_length', None)
attrs_to_remove = ('required', 'read_only', 'default', 'min_length', 'max_length', 'placeholder')
for attr in attrs_to_remove:
meta.pop(attr, None)
meta.get('child', {}).pop(attr, None)
if meta.pop('write_only', False):
actions['GET'].pop(field)

# For PUT/POST methods, remove read-only fields.
if method in ('PUT', 'POST'):
# This value should always be False for PUT/POST, so don't
# show it (file-based read-only settings can't be updated)
meta.pop('defined_in_file', False)

if meta.pop('read_only', False):
actions[method].pop(field)

return actions

def determine_metadata(self, request, view):
# store request on self so we can use it to generate field defaults
# (such as TOWER_URL_BASE)
self.request = request

metadata = super(Metadata, self).determine_metadata(request, view)

# Add version number in which view was added to Tower.
added_in_version = '1.2'
for version in ('3.0.0', '2.4.0', '2.3.0', '2.2.0', '2.1.0', '2.0.0', '1.4.8', '1.4.5', '1.4', '1.3'):
for version in ('3.1.0', '3.0.0', '2.4.0', '2.3.0', '2.2.0', '2.1.0', '2.0.0', '1.4.8', '1.4.5', '1.4', '1.3'):
if getattr(view, 'new_in_%s' % version.replace('.', ''), False):
added_in_version = version
break
@ -176,8 +195,17 @@ class Metadata(metadata.SimpleMetadata):
if getattr(view, 'search_fields', None):
metadata['search_fields'] = view.search_fields

# Add related search fields if available from the view.
if getattr(view, 'related_search_fields', None):
metadata['related_search_fields'] = view.related_search_fields

from rest_framework import generics
if isinstance(view, generics.ListAPIView) and hasattr(view, 'paginator'):
metadata['max_page_size'] = view.paginator.max_page_size

return metadata


class RoleMetadata(Metadata):
def determine_metadata(self, request, view):
metadata = super(RoleMetadata, self).determine_metadata(request, view)

@ -2,6 +2,7 @@
# All Rights Reserved.

# Django REST Framework
from django.conf import settings
from rest_framework import pagination
from rest_framework.utils.urls import replace_query_param

@ -9,11 +10,13 @@ from rest_framework.utils.urls import replace_query_param
class Pagination(pagination.PageNumberPagination):

page_size_query_param = 'page_size'
max_page_size = settings.MAX_PAGE_SIZE

def get_next_link(self):
if not self.page.has_next():
return None
url = self.request and self.request.get_full_path() or ''
url = url.encode('utf-8')
page_number = self.page.next_page_number()
return replace_query_param(url, self.page_query_param, page_number)

@ -21,5 +24,6 @@ class Pagination(pagination.PageNumberPagination):
if not self.page.has_previous():
return None
url = self.request and self.request.get_full_path() or ''
url = url.encode('utf-8')
page_number = self.page.previous_page_number()
return replace_query_param(url, self.page_query_param, page_number)

@ -5,6 +5,7 @@ import json
# Django
from django.conf import settings
from django.utils import six
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework import parsers
@ -27,4 +28,4 @@ class JSONParser(parsers.JSONParser):
data = stream.read().decode(encoding)
return json.loads(data, object_pairs_hook=OrderedDict)
except ValueError as exc:
raise ParseError('JSON parse error - %s' % six.text_type(exc))
raise ParseError(_('JSON parse error - %s') % six.text_type(exc))

@ -4,9 +4,6 @@
# Python
import logging

# Django
from django.http import Http404

# Django REST Framework
from rest_framework.exceptions import MethodNotAllowed, PermissionDenied
from rest_framework import permissions
@ -19,7 +16,8 @@ from awx.main.utils import get_object_or_400
logger = logging.getLogger('awx.api.permissions')

__all__ = ['ModelAccessPermission', 'JobTemplateCallbackPermission',
'TaskPermission', 'ProjectUpdatePermission', 'UserPermission']
'TaskPermission', 'ProjectUpdatePermission', 'UserPermission',]


class ModelAccessPermission(permissions.BasePermission):
'''
@ -49,6 +47,9 @@ class ModelAccessPermission(permissions.BasePermission):
if not check_user_access(request.user, view.parent_model, 'read',
parent_obj):
return False
if hasattr(view, 'parent_key'):
if not check_user_access(request.user, view.model, 'add', {view.parent_key: parent_obj.pk}):
return False
return True
elif getattr(view, 'is_job_start', False):
if not obj:
@ -92,13 +93,6 @@ class ModelAccessPermission(permissions.BasePermission):
method based on the request method.
'''

# Check that obj (if given) is active, otherwise raise a 404.
active = getattr(obj, 'active', getattr(obj, 'is_active', True))
if callable(active):
active = active()
if not active:
raise Http404()

# Don't allow anonymous users. 401, not 403, hence no raised exception.
if not request.user or request.user.is_anonymous():
return False
@ -137,6 +131,7 @@ class ModelAccessPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view, obj)


class JobTemplateCallbackPermission(ModelAccessPermission):
'''
Permission check used by job template callback view for requests from
@ -162,6 +157,7 @@ class JobTemplateCallbackPermission(ModelAccessPermission):
else:
return True


class TaskPermission(ModelAccessPermission):
'''
Permission checks used for API callbacks from running a task.
@ -186,11 +182,10 @@ class TaskPermission(ModelAccessPermission):
# token.
if view.model == Inventory and request.method.lower() in ('head', 'get'):
return bool(not obj or obj.pk == unified_job.inventory_id)
elif view.model in (JobEvent, AdHocCommandEvent) and request.method.lower() == 'post':
return bool(not obj or obj.pk == unified_job.pk)
else:
return False


class ProjectUpdatePermission(ModelAccessPermission):
'''
Permission check used by ProjectUpdateView to determine who can update projects
@ -206,6 +201,10 @@ class ProjectUpdatePermission(ModelAccessPermission):

class UserPermission(ModelAccessPermission):
def check_post_permissions(self, request, view, obj=None):
if request.user.is_superuser:
if not request.data:
return request.user.admin_of_organizations.exists()
elif request.user.is_superuser:
return True
raise PermissionDenied()



@ -3,6 +3,7 @@

# Django REST Framework
from rest_framework import renderers
from rest_framework.request import override_method


class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer):
@ -30,6 +31,8 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer):
# Set a flag on the view to indicate to the view/serializer that we're
# creating a raw data form for the browsable API. Store the original
# request method to determine how to populate the raw data form.
if request.method in {'OPTIONS', 'DELETE'}:
return
try:
setattr(view, '_raw_data_form_marker', True)
setattr(view, '_raw_data_request_method', request.method)
@ -41,10 +44,16 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer):
def get_rendered_html_form(self, data, view, method, request):
# Never show auto-generated form (only raw form).
obj = getattr(view, 'object', None)
if not self.show_form_for_method(view, method, request, obj):
return
if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
if obj is None and hasattr(view, 'get_object') and hasattr(view, 'retrieve'):
try:
obj = view.get_object()
except Exception:
obj = None
with override_method(view, request, method) as request:
if not self.show_form_for_method(view, method, request, obj):
return
if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form

def get_filter_form(self, data, view, request):
# Don't show filter form in browsable API.
@ -71,3 +80,8 @@ class AnsiTextRenderer(PlainTextRenderer):

media_type = 'text/plain'
format = 'ansi'


class AnsiDownloadRenderer(PlainTextRenderer):

format = "ansi_download"

File diff suppressed because it is too large
@ -56,6 +56,10 @@ within all designated text fields of a model.

_Added in AWX 1.4_

(_Added in Ansible Tower 3.1.0_) Search across related fields:

?related__search=findme
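
As an illustration, a related search is just a query parameter, so any HTTP client can exercise it. The sketch below uses Python's `requests` library; the host, credentials, and the `project__search` field are placeholder assumptions, not values taken from this commit.

    import requests

    # Hypothetical query: job templates whose related project matches "findme".
    resp = requests.get(
        'https://tower.example.com/api/v1/job_templates/',
        params={'project__search': 'findme'},
        auth=('admin', 'password'),  # placeholder credentials
    )
    for result in resp.json()['results']:
        print(result['id'], result['name'])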

## Filtering

Any additional query string parameters may be used to filter the list of
@ -132,3 +136,8 @@ values.

Lists (for the `in` lookup) may be specified as a comma-separated list of
values.

(_Added in Ansible Tower 3.1.0_) Filtering based on the requesting user's
level of access by query string parameter.

* `role_level`: Level of role to filter on, such as `admin_role`
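
For instance, listing only the resources on which the requesting user holds `admin_role` could look like the following sketch (the host and credentials are placeholders):

    import requests

    resp = requests.get(
        'https://tower.example.com/api/v1/job_templates/',
        params={'role_level': 'admin_role'},
        auth=('alice', 'secret'),  # placeholder credentials
    )
    print(resp.json()['count'])  # number of objects where alice holds admin_role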

@ -1,9 +1,13 @@
{% if not version_label_flag or version_label_flag == 'true' %}
{% if new_in_13 %}> _Added in AWX 1.3_{% endif %}
{% if new_in_14 %}> _Added in AWX 1.4_{% endif %}
{% if new_in_145 %}> _Added in Ansible Tower 1.4.5_{% endif %}
{% if new_in_148 %}> _Added in Ansible Tower 1.4.8_{% endif %}
{% if new_in_200 %}> _New in Ansible Tower 2.0.0_{% endif %}
{% if new_in_220 %}> _New in Ansible Tower 2.2.0_{% endif %}
{% if new_in_230 %}> _New in Ansible Tower 2.3.0_{% endif %}
{% if new_in_240 %}> _New in Ansible Tower 2.4.0_{% endif %}
{% if new_in_300 %}> _New in Ansible Tower 3.0.0_{% endif %}
{% if new_in_200 %}> _Added in Ansible Tower 2.0.0_{% endif %}
{% if new_in_220 %}> _Added in Ansible Tower 2.2.0_{% endif %}
{% if new_in_230 %}> _Added in Ansible Tower 2.3.0_{% endif %}
{% if new_in_240 %}> _Added in Ansible Tower 2.4.0_{% endif %}
{% if new_in_300 %}> _Added in Ansible Tower 3.0.0_{% endif %}
{% if new_in_310 %}> _New in Ansible Tower 3.1.0_{% endif %}
{% if deprecated %}> _This resource has been deprecated and will be removed in a future release_{% endif %}
{% endif %}

@ -32,3 +32,6 @@ agent that originally obtained it.
Each request that uses the token for authentication will refresh its expiration
timestamp and keep it from expiring. A token only expires when it is not used
for the configured timeout interval (default 1800 seconds).

A DELETE request with the token set will cause the token to be invalidated and
no further requests can be made with it.
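
A minimal sketch of that lifecycle in Python's `requests`; the host, credentials, and the exact /api/v1/authtoken/ path used for the DELETE are assumptions for illustration:

    import requests

    base = 'https://tower.example.com/api/v1'
    # Obtain a token; every authenticated request below refreshes its expiration.
    token = requests.post(base + '/authtoken/',
                          json={'username': 'alice', 'password': 'secret'}).json()['token']
    headers = {'Authorization': 'Token %s' % token}

    requests.get(base + '/me/', headers=headers)  # keeps the token alive

    # Invalidate the token; subsequent requests with it will be rejected.
    requests.delete(base + '/authtoken/', headers=headers)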

@ -1,4 +1,6 @@
{% with 'false' as version_label_flag %}
{% include "api/sub_list_create_api_view.md" %}
{% endwith %}

Labels not associated with any other resources are deleted. A label can become disassociated from a resource as a result of three events.

@ -6,4 +8,6 @@ Labels not associated with any other resources are deleted. A label can become d
2. A job is deleted with labels
3. A cleanup job deletes a job with labels

{% with 'true' as version_label_flag %}
{% include "api/_new_in_awx.md" %}
{% endwith %}

@ -1,4 +1,4 @@
POST requests to this resource should include the full specification for a Job Template Survey
POST requests to this resource should include the full specification for a {{ model_verbose_name|title }}'s Survey

Here is an example survey specification:

@ -30,7 +30,7 @@ Within each survey item `type` must be one of:
* multiselect: For survey questions where multiple items from a presented list can be selected

Each item must contain a `question_name` and `question_description` field that describes the survey question itself.
The `variable` element of each survey item represents the key that will be given to the playbook when the job template
The `variable` element of each survey item represents the key that will be given to the playbook when the {{model_verbose_name}}
is launched. It will contain the value as a result of the survey.

Here is a more comprehensive example showing the various question types and their acceptable parameters:

@ -2,20 +2,33 @@ Launch a Job Template:

Make a POST request to this resource to launch the system job template.

An extra parameter `extra_vars` is suggested in order to pass extra parameters
to the system job task.
Variables specified inside of the parameter `extra_vars` are passed to the
system job task as command line parameters. These tasks can be run manually
on the host system via the `tower-manage` command.

For example on `cleanup_jobs` and `cleanup_activitystream`:

`{"days": 30}`
`{"extra_vars": {"days": 30}}`

Which will act on data older than 30 days.

For `cleanup_facts`:

`{"older_than": "4w", `granularity`: "3d"}`
`{"extra_vars": {"older_than": "4w", "granularity": "3d"}}`

Which will reduce the granularity of scan data to one scan per 3 days when the data is older than 4w.

For `cleanup_activitystream` and `cleanup_jobs` commands, providing
`"dry_run": true` inside of `extra_vars` will show items that will be
removed without deleting them.

Each individual system job task has its own default values, which are
applicable either when running it from the command line or launching its
system job template with empty `extra_vars`.

- Defaults for `cleanup_activitystream`: days=90
- Defaults for `cleanup_facts`: older_than="30d", granularity="1w"
- Defaults for `cleanup_jobs`: days=90

If successful, the response status code will be 202. If the job cannot be
launched, a 405 status code will be returned.
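
Putting that together, a launch request might look like this sketch (the host, credentials, and template id are placeholders):

    import requests

    resp = requests.post(
        'https://tower.example.com/api/v1/system_job_templates/1/launch/',
        json={'extra_vars': {'days': 30, 'dry_run': True}},
        auth=('admin', 'password'),  # placeholder credentials
    )
    assert resp.status_code == 202  # 405 if the job cannot be launched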

@ -13,6 +13,7 @@ Use the `format` query string parameter to specify the output format.
* Plain Text with ANSI color codes: `?format=ansi`
* JSON structure: `?format=json`
* Downloaded Plain Text: `?format=txt_download`
* Downloaded Plain Text with ANSI color codes: `?format=ansi_download`

(_New in Ansible Tower 2.0.0_) When using the Browsable API, HTML and JSON
formats, the `start_line` and `end_line` query string parameters can be used
@ -21,7 +22,8 @@ to specify a range of line numbers to retrieve.

Use `dark=1` or `dark=0` as a query string parameter to force or disable a
dark background.

Files over {{ settings.STDOUT_MAX_BYTES_DISPLAY|filesizeformat }} (configurable) will not display in the browser. Use the `txt_download`
format to download the file directly to view it.
Files over {{ settings.STDOUT_MAX_BYTES_DISPLAY|filesizeformat }} (configurable)
will not display in the browser. Use the `txt_download` or `ansi_download`
formats to download the file directly to view it.
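
For example, fetching a job's stdout as JSON with a line range could look like this sketch (the job id, host, and credentials are placeholders):

    import requests

    resp = requests.get(
        'https://tower.example.com/api/v1/jobs/42/stdout/',
        params={'format': 'json', 'start_line': 0, 'end_line': 100},
        auth=('admin', 'password'),  # placeholder credentials
    )
    print(resp.json())  # JSON structure containing the requested output range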

{% include "api/_new_in_awx.md" %}

12
awx/api/templates/api/workflow_job_cancel.md
Normal file
@ -0,0 +1,12 @@
# Cancel Workflow Job

Make a GET request to this resource to determine if the workflow job can be
canceled. The response will include the following field:

* `can_cancel`: Indicates whether this workflow job is in a state that can
be canceled (boolean, read-only)

Make a POST request to this endpoint to submit a request to cancel a pending
or running workflow job. The response status code will be 202 if the
request to cancel was successfully submitted, or 405 if the workflow job
cannot be canceled.
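
A sketch of that check-then-cancel flow (the workflow job id, host, and credentials are placeholders):

    import requests

    url = 'https://tower.example.com/api/v1/workflow_jobs/7/cancel/'
    auth = ('admin', 'password')  # placeholder credentials

    if requests.get(url, auth=auth).json()['can_cancel']:
        resp = requests.post(url, auth=auth)
        assert resp.status_code == 202  # cancel request submitted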

5
awx/api/templates/api/workflow_job_relaunch.md
Normal file
@ -0,0 +1,5 @@
Relaunch a workflow job:

Make a POST request to this endpoint to launch a workflow job identical to the parent workflow job. This will spawn jobs, project updates, or inventory updates based on the unified job templates referenced in the workflow nodes in the workflow job. No POST data is accepted for this action.

If successful, the response status code will be 201 and serialized data of the new workflow job will be returned.
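
For instance (a sketch; the id, host, and credentials are placeholders):

    import requests

    # Relaunch workflow job 7; no POST body is needed.
    resp = requests.post(
        'https://tower.example.com/api/v1/workflow_jobs/7/relaunch/',
        auth=('admin', 'password'),  # placeholder credentials
    )
    if resp.status_code == 201:
        print('new workflow job id:', resp.json()['id'])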

34
awx/api/templates/api/workflow_job_template_copy.md
Normal file
@ -0,0 +1,34 @@
Copy a Workflow Job Template:

Make a GET request to this resource to determine if the current user has
permission to copy the {{model_verbose_name}} and whether any linked
templates or prompted fields will be ignored due to permissions problems.
The response will include the following fields:

* `can_copy`: Flag indicating whether the active user has permission to make
a copy of this {{model_verbose_name}}, provides same content as the
{{model_verbose_name}} detail view summary_fields.user_capabilities.copy
(boolean, read-only)
* `can_copy_without_user_input`: Flag indicating if the user should be
prompted for confirmation before the copy is executed (boolean, read-only)
* `templates_unable_to_copy`: List of node ids of nodes that have a related
job template, project, or inventory that the current user lacks permission
to use and will be missing in workflow nodes of the copy (array, read-only)
* `inventories_unable_to_copy`: List of node ids of nodes that have a related
prompted inventory that the current user lacks permission
to use and will be missing in workflow nodes of the copy (array, read-only)
* `credentials_unable_to_copy`: List of node ids of nodes that have a related
prompted credential that the current user lacks permission
to use and will be missing in workflow nodes of the copy (array, read-only)

Make a POST request to this endpoint to save a copy of this
{{model_verbose_name}}. No POST data is accepted for this action.

If successful, the response status code will be 201. The response body will
contain serialized data about the new {{model_verbose_name}}, which will be
similar to the original {{model_verbose_name}}, but with an additional `@`
and a timestamp in the name.

All workflow nodes and connections in the original will also exist in the
copy. The nodes will be missing related resources if the user did not have
access to use them.
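
A sketch of a cautious copy that checks first whether anything would be silently dropped (the id, host, and credentials are placeholders):

    import requests

    url = 'https://tower.example.com/api/v1/workflow_job_templates/5/copy/'
    auth = ('admin', 'password')  # placeholder credentials

    check = requests.get(url, auth=auth).json()
    if check['can_copy'] and check['can_copy_without_user_input']:
        new_wfjt = requests.post(url, auth=auth).json()
        print(new_wfjt['name'])  # original name plus '@' and a timestamp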

34
awx/api/templates/api/workflow_job_template_launch.md
Normal file
@ -0,0 +1,34 @@
Launch a Workflow Job Template:

Make a GET request to this resource to determine if the workflow_job_template
can be launched and whether any passwords are required to launch the
workflow_job_template. The response will include the following fields:

* `can_start_without_user_input`: Flag indicating if the workflow_job_template
can be launched without user-input (boolean, read-only)
* `variables_needed_to_start`: Required variable names required to launch the
workflow_job_template (array, read-only)
* `survey_enabled`: Flag indicating whether the workflow_job_template has an
enabled survey (boolean, read-only)
* `extra_vars`: Text which is the `extra_vars` field of this workflow_job_template
(text, read-only)
* `node_templates_missing`: List of node ids of all nodes that have a
null `unified_job_template`, which will cause their branches to stop
execution (list, read-only)
* `node_prompts_rejected`: List of node ids of all nodes that have
specified a field that will be rejected because its `unified_job_template`
does not allow prompting for this field; this will not halt execution of
the branch, but the field will be ignored (list, read-only)
* `workflow_job_template_data`: JSON object listing general information of
this workflow_job_template (JSON object, read-only)

Make a POST request to this resource to launch the workflow_job_template. If any
credential, inventory, project or extra variables (extra_vars) are required, they
must be passed via POST data, with extra_vars given as a YAML or JSON string and
escaped parentheses.

If successful, the response status code will be 201. If any required passwords
are not provided, a 400 status code will be returned. If the workflow job cannot
be launched, a 405 status code will be returned. If the provided credential or
inventory are not allowed to be used by the user, then a 403 status code will
be returned.
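
A sketch of a launch with prompted variables, passing extra_vars as a JSON string per the note above (the id, host, credentials, and variable names are placeholders):

    import requests

    resp = requests.post(
        'https://tower.example.com/api/v1/workflow_job_templates/5/launch/',
        json={'extra_vars': '{"environment": "staging"}'},
        auth=('admin', 'password'),  # placeholder credentials
    )
    assert resp.status_code == 201  # 400/403/405 on the failure cases above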

@ -0,0 +1,15 @@
# Workflow Job Template Workflow Node List

Workflow nodes reference templates to execute and define the ordering
in which to execute them. After a job in this workflow finishes,
the subsequent actions are to:

- run nodes contained in "failure_nodes" or "always_nodes" if job failed
- run nodes contained in "success_nodes" or "always_nodes" if job succeeded

The workflow job is marked as `successful` if all of the jobs running as
a part of the workflow job have completed, and the workflow job has not
been canceled. Even if a job within the workflow has failed, the workflow
job will not be marked as failed.
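
That branching rule can be summarized in a few lines of Python; this is an illustrative sketch of the decision, not the scheduler's actual implementation:

    def next_nodes(node, job_succeeded):
        # always_nodes run regardless of the outcome; success_nodes and
        # failure_nodes are gated on whether the job succeeded.
        branches = list(node['always_nodes'])
        branches += node['success_nodes'] if job_succeeded else node['failure_nodes']
        return branches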

{% include "api/sub_list_create_api_view.md" %}
@ -18,6 +18,7 @@ organization_urls = patterns('awx.api.views',
url(r'^(?P<pk>[0-9]+)/admins/$', 'organization_admins_list'),
url(r'^(?P<pk>[0-9]+)/inventories/$', 'organization_inventories_list'),
url(r'^(?P<pk>[0-9]+)/projects/$', 'organization_projects_list'),
url(r'^(?P<pk>[0-9]+)/workflow_job_templates/$', 'organization_workflow_job_templates_list'),
url(r'^(?P<pk>[0-9]+)/teams/$', 'organization_teams_list'),
url(r'^(?P<pk>[0-9]+)/credentials/$', 'organization_credential_list'),
url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'organization_activity_stream_list'),
@ -60,6 +61,7 @@ project_urls = patterns('awx.api.views',
)

project_update_urls = patterns('awx.api.views',
url(r'^$', 'project_update_list'),
url(r'^(?P<pk>[0-9]+)/$', 'project_update_detail'),
url(r'^(?P<pk>[0-9]+)/cancel/$', 'project_update_cancel'),
url(r'^(?P<pk>[0-9]+)/stdout/$', 'project_update_stdout'),
@ -145,6 +147,7 @@ inventory_source_urls = patterns('awx.api.views',
)

inventory_update_urls = patterns('awx.api.views',
url(r'^$', 'inventory_update_list'),
url(r'^(?P<pk>[0-9]+)/$', 'inventory_update_detail'),
url(r'^(?P<pk>[0-9]+)/cancel/$', 'inventory_update_cancel'),
url(r'^(?P<pk>[0-9]+)/stdout/$', 'inventory_update_stdout'),
@ -202,8 +205,6 @@ job_urls = patterns('awx.api.views',
url(r'^(?P<pk>[0-9]+)/relaunch/$', 'job_relaunch'),
url(r'^(?P<pk>[0-9]+)/job_host_summaries/$', 'job_job_host_summaries_list'),
url(r'^(?P<pk>[0-9]+)/job_events/$', 'job_job_events_list'),
url(r'^(?P<pk>[0-9]+)/job_plays/$', 'job_job_plays_list'),
url(r'^(?P<pk>[0-9]+)/job_tasks/$', 'job_job_tasks_list'),
url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'job_activity_stream_list'),
url(r'^(?P<pk>[0-9]+)/stdout/$', 'job_stdout'),
url(r'^(?P<pk>[0-9]+)/notifications/$', 'job_notifications_list'),
@ -228,6 +229,7 @@ ad_hoc_command_urls = patterns('awx.api.views',
url(r'^(?P<pk>[0-9]+)/relaunch/$', 'ad_hoc_command_relaunch'),
url(r'^(?P<pk>[0-9]+)/events/$', 'ad_hoc_command_ad_hoc_command_events_list'),
url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'ad_hoc_command_activity_stream_list'),
url(r'^(?P<pk>[0-9]+)/notifications/$', 'ad_hoc_command_notifications_list'),
url(r'^(?P<pk>[0-9]+)/stdout/$', 'ad_hoc_command_stdout'),
)

@ -254,6 +256,36 @@ system_job_urls = patterns('awx.api.views',
url(r'^(?P<pk>[0-9]+)/notifications/$', 'system_job_notifications_list'),
)

workflow_job_template_urls = patterns('awx.api.views',
url(r'^$', 'workflow_job_template_list'),
url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_template_detail'),
url(r'^(?P<pk>[0-9]+)/workflow_jobs/$', 'workflow_job_template_jobs_list'),
url(r'^(?P<pk>[0-9]+)/launch/$', 'workflow_job_template_launch'),
url(r'^(?P<pk>[0-9]+)/copy/$', 'workflow_job_template_copy'),
url(r'^(?P<pk>[0-9]+)/schedules/$', 'workflow_job_template_schedules_list'),
url(r'^(?P<pk>[0-9]+)/survey_spec/$', 'workflow_job_template_survey_spec'),
url(r'^(?P<pk>[0-9]+)/workflow_nodes/$', 'workflow_job_template_workflow_nodes_list'),
url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'workflow_job_template_activity_stream_list'),
url(r'^(?P<pk>[0-9]+)/notification_templates_any/$', 'workflow_job_template_notification_templates_any_list'),
url(r'^(?P<pk>[0-9]+)/notification_templates_error/$', 'workflow_job_template_notification_templates_error_list'),
url(r'^(?P<pk>[0-9]+)/notification_templates_success/$', 'workflow_job_template_notification_templates_success_list'),
url(r'^(?P<pk>[0-9]+)/access_list/$', 'workflow_job_template_access_list'),
url(r'^(?P<pk>[0-9]+)/object_roles/$', 'workflow_job_template_object_roles_list'),
url(r'^(?P<pk>[0-9]+)/labels/$', 'workflow_job_template_label_list'),
)

workflow_job_urls = patterns('awx.api.views',
url(r'^$', 'workflow_job_list'),
url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_detail'),
url(r'^(?P<pk>[0-9]+)/workflow_nodes/$', 'workflow_job_workflow_nodes_list'),
url(r'^(?P<pk>[0-9]+)/labels/$', 'workflow_job_label_list'),
url(r'^(?P<pk>[0-9]+)/cancel/$', 'workflow_job_cancel'),
url(r'^(?P<pk>[0-9]+)/relaunch/$', 'workflow_job_relaunch'),
url(r'^(?P<pk>[0-9]+)/notifications/$', 'workflow_job_notifications_list'),
url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'workflow_job_activity_stream_list'),
)


notification_template_urls = patterns('awx.api.views',
url(r'^$', 'notification_template_list'),
url(r'^(?P<pk>[0-9]+)/$', 'notification_template_detail'),
@ -271,6 +303,22 @@ label_urls = patterns('awx.api.views',
url(r'^(?P<pk>[0-9]+)/$', 'label_detail'),
)

workflow_job_template_node_urls = patterns('awx.api.views',
url(r'^$', 'workflow_job_template_node_list'),
url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_template_node_detail'),
url(r'^(?P<pk>[0-9]+)/success_nodes/$', 'workflow_job_template_node_success_nodes_list'),
url(r'^(?P<pk>[0-9]+)/failure_nodes/$', 'workflow_job_template_node_failure_nodes_list'),
url(r'^(?P<pk>[0-9]+)/always_nodes/$', 'workflow_job_template_node_always_nodes_list'),
)

workflow_job_node_urls = patterns('awx.api.views',
url(r'^$', 'workflow_job_node_list'),
url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_node_detail'),
url(r'^(?P<pk>[0-9]+)/success_nodes/$', 'workflow_job_node_success_nodes_list'),
url(r'^(?P<pk>[0-9]+)/failure_nodes/$', 'workflow_job_node_failure_nodes_list'),
url(r'^(?P<pk>[0-9]+)/always_nodes/$', 'workflow_job_node_always_nodes_list'),
)

schedule_urls = patterns('awx.api.views',
url(r'^$', 'schedule_list'),
url(r'^(?P<pk>[0-9]+)/$', 'schedule_detail'),
@ -282,10 +330,6 @@ activity_stream_urls = patterns('awx.api.views',
url(r'^(?P<pk>[0-9]+)/$', 'activity_stream_detail'),
)

settings_urls = patterns('awx.api.views',
url(r'^$', 'settings_list'),
url(r'^reset/$', 'settings_reset'))

v1_urls = patterns('awx.api.views',
url(r'^$', 'api_v1_root_view'),
url(r'^ping/$', 'api_v1_ping_view'),
@ -295,8 +339,7 @@ v1_urls = patterns('awx.api.views',
url(r'^me/$', 'user_me_list'),
url(r'^dashboard/$', 'dashboard_view'),
url(r'^dashboard/graphs/jobs/$','dashboard_jobs_graph_view'),
# TODO: Uncomment after 3.0 when we bring database settings endpoints back
# url(r'^settings/', include(settings_urls)),
url(r'^settings/', include('awx.conf.urls')),
url(r'^schedules/', include(schedule_urls)),
url(r'^organizations/', include(organization_urls)),
url(r'^users/', include(user_urls)),
@ -321,7 +364,11 @@ v1_urls = patterns('awx.api.views',
url(r'^system_jobs/', include(system_job_urls)),
url(r'^notification_templates/', include(notification_template_urls)),
url(r'^notifications/', include(notification_urls)),
url(r'^workflow_job_templates/',include(workflow_job_template_urls)),
url(r'^workflow_jobs/' ,include(workflow_job_urls)),
url(r'^labels/', include(label_urls)),
url(r'^workflow_job_template_nodes/', include(workflow_job_template_node_urls)),
url(r'^workflow_job_nodes/', include(workflow_job_node_urls)),
url(r'^unified_job_templates/$','unified_job_template_list'),
url(r'^unified_jobs/$', 'unified_job_list'),
url(r'^activity_stream/', include(activity_stream_urls)),

@ -1,81 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

from collections import OrderedDict
import copy
import functools

from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework import status

def paginated(method):
"""Given an method with a Django REST Framework API method signature
|
||||
(e.g. `def get(self, request, ...):`), abstract out boilerplate pagination
|
||||
duties.
|
||||

This causes the method to receive two additional keyword arguments:
`limit`, and `offset`. The method expects a two-tuple to be
returned, with a result list as the first item, and the total number
of results (across all pages) as the second item.
"""
@functools.wraps(method)
def func(self, request, *args, **kwargs):
# Manually spin up pagination.
# How many results do we show?
paginator_class = api_settings.DEFAULT_PAGINATION_CLASS
limit = paginator_class.page_size
if request.query_params.get(paginator_class.page_size_query_param, False):
limit = request.query_params[paginator_class.page_size_query_param]
if paginator_class.max_page_size:
limit = min(paginator_class.max_page_size, limit)
limit = int(limit)

# Get the order parameter if it's given
if request.query_params.get("ordering", False):
ordering = request.query_params["ordering"]
else:
ordering = None

# What page are we on?
page = int(request.query_params.get('page', 1))
offset = (page - 1) * limit

# Add the limit, offset, page, and order variables to the keyword arguments
# being sent to the underlying method.
kwargs['limit'] = limit
kwargs['offset'] = offset
kwargs['ordering'] = ordering

# Okay, call the underlying method.
results, count, stat = method(self, request, *args, **kwargs)
if stat is None:
stat = status.HTTP_200_OK

if stat == status.HTTP_200_OK:
# Determine the next and previous pages, if any.
prev, next_ = None, None
if page > 1:
get_copy = copy.copy(request.GET)
get_copy['page'] = page - 1
prev = '%s?%s' % (request.path, get_copy.urlencode())
if count > offset + limit:
get_copy = copy.copy(request.GET)
get_copy['page'] = page + 1
next_ = '%s?%s' % (request.path, get_copy.urlencode())

# Compile the results into a dictionary with pagination
# information.
answer = OrderedDict((
('count', count),
('next', next_),
('previous', prev),
('results', results),
))
else:
answer = results

# Okay, we're done; return response data.
return Response(answer, status=stat)
return func

1901
awx/api/views.py
File diff suppressed because it is too large
37
awx/asgi.py
Normal file
@ -0,0 +1,37 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import os
import logging
from awx import __version__ as tower_version

# Prepare the AWX environment.
from awx import prepare_env, MODE
prepare_env()  # NOQA

from django.core.wsgi import get_wsgi_application  # NOQA
from channels.asgi import get_channel_layer

"""
ASGI config for AWX project.

It exposes the ASGI callable as a module-level variable named ``channel_layer``.

For more information on this file, see
https://channels.readthedocs.io/en/latest/deploying.html
"""

if MODE == 'production':
    logger = logging.getLogger('awx.main.models.jobs')
    try:
        fd = open("/var/lib/awx/.tower_version", "r")
        if fd.read().strip() != tower_version:
            raise Exception()
    except Exception:
        logger.error("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.")
        raise Exception("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.")


os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings")


channel_layer = get_channel_layer()
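Under channels 1.x, an ASGI interface server is pointed at this module-level channel_layer; based on the channels deployment docs linked above, daphne would typically be invoked as `daphne awx.asgi:channel_layer` (the exact invocation is an assumption, not part of this diff).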
18
awx/conf/__init__.py
Normal file
@@ -0,0 +1,18 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Django
from django.utils.module_loading import autodiscover_modules

# Tower
from .registry import settings_registry

default_app_config = 'awx.conf.apps.ConfConfig'


def register(setting, **kwargs):
    settings_registry.register(setting, **kwargs)


def autodiscover():
    autodiscover_modules('conf', register_to=settings_registry)
45
awx/conf/access.py
Normal file
@@ -0,0 +1,45 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Django
from django.db.models import Q

# Tower
from awx.main.access import BaseAccess, register_access
from awx.conf.models import Setting


class SettingAccess(BaseAccess):
    '''
    - I can see settings when I am a super user or system auditor.
    - I can edit settings when I am a super user.
    - I can clear settings when I am a super user.
    - I can always see/edit/clear my own user settings.
    '''

    model = Setting

    # For the checks below, obj will be an instance of a "Settings" class with
    # an attribute for each setting and a "user" attribute (set to None unless
    # it is a user setting).

    def get_queryset(self):
        if self.user.is_superuser or self.user.is_system_auditor:
            return self.model.objects.filter(Q(user__isnull=True) | Q(user=self.user))
        else:
            return self.model.objects.filter(user=self.user)

    def can_read(self, obj):
        return bool(self.user.is_superuser or self.user.is_system_auditor or (obj and obj.user == self.user))

    def can_add(self, data):
        return False  # There is no API endpoint to POST new settings.

    def can_change(self, obj, data):
        return bool(self.user.is_superuser or (obj and obj.user == self.user))

    def can_delete(self, obj):
        return bool(self.user.is_superuser or (obj and obj.user == self.user))


register_access(Setting, SettingAccess)
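A sketch of how a registered access class is typically consulted; this assumes awx.main.access.check_user_access (which dispatches to the class registered above), and the user/setting_obj names are hypothetical:

from awx.main.access import check_user_access

# setting_obj: an existing Setting instance; user: the requesting user.
# Dispatches to SettingAccess.can_change() registered above.
check_user_access(user, Setting, 'change', setting_obj, {'value': 'x'})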
18
awx/conf/apps.py
Normal file
@@ -0,0 +1,18 @@
# Django
from django.apps import AppConfig
# from django.core import checks
from django.utils.translation import ugettext_lazy as _
from awx.main.utils.handlers import configure_external_logger
from django.conf import settings


class ConfConfig(AppConfig):

    name = 'awx.conf'
    verbose_name = _('Configuration')

    def ready(self):
        self.module.autodiscover()
        from .settings import SettingsWrapper
        SettingsWrapper.initialize()
        configure_external_logger(settings)
103
awx/conf/conf.py
Normal file
@@ -0,0 +1,103 @@
# Django
from django.conf import settings
from django.utils.translation import ugettext_lazy as _

# Tower
from awx.conf import fields, register
from awx.conf import settings_registry

# Define a conf.py file within your app and register each setting similarly to
# the example below. Any field class from Django REST Framework or subclass
# thereof can be used for validation/conversion of the setting. All keyword
# arguments to the register function (except field_class, category,
# category_slug, depends_on, placeholder) will be used to initialize
# the field_class.

register(
    'ANSIBLE_COW_SELECTION',
    field_class=fields.ChoiceField,
    choices=[
        ('bud-frogs', _('Bud Frogs')),
        ('bunny', _('Bunny')),
        ('cheese', _('Cheese')),
        ('daemon', _('Daemon')),
        ('default', _('Default Cow')),
        ('dragon', _('Dragon')),
        ('elephant-in-snake', _('Elephant in Snake')),
        ('elephant', _('Elephant')),
        ('eyes', _('Eyes')),
        ('hellokitty', _('Hello Kitty')),
        ('kitty', _('Kitty')),
        ('luke-koala', _('Luke Koala')),
        ('meow', _('Meow')),
        ('milk', _('Milk')),
        ('moofasa', _('Moofasa')),
        ('moose', _('Moose')),
        ('ren', _('Ren')),
        ('sheep', _('Sheep')),
        ('small', _('Small Cow')),
        ('stegosaurus', _('Stegosaurus')),
        ('stimpy', _('Stimpy')),
        ('supermilker', _('Super Milker')),
        ('three-eyes', _('Three Eyes')),
        ('turkey', _('Turkey')),
        ('turtle', _('Turtle')),
        ('tux', _('Tux')),
        ('udder', _('Udder')),
        ('vader-koala', _('Vader Koala')),
        ('vader', _('Vader')),
        ('www', _('WWW')),
    ],
    default='default',
    label=_('Cow Selection'),
    help_text=_('Select which cow to use with cowsay when running jobs.'),
    category=_('Cows'),
    # Optional; category_slug will be slugified version of category if not
    # explicitly provided.
    category_slug='cows',
)


def _get_read_only_ansible_cow_selection_default():
    return getattr(settings, 'ANSIBLE_COW_SELECTION', 'No default cow!')


register(
    'READONLY_ANSIBLE_COW_SELECTION',
    field_class=fields.CharField,
    # read_only must be set via kwargs even if field_class sets it.
    read_only=True,
    # default can be a callable to dynamically compute the value; should be in
    # the plain JSON format stored in the DB and used in the API.
    default=_get_read_only_ansible_cow_selection_default,
    label=_('Example Read-Only Setting'),
    help_text=_('Example setting that cannot be changed.'),
    category=_('Cows'),
    category_slug='cows',
    # Optional; list of other settings this read-only setting depends on. When
    # the other settings change, the cached value for this setting will be
    # cleared to require it to be recomputed.
    depends_on=['ANSIBLE_COW_SELECTION'],
    # Optional; licensed feature required to be able to view or modify this
    # setting.
    feature_required='rebranding',
    # Optional; field is stored encrypted in the database and only $encrypted$
    # is returned via the API.
    encrypted=True,
)

register(
    'EXAMPLE_USER_SETTING',
    field_class=fields.CharField,
    allow_blank=True,
    label=_('Example Setting'),
    help_text=_('Example setting which can be different for each user.'),
    category=_('User'),
    category_slug='user',
    default='',
)

# Unregister the example settings above.
settings_registry.unregister('ANSIBLE_COW_SELECTION')
settings_registry.unregister('READONLY_ANSIBLE_COW_SELECTION')
settings_registry.unregister('EXAMPLE_USER_SETTING')
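Once registered (and not unregistered as above), a setting is read like any other Django setting; SettingsWrapper (awx/conf/settings.py, further down in this diff) resolves it from the cache/database with a fallback to the registered default. A minimal sketch:

from django.conf import settings

# Database value if one has been set via the API, else the registered
# default ('default' in the example above).
settings.ANSIBLE_COW_SELECTION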
76
awx/conf/fields.py
Normal file
@@ -0,0 +1,76 @@
# Python
import logging
import urlparse

# Django
from django.core.validators import URLValidator
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.fields import *  # noqa

logger = logging.getLogger('awx.conf.fields')

# Use DRF fields to convert/validate settings:
# - to_representation(obj) should convert a native Python object to a primitive
#   serializable type. This primitive type will be what is presented in the API
#   and stored in the JSON field in the database.
# - to_internal_value(data) should convert the primitive type back into the
#   appropriate Python type to be used in settings.


class CharField(CharField):

    def to_representation(self, value):
        # django_rest_framework's default CharField implementation casts `None`
        # to a string `"None"`:
        #
        # https://github.com/tomchristie/django-rest-framework/blob/cbad236f6d817d992873cd4df6527d46ab243ed1/rest_framework/fields.py#L761
        if value is None:
            return None
        return super(CharField, self).to_representation(value)


class StringListField(ListField):

    child = CharField()

    def to_representation(self, value):
        if value is None and self.allow_null:
            return None
        return super(StringListField, self).to_representation(value)


class URLField(CharField):

    def __init__(self, **kwargs):
        schemes = kwargs.pop('schemes', None)
        self.allow_plain_hostname = kwargs.pop('allow_plain_hostname', False)
        super(URLField, self).__init__(**kwargs)
        validator_kwargs = dict(message=_('Enter a valid URL'))
        if schemes is not None:
            validator_kwargs['schemes'] = schemes
        self.validators.append(URLValidator(**validator_kwargs))

    def to_representation(self, value):
        if value is None:
            return ''
        return super(URLField, self).to_representation(value)

    def run_validators(self, value):
        if self.allow_plain_hostname:
            try:
                url_parts = urlparse.urlsplit(value)
                if url_parts.hostname and '.' not in url_parts.hostname:
                    netloc = '{}.local'.format(url_parts.hostname)
                    if url_parts.port:
                        netloc = '{}:{}'.format(netloc, url_parts.port)
                    if url_parts.username:
                        if url_parts.password:
                            netloc = '{}:{}@{}'.format(url_parts.username, url_parts.password, netloc)
                        else:
                            netloc = '{}@{}'.format(url_parts.username, netloc)
                    value = urlparse.urlunsplit([url_parts.scheme, netloc, url_parts.path, url_parts.query, url_parts.fragment])
            except Exception:
                pass  # If something fails here, just fall through and let the validators check it.
        super(URLField, self).run_validators(value)
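A sketch of the allow_plain_hostname behavior above (Python 2; the values are hypothetical):

field = URLField(schemes=['http', 'https'], allow_plain_hostname=True)
# 'myhost' has no dot, so validation actually runs against
# 'http://myhost.local:8080' and passes.
field.run_validation('http://myhost:8080')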
50
awx/conf/license.py
Normal file
@@ -0,0 +1,50 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Django
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.exceptions import APIException

# Tower
from awx.main.task_engine import TaskEnhancer

__all__ = ['LicenseForbids', 'get_license', 'get_licensed_features',
           'feature_enabled', 'feature_exists']


class LicenseForbids(APIException):
    status_code = 402
    default_detail = _('Your Tower license does not allow that.')


def _get_validated_license_data():
    return TaskEnhancer().validate_enhancements()


def get_license(show_key=False):
    """Return a dictionary representing the active license on this Tower instance."""
    license_data = _get_validated_license_data()
    if not show_key:
        license_data.pop('license_key', None)
    return license_data


def get_licensed_features():
    """Return a set of all features enabled by the active license."""
    features = set()
    for feature, enabled in _get_validated_license_data().get('features', {}).items():
        if enabled:
            features.add(feature)
    return features


def feature_enabled(name):
    """Return True if the requested feature is enabled, False otherwise."""
    return _get_validated_license_data().get('features', {}).get(name, False)


def feature_exists(name):
    """Return True if the requested feature name exists, False otherwise."""
    return bool(name in _get_validated_license_data().get('features', {}))
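A sketch of the typical guard-clause use of these helpers (the feature name is hypothetical):

if not feature_enabled('rebranding'):
    # Raised as an HTTP 402 by Django REST Framework's exception handler.
    raise LicenseForbids('Your license does not allow rebranding.')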
459
awx/conf/management/commands/migrate_to_database_settings.py
Normal file
@@ -0,0 +1,459 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Python
import base64
import collections
import difflib
import json
import os
import shutil

# Django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.utils.text import slugify
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _

# Tower
from awx import MODE
from awx.conf import settings_registry
from awx.conf.fields import empty, SkipField
from awx.conf.models import Setting
from awx.conf.utils import comment_assignments


class Command(BaseCommand):

    def add_arguments(self, parser):
        parser.add_argument(
            'category',
            nargs='*',
            type=str,
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            dest='dry_run',
            default=False,
            help=_('Only show which settings would be commented/migrated.'),
        )
        parser.add_argument(
            '--skip-errors',
            action='store_true',
            dest='skip_errors',
            default=False,
            help=_('Skip over settings that would raise an error when commenting/migrating.'),
        )
        parser.add_argument(
            '--no-comment',
            action='store_true',
            dest='no_comment',
            default=False,
            help=_('Skip commenting out settings in files.'),
        )
        parser.add_argument(
            '--backup-suffix',
            dest='backup_suffix',
            default=now().strftime('.%Y%m%d%H%M%S'),
            help=_('Backup existing settings files with this suffix.'),
        )

    @transaction.atomic
    def handle(self, *args, **options):
        self.verbosity = int(options.get('verbosity', 1))
        self.dry_run = bool(options.get('dry_run', False))
        self.skip_errors = bool(options.get('skip_errors', False))
        self.no_comment = bool(options.get('no_comment', False))
        self.backup_suffix = options.get('backup_suffix', '')
        self.categories = options.get('category', None) or ['all']
        self.style.HEADING = self.style.MIGRATE_HEADING
        self.style.LABEL = self.style.MIGRATE_LABEL
        self.style.OK = self.style.SQL_FIELD
        self.style.SKIP = self.style.WARNING
        self.style.VALUE = self.style.SQL_KEYWORD

        # Determine if any categories provided are invalid.
        category_slugs = []
        invalid_categories = []
        for category in self.categories:
            category_slug = slugify(category)
            if category_slug in settings_registry.get_registered_categories():
                if category_slug not in category_slugs:
                    category_slugs.append(category_slug)
            else:
                if category not in invalid_categories:
                    invalid_categories.append(category)
        if len(invalid_categories) == 1:
            raise CommandError('Invalid setting category: {}'.format(invalid_categories[0]))
        elif len(invalid_categories) > 1:
            raise CommandError('Invalid setting categories: {}'.format(', '.join(invalid_categories)))

        # Build a list of all settings to be migrated.
        registered_settings = []
        for category_slug in category_slugs:
            for registered_setting in settings_registry.get_registered_settings(category_slug=category_slug, read_only=False):
                if registered_setting not in registered_settings:
                    registered_settings.append(registered_setting)

        self._migrate_settings(registered_settings)

    def _get_settings_file_patterns(self):
        if MODE == 'development':
            return [
                '/etc/tower/settings.py',
                '/etc/tower/conf.d/*.py',
                os.path.join(os.path.dirname(__file__), '..', '..', '..', 'settings', 'local_*.py')
            ]
        else:
            return [
                os.environ.get('AWX_SETTINGS_FILE', '/etc/tower/settings.py'),
                os.path.join(os.environ.get('AWX_SETTINGS_DIR', '/etc/tower/conf.d/'), '*.py'),
            ]

    def _get_license_file(self):
        return os.environ.get('AWX_LICENSE_FILE', '/etc/tower/license')

    def _comment_license_file(self, dry_run=True):
        license_file = self._get_license_file()
        diff_lines = []
        if os.path.exists(license_file):
            try:
                raw_license_data = open(license_file).read()
                json.loads(raw_license_data)
            except Exception as e:
                raise CommandError('Error reading license from {0}: {1!r}'.format(license_file, e))
            if self.backup_suffix:
                backup_license_file = '{}{}'.format(license_file, self.backup_suffix)
            else:
                backup_license_file = '{}.old'.format(license_file)
            diff_lines = list(difflib.unified_diff(
                raw_license_data.splitlines(),
                [],
                fromfile=backup_license_file,
                tofile=license_file,
                lineterm='',
            ))
            if not dry_run:
                if self.backup_suffix:
                    shutil.copy2(license_file, backup_license_file)
                os.remove(license_file)
        return diff_lines

    def _get_local_settings_file(self):
        if MODE == 'development':
            static_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'ui', 'static')
        else:
            static_root = settings.STATIC_ROOT
        return os.path.join(static_root, 'local_settings.json')

    def _comment_local_settings_file(self, dry_run=True):
        local_settings_file = self._get_local_settings_file()
        diff_lines = []
        if os.path.exists(local_settings_file):
            try:
                raw_local_settings_data = open(local_settings_file).read()
                json.loads(raw_local_settings_data)
            except Exception as e:
                if not self.skip_errors:
                    raise CommandError('Error reading local settings from {0}: {1!r}'.format(local_settings_file, e))
                return diff_lines
            if self.backup_suffix:
                backup_local_settings_file = '{}{}'.format(local_settings_file, self.backup_suffix)
            else:
                backup_local_settings_file = '{}.old'.format(local_settings_file)
            diff_lines = list(difflib.unified_diff(
                raw_local_settings_data.splitlines(),
                [],
                fromfile=backup_local_settings_file,
                tofile=local_settings_file,
                lineterm='',
            ))
            if not dry_run:
                if self.backup_suffix:
                    shutil.copy2(local_settings_file, backup_local_settings_file)
                os.remove(local_settings_file)
        return diff_lines

    def _get_custom_logo_file(self):
        if MODE == 'development':
            static_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'ui', 'static')
        else:
            static_root = settings.STATIC_ROOT
        return os.path.join(static_root, 'assets', 'custom_console_logo.png')

    def _comment_custom_logo_file(self, dry_run=True):
        custom_logo_file = self._get_custom_logo_file()
        diff_lines = []
        if os.path.exists(custom_logo_file):
            try:
                raw_custom_logo_data = open(custom_logo_file).read()
            except Exception as e:
                if not self.skip_errors:
                    raise CommandError('Error reading custom logo from {0}: {1!r}'.format(custom_logo_file, e))
                return diff_lines
            if self.backup_suffix:
                backup_custom_logo_file = '{}{}'.format(custom_logo_file, self.backup_suffix)
            else:
                backup_custom_logo_file = '{}.old'.format(custom_logo_file)
            diff_lines = list(difflib.unified_diff(
                ['<PNG Image ({} bytes)>'.format(len(raw_custom_logo_data))],
                [],
                fromfile=backup_custom_logo_file,
                tofile=custom_logo_file,
                lineterm='',
            ))
            if not dry_run:
                if self.backup_suffix:
                    shutil.copy2(custom_logo_file, backup_custom_logo_file)
                os.remove(custom_logo_file)
        return diff_lines

    def _check_if_needs_comment(self, patterns, setting):
        files_to_comment = []
        # If any diffs are returned, this setting needs to be commented.
        diffs = comment_assignments(patterns, setting, dry_run=True)
        if setting == 'LICENSE':
            diffs.extend(self._comment_license_file(dry_run=True))
        elif setting == 'CUSTOM_LOGIN_INFO':
            diffs.extend(self._comment_local_settings_file(dry_run=True))
        elif setting == 'CUSTOM_LOGO':
            diffs.extend(self._comment_custom_logo_file(dry_run=True))
        for diff in diffs:
            for line in diff.splitlines():
                if line.startswith('+++ '):
                    files_to_comment.append(line[4:])
        return files_to_comment

    def _check_if_needs_migration(self, setting):
        # Check whether the current value differs from the default.
        default_value = settings.DEFAULTS_SNAPSHOT.get(setting, empty)
        if default_value is empty and setting != 'LICENSE':
            field = settings_registry.get_setting_field(setting, read_only=True)
            try:
                default_value = field.get_default()
            except SkipField:
                pass
        current_value = getattr(settings, setting, empty)
        if setting == 'CUSTOM_LOGIN_INFO' and current_value in {empty, ''}:
            local_settings_file = self._get_local_settings_file()
            try:
                if os.path.exists(local_settings_file):
                    local_settings = json.load(open(local_settings_file))
                    current_value = local_settings.get('custom_login_info', '')
            except Exception as e:
                if not self.skip_errors:
                    raise CommandError('Error reading custom login info from {0}: {1!r}'.format(local_settings_file, e))
        if setting == 'CUSTOM_LOGO' and current_value in {empty, ''}:
            custom_logo_file = self._get_custom_logo_file()
            try:
                if os.path.exists(custom_logo_file):
                    custom_logo_data = open(custom_logo_file).read()
                    if custom_logo_data:
                        current_value = 'data:image/png;base64,{}'.format(base64.b64encode(custom_logo_data))
                    else:
                        current_value = ''
            except Exception as e:
                if not self.skip_errors:
                    raise CommandError('Error reading custom logo from {0}: {1!r}'.format(custom_logo_file, e))
        if current_value != default_value:
            if current_value is empty:
                current_value = None
            return current_value
        return empty

    def _display_tbd(self, setting, files_to_comment, migrate_value, comment_error=None, migrate_error=None):
        if self.verbosity >= 1:
            if files_to_comment:
                if migrate_value is not empty:
                    action = 'Migrate + Comment'
                else:
                    action = 'Comment'
                if comment_error or migrate_error:
                    action = self.style.ERROR('{} (skipped)'.format(action))
                else:
                    action = self.style.OK(action)
                self.stdout.write('  {}: {}'.format(
                    self.style.LABEL(setting),
                    action,
                ))
                if self.verbosity >= 2:
                    if migrate_error:
                        self.stdout.write('    - Migrate value: {}'.format(
                            self.style.ERROR(migrate_error),
                        ))
                    elif migrate_value is not empty:
                        self.stdout.write('    - Migrate value: {}'.format(
                            self.style.VALUE(repr(migrate_value)),
                        ))
                    if comment_error:
                        self.stdout.write('    - Comment: {}'.format(
                            self.style.ERROR(comment_error),
                        ))
                    elif files_to_comment:
                        for file_to_comment in files_to_comment:
                            self.stdout.write('    - Comment in: {}'.format(
                                self.style.VALUE(file_to_comment),
                            ))
            else:
                if self.verbosity >= 2:
                    self.stdout.write('  {}: {}'.format(
                        self.style.LABEL(setting),
                        self.style.SKIP('No Migration'),
                    ))

    def _display_migrate(self, setting, action, display_value):
        if self.verbosity >= 1:
            if action == 'No Change':
                action = self.style.SKIP(action)
            else:
                action = self.style.OK(action)
            self.stdout.write('  {}: {}'.format(
                self.style.LABEL(setting),
                action,
            ))
            if self.verbosity >= 2:
                for line in display_value.splitlines():
                    self.stdout.write('    {}'.format(
                        self.style.VALUE(line),
                    ))

    def _display_diff_summary(self, filename, added, removed):
        self.stdout.write('  {} {}{} {}{}'.format(
            self.style.LABEL(filename),
            self.style.ERROR('-'),
            self.style.ERROR(str(removed)),
            self.style.OK('+'),
            self.style.OK(str(added)),
        ))

    def _display_comment(self, diffs):
        for diff in diffs:
            if self.verbosity >= 2:
                for line in diff.splitlines():
                    display_line = line
                    if line.startswith('--- ') or line.startswith('+++ '):
                        display_line = self.style.LABEL(line)
                    elif line.startswith('-'):
                        display_line = self.style.ERROR(line)
                    elif line.startswith('+'):
                        display_line = self.style.OK(line)
                    elif line.startswith('@@'):
                        display_line = self.style.VALUE(line)
                    if line.startswith('--- ') or line.startswith('+++ '):
                        self.stdout.write('  ' + display_line)
                    else:
                        self.stdout.write('    ' + display_line)
            elif self.verbosity >= 1:
                filename, lines_added, lines_removed = None, 0, 0
                for line in diff.splitlines():
                    if line.startswith('+++ '):
                        if filename:
                            self._display_diff_summary(filename, lines_added, lines_removed)
                        filename, lines_added, lines_removed = line[4:], 0, 0
                    elif line.startswith('+'):
                        lines_added += 1
                    elif line.startswith('-'):
                        lines_removed += 1
                if filename:
                    self._display_diff_summary(filename, lines_added, lines_removed)

    def _migrate_settings(self, registered_settings):
        patterns = self._get_settings_file_patterns()

        # Determine which settings need to be commented/migrated.
        if self.verbosity >= 1:
            self.stdout.write(self.style.HEADING('Discovering settings to be migrated and commented:'))
        to_migrate = collections.OrderedDict()
        to_comment = collections.OrderedDict()
        for name in registered_settings:
            comment_error, migrate_error = None, None
            files_to_comment = []
            try:
                files_to_comment = self._check_if_needs_comment(patterns, name)
            except Exception as e:
                comment_error = 'Error commenting {0}: {1!r}'.format(name, e)
                if not self.skip_errors:
                    raise CommandError(comment_error)
            if files_to_comment:
                to_comment[name] = files_to_comment
            migrate_value = empty
            if files_to_comment:
                migrate_value = self._check_if_needs_migration(name)
            if migrate_value is not empty:
                field = settings_registry.get_setting_field(name)
                assert not field.read_only
                try:
                    data = field.to_representation(migrate_value)
                    setting_value = field.run_validation(data)
                    db_value = field.to_representation(setting_value)
                    to_migrate[name] = db_value
                except Exception as e:
                    to_comment.pop(name)
                    migrate_error = 'Unable to assign value {0!r} to setting "{1}": {2!s}.'.format(migrate_value, name, e)
                    if not self.skip_errors:
                        raise CommandError(migrate_error)
            self._display_tbd(name, files_to_comment, migrate_value, comment_error, migrate_error)
        if self.verbosity == 1 and not to_migrate and not to_comment:
            self.stdout.write('  No settings found to migrate or comment!')

        # Now migrate those settings to the database.
        if self.verbosity >= 1:
            if self.dry_run:
                self.stdout.write(self.style.HEADING('Migrating settings to database (dry-run):'))
            else:
                self.stdout.write(self.style.HEADING('Migrating settings to database:'))
            if not to_migrate:
                self.stdout.write('  No settings to migrate!')
        for name, db_value in to_migrate.items():
            display_value = json.dumps(db_value, indent=4)
            setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
            action = 'No Change'
            if not setting:
                action = 'Migrated'
                if not self.dry_run:
                    Setting.objects.create(key=name, user=None, value=db_value)
            elif setting.value != db_value or type(setting.value) != type(db_value):
                action = 'Updated'
                if not self.dry_run:
                    setting.value = db_value
                    setting.save(update_fields=['value'])
            self._display_migrate(name, action, display_value)

        # Now comment settings in settings files.
        if self.verbosity >= 1:
            if bool(self.dry_run or self.no_comment):
                self.stdout.write(self.style.HEADING('Commenting settings in files (dry-run):'))
            else:
                self.stdout.write(self.style.HEADING('Commenting settings in files:'))
            if not to_comment:
                self.stdout.write('  No settings to comment!')
        if to_comment:
            to_comment_patterns = []
            license_file_to_comment = None
            local_settings_file_to_comment = None
            custom_logo_file_to_comment = None
            for files_to_comment in to_comment.values():
                for file_to_comment in files_to_comment:
                    if file_to_comment == self._get_license_file():
                        license_file_to_comment = file_to_comment
                    elif file_to_comment == self._get_local_settings_file():
                        local_settings_file_to_comment = file_to_comment
                    elif file_to_comment == self._get_custom_logo_file():
                        custom_logo_file_to_comment = file_to_comment
                    elif file_to_comment not in to_comment_patterns:
                        to_comment_patterns.append(file_to_comment)
            # Run once in dry-run mode to catch any errors from updating the files.
            diffs = comment_assignments(to_comment_patterns, to_comment.keys(), dry_run=True, backup_suffix=self.backup_suffix)
            # Then, if really updating, run again.
            if not self.dry_run and not self.no_comment:
                diffs = comment_assignments(to_comment_patterns, to_comment.keys(), dry_run=False, backup_suffix=self.backup_suffix)
                if license_file_to_comment:
                    diffs.extend(self._comment_license_file(dry_run=False))
                if local_settings_file_to_comment:
                    diffs.extend(self._comment_local_settings_file(dry_run=False))
                if custom_logo_file_to_comment:
                    diffs.extend(self._comment_custom_logo_file(dry_run=False))
            self._display_comment(diffs)
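This command is normally run via the management CLI; a sketch of driving it programmatically instead (options map to add_arguments() above):

from django.core.management import call_command

# Show what would be migrated/commented without changing anything.
call_command('migrate_to_database_settings', 'all', dry_run=True, verbosity=2)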
30
awx/conf/migrations/0001_initial.py
Normal file
@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import jsonfield.fields
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Setting',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(default=None, editable=False)),
                ('modified', models.DateTimeField(default=None, editable=False)),
                ('key', models.CharField(max_length=255)),
                ('value', jsonfield.fields.JSONField(null=True)),
                ('user', models.ForeignKey(related_name='settings', default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
76
awx/conf/migrations/0002_v310_copy_tower_settings.py
Normal file
@@ -0,0 +1,76 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json

from django.db import migrations


def copy_tower_settings(apps, schema_editor):
    TowerSettings = apps.get_model('main', 'TowerSettings')
    Setting = apps.get_model('conf', 'Setting')
    for tower_setting in TowerSettings.objects.all().iterator():
        try:
            value = tower_setting.value
            # LICENSE is stored as a string; convert it to a dict.
            if tower_setting.key == 'LICENSE':
                value = json.loads(value)
            setting, created = Setting.objects.get_or_create(
                key=tower_setting.key,
                user=tower_setting.user,
                created=tower_setting.created,
                modified=tower_setting.modified,
                defaults=dict(value=value),
            )
            if not created and setting.value != value:
                setting.value = value
                setting.save(update_fields=['value'])
        except Setting.MultipleObjectsReturned:
            pass


def revert_tower_settings(apps, schema_editor):
    TowerSettings = apps.get_model('main', 'TowerSettings')
    Setting = apps.get_model('conf', 'Setting')
    for setting in Setting.objects.all().iterator():
        value = setting.value
        # LICENSE is stored as a JSON object; convert it back to a string.
        if setting.key == 'LICENSE':
            value = json.dumps(value)
        defaults = dict(
            value=value,
            value_type='string',
            description='',
            category='',
        )
        try:
            tower_setting, created = TowerSettings.objects.get_or_create(
                key=setting.key,
                user=setting.user,
                defaults=defaults,
            )
            if not created:
                update_fields = []
                for k, v in defaults.items():
                    if getattr(tower_setting, k) != v:
                        setattr(tower_setting, k, v)
                        update_fields.append(k)
                if update_fields:
                    tower_setting.save(update_fields=update_fields)
        except TowerSettings.MultipleObjectsReturned:
            pass


class Migration(migrations.Migration):

    dependencies = [
        ('conf', '0001_initial'),
        ('main', '0034_v310_release'),
    ]

    run_before = [
        ('main', '0035_v310_remove_tower_settings'),
    ]

    operations = [
        migrations.RunPython(copy_tower_settings, revert_tower_settings),
    ]
20
awx/conf/migrations/0003_v310_JSONField_changes.py
Normal file
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations
import awx.main.fields


class Migration(migrations.Migration):

    dependencies = [
        ('conf', '0002_v310_copy_tower_settings'),
    ]

    operations = [
        migrations.AlterField(
            model_name='setting',
            name='value',
            field=awx.main.fields.JSONField(null=True),
        ),
    ]
81
awx/conf/models.py
Normal file
@@ -0,0 +1,81 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Python
import json

# Django
from django.db import models

# Tower
from awx.main.models.base import CreatedModifiedModel, prevent_search
from awx.main.fields import JSONField
from awx.main.utils import encrypt_field
from awx.conf import settings_registry

__all__ = ['Setting']


class Setting(CreatedModifiedModel):

    key = models.CharField(
        max_length=255,
    )
    value = JSONField(
        null=True,
    )
    user = prevent_search(models.ForeignKey(
        'auth.User',
        related_name='settings',
        default=None,
        null=True,
        editable=False,
        on_delete=models.CASCADE,
    ))

    def __unicode__(self):
        try:
            json_value = json.dumps(self.value)
        except ValueError:
            # In the rare case the DB value is invalid JSON.
            json_value = u'<Invalid JSON>'
        if self.user:
            return u'{} ({}) = {}'.format(self.key, self.user, json_value)
        else:
            return u'{} = {}'.format(self.key, json_value)

    def save(self, *args, **kwargs):
        encrypted = settings_registry.is_setting_encrypted(self.key)
        new_instance = not bool(self.pk)
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
        # When first saving to the database, don't store any encrypted field
        # value, but instead save it until after the instance is created.
        # Otherwise, store encrypted value to the database.
        if encrypted:
            if new_instance:
                self._saved_value = self.value
                self.value = ''
            else:
                self.value = encrypt_field(self, 'value')
                if 'value' not in update_fields:
                    update_fields.append('value')
        super(Setting, self).save(*args, **kwargs)
        # After saving a new instance for the first time, set the encrypted
        # field and save again.
        if encrypted and new_instance:
            self.value = self._saved_value
            self.save(update_fields=['value'])

    @classmethod
    def get_cache_key(cls, key):
        return key


import awx.conf.signals  # noqa

from awx.main.registrar import activity_stream_registrar  # noqa
activity_stream_registrar.connect(Setting)

import awx.conf.access  # noqa
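A sketch of the two-step save for encrypted settings described in the comments above; it assumes a setting named 'EXAMPLE_TOKEN' was registered with encrypted=True (both names/values are hypothetical):

s = Setting(key='EXAMPLE_TOKEN', value='sekrit')
# The first save stores value='' and allocates a pk; the follow-up
# save(update_fields=['value']) inside save() then stores the encrypted
# value, whose AES key incorporates the new pk (see EncryptedCacheProxy
# in awx/conf/settings.py below).
s.save()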
156
awx/conf/registry.py
Normal file
@@ -0,0 +1,156 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Python
from collections import OrderedDict
import logging

# Django
from django.core.exceptions import ImproperlyConfigured
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _

logger = logging.getLogger('awx.conf.registry')

__all__ = ['settings_registry']


class SettingsRegistry(object):
    """Registry of all API-configurable settings and categories."""

    def __init__(self, settings=None):
        """
        :param settings: a ``django.conf.LazySettings`` object used to look up
                         file-based field values (e.g., ``local_settings.py``
                         and ``/etc/tower/conf.d/example.py``). If unspecified,
                         defaults to ``django.conf.settings``.
        """
        if settings is None:
            from django.conf import settings
        self._registry = OrderedDict()
        self._dependent_settings = {}
        self.settings = settings

    def register(self, setting, **kwargs):
        if setting in self._registry:
            raise ImproperlyConfigured('Setting "{}" is already registered.'.format(setting))
        category = kwargs.setdefault('category', None)
        category_slug = kwargs.setdefault('category_slug', slugify(category or '') or None)
        if category_slug in {'all', 'changed', 'user-defaults'}:
            raise ImproperlyConfigured('"{}" is a reserved category slug.'.format(category_slug))
        if 'field_class' not in kwargs:
            raise ImproperlyConfigured('Setting must provide a field_class keyword argument.')
        self._registry[setting] = kwargs

        # Normally for read-only/dynamic settings, depends_on will specify other
        # settings whose changes may affect the value of this setting. Store
        # this setting as a dependent for the other settings, so we can know
        # which extra cache keys to clear when a setting changes.
        depends_on = kwargs.setdefault('depends_on', None) or set()
        for depends_on_setting in depends_on:
            dependent_settings = self._dependent_settings.setdefault(depends_on_setting, set())
            dependent_settings.add(setting)

    def unregister(self, setting):
        self._registry.pop(setting, None)
        for dependent_settings in self._dependent_settings.values():
            dependent_settings.discard(setting)

    def get_dependent_settings(self, setting):
        return self._dependent_settings.get(setting, set())

    def get_registered_categories(self, features_enabled=None):
        categories = {
            'all': _('All'),
            'changed': _('Changed'),
        }
        for setting, kwargs in self._registry.items():
            category_slug = kwargs.get('category_slug', None)
            if category_slug is None or category_slug in categories:
                continue
            if features_enabled is not None:
                feature_required = kwargs.get('feature_required', None)
                if feature_required and feature_required not in features_enabled:
                    continue
            if category_slug == 'user':
                categories['user'] = _('User')
                categories['user-defaults'] = _('User-Defaults')
            else:
                categories[category_slug] = kwargs.get('category', None) or category_slug
        return categories

    def get_registered_settings(self, category_slug=None, read_only=None, features_enabled=None):
        setting_names = []
        if category_slug == 'user-defaults':
            category_slug = 'user'
        if category_slug == 'changed':
            category_slug = 'all'
        for setting, kwargs in self._registry.items():
            if category_slug not in {None, 'all', kwargs.get('category_slug', None)}:
                continue
            if read_only in {True, False} and kwargs.get('read_only', False) != read_only:
                # Note: Doesn't catch fields that set read_only via __init__;
                # read-only field kwargs should always include read_only=True.
                continue
            if features_enabled is not None:
                feature_required = kwargs.get('feature_required', None)
                if feature_required and feature_required not in features_enabled:
                    continue
            setting_names.append(setting)
        return setting_names

    def is_setting_encrypted(self, setting):
        return bool(self._registry.get(setting, {}).get('encrypted', False))

    def get_setting_field(self, setting, mixin_class=None, for_user=False, **kwargs):
        from rest_framework.fields import empty
        field_kwargs = {}
        field_kwargs.update(self._registry[setting])
        field_kwargs.update(kwargs)
        field_class = original_field_class = field_kwargs.pop('field_class')
        if mixin_class:
            field_class = type(field_class.__name__, (mixin_class, field_class), {})
        category_slug = field_kwargs.pop('category_slug', None)
        category = field_kwargs.pop('category', None)
        depends_on = frozenset(field_kwargs.pop('depends_on', None) or [])
        placeholder = field_kwargs.pop('placeholder', empty)
        feature_required = field_kwargs.pop('feature_required', empty)
        encrypted = bool(field_kwargs.pop('encrypted', False))
        defined_in_file = bool(field_kwargs.pop('defined_in_file', False))
        if getattr(field_kwargs.get('child', None), 'source', None) is not None:
            field_kwargs['child'].source = None
        field_instance = field_class(**field_kwargs)
        field_instance.category_slug = category_slug
        field_instance.category = category
        field_instance.depends_on = depends_on
        if placeholder is not empty:
            field_instance.placeholder = placeholder
        if feature_required is not empty:
            field_instance.feature_required = feature_required
        field_instance.defined_in_file = defined_in_file
        if field_instance.defined_in_file:
            field_instance.help_text = (
                str(_('This value has been set manually in a settings file.')) +
                '\n\n' +
                str(field_instance.help_text)
            )
        field_instance.encrypted = encrypted
        original_field_instance = field_instance
        if field_class != original_field_class:
            original_field_instance = original_field_class(**field_kwargs)
        if category_slug == 'user' and for_user:
            try:
                field_instance.default = original_field_instance.to_representation(getattr(self.settings, setting))
            except Exception:
                logger.warning('Unable to retrieve default value for user setting "%s".', setting, exc_info=True)
        elif not field_instance.read_only or field_instance.default is empty or field_instance.defined_in_file:
            try:
                field_instance.default = original_field_instance.to_representation(self.settings._awx_conf_settings._get_default(setting))
            except AttributeError:
                pass
            except Exception:
                logger.warning('Unable to retrieve default value for setting "%s".', setting, exc_info=True)
        return field_instance


settings_registry = SettingsRegistry()
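A sketch of querying the registry, using the example 'cows' settings registered in conf.py above (before they are unregistered there):

settings_registry.get_registered_settings(category_slug='cows')
# -> ['ANSIBLE_COW_SELECTION', 'READONLY_ANSIBLE_COW_SELECTION']
field = settings_registry.get_setting_field('ANSIBLE_COW_SELECTION')
field.run_validation('moose')  # valid choice; an invalid choice raises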
83
awx/conf/serializers.py
Normal file
@@ -0,0 +1,83 @@
# Django REST Framework
from rest_framework import serializers

# Tower
from awx.api.fields import VerbatimField
from awx.api.serializers import BaseSerializer
from awx.conf.models import Setting
from awx.conf import settings_registry


class SettingSerializer(BaseSerializer):
    """Read-only serializer for activity stream."""

    value = VerbatimField(allow_null=True)

    class Meta:
        model = Setting
        fields = ('id', 'key', 'value')
        readonly_fields = ('id', 'key', 'value')

    def __init__(self, instance=None, data=serializers.empty, **kwargs):
        if instance is None and data is not serializers.empty and 'key' in data:
            try:
                instance = Setting.objects.get(key=data['key'])
            except Setting.DoesNotExist:
                pass
        super(SettingSerializer, self).__init__(instance, data, **kwargs)


class SettingCategorySerializer(serializers.Serializer):
    """Serialize a setting category."""

    url = serializers.CharField(
        read_only=True,
    )
    slug = serializers.CharField(
        read_only=True,
    )
    name = serializers.CharField(
        read_only=True,
    )


class SettingFieldMixin(object):
    """Mixin to use a registered setting field class for API display/validation."""

    def to_representation(self, obj):
        if getattr(self, 'encrypted', False) and isinstance(obj, basestring) and obj:
            return '$encrypted$'
        return obj

    def to_internal_value(self, value):
        if getattr(self, 'encrypted', False) and isinstance(value, basestring) and value.startswith('$encrypted$'):
            raise serializers.SkipField()
        obj = super(SettingFieldMixin, self).to_internal_value(value)
        return super(SettingFieldMixin, self).to_representation(obj)


class SettingSingletonSerializer(serializers.Serializer):
    """Present a group of settings (by category) as a single object."""

    def __init__(self, instance=None, data=serializers.empty, **kwargs):
        # Instance (if given) should be an object with attributes for all of the
        # settings in the category; never an actual Setting model instance.
        assert instance is None or not hasattr(instance, 'pk')
        super(SettingSingletonSerializer, self).__init__(instance, data, **kwargs)

    def get_fields(self):
        fields = super(SettingSingletonSerializer, self).get_fields()
        try:
            category_slug = self.context['view'].kwargs.get('category_slug', 'all')
        except (KeyError, AttributeError):
            category_slug = ''
        for key in settings_registry.get_registered_settings(category_slug=category_slug):
            if self.instance and not hasattr(self.instance, key):
                continue
            extra_kwargs = {}
            # Make LICENSE read-only here; update via /api/v1/config/ only.
            if key == 'LICENSE':
                extra_kwargs['read_only'] = True
            field = settings_registry.get_setting_field(key, mixin_class=SettingFieldMixin, for_user=bool(category_slug == 'user'), **extra_kwargs)
            fields[key] = field
        return fields
406
awx/conf/settings.py
Normal file
@@ -0,0 +1,406 @@
# Python
|
||||
from collections import namedtuple
|
||||
import contextlib
|
||||
import logging
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
import six
|
||||
|
||||
# Django
|
||||
from django.conf import settings, UserSettingsHolder
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db import ProgrammingError, OperationalError
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.fields import empty, SkipField
|
||||
|
||||
# Tower
|
||||
from awx.main.utils import encrypt_field, decrypt_field
|
||||
from awx.main.utils.db import get_tower_migration_version
|
||||
from awx.conf import settings_registry
|
||||
from awx.conf.models import Setting
|
||||
|
||||
# FIXME: Gracefully handle when settings are accessed before the database is
|
||||
# ready (or during migrations).
|
||||
|
||||
logger = logging.getLogger('awx.conf.settings')
|
||||
|
||||
# Store a special value to indicate when a setting is not set in the database.
|
||||
SETTING_CACHE_NOTSET = '___notset___'
|
||||
|
||||
# Cannot store None in memcached; use a special value instead to indicate None.
|
||||
# If the special value for None is the same as the "not set" value, then a value
|
||||
# of None will be equivalent to the setting not being set (and will raise an
|
||||
# AttributeError if there is no other default defined).
|
||||
# SETTING_CACHE_NONE = '___none___'
|
||||
SETTING_CACHE_NONE = SETTING_CACHE_NOTSET
|
||||
|
||||
# Cannot store empty list/tuple in memcached; use a special value instead to
|
||||
# indicate an empty list.
|
||||
SETTING_CACHE_EMPTY_LIST = '___[]___'
|
||||
|
||||
# Cannot store empty dict in memcached; use a special value instead to indicate
|
||||
# an empty dict.
|
||||
SETTING_CACHE_EMPTY_DICT = '___{}___'
|
||||
|
||||
# Expire settings from cache after this many seconds.
|
||||
SETTING_CACHE_TIMEOUT = 60
|
||||
|
||||
# Flag indicating whether to store field default values in the cache.
|
||||
SETTING_CACHE_DEFAULTS = True
|
||||
|
||||
__all__ = ['SettingsWrapper']
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _log_database_error():
|
||||
try:
|
||||
yield
|
||||
except (ProgrammingError, OperationalError) as e:
|
||||
if get_tower_migration_version() < '310':
|
||||
logger.info('Using default settings until version 3.1 migration.')
|
||||
else:
|
||||
logger.warning('Database settings are not available, using defaults (%s)', e, exc_info=True)
|
||||
finally:
|
||||
pass
|
||||
|
||||
|
||||
class EncryptedCacheProxy(object):
|
||||
|
||||
def __init__(self, cache, registry, encrypter=None, decrypter=None):
|
||||
"""
|
||||
This proxy wraps a Django cache backend and overwrites the
|
||||
`get`/`set`/`set_many` methods to handle field encryption/decryption
|
||||
for sensitive values.
|
||||
|
||||
:param cache: the Django cache backend to proxy to
|
||||
:param registry: the settings registry instance used to determine if
|
||||
a field is encrypted or not.
|
||||
:param encrypter: a callable used to encrypt field values; defaults to
|
||||
``awx.main.utils.encrypt_field``
|
||||
:param decrypter: a callable used to decrypt field values; defaults to
|
||||
``awx.main.utils.decrypt_field``
|
||||
"""
|
||||
|
||||
# These values have to be stored via self.__dict__ in this way to get
|
||||
# around the magic __setattr__ method on this class.
|
||||
self.__dict__['cache'] = cache
|
||||
self.__dict__['registry'] = registry
|
||||
self.__dict__['encrypter'] = encrypter or encrypt_field
|
||||
self.__dict__['decrypter'] = decrypter or decrypt_field
|
||||
|
||||
def get(self, key, **kwargs):
|
||||
value = self.cache.get(key, **kwargs)
|
||||
value = self._handle_encryption(self.decrypter, key, value)
|
||||
|
||||
# python-memcached auto-encodes unicode on cache set in python2
|
||||
# https://github.com/linsomniac/python-memcached/issues/79
|
||||
# https://github.com/linsomniac/python-memcached/blob/288c159720eebcdf667727a859ef341f1e908308/memcache.py#L961
|
||||
if six.PY2 and isinstance(value, six.binary_type):
|
||||
try:
|
||||
six.text_type(value)
|
||||
except UnicodeDecodeError:
|
||||
value = value.decode('utf-8')
|
||||
return value
|
||||
|
||||
def set(self, key, value, **kwargs):
|
||||
self.cache.set(
|
||||
key,
|
||||
self._handle_encryption(self.encrypter, key, value),
|
||||
**kwargs
|
||||
)
|
||||
|
||||
    def set_many(self, data, **kwargs):
        for key, value in data.items():
            self.set(key, value, **kwargs)

    def _handle_encryption(self, method, key, value):
        TransientSetting = namedtuple('TransientSetting', ['pk', 'value'])

        if value is not empty and self.registry.is_setting_encrypted(key):
            # If the setting exists in the database, we'll use its primary key
            # as part of the AES key when encrypting/decrypting
            return method(
                TransientSetting(
                    pk=getattr(self._get_setting_from_db(key), 'pk', None),
                    value=value
                ),
                'value'
            )

        # If the field in question isn't an "encrypted" field, this function is
        # a no-op; it just returns the provided value
        return value

    def _get_setting_from_db(self, key):
        field = self.registry.get_setting_field(key)
        if not field.read_only:
            return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()

    def __getattr__(self, name):
        return getattr(self.cache, name)

    def __setattr__(self, name, value):
        setattr(self.cache, name, value)


class SettingsWrapper(UserSettingsHolder):

    @classmethod
    def initialize(cls, cache=None, registry=None):
        """
        Used to initialize and wrap the Django settings context.

        :param cache: the Django cache backend to use for caching setting
                      values. ``django.core.cache`` is used by default.
        :param registry: the settings registry instance used. The global
                         ``awx.conf.settings_registry`` is used by default.
        """
        if not getattr(settings, '_awx_conf_settings', False):
            settings_wrapper = cls(
                settings._wrapped,
                cache=cache or django_cache,
                registry=registry or settings_registry
            )
            settings._wrapped = settings_wrapper

    def __init__(self, default_settings, cache, registry):
        """
        This constructor is generally not called directly, but by
        ``SettingsWrapper.initialize`` at app startup time when settings are
        parsed.
        """

        # These values have to be stored via self.__dict__ in this way to get
        # around the magic __setattr__ method on this class (which is used to
        # store API-assigned settings in the database).
        self.__dict__['default_settings'] = default_settings
        self.__dict__['_awx_conf_settings'] = self
        self.__dict__['_awx_conf_preload_expires'] = None
        self.__dict__['_awx_conf_preload_lock'] = threading.RLock()
        self.__dict__['_awx_conf_init_readonly'] = False
        self.__dict__['cache'] = EncryptedCacheProxy(cache, registry)
        self.__dict__['registry'] = registry

    def _get_supported_settings(self):
        return self.registry.get_registered_settings()

    def _get_writeable_settings(self):
        return self.registry.get_registered_settings(read_only=False)

    def _get_cache_value(self, value):
        if value is None:
            value = SETTING_CACHE_NONE
        elif isinstance(value, (list, tuple)) and len(value) == 0:
            value = SETTING_CACHE_EMPTY_LIST
        elif isinstance(value, (dict,)) and len(value) == 0:
            value = SETTING_CACHE_EMPTY_DICT
        return value

    def _preload_cache(self):
        # Ensure we're only modifying local preload timeout from one thread.
        with self._awx_conf_preload_lock:
            # If local preload timeout has not expired, skip preloading.
            if self._awx_conf_preload_expires and self._awx_conf_preload_expires > time.time():
                return
            # Otherwise update local preload timeout.
            self.__dict__['_awx_conf_preload_expires'] = time.time() + SETTING_CACHE_TIMEOUT
            # Check for any settings that have been defined in Python files and
            # make those read-only to avoid overriding in the database.
            if not self._awx_conf_init_readonly and 'migrate_to_database_settings' not in sys.argv:
                defaults_snapshot = self._get_default('DEFAULTS_SNAPSHOT')
                for key in self._get_writeable_settings():
                    init_default = defaults_snapshot.get(key, None)
                    try:
                        file_default = self._get_default(key)
                    except AttributeError:
                        file_default = None
                    if file_default != init_default and file_default is not None:
                        logger.debug('Setting %s has been marked read-only!', key)
                        self.registry._registry[key]['read_only'] = True
                        self.registry._registry[key]['defined_in_file'] = True
                self.__dict__['_awx_conf_init_readonly'] = True
        # If local preload timer has expired, check to see if another process
        # has already preloaded the cache and skip preloading if so.
        if self.cache.get('_awx_conf_preload_expires', default=empty) is not empty:
            return
        # Initialize all database-configurable settings with a marker value
        # so the cache itself can indicate that a setting is not configured,
        # without requiring a database lookup.
        settings_to_cache = dict([(key, SETTING_CACHE_NOTSET) for key in self._get_writeable_settings()])
        # Load all settings defined in the database.
        for setting in Setting.objects.filter(key__in=settings_to_cache.keys(), user__isnull=True).order_by('pk'):
            if settings_to_cache[setting.key] != SETTING_CACHE_NOTSET:
                continue
            if self.registry.is_setting_encrypted(setting.key):
                value = decrypt_field(setting, 'value')
            else:
                value = setting.value
            settings_to_cache[setting.key] = self._get_cache_value(value)
        # Load field default value for any settings not found in the database.
        if SETTING_CACHE_DEFAULTS:
            for key, value in settings_to_cache.items():
                if value != SETTING_CACHE_NOTSET:
                    continue
                field = self.registry.get_setting_field(key)
                try:
                    settings_to_cache[key] = self._get_cache_value(field.get_default())
                except SkipField:
                    pass
        # Generate a cache key for each setting and store them all at once.
        settings_to_cache = dict([(Setting.get_cache_key(k), v) for k, v in settings_to_cache.items()])
        settings_to_cache['_awx_conf_preload_expires'] = self._awx_conf_preload_expires
        logger.debug('cache set_many(%r, %r)', settings_to_cache, SETTING_CACHE_TIMEOUT)
        self.cache.set_many(settings_to_cache, timeout=SETTING_CACHE_TIMEOUT)

    def _get_local(self, name):
        self._preload_cache()
        cache_key = Setting.get_cache_key(name)
        try:
            cache_value = self.cache.get(cache_key, default=empty)
        except ValueError:
            cache_value = empty
        logger.debug('cache get(%r, %r) -> %r', cache_key, empty, cache_value)
        if cache_value == SETTING_CACHE_NOTSET:
            value = empty
        elif cache_value == SETTING_CACHE_NONE:
            value = None
        elif cache_value == SETTING_CACHE_EMPTY_LIST:
            value = []
        elif cache_value == SETTING_CACHE_EMPTY_DICT:
            value = {}
        else:
            value = cache_value
        field = self.registry.get_setting_field(name)
        if value is empty:
            setting = None
            if not field.read_only:
                setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
            if setting:
                if getattr(field, 'encrypted', False):
                    value = decrypt_field(setting, 'value')
                else:
                    value = setting.value
            else:
                value = SETTING_CACHE_NOTSET
                if SETTING_CACHE_DEFAULTS:
                    try:
                        value = field.get_default()
                    except SkipField:
                        pass
            # If None implies not set, convert when reading the value.
            if value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
                value = SETTING_CACHE_NOTSET
            if cache_value != value:
                logger.debug('cache set(%r, %r, %r)', cache_key,
                             self._get_cache_value(value),
                             SETTING_CACHE_TIMEOUT)
                self.cache.set(cache_key, self._get_cache_value(value), timeout=SETTING_CACHE_TIMEOUT)
        if value == SETTING_CACHE_NOTSET and not SETTING_CACHE_DEFAULTS:
            try:
                value = field.get_default()
            except SkipField:
                pass
        if value not in (empty, SETTING_CACHE_NOTSET):
            try:
                if field.read_only:
                    internal_value = field.to_internal_value(value)
                    field.run_validators(internal_value)
                    return internal_value
                else:
                    return field.run_validation(value)
            except:
                logger.warning(
                    'The current value "%r" for setting "%s" is invalid.',
                    value, name, exc_info=True)
        return empty

    def _get_default(self, name):
        return getattr(self.default_settings, name)

    @property
    def SETTINGS_MODULE(self):
        return self._get_default('SETTINGS_MODULE')

    def __getattr__(self, name):
        value = empty
        if name in self._get_supported_settings():
            with _log_database_error():
                value = self._get_local(name)
            if value is not empty:
                return value
        return self._get_default(name)

    def _set_local(self, name, value):
        field = self.registry.get_setting_field(name)
        if field.read_only:
            logger.warning('Attempt to set read only setting "%s".', name)
            raise ImproperlyConfigured('Setting "%s" is read only.' % name)

        try:
            data = field.to_representation(value)
            setting_value = field.run_validation(data)
            db_value = field.to_representation(setting_value)
        except Exception as e:
            logger.exception('Unable to assign value "%r" to setting "%s".',
                             value, name, exc_info=True)
            raise e

        setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
        if not setting:
            setting = Setting.objects.create(key=name, user=None, value=db_value)
            # post_save handler will delete from cache when added.
        elif setting.value != db_value or type(setting.value) != type(db_value):
            setting.value = db_value
            setting.save(update_fields=['value'])
            # post_save handler will delete from cache when changed.

    def __setattr__(self, name, value):
        if name in self._get_supported_settings():
            with _log_database_error():
                self._set_local(name, value)
        else:
            setattr(self.default_settings, name, value)

    def _del_local(self, name):
        field = self.registry.get_setting_field(name)
        if field.read_only:
            logger.warning('Attempt to delete read only setting "%s".', name)
            raise ImproperlyConfigured('Setting "%s" is read only.' % name)
        for setting in Setting.objects.filter(key=name, user__isnull=True):
            setting.delete()
            # pre_delete handler will delete from cache.

    def __delattr__(self, name):
        if name in self._get_supported_settings():
            with _log_database_error():
                self._del_local(name)
        else:
            delattr(self.default_settings, name)

    def __dir__(self):
        keys = []
        with _log_database_error():
            for setting in Setting.objects.filter(
                    key__in=self._get_supported_settings(), user__isnull=True):
                # Skip returning settings that have been overridden but are
                # considered to be "not set".
                if setting.value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
                    continue
                if setting.key not in keys:
                    keys.append(str(setting.key))
        for key in dir(self.default_settings):
            if key not in keys:
                keys.append(key)
        return keys

    def is_overridden(self, setting):
        set_locally = False
        if setting in self._get_supported_settings():
            with _log_database_error():
                set_locally = Setting.objects.filter(key=setting, user__isnull=True).exists()
        set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
        return (set_locally or set_on_default)
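
A minimal usage sketch of the wrapper above, assuming SettingsWrapper.initialize() has already wrapped django.conf.settings at startup; the setting name is hypothetical:

from django.conf import settings
from awx.conf import fields, settings_registry

settings_registry.register(
    'AWX_EXAMPLE_SETTING',  # hypothetical name, for illustration only
    field_class=fields.CharField,
    category='System',
    category_slug='system',
    default='DEFAULT',
)

settings.AWX_EXAMPLE_SETTING = 'CHANGED'  # validated, then written to the Setting table
assert settings.AWX_EXAMPLE_SETTING == 'CHANGED'  # subsequent reads come from the cache
del settings.AWX_EXAMPLE_SETTING  # row deleted; the registered default applies again
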
73
awx/conf/signals.py
Normal file
@ -0,0 +1,73 @@
# Python
import logging
import sys

# Django
from django.conf import settings
from django.core.signals import setting_changed
from django.db.models.signals import post_save, pre_delete, post_delete
from django.core.cache import cache
from django.dispatch import receiver

# Tower
import awx.main.signals
from awx.conf import settings_registry
from awx.conf.models import Setting
from awx.conf.serializers import SettingSerializer
from awx.main.tasks import process_cache_changes

logger = logging.getLogger('awx.conf.signals')

awx.main.signals.model_serializer_mapping[Setting] = SettingSerializer

__all__ = []


def handle_setting_change(key, for_delete=False):
    # When a setting changes or is deleted, remove its value from cache along
    # with any other settings that depend on it.
    setting_keys = [key]
    for dependent_key in settings_registry.get_dependent_settings(key):
        # Note: Doesn't handle multiple levels of dependencies!
        setting_keys.append(dependent_key)
    cache_keys = set([Setting.get_cache_key(k) for k in setting_keys])
    logger.debug('sending signals to delete cache keys(%r)', cache_keys)
    cache.delete_many(cache_keys)
    if 'migrate_to_database_settings' not in sys.argv:
        process_cache_changes.delay(list(cache_keys))

    # Send setting_changed signal with new value for each setting.
    for setting_key in setting_keys:
        setting_changed.send(
            sender=Setting,
            setting=setting_key,
            value=getattr(settings, setting_key, None),
            enter=not bool(for_delete),
        )


@receiver(post_save, sender=Setting)
def on_post_save_setting(sender, **kwargs):
    instance = kwargs['instance']
    # Skip for user-specific settings.
    if instance.user:
        return
    handle_setting_change(instance.key)


@receiver(pre_delete, sender=Setting)
def on_pre_delete_setting(sender, **kwargs):
    instance = kwargs['instance']
    # Skip for user-specific settings.
    if instance.user:
        return
    # Save instance key (setting name) for post_delete.
    instance._saved_key_ = instance.key


@receiver(post_delete, sender=Setting)
def on_post_delete_setting(sender, **kwargs):
    instance = kwargs['instance']
    key = getattr(instance, '_saved_key_', None)
    if key:
        handle_setting_change(key, True)
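
A short worked example of the fan-out handle_setting_change implements, assuming a dependent setting registered with depends_on (as in the registry tests below):

# Given: AWX_SOME_DEPENDENT_SETTING registered with
#        depends_on=['AWX_SOME_SETTING_ENABLED']
handle_setting_change('AWX_SOME_SETTING_ENABLED')
# -> setting_keys == ['AWX_SOME_SETTING_ENABLED', 'AWX_SOME_DEPENDENT_SETTING']
# -> both cache keys are removed in a single cache.delete_many() call
# -> setting_changed is sent once per key with the freshly resolved value
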
2
awx/conf/tests/__init__.py
Normal file
@ -0,0 +1,2 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
311
awx/conf/tests/unit/test_registry.py
Normal file
@ -0,0 +1,311 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from uuid import uuid4

from django.conf import LazySettings
from django.core.cache.backends.locmem import LocMemCache
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from rest_framework.fields import empty
import pytest

from awx.conf import fields
from awx.conf.settings import SettingsWrapper
from awx.conf.registry import SettingsRegistry


@pytest.fixture()
def reg(request):
    """
    This fixture initializes an awx settings registry object and passes it as
    an argument into the test function.
    """
    cache = LocMemCache(str(uuid4()), {})  # make a new random cache each time
    settings = LazySettings()
    registry = SettingsRegistry(settings)

    # @pytest.mark.defined_in_file can be used to mark specific setting values
    # as "defined in a settings file". This is analogous to manually
    # specifying a setting on the filesystem (e.g., in a local_settings.py in
    # development, or in /etc/tower/conf.d/<something>.py)
    defaults = request.node.get_marker('defined_in_file')
    if defaults:
        settings.configure(**defaults.kwargs)
    settings._wrapped = SettingsWrapper(settings._wrapped,
                                        cache,
                                        registry)
    return registry


def test_simple_setting_registration(reg):
    assert reg.get_registered_settings() == []
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system',
    )
    assert reg.get_registered_settings() == ['AWX_SOME_SETTING_ENABLED']


def test_simple_setting_unregistration(reg):
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system',
    )
    assert reg.get_registered_settings() == ['AWX_SOME_SETTING_ENABLED']

    reg.unregister('AWX_SOME_SETTING_ENABLED')
    assert reg.get_registered_settings() == []


def test_duplicate_setting_registration(reg):
    "ensure that settings cannot be registered twice."
    with pytest.raises(ImproperlyConfigured):
        for i in range(2):
            reg.register(
                'AWX_SOME_SETTING_ENABLED',
                field_class=fields.BooleanField,
                category=_('System'),
                category_slug='system',
            )


def test_field_class_required_for_registration(reg):
    "settings must specify a field class to register"
    with pytest.raises(ImproperlyConfigured):
        reg.register('AWX_SOME_SETTING_ENABLED')


def test_get_registered_settings_by_slug(reg):
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system',
    )
    assert reg.get_registered_settings(category_slug='system') == [
        'AWX_SOME_SETTING_ENABLED'
    ]
    assert reg.get_registered_settings(category_slug='other') == []


def test_get_registered_read_only_settings(reg):
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system'
    )
    reg.register(
        'AWX_SOME_READ_ONLY',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system',
        read_only=True
    )
    assert reg.get_registered_settings(read_only=True) == [
        'AWX_SOME_READ_ONLY'
    ]
    assert reg.get_registered_settings(read_only=False) == [
        'AWX_SOME_SETTING_ENABLED'
    ]
    assert reg.get_registered_settings() == [
        'AWX_SOME_SETTING_ENABLED',
        'AWX_SOME_READ_ONLY'
    ]


def test_get_registered_settings_with_required_features(reg):
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system',
        feature_required='superpowers',
    )
    assert reg.get_registered_settings(features_enabled=[]) == []
    assert reg.get_registered_settings(features_enabled=['superpowers']) == [
        'AWX_SOME_SETTING_ENABLED'
    ]


def test_get_dependent_settings(reg):
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system'
    )
    reg.register(
        'AWX_SOME_DEPENDENT_SETTING',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system',
        depends_on=['AWX_SOME_SETTING_ENABLED']
    )
    assert reg.get_dependent_settings('AWX_SOME_SETTING_ENABLED') == set([
        'AWX_SOME_DEPENDENT_SETTING'
    ])


def test_get_registered_categories(reg):
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system'
    )
    reg.register(
        'AWX_SOME_OTHER_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('OtherSystem'),
        category_slug='other-system'
    )
    assert reg.get_registered_categories() == {
        'all': _('All'),
        'changed': _('Changed'),
        'system': _('System'),
        'other-system': _('OtherSystem'),
    }


def test_get_registered_categories_with_required_features(reg):
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('System'),
        category_slug='system',
        feature_required='superpowers'
    )
    reg.register(
        'AWX_SOME_OTHER_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category=_('OtherSystem'),
        category_slug='other-system',
        feature_required='sortapowers'
    )
    assert reg.get_registered_categories(features_enabled=[]) == {
        'all': _('All'),
        'changed': _('Changed'),
    }
    assert reg.get_registered_categories(features_enabled=['superpowers']) == {
        'all': _('All'),
        'changed': _('Changed'),
        'system': _('System'),
    }
    assert reg.get_registered_categories(features_enabled=['sortapowers']) == {
        'all': _('All'),
        'changed': _('Changed'),
        'other-system': _('OtherSystem'),
    }
    assert reg.get_registered_categories(
        features_enabled=['superpowers', 'sortapowers']
    ) == {
        'all': _('All'),
        'changed': _('Changed'),
        'system': _('System'),
        'other-system': _('OtherSystem'),
    }


def test_is_setting_encrypted(reg):
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )
    reg.register(
        'AWX_SOME_ENCRYPTED_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        encrypted=True
    )
    assert reg.is_setting_encrypted('AWX_SOME_SETTING_ENABLED') is False
    assert reg.is_setting_encrypted('AWX_SOME_ENCRYPTED_SETTING') is True


def test_simple_field(reg):
    reg.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        placeholder='Example Value',
        feature_required='superpowers'
    )

    field = reg.get_setting_field('AWX_SOME_SETTING')
    assert isinstance(field, fields.CharField)
    assert field.category == _('System')
    assert field.category_slug == 'system'
    assert field.default is empty
    assert field.placeholder == 'Example Value'
    assert field.feature_required == 'superpowers'


def test_field_with_custom_attribute(reg):
    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category_slug='system',
    )

    field = reg.get_setting_field('AWX_SOME_SETTING_ENABLED',
                                  category_slug='other-system')
    assert field.category_slug == 'other-system'


def test_field_with_custom_mixin(reg):
    class GreatMixin(object):

        def is_great(self):
            return True

    reg.register(
        'AWX_SOME_SETTING_ENABLED',
        field_class=fields.BooleanField,
        category_slug='system',
    )

    field = reg.get_setting_field('AWX_SOME_SETTING_ENABLED',
                                  mixin_class=GreatMixin)
    assert isinstance(field, fields.BooleanField)
    assert isinstance(field, GreatMixin)
    assert field.is_great() is True


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_default_value_from_settings(reg):
    reg.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
    )

    field = reg.get_setting_field('AWX_SOME_SETTING')
    assert field.default == 'DEFAULT'


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_default_value_from_settings_with_custom_representation(reg):
    class LowercaseCharField(fields.CharField):

        def to_representation(self, value):
            return value.lower()

    reg.register(
        'AWX_SOME_SETTING',
        field_class=LowercaseCharField,
        category=_('System'),
        category_slug='system',
    )

    field = reg.get_setting_field('AWX_SOME_SETTING')
    assert field.default == 'default'
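
Each test above builds its own LocMemCache and registry, so the module runs in isolation; a typical invocation, assuming a configured development environment, might be:

py.test awx/conf/tests/unit/test_registry.py
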
467
awx/conf/tests/unit/test_settings.py
Normal file
@ -0,0 +1,467 @@
# -*- coding: utf-8 -*-

# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from contextlib import contextmanager
from uuid import uuid4
import time

from django.conf import LazySettings
from django.core.cache.backends.locmem import LocMemCache
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
import pytest
import six

from awx.conf import models, fields
from awx.conf.settings import SettingsWrapper, EncryptedCacheProxy, SETTING_CACHE_NOTSET
from awx.conf.registry import SettingsRegistry

from awx.main.utils import encrypt_field, decrypt_field


@contextmanager
def apply_patches(_patches):
    [p.start() for p in _patches]
    yield
    [p.stop() for p in _patches]


@pytest.fixture()
def settings(request):
    """
    This fixture initializes a Django settings object that wraps our
    `awx.conf.settings.SettingsWrapper` and passes it as an argument into the
    test function.

    This mimics the work done by `awx.conf.settings.SettingsWrapper.initialize`
    on `django.conf.settings`.
    """
    cache = LocMemCache(str(uuid4()), {})  # make a new random cache each time
    settings = LazySettings()
    registry = SettingsRegistry(settings)

    # @pytest.mark.defined_in_file can be used to mark specific setting values
    # as "defined in a settings file". This is analogous to manually
    # specifying a setting on the filesystem (e.g., in a local_settings.py in
    # development, or in /etc/tower/conf.d/<something>.py)
    in_file_marker = request.node.get_marker('defined_in_file')
    defaults = in_file_marker.kwargs if in_file_marker else {}
    defaults['DEFAULTS_SNAPSHOT'] = {}
    settings.configure(**defaults)
    settings._wrapped = SettingsWrapper(settings._wrapped,
                                        cache,
                                        registry)
    return settings


@pytest.mark.defined_in_file(DEBUG=True)
def test_unregistered_setting(settings):
    "native Django settings are not stored in DB, and aren't cached"
    assert settings.DEBUG is True
    assert settings.cache.get('DEBUG') is None


def test_cached_settings_unicode_is_auto_decoded(settings):
    # https://github.com/linsomniac/python-memcached/issues/79
    # https://github.com/linsomniac/python-memcached/blob/288c159720eebcdf667727a859ef341f1e908308/memcache.py#L961

    value = six.u('Iñtërnâtiônàlizætiøn').encode('utf-8')  # this simulates what python-memcached does on cache.set()
    settings.cache.set('DEBUG', value)
    assert settings.cache.get('DEBUG') == six.u('Iñtërnâtiônàlizætiøn')


def test_read_only_setting(settings):
    settings.registry.register(
        'AWX_READ_ONLY',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        default='NO-EDITS',
        read_only=True
    )
    assert settings.AWX_READ_ONLY == 'NO-EDITS'
    assert len(settings.registry.get_registered_settings(read_only=False)) == 0
    settings = settings.registry.get_registered_settings(read_only=True)
    assert settings == ['AWX_READ_ONLY']


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
@pytest.mark.parametrize('read_only', [True, False])
def test_setting_defined_in_file(settings, read_only):
    kwargs = {'read_only': True} if read_only else {}
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        **kwargs
    )
    assert settings.AWX_SOME_SETTING == 'DEFAULT'
    assert len(settings.registry.get_registered_settings(read_only=False)) == 0
    settings = settings.registry.get_registered_settings(read_only=True)
    assert settings == ['AWX_SOME_SETTING']


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_setting_defined_in_file_with_empty_default(settings):
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        default='',
    )
    assert settings.AWX_SOME_SETTING == 'DEFAULT'
    assert len(settings.registry.get_registered_settings(read_only=False)) == 0
    settings = settings.registry.get_registered_settings(read_only=True)
    assert settings == ['AWX_SOME_SETTING']


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_setting_defined_in_file_with_specific_default(settings):
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        default=123
    )
    assert settings.AWX_SOME_SETTING == 'DEFAULT'
    assert len(settings.registry.get_registered_settings(read_only=False)) == 0
    settings = settings.registry.get_registered_settings(read_only=True)
    assert settings == ['AWX_SOME_SETTING']


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_read_only_defaults_are_cached(settings):
    "read-only settings are stored in the cache"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )
    assert settings.AWX_SOME_SETTING == 'DEFAULT'
    assert settings.cache.get('AWX_SOME_SETTING') == 'DEFAULT'


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_cache_respects_timeout(settings):
"only preload the cache every SETTING_CACHE_TIMEOUT settings"
|
||||
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )

    assert settings.AWX_SOME_SETTING == 'DEFAULT'
    cache_expiration = settings.cache.get('_awx_conf_preload_expires')
    assert cache_expiration > time.time()

    assert settings.AWX_SOME_SETTING == 'DEFAULT'
    assert settings.cache.get('_awx_conf_preload_expires') == cache_expiration


def test_default_setting(settings, mocker):
    "settings that specify a default are inserted into the cache"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        default='DEFAULT'
    )

    settings_to_cache = mocker.Mock(**{'order_by.return_value': []})
    with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=settings_to_cache):
        assert settings.AWX_SOME_SETTING == 'DEFAULT'
        assert settings.cache.get('AWX_SOME_SETTING') == 'DEFAULT'


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_setting_is_from_setting_file(settings, mocker):
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )
    assert settings.AWX_SOME_SETTING == 'DEFAULT'
    assert settings.registry.get_setting_field('AWX_SOME_SETTING').defined_in_file is True


def test_setting_is_not_from_setting_file(settings, mocker):
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        default='DEFAULT'
    )

    settings_to_cache = mocker.Mock(**{'order_by.return_value': []})
    with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=settings_to_cache):
        assert settings.AWX_SOME_SETTING == 'DEFAULT'
        assert settings.registry.get_setting_field('AWX_SOME_SETTING').defined_in_file is False


def test_empty_setting(settings, mocker):
    "settings with no default and no defined value are not valid"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )

    mocks = mocker.Mock(**{
        'order_by.return_value': mocker.Mock(**{
            '__iter__': lambda self: iter([]),
            'first.return_value': None
        }),
    })
    with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks):
        with pytest.raises(AttributeError):
            settings.AWX_SOME_SETTING
        assert settings.cache.get('AWX_SOME_SETTING') == SETTING_CACHE_NOTSET


def test_setting_from_db(settings, mocker):
    "settings can be loaded from the database"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        default='DEFAULT'
    )

    setting_from_db = mocker.Mock(key='AWX_SOME_SETTING', value='FROM_DB')
    mocks = mocker.Mock(**{
        'order_by.return_value': mocker.Mock(**{
            '__iter__': lambda self: iter([setting_from_db]),
            'first.return_value': setting_from_db
        }),
    })
    with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks):
        assert settings.AWX_SOME_SETTING == 'FROM_DB'
        assert settings.cache.get('AWX_SOME_SETTING') == 'FROM_DB'


@pytest.mark.parametrize('encrypted', (True, False))
def test_setting_from_db_with_unicode(settings, mocker, encrypted):
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        default='DEFAULT',
        encrypted=encrypted
    )
    # this simulates a bug in python-memcached; see https://github.com/linsomniac/python-memcached/issues/79
    value = six.u('Iñtërnâtiônàlizætiøn').encode('utf-8')

    setting_from_db = mocker.Mock(key='AWX_SOME_SETTING', value=value)
    mocks = mocker.Mock(**{
        'order_by.return_value': mocker.Mock(**{
            '__iter__': lambda self: iter([setting_from_db]),
            'first.return_value': setting_from_db
        }),
    })
    with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks):
        assert settings.AWX_SOME_SETTING == six.u('Iñtërnâtiônàlizætiøn')
        assert settings.cache.get('AWX_SOME_SETTING') == six.u('Iñtërnâtiônàlizætiøn')


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_read_only_setting_assignment(settings):
    "read-only settings cannot be overwritten"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )
    assert settings.AWX_SOME_SETTING == 'DEFAULT'
    with pytest.raises(ImproperlyConfigured):
        settings.AWX_SOME_SETTING = 'CHANGED'
    assert settings.AWX_SOME_SETTING == 'DEFAULT'


def test_db_setting_create(settings, mocker):
    "settings are stored in the database when set for the first time"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )

    setting_list = mocker.Mock(**{'order_by.return_value.first.return_value': None})
    with apply_patches([
        mocker.patch('awx.conf.models.Setting.objects.filter',
                     return_value=setting_list),
        mocker.patch('awx.conf.models.Setting.objects.create', mocker.Mock())
    ]):
        settings.AWX_SOME_SETTING = 'NEW-VALUE'

    models.Setting.objects.create.assert_called_with(
        key='AWX_SOME_SETTING',
        user=None,
        value='NEW-VALUE'
    )


def test_db_setting_update(settings, mocker):
    "settings are updated in the database when their value changes"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )

    existing_setting = mocker.Mock(key='AWX_SOME_SETTING', value='FROM_DB')
    setting_list = mocker.Mock(**{
        'order_by.return_value.first.return_value': existing_setting
    })
    with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=setting_list):
        settings.AWX_SOME_SETTING = 'NEW-VALUE'

    assert existing_setting.value == 'NEW-VALUE'
    existing_setting.save.assert_called_with(update_fields=['value'])


def test_db_setting_deletion(settings, mocker):
    "settings are auto-deleted from the database"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )

    existing_setting = mocker.Mock(key='AWX_SOME_SETTING', value='FROM_DB')
    with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=[existing_setting]):
        del settings.AWX_SOME_SETTING

    assert existing_setting.delete.call_count == 1


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_read_only_setting_deletion(settings):
    "read-only settings cannot be deleted"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system'
    )
    assert settings.AWX_SOME_SETTING == 'DEFAULT'
    with pytest.raises(ImproperlyConfigured):
        del settings.AWX_SOME_SETTING
    assert settings.AWX_SOME_SETTING == 'DEFAULT'


def test_charfield_properly_sets_none(settings, mocker):
    "see: https://github.com/ansible/ansible-tower/issues/5322"
    settings.registry.register(
        'AWX_SOME_SETTING',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        allow_null=True
    )

    setting_list = mocker.Mock(**{'order_by.return_value.first.return_value': None})
    with apply_patches([
        mocker.patch('awx.conf.models.Setting.objects.filter',
                     return_value=setting_list),
        mocker.patch('awx.conf.models.Setting.objects.create', mocker.Mock())
    ]):
        settings.AWX_SOME_SETTING = None

    models.Setting.objects.create.assert_called_with(
        key='AWX_SOME_SETTING',
        user=None,
        value=None
    )


def test_settings_use_an_encrypted_cache(settings):
    settings.registry.register(
        'AWX_ENCRYPTED',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        encrypted=True
    )
    assert isinstance(settings.cache, EncryptedCacheProxy)
    assert settings.cache.__dict__['encrypter'] == encrypt_field
    assert settings.cache.__dict__['decrypter'] == decrypt_field


def test_sensitive_cache_data_is_encrypted(settings, mocker):
    "fields marked as `encrypted` are stored in the cache with encryption"
    settings.registry.register(
        'AWX_ENCRYPTED',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        encrypted=True
    )

    def rot13(obj, attribute):
        assert obj.pk == 123
        return getattr(obj, attribute).encode('rot13')

    native_cache = LocMemCache(str(uuid4()), {})
    cache = EncryptedCacheProxy(
        native_cache,
        settings.registry,
        encrypter=rot13,
        decrypter=rot13
    )
    # Insert the setting value into the database; the encryption process will
    # use its primary key as part of the encryption key
    setting_from_db = mocker.Mock(pk=123, key='AWX_ENCRYPTED', value='SECRET!')
    mocks = mocker.Mock(**{
        'order_by.return_value': mocker.Mock(**{
            '__iter__': lambda self: iter([setting_from_db]),
            'first.return_value': setting_from_db
        }),
    })
    with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks):
        cache.set('AWX_ENCRYPTED', 'SECRET!')
        assert cache.get('AWX_ENCRYPTED') == 'SECRET!'
        assert native_cache.get('AWX_ENCRYPTED') == 'FRPERG!'


def test_readonly_sensitive_cache_data_is_encrypted(settings):
    "readonly fields marked as `encrypted` are stored in the cache with encryption"
    settings.registry.register(
        'AWX_ENCRYPTED',
        field_class=fields.CharField,
        category=_('System'),
        category_slug='system',
        read_only=True,
        encrypted=True
    )

    def rot13(obj, attribute):
        assert obj.pk is None
        return getattr(obj, attribute).encode('rot13')

    native_cache = LocMemCache(str(uuid4()), {})
    cache = EncryptedCacheProxy(
        native_cache,
        settings.registry,
        encrypter=rot13,
        decrypter=rot13
    )
    cache.set('AWX_ENCRYPTED', 'SECRET!')
    assert cache.get('AWX_ENCRYPTED') == 'SECRET!'
    assert native_cache.get('AWX_ENCRYPTED') == 'FRPERG!'
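
The rot13 tests above rely on EncryptedCacheProxy accepting arbitrary (obj, attribute) callables as encrypter/decrypter, defaulting to encrypt_field/decrypt_field. A minimal sketch of that contract with a deliberately no-op codec; `registry` is assumed to be a SettingsRegistry as built in the fixture:

def identity(obj, attribute):
    # Same (obj, attribute) signature as encrypt_field/decrypt_field.
    return getattr(obj, attribute)

cache = EncryptedCacheProxy(LocMemCache(str(uuid4()), {}), registry,
                            encrypter=identity, decrypter=identity)
cache.set('AWX_ENCRYPTED', 'SECRET!')  # for a registered key; stored as-is here
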
15
awx/conf/urls.py
Normal file
@ -0,0 +1,15 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Django
from django.conf.urls import patterns

# Tower
from awx.api.urls import url


urlpatterns = patterns(
    'awx.conf.views',
    url(r'^$', 'setting_category_list'),
    url(r'^(?P<category_slug>[a-z0-9-]+)/$', 'setting_singleton_detail'),
)
110
awx/conf/utils.py
Executable file
@ -0,0 +1,110 @@
#!/usr/bin/env python

# Python
import difflib
import glob
import os
import shutil

# RedBaron
from redbaron import RedBaron, indent

__all__ = ['comment_assignments']


def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix='.old'):
    if isinstance(patterns, basestring):
        patterns = [patterns]
    diffs = []
    for pattern in patterns:
        for filename in sorted(glob.glob(pattern)):
            filename = os.path.abspath(os.path.normpath(filename))
            if backup_suffix:
                backup_filename = '{}{}'.format(filename, backup_suffix)
            else:
                backup_filename = None
            diff = comment_assignments_in_file(filename, assignment_names, dry_run, backup_filename)
            if diff:
                diffs.append(diff)
    return diffs


def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup_filename=None):
    if isinstance(assignment_names, basestring):
        assignment_names = [assignment_names]
    else:
        assignment_names = assignment_names[:]
    current_file_data = open(filename).read()

    for assignment_name in assignment_names[:]:
        if assignment_name in current_file_data:
            continue
        if assignment_name in assignment_names:
            assignment_names.remove(assignment_name)
    if not assignment_names:
        return ''

    replace_lines = {}
    rb = RedBaron(current_file_data)
    for assignment_node in rb.find_all('assignment'):
        for assignment_name in assignment_names:

            # Only target direct assignments to a variable.
            name_node = assignment_node.find('name', value=assignment_name)
            if not name_node:
                continue
            if assignment_node.target.type != 'name':
                continue

            # Build a new node that comments out the existing assignment node.
            indentation = '{}# '.format(assignment_node.indentation or '')
            new_node_content = indent(assignment_node.dumps(), indentation)
            new_node_lines = new_node_content.splitlines()
            # Add a pass statement in case the assignment block is the only
            # child in a parent code block to prevent a syntax error.
            if assignment_node.indentation:
                new_node_lines[0] = new_node_lines[0].replace(indentation, '{}pass # '.format(assignment_node.indentation or ''), 1)
            new_node_lines[0] = '{0}This setting is now configured via the Tower API.\n{1}'.format(indentation, new_node_lines[0])

            # Store new node lines in dictionary to be replaced in file.
            start_lineno = assignment_node.absolute_bounding_box.top_left.line
            end_lineno = assignment_node.absolute_bounding_box.bottom_right.line
            for n, new_node_line in enumerate(new_node_lines):
                new_lineno = start_lineno + n
                assert new_lineno <= end_lineno
                replace_lines[new_lineno] = new_node_line

    if not replace_lines:
        return ''

    # Iterate through all lines in current file and replace as needed.
    current_file_lines = current_file_data.splitlines()
    new_file_lines = []
    for n, line in enumerate(current_file_lines):
        new_file_lines.append(replace_lines.get(n + 1, line))
    new_file_data = '\n'.join(new_file_lines)
    new_file_lines = new_file_data.splitlines()

    # If changed, syntax check and write the new file; return a diff of changes.
    diff_lines = []
    if new_file_data != current_file_data:
        compile(new_file_data, filename, 'exec')
        if backup_filename:
            from_file = backup_filename
        else:
            from_file = '{}.old'.format(filename)
        to_file = filename
        diff_lines = list(difflib.unified_diff(current_file_lines, new_file_lines, fromfile=from_file, tofile=to_file, lineterm=''))
        if not dry_run:
            if backup_filename:
                shutil.copy2(filename, backup_filename)
            with open(filename, 'wb') as fileobj:
                fileobj.write(new_file_data)
    return '\n'.join(diff_lines)


if __name__ == '__main__':
    pattern = os.path.join(os.path.dirname(__file__), '..', 'settings', 'local_*.py')
    diffs = comment_assignments(pattern, ['AUTH_LDAP_ORGANIZATION_MAP'])
    for diff in diffs:
        print(diff)
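
To illustrate the transformation (hypothetical file contents), a matched top-level assignment is rewritten like this before the unified diff is produced; the `pass` marker is only injected for indented assignments, where commenting out the sole child of a block would otherwise be a syntax error:

# before:
AUTH_LDAP_ORGANIZATION_MAP = {
    'Engineering': {},
}

# after:
# This setting is now configured via the Tower API.
# AUTH_LDAP_ORGANIZATION_MAP = {
#     'Engineering': {},
# }
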
141
awx/conf/views.py
Normal file
@ -0,0 +1,141 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Python
import collections
import sys

# Django
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status

# Tower
from awx.api.generics import *  # noqa
from awx.main.utils import *  # noqa
from awx.conf.license import get_licensed_features
from awx.conf.models import Setting
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
from awx.conf import settings_registry


SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name'))


class SettingCategoryList(ListAPIView):

    model = Setting  # Not exactly, but needed for the view.
    serializer_class = SettingCategorySerializer
    filter_backends = []
    new_in_310 = True
    view_name = _('Setting Categories')

    def get_queryset(self):
        setting_categories = []
        categories = settings_registry.get_registered_categories(features_enabled=get_licensed_features())
        if self.request.user.is_superuser or self.request.user.is_system_auditor:
            pass  # categories = categories
        elif 'user' in categories:
            categories = {'user': _('User')}
        else:
            categories = {}
        for category_slug in sorted(categories.keys()):
            url = reverse('api:setting_singleton_detail', args=(category_slug,))
            setting_categories.append(SettingCategory(url, category_slug, categories[category_slug]))
        return setting_categories


class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):

    model = Setting  # Not exactly, but needed for the view.
    serializer_class = SettingSingletonSerializer
    filter_backends = []
    new_in_310 = True
    view_name = _('Setting Detail')

    def get_queryset(self):
        self.category_slug = self.kwargs.get('category_slug', 'all')
        all_category_slugs = settings_registry.get_registered_categories(features_enabled=get_licensed_features()).keys()
        if self.request.user.is_superuser or getattr(self.request.user, 'is_system_auditor', False):
            category_slugs = all_category_slugs
        else:
            category_slugs = {'user'}
        if self.category_slug not in all_category_slugs:
            raise Http404
        if self.category_slug not in category_slugs:
            raise PermissionDenied()

        registered_settings = settings_registry.get_registered_settings(category_slug=self.category_slug, read_only=False, features_enabled=get_licensed_features())
        if self.category_slug == 'user':
            return Setting.objects.filter(key__in=registered_settings, user=self.request.user)
        else:
            return Setting.objects.filter(key__in=registered_settings, user__isnull=True)

    def get_object(self):
        settings_qs = self.get_queryset()
        registered_settings = settings_registry.get_registered_settings(category_slug=self.category_slug, features_enabled=get_licensed_features())
        all_settings = {}
        for setting in settings_qs:
            all_settings[setting.key] = setting.value
        for key in registered_settings:
            if key in all_settings or self.category_slug == 'changed':
                continue
            try:
                field = settings_registry.get_setting_field(key, for_user=bool(self.category_slug == 'user'))
                all_settings[key] = field.get_default()
            except serializers.SkipField:
                all_settings[key] = None
        all_settings['user'] = self.request.user if self.category_slug == 'user' else None
        obj = type('Settings', (object,), all_settings)()
        self.check_object_permissions(self.request, obj)
        return obj

    def perform_update(self, serializer):
        settings_qs = self.get_queryset()
        user = self.request.user if self.category_slug == 'user' else None
        for key, value in serializer.validated_data.items():
            if key == 'LICENSE':
                continue
            if settings_registry.is_setting_encrypted(key) and isinstance(value, basestring) and value.startswith('$encrypted$'):
                continue
            setattr(serializer.instance, key, value)
            setting = settings_qs.filter(key=key).order_by('pk').first()
            if not setting:
                setting = Setting.objects.create(key=key, user=user, value=value)
            elif setting.value != value or type(setting.value) != type(value):
                setting.value = value
                setting.save(update_fields=['value'])

    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
        self.perform_destroy(instance)
        return Response(status=status.HTTP_204_NO_CONTENT)

    def perform_destroy(self, instance):
        for setting in self.get_queryset().exclude(key='LICENSE'):
            setting.delete()

        # When TOWER_URL_BASE is deleted from the API, reset it to the hostname
        # used to make the request as a default.
        if hasattr(instance, 'TOWER_URL_BASE'):
            url = '{}://{}'.format(self.request.scheme, self.request.get_host())
            if settings.TOWER_URL_BASE != url:
                settings.TOWER_URL_BASE = url


# Create view functions for all of the class-based views to simplify inclusion
# in URL patterns and reverse URL lookups, converting CamelCase names to
# lowercase_with_underscore (e.g. MyView.as_view() becomes my_view).
this_module = sys.modules[__name__]
for attr, value in locals().items():
    if isinstance(value, type) and issubclass(value, APIView):
        name = camelcase_to_underscore(attr)
        view = value.as_view()
        setattr(this_module, name, view)
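
The loop above is why awx/conf/urls.py can reference these views by string name; per camelcase_to_underscore, the generated module attributes are:

# SettingCategoryList.as_view()    -> awx.conf.views.setting_category_list
# SettingSingletonDetail.as_view() -> awx.conf.views.setting_singleton_detail
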
@ -1,2 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
@ -1,2 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
@ -1,6 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

from __future__ import absolute_import

from .fact import *  # noqa
@ -1,217 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

from mongoengine import connect
from mongoengine.base import BaseField
from mongoengine import Document, DateTimeField, ReferenceField, StringField, IntField
from mongoengine.connection import get_db, ConnectionError
from awx.fact.utils.dbtransform import register_key_transform, KeyTransform

from django.conf import settings

import logging
logger = logging.getLogger('awx.fact.models.fact')


key_transform = KeyTransform([('.', '\uff0E'), ('$', '\uff04')])

# NOTE: I think it might be better to use register_connection here: https://github.com/MongoEngine/mongoengine/blob/0.9/mongoengine/connection.py#L21
# but I'm not doing that because I don't see how we can also register the key transform as needed or set the tz_aware preference
@classmethod
def _get_db_monkeypatched(cls):
    """ Override the default _get_db mechanism to start a connection to the database """
    # Connect to Mongo
    try:
        # Sanity check: If we have intentionally invalid settings, then we
        # know we cannot connect.
        if settings.MONGO_HOST == NotImplemented:
            raise ConnectionError

        # Attempt to connect to the MongoDB database.
        connect(settings.MONGO_DB,
                host=settings.MONGO_HOST,
                port=int(settings.MONGO_PORT),
                username=settings.MONGO_USERNAME,
                password=settings.MONGO_PASSWORD,
                tz_aware=settings.USE_TZ)
        register_key_transform(get_db())
    except (ConnectionError, AttributeError):
        logger.info('Failed to establish connection to MongoDB')
return get_db(cls._meta.get("db_alias", "default"))
|
||||
|
||||
Document._get_db = _get_db_monkeypatched
|
||||
|
||||
class TransformField(BaseField):
|
||||
def to_python(self, value):
|
||||
return key_transform.transform_outgoing(value, None)
|
||||
|
||||
def prepare_query_value(self, op, value):
|
||||
if op == 'set':
|
||||
value = key_transform.transform_incoming(value, None)
|
||||
return super(TransformField, self).prepare_query_value(op, value)
|
||||
|
||||
def to_mongo(self, value):
|
||||
value = key_transform.transform_incoming(value, None)
|
||||
return value
|
||||
|
||||
class FactHost(Document):
|
||||
hostname = StringField(max_length=100, required=True, unique_with='inventory_id')
|
||||
inventory_id = IntField(required=True, unique_with='hostname')
|
||||
|
||||
# TODO: Consider using hashed index on hostname. django-mongo may not support this but
|
||||
# executing raw js will
|
||||
meta = {
|
||||
'indexes': [
|
||||
            ('hostname', 'inventory_id')
        ]
    }


class Fact(Document):
    timestamp = DateTimeField(required=True)
    host = ReferenceField(FactHost, required=True)
    module = StringField(max_length=50, required=True)
    fact = TransformField(required=True)

    # TODO: Consider using hashed index on host. django-mongo may not support this but
    # executing raw js will
    meta = {
        'indexes': [
            '-timestamp',
            'host',
        ]
    }

    @staticmethod
    def add_fact(timestamp, fact, host, module):
        fact_obj = Fact(timestamp=timestamp, host=host, module=module, fact=fact)
        fact_obj.save()
        version_obj = FactVersion(timestamp=timestamp, host=host, module=module, fact=fact_obj)
        version_obj.save()
        return (fact_obj, version_obj)

    # TODO: if we want to relax the need to include module...
    # If module is not specified, the filter query may return more than 1 result.
    # Thus, the resulting facts must somehow be unioned/concatenated or kept as an array.
    @staticmethod
    def get_host_version(hostname, inventory_id, timestamp, module):
        try:
            host = FactHost.objects.get(hostname=hostname, inventory_id=inventory_id)
        except FactHost.DoesNotExist:
            return None

        kv = {
            'host': host.id,
            'timestamp__lte': timestamp,
            'module': module,
        }

        try:
            facts = Fact.objects.filter(**kv).order_by("-timestamp")
            if not facts:
                return None
            return facts[0]
        except Fact.DoesNotExist:
            return None

    @staticmethod
    def get_host_timeline(hostname, inventory_id, module):
        try:
            host = FactHost.objects.get(hostname=hostname, inventory_id=inventory_id)
        except FactHost.DoesNotExist:
            return None

        kv = {
            'host': host.id,
            'module': module,
        }

        return FactVersion.objects.filter(**kv).order_by("-timestamp").values_list('timestamp')

    # FIXME: single facts no longer works with the addition of the inventory_id field to the FactHost document
    @staticmethod
    def get_single_facts(hostnames, fact_key, fact_value, timestamp, module):
        kv = {
            'hostname': {
                '$in': hostnames,
            }
        }
        fields = {
            '_id': 1
        }
        host_ids = FactHost._get_collection().find(kv, fields)
        if not host_ids or host_ids.count() == 0:
            return None
        # TODO: use mongo to transform [{_id: <>}, {_id: <>},...] into [_id, _id,...]
        host_ids = [e['_id'] for e in host_ids]

        pipeline = []
        match = {
            'host': {
                '$in': host_ids
            },
            'timestamp': {
                '$lte': timestamp
            },
            'module': module
        }
        sort = {
            'timestamp': -1
        }
        group = {
            '_id': '$host',
            'timestamp': {
                '$first': '$timestamp'
            },
            'fact': {
                '$first': '$fact'
            }
        }
        project = {
            '_id': 0,
            'fact': 1,
        }
        pipeline.append({'$match': match})  # noqa
        pipeline.append({'$sort': sort})  # noqa
        pipeline.append({'$group': group})  # noqa
        pipeline.append({'$project': project})  # noqa
        q = FactVersion._get_collection().aggregate(pipeline)
        if not q or 'result' not in q or len(q['result']) == 0:
            return None
        # TODO: use mongo to transform [{fact: <>}, {fact: <>},...] into [fact, fact,...]
        fact_ids = [fact['fact'] for fact in q['result']]

        kv = {
            'fact.%s' % fact_key: fact_value,
            '_id': {
                '$in': fact_ids
            }
        }
        fields = {
            'fact.%s.$' % fact_key: 1,
            'host': 1,
            'timestamp': 1,
            'module': 1,
        }
        facts = Fact._get_collection().find(kv, fields)
        # Translate the pymongo result structure into mongoengine Fact objects.
        fact_objs = []
        for f in facts:
            f['id'] = f.pop('_id')
            fact_objs.append(Fact(**f))
        return fact_objs


class FactVersion(Document):
    timestamp = DateTimeField(required=True)
    host = ReferenceField(FactHost, required=True)
    module = StringField(max_length=50, required=True)
    fact = ReferenceField(Fact, required=True)
    # TODO: Consider using hashed index on module. django-mongo may not support this but
    # executing raw js will
    meta = {
        'indexes': [
            '-timestamp',
            'module',
            'host',
        ]
    }
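For orientation, a minimal usage sketch of the documents above, assuming a mongoengine connection is already established; the hostname and inventory values are invented for illustration and do not appear in this diff:

    from datetime import datetime

    # Hypothetical values for illustration only.
    host = FactHost(hostname='web1.example.org', inventory_id=7)
    host.save()

    # add_fact() stores the fact document and a FactVersion pointing at it.
    fact_obj, version_obj = Fact.add_fact(
        timestamp=datetime.utcnow(),
        fact={'ansible_distribution': 'Fedora'},
        host=host,
        module='ansible',
    )

    # Most recent 'ansible' fact for the host at or before now:
    latest = Fact.get_host_version('web1.example.org', 7, datetime.utcnow(), 'ansible')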
@@ -1,2 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
@@ -1,58 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

# Pymongo
from pymongo.son_manipulator import SONManipulator


class KeyTransform(SONManipulator):

    def __init__(self, replace):
        self.replace = replace

    def replace_key(self, key):
        for (replace, replacement) in self.replace:
            key = key.replace(replace, replacement)
        return key

    def revert_key(self, key):
        for (replacement, replace) in self.replace:
            key = key.replace(replace, replacement)
        return key

    def replace_incoming(self, obj):
        if isinstance(obj, dict):
            value = {}
            for k, v in obj.items():
                value[self.replace_key(k)] = self.replace_incoming(v)
        elif isinstance(obj, list):
            value = [self.replace_incoming(elem) for elem in obj]
        else:
            value = obj

        return value

    def replace_outgoing(self, obj):
        if isinstance(obj, dict):
            value = {}
            for k, v in obj.items():
                value[self.revert_key(k)] = self.replace_outgoing(v)
        elif isinstance(obj, list):
            value = [self.replace_outgoing(elem) for elem in obj]
        else:
            value = obj

        return value

    def transform_incoming(self, son, collection):
        return self.replace_incoming(son)

    def transform_outgoing(self, son, collection):
        if not collection or collection.name != 'fact':
            return son
        return self.replace_outgoing(son)


def register_key_transform(db):
    #db.add_son_manipulator(KeyTransform([('.', '\uff0E'), ('$', '\uff04')]))
    pass
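This manipulator exists because MongoDB forbids '.' and '$' in document keys. A hedged round-trip sketch, using the fullwidth replacement characters from the commented-out register_key_transform() call above:

    # Round-trip sketch for KeyTransform; the replacement pairs mirror the
    # commented-out registration call in this (now deleted) module.
    kt = KeyTransform([('.', u'\uff0E'), ('$', u'\uff04')])
    doc = {'pkg.version': {'$gte': '1.0'}}
    stored = kt.replace_incoming(doc)          # keys become safe for MongoDB
    assert kt.replace_outgoing(stored) == doc  # and revert on the way out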
@@ -1,2 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
@@ -1,33 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

'''
Compatibility library to support both Django 1.4.x and Django 1.5.x.
'''

try:
    from django.utils.html import format_html
except ImportError:
    from django.utils.html import conditional_escape
    from django.utils.safestring import mark_safe

    def format_html(format_string, *args, **kwargs):
        args_safe = map(conditional_escape, args)
        kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in kwargs.items()])
        return mark_safe(format_string.format(*args_safe, **kwargs_safe))

try:
    from django.utils.log import RequireDebugTrue
except ImportError:
    import logging
    from django.conf import settings

    class RequireDebugTrue(logging.Filter):
        def filter(self, record):
            return settings.DEBUG

try:
    from django.utils.text import slugify  # noqa
except ImportError:
    from django.template.defaultfilters import slugify  # noqa
26
awx/lib/sitecustomize.py
Normal file
@@ -0,0 +1,26 @@
# Python
import os
import sys

# Based on http://stackoverflow.com/a/6879344/131141 -- Initialize the Tower display
# callback as early as possible to wrap ansible.display.Display methods.


def argv_ready(argv):
    if argv and os.path.basename(argv[0]) in {'ansible', 'ansible-playbook'}:
        import tower_display_callback  # noqa


class argv_placeholder(object):

    def __del__(self):
        try:
            argv_ready(sys.argv)
        except:
            pass


if hasattr(sys, 'argv'):
    argv_ready(sys.argv)
else:
    sys.argv = argv_placeholder()
25
awx/lib/tower_display_callback/__init__.py
Normal file
@@ -0,0 +1,25 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)

# Tower Display Callback
from . import cleanup  # noqa (registers control persistent cleanup)
from . import display  # noqa (wraps ansible.display.Display methods)
from .module import TowerDefaultCallbackModule, TowerMinimalCallbackModule

__all__ = ['TowerDefaultCallbackModule', 'TowerMinimalCallbackModule']
80
awx/lib/tower_display_callback/cleanup.py
Normal file
@@ -0,0 +1,80 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)

# Python
import atexit
import glob
import os
import pwd

# PSUtil
import psutil

__all__ = []

main_pid = os.getpid()


@atexit.register
def terminate_ssh_control_masters():
    # Only run this cleanup from the main process.
    if os.getpid() != main_pid:
        return
    # Determine if control persist is being used and if any open sockets
    # exist after running the playbook.
    cp_path = os.environ.get('ANSIBLE_SSH_CONTROL_PATH', '')
    if not cp_path:
        return
    cp_dir = os.path.dirname(cp_path)
    if not os.path.exists(cp_dir):
        return
    cp_pattern = os.path.join(cp_dir, 'ansible-ssh-*')
    cp_files = glob.glob(cp_pattern)
    if not cp_files:
        return

    # Attempt to find any running control master processes.
    username = pwd.getpwuid(os.getuid())[0]
    ssh_cm_procs = []
    for proc in psutil.process_iter():
        try:
            pname = proc.name()
            pcmdline = proc.cmdline()
            pusername = proc.username()
        except psutil.NoSuchProcess:
            continue
        if pusername != username:
            continue
        if pname != 'ssh':
            continue
        for cp_file in cp_files:
            if pcmdline and cp_file in pcmdline[0]:
                ssh_cm_procs.append(proc)
                break

    # Terminate, then kill, control master processes. Works around older
    # versions of psutil that may not have wait_procs implemented.
    for proc in ssh_cm_procs:
        try:
            proc.terminate()
        except psutil.NoSuchProcess:
            continue
    procs_gone, procs_alive = psutil.wait_procs(ssh_cm_procs, timeout=5)
    for proc in procs_alive:
        proc.kill()
98
awx/lib/tower_display_callback/display.py
Normal file
@@ -0,0 +1,98 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)

# Python
import functools
import sys
import uuid

# Ansible
from ansible.utils.display import Display

# Tower Display Callback
from .events import event_context

__all__ = []


def with_context(**context):
    global event_context

    def wrap(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            with event_context.set_local(**context):
                return f(*args, **kwargs)
        return wrapper
    return wrap


for attr in dir(Display):
    if attr.startswith('_') or 'cow' in attr or 'prompt' in attr:
        continue
    if attr in ('display', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv', 'verbose'):
        continue
    if not callable(getattr(Display, attr)):
        continue
    setattr(Display, attr, with_context(**{attr: True})(getattr(Display, attr)))


def with_verbosity(f):
    global event_context

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        host = args[2] if len(args) >= 3 else kwargs.get('host', None)
        caplevel = args[3] if len(args) >= 4 else kwargs.get('caplevel', 2)
        context = dict(verbose=True, verbosity=(caplevel + 1))
        if host is not None:
            context['remote_addr'] = host
        with event_context.set_local(**context):
            return f(*args, **kwargs)
    return wrapper


Display.verbose = with_verbosity(Display.verbose)


def display_with_context(f):

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        log_only = args[5] if len(args) >= 6 else kwargs.get('log_only', False)
        stderr = args[3] if len(args) >= 4 else kwargs.get('stderr', False)
        event_uuid = event_context.get().get('uuid', None)
        with event_context.display_lock:
            # If writing only to a log file, or if there is already an event UUID
            # set (from a callback module method), skip dumping the event data.
            if log_only or event_uuid:
                return f(*args, **kwargs)
            try:
                fileobj = sys.stderr if stderr else sys.stdout
                event_context.add_local(uuid=str(uuid.uuid4()))
                event_context.dump_begin(fileobj)
                return f(*args, **kwargs)
            finally:
                event_context.dump_end(fileobj)
                event_context.remove_local(uuid=None)

    return wrapper


Display.display = display_with_context(Display.display)
214
awx/lib/tower_display_callback/events.py
Normal file
@@ -0,0 +1,214 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)

# Python
import base64
import contextlib
import datetime
import json
import logging
import multiprocessing
import os
import threading
import uuid
import memcache

# Kombu
from kombu import Connection, Exchange, Producer

__all__ = ['event_context']


class CallbackQueueEventDispatcher(object):

    def __init__(self):
        self.callback_connection = os.getenv('CALLBACK_CONNECTION', None)
        self.connection_queue = os.getenv('CALLBACK_QUEUE', '')
        self.connection = None
        self.exchange = None
        self._init_logging()

    def _init_logging(self):
        try:
            self.job_callback_debug = int(os.getenv('JOB_CALLBACK_DEBUG', '0'))
        except ValueError:
            self.job_callback_debug = 0
        self.logger = logging.getLogger('awx.plugins.callback.job_event_callback')
        if self.job_callback_debug >= 2:
            self.logger.setLevel(logging.DEBUG)
        elif self.job_callback_debug >= 1:
            self.logger.setLevel(logging.INFO)
        else:
            self.logger.setLevel(logging.WARNING)
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(levelname)-8s %(process)-8d %(message)s')
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        self.logger.propagate = False

    def dispatch(self, obj):
        if not self.callback_connection or not self.connection_queue:
            return
        active_pid = os.getpid()
        for retry_count in xrange(4):
            try:
                # Reset the connection if it was created in another process
                # (e.g. before a fork); kombu connections are not fork-safe.
                if not hasattr(self, 'connection_pid'):
                    self.connection_pid = active_pid
                if self.connection_pid != active_pid:
                    self.connection = None
                if self.connection is None:
                    self.connection = Connection(self.callback_connection)
                    self.exchange = Exchange(self.connection_queue, type='direct')

                producer = Producer(self.connection)
                producer.publish(obj,
                                 serializer='json',
                                 compression='bzip2',
                                 exchange=self.exchange,
                                 declare=[self.exchange],
                                 routing_key=self.connection_queue)
                return
            except Exception, e:
                self.logger.info('Publish Job Event Exception: %r, retry=%d', e,
                                 retry_count, exc_info=True)
                retry_count += 1
                if retry_count >= 3:
                    break


class EventContext(object):
    '''
    Store global and local (per thread/process) data associated with callback
    events and other display output methods.
    '''

    def __init__(self):
        self.display_lock = multiprocessing.RLock()
        self.dispatcher = CallbackQueueEventDispatcher()
        cache_actual = os.getenv('CACHE', '127.0.0.1:11211')
        self.cache = memcache.Client([cache_actual], debug=0)

    def add_local(self, **kwargs):
        if not hasattr(self, '_local'):
            self._local = threading.local()
            self._local._ctx = {}
        self._local._ctx.update(kwargs)

    def remove_local(self, **kwargs):
        if hasattr(self, '_local'):
            for key in kwargs.keys():
                self._local._ctx.pop(key, None)

    @contextlib.contextmanager
    def set_local(self, **kwargs):
        try:
            self.add_local(**kwargs)
            yield
        finally:
            self.remove_local(**kwargs)

    def get_local(self):
        return getattr(getattr(self, '_local', None), '_ctx', {})

    def add_global(self, **kwargs):
        if not hasattr(self, '_global_ctx'):
            self._global_ctx = {}
        self._global_ctx.update(kwargs)

    def remove_global(self, **kwargs):
        if hasattr(self, '_global_ctx'):
            for key in kwargs.keys():
                self._global_ctx.pop(key, None)

    @contextlib.contextmanager
    def set_global(self, **kwargs):
        try:
            self.add_global(**kwargs)
            yield
        finally:
            self.remove_global(**kwargs)

    def get_global(self):
        return getattr(self, '_global_ctx', {})

    def get(self):
        ctx = {}
        ctx.update(self.get_global())
        ctx.update(self.get_local())
        return ctx

    def get_begin_dict(self):
        event_data = self.get()
        if os.getenv('JOB_ID', ''):
            event_data['job_id'] = int(os.getenv('JOB_ID', '0'))
        if os.getenv('AD_HOC_COMMAND_ID', ''):
            event_data['ad_hoc_command_id'] = int(os.getenv('AD_HOC_COMMAND_ID', '0'))
        event_data.setdefault('pid', os.getpid())
        event_data.setdefault('uuid', str(uuid.uuid4()))
        event_data.setdefault('created', datetime.datetime.utcnow().isoformat())
        if not event_data.get('parent_uuid', None) and event_data.get('job_id', None):
            for key in ('task_uuid', 'play_uuid', 'playbook_uuid'):
                parent_uuid = event_data.get(key, None)
                if parent_uuid and parent_uuid != event_data.get('uuid', None):
                    event_data['parent_uuid'] = parent_uuid
                    break

        event = event_data.pop('event', None)
        if not event:
            event = 'verbose'
            for key in ('debug', 'verbose', 'deprecated', 'warning', 'system_warning', 'error'):
                if event_data.get(key, False):
                    event = key
                    break
        max_res = int(os.getenv("MAX_EVENT_RES", 700000))
        if event not in ('playbook_on_stats',) and "res" in event_data and len(str(event_data['res'])) > max_res:
            event_data['res'] = {}
        event_dict = dict(event=event, event_data=event_data)
        for key in event_data.keys():
            if key in ('job_id', 'ad_hoc_command_id', 'uuid', 'parent_uuid', 'created',):
                event_dict[key] = event_data.pop(key)
            elif key in ('verbosity', 'pid'):
                event_dict[key] = event_data[key]
        return event_dict

    def get_end_dict(self):
        return {}

    def dump(self, fileobj, data, max_width=78, flush=False):
        # Write the base64-encoded event data in chunks, each followed by a
        # cursor-back escape and bracketed by erase-line escapes, so the
        # payload is invisible on a terminal but recoverable from raw stdout.
        b64data = base64.b64encode(json.dumps(data))
        with self.display_lock:
            fileobj.write(u'\x1b[K')
            for offset in xrange(0, len(b64data), max_width):
                chunk = b64data[offset:offset + max_width]
                escaped_chunk = u'{}\x1b[{}D'.format(chunk, len(chunk))
                fileobj.write(escaped_chunk)
            fileobj.write(u'\x1b[K')
            if flush:
                fileobj.flush()

    def dump_begin(self, fileobj):
        begin_dict = self.get_begin_dict()
        self.cache.set(":1:ev-{}".format(begin_dict['uuid']), begin_dict)
        self.dump(fileobj, {'uuid': begin_dict['uuid']})

    def dump_end(self, fileobj):
        self.dump(fileobj, self.get_end_dict(), flush=True)


event_context = EventContext()
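To make the framing concrete: dump_begin() stores the full event dict in memcache keyed by UUID and hides only a small UUID marker inside the ANSI escapes on stdout. A hedged sketch of recovering those markers from captured output; the regex here is inferred from the dump() framing above and is not part of the diff:

    import base64
    import json
    import re

    def extract_events(raw_stdout):
        # Each payload looks like: ESC[K (<b64-chunk> ESC[<len>D)+ ESC[K
        framed = re.findall(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K', raw_stdout)
        for payload in framed:
            # Strip the cursor-back escapes and rejoin the base64 chunks.
            b64 = ''.join(re.findall(r'([A-Za-z0-9+/=]+)\x1b\[\d+D', payload))
            yield json.loads(base64.b64decode(b64))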
28
awx/lib/tower_display_callback/minimal.py
Normal file
@@ -0,0 +1,28 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)

# Python
import os

# Ansible
import ansible

# Because of the way Ansible loads plugins, it's not possible to import
# ansible.plugins.callback.minimal when being loaded as the minimal plugin. Ugh.
execfile(os.path.join(os.path.dirname(ansible.__file__), 'plugins', 'callback', 'minimal.py'))
461
awx/lib/tower_display_callback/module.py
Normal file
@@ -0,0 +1,461 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)

# Python
import contextlib
import sys
import uuid

# Ansible
from ansible.plugins.callback import CallbackBase
from ansible.plugins.callback.default import CallbackModule as DefaultCallbackModule

# Tower Display Callback
from .events import event_context
from .minimal import CallbackModule as MinimalCallbackModule


class BaseCallbackModule(CallbackBase):
    '''
    Callback module for logging ansible/ansible-playbook events.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'

    # These events should never have an associated play.
    EVENTS_WITHOUT_PLAY = [
        'playbook_on_start',
        'playbook_on_stats',
    ]

    # These events should never have an associated task.
    EVENTS_WITHOUT_TASK = EVENTS_WITHOUT_PLAY + [
        'playbook_on_setup',
        'playbook_on_notify',
        'playbook_on_import_for_host',
        'playbook_on_not_import_for_host',
        'playbook_on_no_hosts_matched',
        'playbook_on_no_hosts_remaining',
    ]

    CENSOR_FIELD_WHITELIST = [
        'msg',
        'failed',
        'changed',
        'results',
        'start',
        'end',
        'delta',
        'cmd',
        '_ansible_no_log',
        'rc',
        'failed_when_result',
        'skipped',
        'skip_reason',
    ]

    def __init__(self):
        super(BaseCallbackModule, self).__init__()
        self.task_uuids = set()

    @contextlib.contextmanager
    def capture_event_data(self, event, **event_data):
        event_data.setdefault('uuid', str(uuid.uuid4()))

        if event not in self.EVENTS_WITHOUT_TASK:
            task = event_data.pop('task', None)
        else:
            task = None

        with event_context.display_lock:
            try:
                event_context.add_local(event=event, **event_data)
                if task:
                    self.set_task(task, local=True)
                event_context.dump_begin(sys.stdout)
                yield
            finally:
                event_context.dump_end(sys.stdout)
                if task:
                    self.clear_task(local=True)
                event_context.remove_local(event=None, **event_data)

    def set_playbook(self, playbook):
        # NOTE: Ansible doesn't generate a UUID for playbook_on_start, so do it for them.
        self.playbook_uuid = str(uuid.uuid4())
        file_name = getattr(playbook, '_file_name', '???')
        event_context.add_global(playbook=file_name, playbook_uuid=self.playbook_uuid)
        self.clear_play()

    def set_play(self, play):
        if hasattr(play, 'hosts'):
            if isinstance(play.hosts, list):
                pattern = ','.join(play.hosts)
            else:
                pattern = play.hosts
        else:
            pattern = ''
        name = play.get_name().strip() or pattern
        event_context.add_global(play=name, play_uuid=str(play._uuid), play_pattern=pattern)
        self.clear_task()

    def clear_play(self):
        event_context.remove_global(play=None, play_uuid=None, play_pattern=None)
        self.clear_task()

    def set_task(self, task, local=False):
        # FIXME: Task is "global" unless using free strategy!
        task_ctx = dict(
            task=(task.name or task.action),
            task_uuid=str(task._uuid),
            task_action=task.action,
        )
        try:
            task_ctx['task_path'] = task.get_path()
        except AttributeError:
            pass
        if not task.no_log:
            task_args = ', '.join(('%s=%s' % a for a in task.args.items()))
            task_ctx['task_args'] = task_args
        if getattr(task, '_role', None):
            task_role = task._role._role_name
        else:
            task_role = getattr(task, 'role_name', '')
        if task_role:
            task_ctx['role'] = task_role
        if local:
            event_context.add_local(**task_ctx)
        else:
            event_context.add_global(**task_ctx)

    def clear_task(self, local=False):
        task_ctx = dict(task=None, task_path=None, task_uuid=None, task_action=None, task_args=None, role=None)
        if local:
            event_context.remove_local(**task_ctx)
        else:
            event_context.remove_global(**task_ctx)

    def v2_playbook_on_start(self, playbook):
        self.set_playbook(playbook)
        event_data = dict(
            uuid=self.playbook_uuid,
        )
        with self.capture_event_data('playbook_on_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_start(playbook)

    def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
                                   encrypt=None, confirm=False, salt_size=None,
                                   salt=None, default=None):
        event_data = dict(
            varname=varname,
            private=private,
            prompt=prompt,
            encrypt=encrypt,
            confirm=confirm,
            salt_size=salt_size,
            salt=salt,
            default=default,
        )
        with self.capture_event_data('playbook_on_vars_prompt', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_vars_prompt(
                varname, private, prompt, encrypt, confirm, salt_size, salt,
                default,
            )

    def v2_playbook_on_include(self, included_file):
        event_data = dict(
            included_file=included_file._filename if included_file is not None else None,
        )
        with self.capture_event_data('playbook_on_include', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_include(included_file)

    def v2_playbook_on_play_start(self, play):
        self.set_play(play)
        if hasattr(play, 'hosts'):
            if isinstance(play.hosts, list):
                pattern = ','.join(play.hosts)
            else:
                pattern = play.hosts
        else:
            pattern = ''
        name = play.get_name().strip() or pattern
        event_data = dict(
            name=name,
            pattern=pattern,
            uuid=str(play._uuid),
        )
        with self.capture_event_data('playbook_on_play_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_play_start(play)

    def v2_playbook_on_import_for_host(self, result, imported_file):
        # NOTE: Not used by Ansible 2.x.
        with self.capture_event_data('playbook_on_import_for_host'):
            super(BaseCallbackModule, self).v2_playbook_on_import_for_host(result, imported_file)

    def v2_playbook_on_not_import_for_host(self, result, missing_file):
        # NOTE: Not used by Ansible 2.x.
        with self.capture_event_data('playbook_on_not_import_for_host'):
            super(BaseCallbackModule, self).v2_playbook_on_not_import_for_host(result, missing_file)

    def v2_playbook_on_setup(self):
        # NOTE: Not used by Ansible 2.x.
        with self.capture_event_data('playbook_on_setup'):
            super(BaseCallbackModule, self).v2_playbook_on_setup()

    def v2_playbook_on_task_start(self, task, is_conditional):
        # FIXME: Flag task path output as vv.
        task_uuid = str(task._uuid)
        if task_uuid in self.task_uuids:
            # FIXME: When this task UUID repeats, it means the play is using the
            # free strategy, so different hosts may be running different tasks
            # within a play.
            return
        self.task_uuids.add(task_uuid)
        self.set_task(task)
        event_data = dict(
            task=task,
            name=task.get_name(),
            is_conditional=is_conditional,
            uuid=task_uuid,
        )
        with self.capture_event_data('playbook_on_task_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_task_start(task, is_conditional)

    def v2_playbook_on_cleanup_task_start(self, task):
        # NOTE: Not used by Ansible 2.x.
        self.set_task(task)
        event_data = dict(
            task=task,
            name=task.get_name(),
            uuid=str(task._uuid),
            is_conditional=True,
        )
        with self.capture_event_data('playbook_on_task_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_cleanup_task_start(task)

    def v2_playbook_on_handler_task_start(self, task):
        # NOTE: Re-using the playbook_on_task_start event for this v2-specific
        # event, but setting is_conditional=True, which is how v1 identified a
        # task run as a handler.
        self.set_task(task)
        event_data = dict(
            task=task,
            name=task.get_name(),
            uuid=str(task._uuid),
            is_conditional=True,
        )
        with self.capture_event_data('playbook_on_task_start', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_handler_task_start(task)

    def v2_playbook_on_no_hosts_matched(self):
        with self.capture_event_data('playbook_on_no_hosts_matched'):
            super(BaseCallbackModule, self).v2_playbook_on_no_hosts_matched()

    def v2_playbook_on_no_hosts_remaining(self):
        with self.capture_event_data('playbook_on_no_hosts_remaining'):
            super(BaseCallbackModule, self).v2_playbook_on_no_hosts_remaining()

    def v2_playbook_on_notify(self, result, handler):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            handler=handler,
        )
        with self.capture_event_data('playbook_on_notify', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_notify(result, handler)

    # NOTE: ansible_stats was retroactively added in Ansible 2.2.
    def v2_playbook_on_stats(self, stats):
        self.clear_play()
        # FIXME: Add count of plays/tasks.
        event_data = dict(
            changed=stats.changed,
            dark=stats.dark,
            failures=stats.failures,
            ok=stats.ok,
            processed=stats.processed,
            skipped=stats.skipped,
            artifact_data=stats.custom.get('_run', {}) if hasattr(stats, 'custom') else {}
        )

        with self.capture_event_data('playbook_on_stats', **event_data):
            super(BaseCallbackModule, self).v2_playbook_on_stats(stats)

    def v2_runner_on_ok(self, result):
        # FIXME: Display detailed results or not based on verbosity.
        event_data = dict(
            host=result._host.get_name(),
            remote_addr=result._host.address,
            task=result._task,
            res=result._result,
            event_loop=result._task.loop if hasattr(result._task, 'loop') else None,
        )
        with self.capture_event_data('runner_on_ok', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_ok(result)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        # FIXME: Add verbosity for exception/results output.
        event_data = dict(
            host=result._host.get_name(),
            remote_addr=result._host.address,
            res=result._result,
            task=result._task,
            ignore_errors=ignore_errors,
            event_loop=result._task.loop if hasattr(result._task, 'loop') else None,
        )
        with self.capture_event_data('runner_on_failed', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_failed(result, ignore_errors)

    def v2_runner_on_skipped(self, result):
        event_data = dict(
            host=result._host.get_name(),
            remote_addr=result._host.address,
            task=result._task,
            event_loop=result._task.loop if hasattr(result._task, 'loop') else None,
        )
        with self.capture_event_data('runner_on_skipped', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_skipped(result)

    def v2_runner_on_unreachable(self, result):
        event_data = dict(
            host=result._host.get_name(),
            remote_addr=result._host.address,
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_on_unreachable', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_unreachable(result)

    def v2_runner_on_no_hosts(self, task):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            task=task,
        )
        with self.capture_event_data('runner_on_no_hosts', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_no_hosts(task)

    def v2_runner_on_async_poll(self, result):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
            jid=result._result.get('ansible_job_id'),
        )
        with self.capture_event_data('runner_on_async_poll', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_async_poll(result)

    def v2_runner_on_async_ok(self, result):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
            jid=result._result.get('ansible_job_id'),
        )
        with self.capture_event_data('runner_on_async_ok', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_async_ok(result)

    def v2_runner_on_async_failed(self, result):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
            jid=result._result.get('ansible_job_id'),
        )
        with self.capture_event_data('runner_on_async_failed', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_async_failed(result)

    def v2_runner_on_file_diff(self, result, diff):
        # NOTE: Not used by Ansible 2.x.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            diff=diff,
        )
        with self.capture_event_data('runner_on_file_diff', **event_data):
            super(BaseCallbackModule, self).v2_runner_on_file_diff(result, diff)

    def v2_on_file_diff(self, result):
        # NOTE: Logged as runner_on_file_diff.
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            diff=result._result.get('diff'),
        )
        with self.capture_event_data('runner_on_file_diff', **event_data):
            super(BaseCallbackModule, self).v2_on_file_diff(result)

    def v2_runner_item_on_ok(self, result):
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_item_on_ok', **event_data):
            super(BaseCallbackModule, self).v2_runner_item_on_ok(result)

    def v2_runner_item_on_failed(self, result):
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_item_on_failed', **event_data):
            super(BaseCallbackModule, self).v2_runner_item_on_failed(result)

    def v2_runner_item_on_skipped(self, result):
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_item_on_skipped', **event_data):
            super(BaseCallbackModule, self).v2_runner_item_on_skipped(result)

    def v2_runner_retry(self, result):
        event_data = dict(
            host=result._host.get_name(),
            task=result._task,
            res=result._result,
        )
        with self.capture_event_data('runner_retry', **event_data):
            super(BaseCallbackModule, self).v2_runner_retry(result)


class TowerDefaultCallbackModule(BaseCallbackModule, DefaultCallbackModule):

    CALLBACK_NAME = 'tower_display'


class TowerMinimalCallbackModule(BaseCallbackModule, MinimalCallbackModule):

    CALLBACK_NAME = 'minimal'

    def v2_playbook_on_play_start(self, play):
        pass

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.set_task(task)
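Each v2_* hook above funnels through capture_event_data(), which stamps a per-event UUID, stores the full event dict in memcache via dump_begin(), and clears the local context afterwards. A hedged sketch of the resulting envelope shape, with field names taken from get_begin_dict() in events.py and all values invented for illustration:

    # Illustrative shape of one cached event; every value here is made up.
    event = {
        'event': 'runner_on_ok',
        'uuid': '6f0b4f2e-0000-0000-0000-000000000000',   # set per event
        'parent_uuid': 'c2d9a1b0-0000-0000-0000-000000000000',  # task/play/playbook
        'created': '2017-01-01T00:00:00',
        'pid': 12345,
        'event_data': {
            'host': 'web1.example.org',
            'task': 'Install nginx',
            'res': {'changed': True},
        },
    }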
3833
awx/locale/django.pot
Normal file
File diff suppressed because it is too large
3812
awx/locale/en-us/LC_MESSAGES/django.po
Normal file
File diff suppressed because it is too large
4453
awx/locale/fr/LC_MESSAGES/django.po
Normal file
File diff suppressed because it is too large
4126
awx/locale/ja/LC_MESSAGES/django.po
Normal file
File diff suppressed because it is too large
@@ -1,2 +1,4 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

default_app_config = 'awx.main.apps.MainConfig'
1033
awx/main/access.py
File diff suppressed because it is too large
9
awx/main/apps.py
Normal file
@@ -0,0 +1,9 @@
# Django
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _


class MainConfig(AppConfig):

    name = 'awx.main'
    verbose_name = _('Main')
360
awx/main/conf.py
@@ -1,50 +1,326 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

# Python
import json
import logging
import os

from django.conf import settings as django_settings
from django.db.utils import ProgrammingError
from django.db import OperationalError
from awx.main.models.configuration import TowerSettings
# Django
from django.utils.translation import ugettext_lazy as _

# Tower
from awx.conf import fields, register

logger = logging.getLogger('awx.main.conf')


class TowerConfiguration(object):

register(
    'ACTIVITY_STREAM_ENABLED',
    field_class=fields.BooleanField,
    label=_('Enable Activity Stream'),
    help_text=_('Enable capturing activity for the Tower activity stream.'),
    category=_('System'),
    category_slug='system',
    feature_required='activity_streams',
)

    # TODO: Caching so we don't have to hit the database every time for settings
    def __getattr__(self, key):
        settings_manifest = django_settings.TOWER_SETTINGS_MANIFEST
        if key not in settings_manifest:
            raise AttributeError("Tower Setting with key '{0}' is not defined in the manifest".format(key))
        default_value = settings_manifest[key]['default']
        ts = TowerSettings.objects.filter(key=key)
        try:
            if not ts.exists():
                try:
                    val_actual = getattr(django_settings, key)
                except AttributeError:
                    val_actual = default_value
                return val_actual
            return ts[0].value_converted
        except (ProgrammingError, OperationalError), e:
            # Database is not available yet, usually during migrations, so use the default.
            logger.debug("Database settings not available yet, using defaults ({0})".format(e))
            return default_value

register(
    'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC',
    field_class=fields.BooleanField,
    label=_('Enable Activity Stream for Inventory Sync'),
    help_text=_('Enable capturing activity for the Tower activity stream when running inventory sync.'),
    category=_('System'),
    category_slug='system',
    feature_required='activity_streams',
)

    def __setattr__(self, key, value):
        settings_manifest = django_settings.TOWER_SETTINGS_MANIFEST
        if key not in settings_manifest:
            raise AttributeError("Tower Setting with key '{0}' does not exist".format(key))
        settings_entry = settings_manifest[key]
        try:
            settings_actual = TowerSettings.objects.get(key=key)
        except TowerSettings.DoesNotExist:
            settings_actual = TowerSettings(key=key,
                                            description=settings_entry['description'],
                                            category=settings_entry['category'],
                                            value_type=settings_entry['type'])
        settings_actual.value_converted = value
        settings_actual.save()

register(
    'ORG_ADMINS_CAN_SEE_ALL_USERS',
    field_class=fields.BooleanField,
    label=_('All Users Visible to Organization Admins'),
    help_text=_('Controls whether any Organization Admin can view all users, even those not associated with their Organization.'),
    category=_('System'),
    category_slug='system',
)

tower_settings = TowerConfiguration()

register(
    'TOWER_ADMIN_ALERTS',
    field_class=fields.BooleanField,
    label=_('Enable Tower Administrator Alerts'),
    help_text=_('Allow Tower to email Admin users for system events that may require attention.'),
    category=_('System'),
    category_slug='system',
)

register(
    'TOWER_URL_BASE',
    field_class=fields.URLField,
    schemes=('http', 'https'),
    allow_plain_hostname=True,  # Allow hostname only without TLD.
    label=_('Base URL of the Tower host'),
    help_text=_('This setting is used by services like notifications to render '
                'a valid url to the Tower host.'),
    category=_('System'),
    category_slug='system',
)

register(
    'REMOTE_HOST_HEADERS',
    field_class=fields.StringListField,
    label=_('Remote Host Headers'),
    help_text=_('HTTP headers and meta keys to search to determine remote host '
                'name or IP. Add additional items to this list, such as '
                '"HTTP_X_FORWARDED_FOR", if behind a reverse proxy.\n\n'
                'Note: The headers will be searched in order and the first '
                'found remote host name or IP will be used.\n\n'
                'In the below example 8.8.8.7 would be the chosen IP address.\n'
                'X-Forwarded-For: 8.8.8.7, 192.168.2.1, 127.0.0.1\n'
                'Host: 127.0.0.1\n'
                'REMOTE_HOST_HEADERS = [\'HTTP_X_FORWARDED_FOR\', '
                '\'REMOTE_ADDR\', \'REMOTE_HOST\']'),
    category=_('System'),
    category_slug='system',
)


def _load_default_license_from_file():
    try:
        license_file = os.environ.get('AWX_LICENSE_FILE', '/etc/tower/license')
        if os.path.exists(license_file):
            license_data = json.load(open(license_file))
            logger.debug('Read license data from "%s".', license_file)
            return license_data
    except:
        logger.warning('Could not read license from "%s".', license_file, exc_info=True)
    return {}


register(
    'LICENSE',
    field_class=fields.DictField,
    default=_load_default_license_from_file,
    label=_('Tower License'),
    help_text=_('The license controls which features and functionality are '
                'enabled in Tower. Use /api/v1/config/ to update or change '
                'the license.'),
    category=_('System'),
    category_slug='system',
)

register(
    'AD_HOC_COMMANDS',
    field_class=fields.StringListField,
    label=_('Ansible Modules Allowed for Ad Hoc Jobs'),
    help_text=_('List of modules allowed to be used by ad-hoc jobs.'),
    category=_('Jobs'),
    category_slug='jobs',
    required=False,
)

register(
    'AWX_PROOT_ENABLED',
    field_class=fields.BooleanField,
    label=_('Enable job isolation'),
    help_text=_('Isolates an Ansible job from protected parts of the Tower system to prevent exposing sensitive information.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'AWX_PROOT_BASE_PATH',
    field_class=fields.CharField,
    label=_('Job isolation execution path'),
    help_text=_('Create temporary working directories for isolated jobs in this location.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'AWX_PROOT_HIDE_PATHS',
    field_class=fields.StringListField,
    required=False,
    label=_('Paths to hide from isolated jobs'),
    help_text=_('Additional paths to hide from isolated processes.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'AWX_PROOT_SHOW_PATHS',
    field_class=fields.StringListField,
    required=False,
    label=_('Paths to expose to isolated jobs'),
    help_text=_('Whitelist of paths that would otherwise be hidden to expose to isolated jobs.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'STDOUT_MAX_BYTES_DISPLAY',
    field_class=fields.IntegerField,
    min_value=0,
    label=_('Standard Output Maximum Display Size'),
    help_text=_('Maximum Size of Standard Output in bytes to display before requiring the output be downloaded.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'EVENT_STDOUT_MAX_BYTES_DISPLAY',
    field_class=fields.IntegerField,
    min_value=0,
    label=_('Job Event Standard Output Maximum Display Size'),
    help_text=_(u'Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. `stdout` will end with `\u2026` when truncated.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'SCHEDULE_MAX_JOBS',
    field_class=fields.IntegerField,
    min_value=1,
    label=_('Maximum Scheduled Jobs'),
    help_text=_('Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'AWX_ANSIBLE_CALLBACK_PLUGINS',
    field_class=fields.StringListField,
    required=False,
    label=_('Ansible Callback Plugins'),
    help_text=_('List of paths to search for extra callback plugins to be used when running jobs.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'DEFAULT_JOB_TIMEOUT',
    field_class=fields.IntegerField,
    min_value=0,
    default=0,
    label=_('Default Job Timeout'),
    help_text=_('Maximum time to allow jobs to run. Use value of 0 to indicate that no '
                'timeout should be imposed. A timeout set on an individual job template will override this.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'DEFAULT_INVENTORY_UPDATE_TIMEOUT',
    field_class=fields.IntegerField,
    min_value=0,
    default=0,
    label=_('Default Inventory Update Timeout'),
    help_text=_('Maximum time to allow inventory updates to run. Use value of 0 to indicate that no '
                'timeout should be imposed. A timeout set on an individual inventory source will override this.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'DEFAULT_PROJECT_UPDATE_TIMEOUT',
    field_class=fields.IntegerField,
    min_value=0,
    default=0,
    label=_('Default Project Update Timeout'),
    help_text=_('Maximum time to allow project updates to run. Use value of 0 to indicate that no '
                'timeout should be imposed. A timeout set on an individual project will override this.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'LOG_AGGREGATOR_HOST',
    field_class=fields.CharField,
    allow_null=True,
    label=_('Logging Aggregator'),
    help_text=_('Hostname/IP where external logs will be sent to.'),
    category=_('Logging'),
    category_slug='logging',
)
register(
    'LOG_AGGREGATOR_PORT',
    field_class=fields.IntegerField,
    allow_null=True,
    label=_('Logging Aggregator Port'),
    help_text=_('Port on Logging Aggregator to send logs to (if required).'),
    category=_('Logging'),
    category_slug='logging',
)
register(
    'LOG_AGGREGATOR_TYPE',
    field_class=fields.ChoiceField,
    choices=['logstash', 'splunk', 'loggly', 'sumologic', 'other'],
    allow_null=True,
    label=_('Logging Aggregator Type'),
    help_text=_('Format messages for the chosen log aggregator.'),
    category=_('Logging'),
    category_slug='logging',
)
register(
    'LOG_AGGREGATOR_USERNAME',
    field_class=fields.CharField,
    allow_blank=True,
    default='',
    label=_('Logging Aggregator Username'),
    help_text=_('Username for external log aggregator (if required).'),
    category=_('Logging'),
    category_slug='logging',
    required=False,
)
register(
    'LOG_AGGREGATOR_PASSWORD',
    field_class=fields.CharField,
    allow_blank=True,
    default='',
    encrypted=True,
    label=_('Logging Aggregator Password/Token'),
    help_text=_('Password or authentication token for external log aggregator (if required).'),
    category=_('Logging'),
    category_slug='logging',
    required=False,
)
register(
    'LOG_AGGREGATOR_LOGGERS',
    field_class=fields.StringListField,
    default=['awx', 'activity_stream', 'job_events', 'system_tracking'],
    label=_('Loggers to send data to the log aggregator from'),
    help_text=_('List of loggers that will send HTTP logs to the collector, these can '
                'include any or all of: \n'
                'awx - Tower service logs\n'
                'activity_stream - activity stream records\n'
                'job_events - callback data from Ansible job events\n'
                'system_tracking - facts gathered from scan jobs.'),
    category=_('Logging'),
    category_slug='logging',
)
register(
    'LOG_AGGREGATOR_INDIVIDUAL_FACTS',
    field_class=fields.BooleanField,
    default=False,
    label=_('Log System Tracking Facts Individually'),
    help_text=_('If set, system tracking facts will be sent for each package, service, or '
                'other item found in a scan, allowing for greater search query granularity. '
                'If unset, facts will be sent as a single dictionary, allowing for greater '
                'efficiency in fact processing.'),
    category=_('Logging'),
    category_slug='logging',
)
register(
    'LOG_AGGREGATOR_ENABLED',
    field_class=fields.BooleanField,
    default=False,
    label=_('Enable External Logging'),
    help_text=_('Enable sending logs to external log aggregator.'),
    category=_('Logging'),
    category_slug='logging',
)
register(
    'LOG_AGGREGATOR_TOWER_UUID',
    field_class=fields.CharField,
    allow_blank=True,
    label=_('Cluster-wide Tower unique identifier.'),
    help_text=_('Useful to uniquely identify Tower instances.'),
    category=_('Logging'),
    category_slug='logging',
    default=None,
)
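The register() calls above replace the removed TowerConfiguration attribute shim with declarative setting definitions. A hedged sketch of how a new setting would be declared under the same API; the name and texts here are invented for illustration and are not part of this diff:

    # Hypothetical example only -- not a setting added by this merge.
    register(
        'EXAMPLE_FEATURE_ENABLED',
        field_class=fields.BooleanField,
        default=False,
        label=_('Enable Example Feature'),
        help_text=_('Illustrative boolean setting registered with awx.conf.'),
        category=_('System'),
        category_slug='system',
    )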
89
awx/main/consumers.py
Normal file
@ -0,0 +1,89 @@
|
||||
import json
|
||||
import logging
|
||||
import urllib
|
||||
|
||||
from channels import Group
|
||||
from channels.sessions import channel_session
|
||||
from channels.handler import AsgiRequest
|
||||
|
||||
from django.core.serializers.json import DjangoJSONEncoder
|
||||
|
||||
from django.contrib.auth.models import User
|
||||
from awx.main.models.organization import AuthToken
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.consumers')
|
||||
|
||||
|
||||
def discard_groups(message):
|
||||
if 'groups' in message.channel_session:
|
||||
for group in message.channel_session['groups']:
|
||||
Group(group).discard(message.reply_channel)
|
||||
|
||||
|
||||
@channel_session
|
||||
def ws_connect(message):
|
||||
connect_text = {'accept':False, 'user':None}
|
||||
|
||||
message.content['method'] = 'FAKE'
|
||||
request = AsgiRequest(message)
|
||||
token = request.COOKIES.get('token', None)
|
||||
if token is not None:
|
||||
token = urllib.unquote(token).strip('"')
|
||||
try:
|
||||
auth_token = AuthToken.objects.get(key=token)
|
||||
if auth_token.in_valid_tokens:
|
||||
message.channel_session['user_id'] = auth_token.user_id
|
||||
connect_text['accept'] = True
|
||||
connect_text['user'] = auth_token.user_id
|
||||
except AuthToken.DoesNotExist:
|
||||
logger.error("auth_token provided was invalid.")
|
||||
message.reply_channel.send({"text": json.dumps(connect_text)})
|
||||
|
||||
|
||||
@channel_session
|
||||
def ws_disconnect(message):
|
||||
discard_groups(message)
|
||||
|
||||
|
||||
@channel_session
|
||||
def ws_receive(message):
|
||||
from awx.main.access import consumer_access
|
||||
|
||||
user_id = message.channel_session.get('user_id', None)
|
||||
if user_id is None:
|
||||
logger.error("No valid user found for websocket.")
|
||||
message.reply_channel.send({"text": json.dumps({"error": "no valid user"})})
|
||||
return None
|
||||
|
||||
user = User.objects.get(pk=user_id)
|
||||
raw_data = message.content['text']
|
||||
data = json.loads(raw_data)
|
||||
|
||||
if 'groups' in data:
|
||||
discard_groups(message)
|
||||
groups = data['groups']
|
||||
current_groups = set(message.channel_session.pop('groups') if 'groups' in message.channel_session else [])
|
||||
for group_name,v in groups.items():
|
||||
if type(v) is list:
|
||||
for oid in v:
|
||||
name = '{}-{}'.format(group_name, oid)
|
||||
access_cls = consumer_access(group_name)
|
||||
if access_cls is not None:
|
||||
user_access = access_cls(user)
|
||||
if not user_access.get_queryset().filter(pk=oid).exists():
|
||||
message.reply_channel.send({"text": json.dumps({"error": "access denied to channel {0} for resource id {1}".format(group_name, oid)})})
|
||||
continue
|
||||
current_groups.add(name)
|
||||
Group(name).add(message.reply_channel)
|
||||
else:
|
||||
current_groups.add(group_name)
|
||||
Group(group_name).add(message.reply_channel)
|
||||
message.channel_session['groups'] = list(current_groups)
|
||||
|
||||
|
||||
def emit_channel_notification(group, payload):
|
||||
try:
|
||||
Group(group).send({"text": json.dumps(payload, cls=DjangoJSONEncoder)})
|
||||
except ValueError:
|
||||
logger.error("Invalid payload emitting channel {} on topic: {}".format(group, payload))
|
||||
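
A client-side sketch (not part of the diff) of the handshake and group subscription that ws_connect() and ws_receive() above implement; it assumes the third-party websocket-client package, and the endpoint URL and token value are illustrative:

# pip install websocket-client
import json
import websocket

ws = websocket.create_connection(
    'wss://tower.example.com/websocket/',  # hypothetical endpoint
    cookie='token="<auth-token-key>"',     # read by ws_connect() above
)
print(ws.recv())  # e.g. {"accept": true, "user": 1}

# A list value joins per-object groups such as "jobs-42" (RBAC-checked
# via consumer_access); any other value joins the bare group name.
ws.send(json.dumps({'groups': {'jobs': [42]}}))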
@ -19,17 +19,31 @@ from django.db.models.fields.related import (
)
from django.utils.encoding import smart_text

# Django-JSONField
from jsonfield import JSONField as upstream_JSONField

# AWX
from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
from awx.main.utils import get_current_apps


__all__ = ['AutoOneToOneField', 'ImplicitRoleField']
__all__ = ['AutoOneToOneField', 'ImplicitRoleField', 'JSONField']


class JSONField(upstream_JSONField):

    def db_type(self, connection):
        return 'text'

    def from_db_value(self, value, expression, connection, context):
        if value in {'', None} and not self.null:
            return {}
        return super(JSONField, self).from_db_value(value, expression, connection, context)

# Based on AutoOneToOneField from django-annoying:
# https://bitbucket.org/offline/django-annoying/src/a0de8b294db3/annoying/fields.py


class AutoSingleRelatedObjectDescriptor(SingleRelatedObjectDescriptor):
    """Descriptor for access to the object from its related class."""

@ -46,6 +60,7 @@ class AutoSingleRelatedObjectDescriptor(SingleRelatedObjectDescriptor):
        obj.save()
        return obj


class AutoOneToOneField(models.OneToOneField):
    """OneToOneField that creates related object if it doesn't exist."""

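
A small illustration (assumed values, not from the diff) of the empty-value branch in from_db_value() above: an empty string or NULL coming back from a NOT NULL text column is normalized to an empty dict, so callers can always treat the field as a mapping:

# Sketch only: the expression/connection/context arguments are unused
# on the empty-value branch, so None placeholders suffice here.
f = JSONField(null=False)
assert f.from_db_value('', None, None, None) == {}
assert f.from_db_value(None, None, None, None) == {}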
@ -4,6 +4,7 @@
# AWX
from awx.main.models import Instance


def is_ha_environment():
    """Return True if this is an HA environment, and False
    otherwise.

@ -1,172 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

from optparse import make_option

from django.core.management.base import BaseCommand, CommandError
from django.conf import settings

from awx.main.models import Project


class OptionEnforceError(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)

class BaseCommandInstance(BaseCommand):
    #option_list = BaseCommand.option_list

    def __init__(self):
        super(BaseCommandInstance, self).__init__()
        self.enforce_primary_role = False
        self.enforce_roles = False
        self.enforce_hostname_set = False
        self.enforce_unique_find = False

        self.option_primary = False
        self.option_secondary = False
        self.option_hostname = None
        self.option_uuid = None

        self.UUID = settings.SYSTEM_UUID
        self.unique_fields = {}

    @staticmethod
    def generate_option_hostname():
        return make_option('--hostname',
                           dest='hostname',
                           default='',
                           help='Find instance by specified hostname.')

    @staticmethod
    def generate_option_hostname_set():
        return make_option('--hostname',
                           dest='hostname',
                           default='',
                           help='Hostname to assign to the new instance.')

    @staticmethod
    def generate_option_primary():
        return make_option('--primary',
                           action='store_true',
                           default=False,
                           dest='primary',
                           help='Register instance as primary.')

    @staticmethod
    def generate_option_secondary():
        return make_option('--secondary',
                           action='store_true',
                           default=False,
                           dest='secondary',
                           help='Register instance as secondary.')

    @staticmethod
    def generate_option_uuid():
        return make_option('--uuid',
                           dest='uuid',
                           default='',
                           help='Find instance by specified uuid.')

    def include_option_primary_role(self):
        BaseCommand.option_list += ( BaseCommandInstance.generate_option_primary(), )
        self.enforce_primary_role = True

    def include_options_roles(self):
        self.include_option_primary_role()
        BaseCommand.option_list += ( BaseCommandInstance.generate_option_secondary(), )
        self.enforce_roles = True

    def include_option_hostname_set(self):
        BaseCommand.option_list += ( BaseCommandInstance.generate_option_hostname_set(), )
        self.enforce_hostname_set = True

    def include_option_hostname_uuid_find(self):
        BaseCommand.option_list += ( BaseCommandInstance.generate_option_hostname(), BaseCommandInstance.generate_option_uuid(), )
        self.enforce_unique_find = True

    def get_option_hostname(self):
        return self.option_hostname

    def get_option_uuid(self):
        return self.option_uuid

    def is_option_primary(self):
        return self.option_primary

    def is_option_secondary(self):
        return self.option_secondary

    def get_UUID(self):
        return self.UUID

    # for the enforce_unique_find policy
    def get_unique_fields(self):
        return self.unique_fields

    @property
    def usage_error(self):
        if self.enforce_roles and self.enforce_hostname_set:
            return CommandError('--hostname and one of --primary or --secondary is required.')
        elif self.enforce_hostname_set:
            return CommandError('--hostname is required.')
        elif self.enforce_primary_role:
            return CommandError('--primary is required.')
        elif self.enforce_roles:
            return CommandError('One of --primary or --secondary is required.')

    def handle(self, *args, **options):
        if self.enforce_hostname_set and self.enforce_unique_find:
            raise OptionEnforceError('Can not enforce --hostname as a setter and --hostname as a getter')

        if self.enforce_roles:
            self.option_primary = options['primary']
            self.option_secondary = options['secondary']

            if self.is_option_primary() and self.is_option_secondary() or not (self.is_option_primary() or self.is_option_secondary()):
                raise self.usage_error
        elif self.enforce_primary_role:
            if options['primary']:
                self.option_primary = options['primary']
            else:
                raise self.usage_error

        if self.enforce_hostname_set:
            if options['hostname']:
                self.option_hostname = options['hostname']
            else:
                raise self.usage_error

        if self.enforce_unique_find:
            if options['hostname']:
                self.unique_fields['hostname'] = self.option_hostname = options['hostname']

            if options['uuid']:
                self.unique_fields['uuid'] = self.option_uuid = options['uuid']

            if len(self.unique_fields) == 0:
                self.unique_fields['uuid'] = self.get_UUID()

    @staticmethod
    def __instance_str(instance, fields):
        string = '('
        for field in fields:
            string += '%s="%s",' % (field, getattr(instance, field))
        if len(fields) > 0:
            string = string[:-1]
        string += ')'
        return string

    @staticmethod
    def instance_str(instance):
        return BaseCommandInstance.__instance_str(instance, ('uuid', 'hostname', 'role'))

    def update_projects(self, instance):
        """Update all projects, ensuring the job runs against this instance,
        which is the primary instance.
        """
        for project in Project.objects.all():
            project.update()
@ -13,6 +13,7 @@ from django.utils.timezone import now
# AWX
from awx.main.models import ActivityStream


class Command(NoArgsCommand):
    '''
    Management command to purge old activity stream events.

@ -12,6 +12,7 @@ from django.utils.timezone import now
# AWX
from awx.main.models import * # noqa


class Command(BaseCommand):
    '''
    Management command to cleanup expired auth tokens

@ -13,11 +13,12 @@ from django.utils.timezone import now

# AWX
from awx.main.models.fact import Fact
from awx.api.license import feature_enabled
from awx.conf.license import feature_enabled

OLDER_THAN = 'older_than'
GRANULARITY = 'granularity'


class CleanupFacts(object):
    def __init__(self):
        self.timestamp = None
@ -27,7 +28,7 @@ class CleanupFacts(object):
    # Find all factVersion < pivot && > (pivot - granularity) grouped by host sorted by time descending (because it's indexed this way)
    # foreach group
    # Delete all except LAST entry (or Delete all except the FIRST entry, it's an arbitrary decision)
    #
    #
    # pivot -= granularity
    # group by host
    def cleanup(self, older_than_abs, granularity, module=None):
@ -89,17 +90,18 @@ class CleanupFacts(object):
        deleted_count = self.cleanup(t - older_than, granularity, module=module)
        print("Deleted %d facts." % deleted_count)


class Command(BaseCommand):
    help = 'Cleanup facts. For each host older than the value specified, keep one fact scan for each time window (granularity).'
    option_list = BaseCommand.option_list + (
        make_option('--older_than',
                    dest='older_than',
                    default=None,
                    help='Specify the relative time to consider facts older than (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y).'),
                    default='30d',
                    help='Specify the relative time to consider facts older than (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y). Defaults to 30d.'),
        make_option('--granularity',
                    dest='granularity',
                    default=None,
                    help='Window duration to group same hosts by for deletion (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y).'),
                    default='1w',
                    help='Window duration to group same hosts by for deletion (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y). Defaults to 1w.'),
        make_option('--module',
                    dest='module',
                    default=None,
@ -142,4 +144,3 @@ class Command(BaseCommand):
            raise CommandError('--granularity invalid value "%s"' % options[GRANULARITY])

        cleanup_facts.run(older_than, granularity, module=options['module'])

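
A hedged sketch of driving the command above programmatically (assumes a configured AWX/Django settings module); with the new defaults this is equivalent to running it with no arguments:

from django.core.management import call_command

# Keep one fact scan per host per 1-week window, for facts older than 30 days.
call_command('cleanup_facts', older_than='30d', granularity='1w')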
@ -12,7 +12,18 @@ from django.db import transaction
from django.utils.timezone import now

# AWX
from awx.main.models import Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob
from awx.main.models import (
    Job, AdHocCommand, ProjectUpdate, InventoryUpdate,
    SystemJob, WorkflowJob, Notification
)
from awx.main.signals import ( # noqa
    emit_update_inventory_on_created_or_deleted,
    emit_update_inventory_computed_fields,
    disable_activity_stream,
    disable_computed_fields
)
from django.db.models.signals import post_save, post_delete, m2m_changed # noqa


class Command(NoArgsCommand):
    '''
@ -29,107 +40,140 @@ class Command(NoArgsCommand):
                    'be removed)'),
        make_option('--jobs', dest='only_jobs', action='store_true',
                    default=False,
                    help='Only remove jobs'),
                    help='Remove jobs'),
        make_option('--ad-hoc-commands', dest='only_ad_hoc_commands',
                    action='store_true', default=False,
                    help='Only remove ad hoc commands'),
                    help='Remove ad hoc commands'),
        make_option('--project-updates', dest='only_project_updates',
                    action='store_true', default=False,
                    help='Only remove project updates'),
                    help='Remove project updates'),
        make_option('--inventory-updates', dest='only_inventory_updates',
                    action='store_true', default=False,
                    help='Only remove inventory updates'),
                    help='Remove inventory updates'),
        make_option('--management-jobs', default=False,
                    action='store_true', dest='only_management_jobs',
                    help='Only remove management jobs')
                    help='Remove management jobs'),
        make_option('--notifications', dest='only_notifications',
                    action='store_true', default=False,
                    help='Remove notifications'),
        make_option('--workflow-jobs', default=False,
                    action='store_true', dest='only_workflow_jobs',
                    help='Remove workflow jobs')
    )

    def cleanup_jobs(self):
        #jobs_qs = Job.objects.exclude(status__in=('pending', 'running'))
        #jobs_qs = jobs_qs.filter(created__lte=self.cutoff)
        skipped, deleted = 0, 0
        for job in Job.objects.all():
            job_display = '"%s" (started %s, %d host summaries, %d events)' % \
                (unicode(job), unicode(job.created),
            job_display = '"%s" (%d host summaries, %d events)' % \
                (unicode(job),
                 job.job_host_summaries.count(), job.job_events.count())
            if job.status in ('pending', 'waiting', 'running'):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s job %s', action_text, job.status, job_display)
                skipped += 1
            elif job.created >= self.cutoff:
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s', action_text, job_display)
                skipped += 1
            else:
                action_text = 'would delete' if self.dry_run else 'deleting'
                self.logger.info('%s %s', action_text, job_display)
                if not self.dry_run:
                    job.delete()
                deleted += 1
        return skipped, deleted

    def cleanup_ad_hoc_commands(self):
        skipped, deleted = 0, 0
        for ad_hoc_command in AdHocCommand.objects.all():
            ad_hoc_command_display = '"%s" (started %s, %d events)' % \
                (unicode(ad_hoc_command), unicode(ad_hoc_command.created),
            ad_hoc_command_display = '"%s" (%d events)' % \
                (unicode(ad_hoc_command),
                 ad_hoc_command.ad_hoc_command_events.count())
            if ad_hoc_command.status in ('pending', 'waiting', 'running'):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s ad hoc command %s', action_text, ad_hoc_command.status, ad_hoc_command_display)
                skipped += 1
            elif ad_hoc_command.created >= self.cutoff:
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s', action_text, ad_hoc_command_display)
                skipped += 1
            else:
                action_text = 'would delete' if self.dry_run else 'deleting'
                self.logger.info('%s %s', action_text, ad_hoc_command_display)
                if not self.dry_run:
                    ad_hoc_command.delete()
                deleted += 1
        return skipped, deleted

    def cleanup_project_updates(self):
        skipped, deleted = 0, 0
        for pu in ProjectUpdate.objects.all():
            pu_display = '"%s" (started %s)' % (unicode(pu), unicode(pu.created))
            pu_display = '"%s" (type %s)' % (unicode(pu), unicode(pu.launch_type))
            if pu.status in ('pending', 'waiting', 'running'):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s project update %s', action_text, pu.status, pu_display)
            if pu in (pu.project.current_update, pu.project.last_update) and pu.project.scm_type:
                skipped += 1
            elif pu in (pu.project.current_update, pu.project.last_update) and pu.project.scm_type:
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s', action_text, pu_display)
                skipped += 1
            elif pu.created >= self.cutoff:
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s', action_text, pu_display)
                skipped += 1
            else:
                action_text = 'would delete' if self.dry_run else 'deleting'
                self.logger.info('%s %s', action_text, pu_display)
                if not self.dry_run:
                    pu.delete()
                deleted += 1
        return skipped, deleted

    def cleanup_inventory_updates(self):
        skipped, deleted = 0, 0
        for iu in InventoryUpdate.objects.all():
            iu_display = '"%s" (started %s)' % (unicode(iu), unicode(iu.created))
            iu_display = '"%s" (source %s)' % (unicode(iu), unicode(iu.source))
            if iu.status in ('pending', 'waiting', 'running'):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s inventory update %s', action_text, iu.status, iu_display)
            if iu in (iu.inventory_source.current_update, iu.inventory_source.last_update) and iu.inventory_source.source:
                skipped += 1
            elif iu in (iu.inventory_source.current_update, iu.inventory_source.last_update) and iu.inventory_source.source:
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s', action_text, iu_display)
                skipped += 1
            elif iu.created >= self.cutoff:
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s', action_text, iu_display)
                skipped += 1
            else:
                action_text = 'would delete' if self.dry_run else 'deleting'
                self.logger.info('%s %s', action_text, iu_display)
                if not self.dry_run:
                    iu.delete()
                deleted += 1
        return skipped, deleted

    def cleanup_management_jobs(self):
        skipped, deleted = 0, 0
        for sj in SystemJob.objects.all():
            sj_display = '"%s" (started %s)' % (unicode(sj), unicode(sj.created))
            sj_display = '"%s" (type %s)' % (unicode(sj), unicode(sj.job_type))
            if sj.status in ('pending', 'waiting', 'running'):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s system_job %s', action_text, sj.status, sj_display)
                skipped += 1
            elif sj.created >= self.cutoff:
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s', action_text, sj_display)
                skipped += 1
            else:
                action_text = 'would delete' if self.dry_run else 'deleting'
                self.logger.info('%s %s', action_text, sj_display)
                if not self.dry_run:
                    sj.delete()
                deleted += 1
        return skipped, deleted

    def init_logging(self):
        log_levels = dict(enumerate([logging.ERROR, logging.INFO,
@ -141,6 +185,50 @@ class Command(NoArgsCommand):
        self.logger.addHandler(handler)
        self.logger.propagate = False

    def cleanup_workflow_jobs(self):
        skipped, deleted = 0, 0
        for workflow_job in WorkflowJob.objects.all():
            workflow_job_display = '"{}" ({} nodes)'.format(
                unicode(workflow_job),
                workflow_job.workflow_nodes.count())
            if workflow_job.status in ('pending', 'waiting', 'running'):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s job %s', action_text, workflow_job.status, workflow_job_display)
                skipped += 1
            elif workflow_job.created >= self.cutoff:
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s', action_text, workflow_job_display)
                skipped += 1
            else:
                action_text = 'would delete' if self.dry_run else 'deleting'
                self.logger.info('%s %s', action_text, workflow_job_display)
                if not self.dry_run:
                    workflow_job.delete()
                deleted += 1
        return skipped, deleted

    def cleanup_notifications(self):
        skipped, deleted = 0, 0
        for notification in Notification.objects.all():
            notification_display = '"{}" (started {}, {} type, {} sent)'.format(
                unicode(notification), unicode(notification.created),
                notification.notification_type, notification.notifications_sent)
            if notification.status in ('pending',):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s notification %s', action_text, notification.status, notification_display)
                skipped += 1
            elif notification.created >= self.cutoff:
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s', action_text, notification_display)
                skipped += 1
            else:
                action_text = 'would delete' if self.dry_run else 'deleting'
                self.logger.info('%s %s', action_text, notification_display)
                if not self.dry_run:
                    notification.delete()
                deleted += 1
        return skipped, deleted

    @transaction.atomic
    def handle_noargs(self, **options):
        self.verbosity = int(options.get('verbosity', 1))
@ -151,13 +239,19 @@ class Command(NoArgsCommand):
            self.cutoff = now() - datetime.timedelta(days=self.days)
        except OverflowError:
            raise CommandError('--days specified is too large. Try something less than 99999 (about 270 years).')
        model_names = ('jobs', 'ad_hoc_commands', 'project_updates', 'inventory_updates', 'management_jobs')
        model_names = ('jobs', 'ad_hoc_commands', 'project_updates', 'inventory_updates',
                       'management_jobs', 'workflow_jobs', 'notifications')
        models_to_cleanup = set()
        for m in model_names:
            if options.get('only_%s' % m, False):
                models_to_cleanup.add(m)
        if not models_to_cleanup:
            models_to_cleanup.update(model_names)
        for m in model_names:
            if m in models_to_cleanup:
                getattr(self, 'cleanup_%s' % m)()
        with disable_activity_stream(), disable_computed_fields():
            for m in model_names:
                if m in models_to_cleanup:
                    skipped, deleted = getattr(self, 'cleanup_%s' % m)()
                    if self.dry_run:
                        self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
                    else:
                        self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
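
A hedged sketch of a dry run limited to the two new cleanup targets; the only_* dest names come from the make_option() calls above, while days/dry_run are assumed to be defined earlier in the unchanged option list:

from django.core.management import call_command

# Report what would be deleted without deleting anything.
call_command('cleanup_jobs', days=30, dry_run=True,
             only_workflow_jobs=True, only_notifications=True)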
@ -47,3 +47,4 @@ class Command(BaseCommand):
                                   inventory=i,
                                   credential=c)
            print('Default organization added.')
            print('Demo Credential, Inventory, and Job Template added.')
33
awx/main/management/commands/deprovision_node.py
Normal file
@ -0,0 +1,33 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved

import subprocess
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from awx.main.models import Instance


class Command(BaseCommand):
    """
    Deprovision a Tower cluster node
    """

    option_list = BaseCommand.option_list + (
        make_option('--name', dest='name', type='string',
                    help='Hostname used during provisioning'),
    )

    def handle(self, *args, **options):
        if not options.get('name'):
            raise CommandError("--name is a required argument")
        instance = Instance.objects.filter(hostname=options.get('name'))
        if instance.exists():
            instance.delete()
            result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(options.get('name')), shell=True).wait()
            if result != 0:
                print("Node deprovisioning may have failed when attempting to remove the RabbitMQ instance from the cluster")
            else:
                print('Successfully deprovisioned {}'.format(options.get('name')))
        else:
            print('No instance found matching name {}'.format(options.get('name')))
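
A hedged usage sketch for the new command (hostname is illustrative); it deletes the Instance row and then asks RabbitMQ to forget the node:

from django.core.management import call_command

call_command('deprovision_node', name='awx-node-2.example.com')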
@ -26,10 +26,9 @@ from django.utils.encoding import smart_text

# AWX
from awx.main.models import * # noqa
from awx.main.task_engine import TaskEnhancer
from awx.main.utils import ignore_inventory_computed_fields, check_proot_installed, wrap_args_with_proot
from awx.main.signals import disable_activity_stream
from awx.main.task_engine import TaskSerializer as LicenseReader
from awx.main.conf import tower_settings

logger = logging.getLogger('awx.main.commands.inventory_import')

@ -65,7 +64,7 @@ class MemObject(object):
        all_vars = {}
        files_found = 0
        for suffix in ('', '.yml', '.yaml', '.json'):
            path = ''.join([base_path, suffix])
            path = ''.join([base_path, suffix]).encode("utf-8")
            if not os.path.exists(path):
                continue
            if not os.path.isfile(path):
@ -358,7 +357,7 @@ class ExecutableJsonLoader(BaseLoader):
        data = {}
        stdout, stderr = '', ''
        try:
            if self.is_custom and getattr(tower_settings, 'AWX_PROOT_ENABLED', False):
            if self.is_custom and getattr(settings, 'AWX_PROOT_ENABLED', False):
                if not check_proot_installed():
                    raise RuntimeError("proot is not installed but is configured for use")
                kwargs = {'proot_temp_dir': self.source_dir} # TODO: Remove proot dir
@ -463,7 +462,7 @@ class ExecutableJsonLoader(BaseLoader):
        # to set their variables
        for k,v in self.all_group.all_hosts.iteritems():
            if 'hostvars' not in _meta:
                data = self.command_to_json([self.source, '--host', k])
                data = self.command_to_json([self.source, '--host', k.encode("utf-8")])
            else:
                data = _meta['hostvars'].get(k, {})
            if isinstance(data, dict):
@ -483,6 +482,7 @@ def load_inventory_source(source, all_group=None, group_filter_re=None,
    # good naming conventions
    source = source.replace('azure.py', 'windows_azure.py')
    source = source.replace('satellite6.py', 'foreman.py')
    source = source.replace('vmware.py', 'vmware_inventory.py')
    logger.debug('Analyzing type of source: %s', source)
    original_all_group = all_group
    if not os.path.exists(source):
@ -1191,9 +1191,8 @@ class Command(NoArgsCommand):
        self._create_update_group_hosts()

    def check_license(self):
        reader = LicenseReader()
        license_info = reader.from_database()
        if not license_info or len(license_info) == 0:
        license_info = TaskEnhancer().validate_enhancements()
        if license_info.get('license_key', 'UNLICENSED') == 'UNLICENSED':
            self.logger.error(LICENSE_NON_EXISTANT_MESSAGE)
            raise CommandError('No Tower license found!')
        available_instances = license_info.get('available_instances', 0)
@ -1255,6 +1254,12 @@ class Command(NoArgsCommand):
        except re.error:
            raise CommandError('invalid regular expression for --host-filter')

        '''
        TODO: Remove this deprecation when we remove support for rax.py
        '''
        if self.source == "rax.py":
            self.logger.info("Rackspace inventory sync is Deprecated in Tower 3.1.0 and support for Rackspace will be removed in a future release.")

        begin = time.time()
        self.load_inventory_from_database()

@ -1,12 +1,11 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

from awx.main.management.commands._base_instance import BaseCommandInstance
from awx.main.models import Instance
from django.core.management.base import NoArgsCommand

instance_str = BaseCommandInstance.instance_str

class Command(BaseCommandInstance):
class Command(NoArgsCommand):
    """List instances from the Tower database
    """

@ -14,5 +13,4 @@ class Command(BaseCommandInstance):
        super(Command, self).__init__()

        for instance in Instance.objects.all():
            print("uuid: %s; hostname: %s; primary: %s; created: %s; modified: %s" %
                  (instance.uuid, instance.hostname, instance.primary, instance.created, instance.modified))
            print("hostname: {}; created: {}; heartbeat: {}".format(instance.hostname, instance.created, instance.modified))
@ -1,63 +1,30 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

from django.core.management.base import CommandError

from awx.main.management.commands._base_instance import BaseCommandInstance
from awx.main.models import Instance
from django.conf import settings

instance_str = BaseCommandInstance.instance_str
from optparse import make_option
from django.core.management.base import BaseCommand

class Command(BaseCommandInstance):
    """Internal tower command.
    Register this instance with the database for HA tracking.

    This command is idempotent.

    This command will error out in the following conditions:

      * Attempting to register a secondary machine with no primary machines.
      * Attempting to register a primary instance when a different primary
        instance exists.
      * Attempting to re-register an instance with changed values.
class Command(BaseCommand):
    """
    Internal tower command.
    Register this instance with the database for HA tracking.
    """
    def __init__(self):
        super(Command, self).__init__()

        self.include_options_roles()
        self.include_option_hostname_set()
    option_list = BaseCommand.option_list + (
        make_option('--hostname', dest='hostname', type='string',
                    help='Hostname used during provisioning'),
    )

    def handle(self, *args, **options):
        super(Command, self).handle(*args, **options)

        uuid = self.get_UUID()

        # Is there an existing record for this machine? If so, retrieve that record and look for issues.
        try:
            instance = Instance.objects.get(uuid=uuid)
            if instance.hostname != self.get_option_hostname():
                raise CommandError('Instance already registered with a different hostname %s.' % instance_str(instance))
            print("Instance already registered %s" % instance_str(instance))
        except Instance.DoesNotExist:
            # Get a status on primary machines (excluding this one, regardless of its status).
            other_instances = Instance.objects.exclude(uuid=uuid)
            primaries = other_instances.filter(primary=True).count()

            # If this instance is being set to primary and a *different* primary machine already exists, error out.
            if self.is_option_primary() and primaries:
                raise CommandError('Another instance is already registered as primary.')

            # Lastly, if there are no primary machines at all, then don't allow this to be registered as a secondary machine.
            if self.is_option_secondary() and not primaries:
                raise CommandError('Unable to register a secondary machine until another primary machine has been registered.')

            # Okay, we've checked for appropriate errata; perform the registration.
            instance = Instance(uuid=uuid, primary=self.is_option_primary(), hostname=self.get_option_hostname())
            instance.save()

            # If this is a primary instance, update projects.
            if instance.primary:
                self.update_projects(instance)

            # Done!
            print('Successfully registered instance %s.' % instance_str(instance))
    def handle(self, **options):
        uuid = settings.SYSTEM_UUID
        instance = Instance.objects.filter(hostname=options.get('hostname'))
        if instance.exists():
            print("Instance already registered {}".format(instance[0]))
            return
        instance = Instance(uuid=uuid, hostname=options.get('hostname'))
        instance.save()
        print('Successfully registered instance {}'.format(instance))
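
A hedged sketch of the new registration path (hostname is illustrative); it is idempotent per hostname, with SYSTEM_UUID taken from settings:

from django.core.management import call_command

call_command('register_instance', hostname='awx-node-1.example.com')
# Running it again just reports the existing record:
call_command('register_instance', hostname='awx-node-1.example.com')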
@ -1,43 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

from django.core.management.base import CommandError
from awx.main.management.commands._base_instance import BaseCommandInstance

from awx.main.models import Instance

instance_str = BaseCommandInstance.instance_str

class Command(BaseCommandInstance):
    """Internal tower command.
    Remove an existing instance from the HA instance table.

    This command is idempotent.

    This command will error out in the following conditions:

      * Attempting to remove a primary instance.
    """
    def __init__(self):
        super(Command, self).__init__()

        self.include_option_hostname_uuid_find()

    def handle(self, *args, **options):
        super(Command, self).handle(*args, **options)

        # Is there an existing record for this machine? If so, retrieve that record and look for issues.
        try:
            # Get the instance.
            instance = Instance.objects.get(**self.get_unique_fields())

            # Sanity check: Do not remove the primary instance.
            if instance.primary:
                raise CommandError('Cannot remove primary instance %s. Another instance must be promoted to primary first.' % instance_str(instance))

            # Remove the instance.
            instance.delete()
            print('Successfully removed instance %s.' % instance_str(instance))
        except Instance.DoesNotExist:
            print('No matching instance found to remove.')
@ -2,34 +2,38 @@
# All Rights Reserved.

# Python
import os
import sys
import datetime
import logging
import signal
import time
from multiprocessing import Process, Queue
from uuid import UUID
from multiprocessing import Process
from multiprocessing import Queue as MPQueue
from Queue import Empty as QueueEmpty
from Queue import Full as QueueFull

from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin

# Django
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.db import transaction, DatabaseError
from django.utils.dateparse import parse_datetime
from django.utils.timezone import FixedOffset
from django.db import connection
from django.db import connection as django_connection
from django.db import DatabaseError
from django.core.cache import cache as django_cache

# AWX
from awx.main.models import * # noqa
from awx.main.socket import Socket

logger = logging.getLogger('awx.main.commands.run_callback_receiver')


class CallbackReceiver(object):
    def __init__(self):
        self.parent_mappings = {}

    def run_subscriber(self, use_workers=True):
class CallbackBrokerWorker(ConsumerMixin):
    def __init__(self, connection, use_workers=True):
        self.connection = connection
        self.worker_queues = []
        self.total_messages = 0
        self.init_workers(use_workers)

    def init_workers(self, use_workers=True):
        def shutdown_handler(active_workers):
            def _handler(signum, frame):
                try:
@ -42,253 +46,88 @@ class CallbackReceiver(object):
                    pass
            return _handler

        def check_pre_handle(data):
            event = data.get('event', '')
            if event == 'playbook_on_play_start':
                return True
            return False

        worker_queues = []

        if use_workers:
            connection.close()
            django_connection.close()
            django_cache.close()
            for idx in range(settings.JOB_EVENT_WORKERS):
                queue_actual = Queue(settings.JOB_EVENT_MAX_QUEUE_SIZE)
                queue_actual = MPQueue(settings.JOB_EVENT_MAX_QUEUE_SIZE)
                w = Process(target=self.callback_worker, args=(queue_actual, idx,))
                w.start()
                if settings.DEBUG:
                    logger.info('Started worker %s' % str(idx))
                worker_queues.append([0, queue_actual, w])
                self.worker_queues.append([0, queue_actual, w])
        elif settings.DEBUG:
            logger.warn('Started callback receiver (no workers)')

        main_process = Process(
            target=self.callback_handler,
            args=(use_workers, worker_queues,)
        )
        main_process.daemon = True
        main_process.start()
        signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in self.worker_queues]))
        signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in self.worker_queues]))

        signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in worker_queues] + [main_process]))
        signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in worker_queues] + [main_process]))
        while True:
            workers_changed = False
            idx = 0
            for queue_worker in worker_queues:
                if not queue_worker[2].is_alive():
                    logger.warn("Worker %s was not alive, restarting" % str(queue_worker))
                    workers_changed = True
                    queue_worker[2].join()
                    w = Process(target=self.callback_worker, args=(queue_worker[1], idx,))
                    w.daemon = True
                    w.start()
                    signal.signal(signal.SIGINT, shutdown_handler([w]))
                    signal.signal(signal.SIGTERM, shutdown_handler([w]))
                    queue_worker[2] = w
                idx += 1
            if workers_changed:
                signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in worker_queues] + [main_process]))
                signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in worker_queues] + [main_process]))
            if not main_process.is_alive():
                logger.error("Main process is not alive")
                for queue_worker in worker_queues:
                    queue_worker[2].terminate()
                break
            time.sleep(0.1)
    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=[Queue(settings.CALLBACK_QUEUE,
                                       Exchange(settings.CALLBACK_QUEUE, type='direct'),
                                       routing_key=settings.CALLBACK_QUEUE)],
                         accept=['json'],
                         callbacks=[self.process_task])]

    def write_queue_worker(self, preferred_queue, worker_queues, message):
    def process_task(self, body, message):
        if "uuid" in body and body['uuid']:
            try:
                queue = UUID(body['uuid']).int % settings.JOB_EVENT_WORKERS
            except Exception:
                queue = self.total_messages % settings.JOB_EVENT_WORKERS
        else:
            queue = self.total_messages % settings.JOB_EVENT_WORKERS
        self.write_queue_worker(queue, body)
        self.total_messages += 1
        message.ack()

    def write_queue_worker(self, preferred_queue, body):
        queue_order = sorted(range(settings.JOB_EVENT_WORKERS), cmp=lambda x, y: -1 if x==preferred_queue else 0)
        write_attempt_order = []
        for queue_actual in queue_order:
            try:
                worker_actual = worker_queues[queue_actual]
                worker_actual[1].put(message, block=True, timeout=2)
                worker_actual = self.worker_queues[queue_actual]
                worker_actual[1].put(body, block=True, timeout=5)
                worker_actual[0] += 1
                return queue_actual
            except QueueFull:
                pass
            except Exception:
                import traceback
                tb = traceback.format_exc()
                logger.warn("Could not write to queue %s" % preferred_queue)
                continue
        return None

    def callback_handler(self, use_workers, worker_queues):
        total_messages = 0
        last_parent_events = {}
        with Socket('callbacks', 'r') as callbacks:
            for message in callbacks.listen():
                total_messages += 1
                if 'ad_hoc_command_id' in message:
                    self.process_ad_hoc_event(message)
                elif not use_workers:
                    self.process_job_event(message)
                else:
                    job_parent_events = last_parent_events.get(message['job_id'], {})
                    if message['event'] in ('playbook_on_play_start', 'playbook_on_stats', 'playbook_on_vars_prompt'):
                        parent = job_parent_events.get('playbook_on_start', None)
                    elif message['event'] in ('playbook_on_notify',
                                              'playbook_on_setup',
                                              'playbook_on_task_start',
                                              'playbook_on_no_hosts_matched',
                                              'playbook_on_no_hosts_remaining',
                                              'playbook_on_include',
                                              'playbook_on_import_for_host',
                                              'playbook_on_not_import_for_host'):
                        parent = job_parent_events.get('playbook_on_play_start', None)
                    elif message['event'].startswith('runner_on_') or message['event'].startswith('runner_item_on_'):
                        list_parents = []
                        list_parents.append(job_parent_events.get('playbook_on_setup', None))
                        list_parents.append(job_parent_events.get('playbook_on_task_start', None))
                        list_parents = sorted(filter(lambda x: x is not None, list_parents), cmp=lambda x, y: y.id - x.id)
                        parent = list_parents[0] if len(list_parents) > 0 else None
                    else:
                        parent = None
                    if parent is not None:
                        message['parent'] = parent.id
                    if 'created' in message:
                        del(message['created'])
                    if message['event'] in ('playbook_on_start', 'playbook_on_play_start',
                                            'playbook_on_setup', 'playbook_on_task_start'):
                        job_parent_events[message['event']] = self.process_job_event(message)
                    else:
                        if message['event'] == 'playbook_on_stats':
                            job_parent_events = {}

                        actual_queue = self.write_queue_worker(total_messages % settings.JOB_EVENT_WORKERS, worker_queues, message)
                        # NOTE: It might be better to recycle the entire callback receiver process if one or more of the queues are too full
                        # the drawback is that if we are under extremely high load we may be legitimately taking a while to process messages
                        if actual_queue is None:
                            logger.error("All queues full!")
                            sys.exit(1)
                    last_parent_events[message['job_id']] = job_parent_events

    @transaction.atomic
    def process_job_event(self, data):
        # Sanity check: Do we need to do anything at all?
        event = data.get('event', '')
        parent_id = data.get('parent', None)
        if not event or 'job_id' not in data:
            return

        # Get the correct "verbose" value from the job.
        # If for any reason there's a problem, just use 0.
        try:
            verbose = Job.objects.get(id=data['job_id']).verbosity
        except Exception as e:
            verbose = 0

        # Convert the datetime for the job event's creation appropriately,
        # and include a time zone for it.
        #
        # In the event of any issue, throw it out, and Django will just save
        # the current time.
        try:
            if not isinstance(data['created'], datetime.datetime):
                data['created'] = parse_datetime(data['created'])
            if not data['created'].tzinfo:
                data['created'] = data['created'].replace(tzinfo=FixedOffset(0))
        except (KeyError, ValueError):
            data.pop('created', None)

        # Print the data to stdout if we're in DEBUG mode.
        if settings.DEBUG:
            print(data)

        # Sanity check: Don't honor keys that we don't recognize.
        for key in data.keys():
            if key not in ('job_id', 'event', 'event_data',
                           'created', 'counter'):
                data.pop(key)

        # Save any modifications to the job event to the database.
        # If we get a database error of some kind, bail out.
        try:
            # If we're not in verbose mode, wipe out any module
            # arguments.
            res = data['event_data'].get('res', {})
            if isinstance(res, dict):
                i = res.get('invocation', {})
                if verbose == 0 and 'module_args' in i:
                    i['module_args'] = ''

            # Create a new JobEvent object.
            job_event = JobEvent(**data)
            if parent_id is not None:
                job_event.parent = JobEvent.objects.get(id=parent_id)
            job_event.save(post_process=True)

            # Return the job event object.
            return job_event
        except DatabaseError as e:
            # Log the error and bail out.
            logger.error('Database error saving job event: %s', e)
            return None

    @transaction.atomic
    def process_ad_hoc_event(self, data):
        # Sanity check: Do we need to do anything at all?
        event = data.get('event', '')
        if not event or 'ad_hoc_command_id' not in data:
            return

        # Get the correct "verbose" value from the job.
        # If for any reason there's a problem, just use 0.
        try:
            verbose = AdHocCommand.objects.get(id=data['ad_hoc_command_id']).verbosity
        except Exception as e:
            verbose = 0

        # Convert the datetime for the job event's creation appropriately,
        # and include a time zone for it.
        #
        # In the event of any issue, throw it out, and Django will just save
        # the current time.
        try:
            if not isinstance(data['created'], datetime.datetime):
                data['created'] = parse_datetime(data['created'])
            if not data['created'].tzinfo:
                data['created'] = data['created'].replace(tzinfo=FixedOffset(0))
        except (KeyError, ValueError):
            data.pop('created', None)

        # Print the data to stdout if we're in DEBUG mode.
        if settings.DEBUG:
            print(data)

        # Sanity check: Don't honor keys that we don't recognize.
        for key in data.keys():
            if key not in ('ad_hoc_command_id', 'event', 'event_data',
                           'created', 'counter'):
                data.pop(key)

        # Save any modifications to the ad hoc command event to the database.
        # If we get a database error of some kind, bail out.
        try:
            # If we're not in verbose mode, wipe out any module
            # arguments. FIXME: Needed for adhoc?
            res = data['event_data'].get('res', {})
            if isinstance(res, dict):
                i = res.get('invocation', {})
                if verbose == 0 and 'module_args' in i:
                    i['module_args'] = ''

            # Create a new AdHocCommandEvent object.
            ad_hoc_command_event = AdHocCommandEvent.objects.create(**data)

            # Return the ad hoc command event object.
            return ad_hoc_command_event
        except DatabaseError as e:
            # Log the error and bail out.
            logger.error('Database error saving ad hoc command event: %s', e)
                logger.warn("Detail: {}".format(tb))
            write_attempt_order.append(preferred_queue)
        logger.warn("Could not write payload to any queue, attempted order: {}".format(write_attempt_order))
        return None

    def callback_worker(self, queue_actual, idx):
        while True:
            try:
                message = queue_actual.get(block=True, timeout=1)
                body = queue_actual.get(block=True, timeout=1)
            except QueueEmpty:
                continue
            except Exception as e:
                logger.error("Exception on listen socket, restarting: " + str(e))
                break
            self.process_job_event(message)
                logger.error("Exception on worker thread, restarting: " + str(e))
                continue
            try:
                if 'job_id' not in body and 'ad_hoc_command_id' not in body:
                    raise Exception('Payload does not have a job_id or ad_hoc_command_id')
                if settings.DEBUG:
                    logger.info('Body: {}'.format(body))
                try:
                    if 'job_id' in body:
                        JobEvent.create_from_data(**body)
                    elif 'ad_hoc_command_id' in body:
                        AdHocCommandEvent.create_from_data(**body)
                except DatabaseError as e:
                    logger.error('Database Error Saving Job Event: {}'.format(e))
            except Exception as exc:
                import traceback
                tb = traceback.format_exc()
                logger.error('Callback Task Processor Raised Exception: %r', exc)
                logger.error('Detail: {}'.format(tb))


class Command(NoArgsCommand):
    '''
@ -299,9 +138,9 @@ class Command(NoArgsCommand):
    help = 'Launch the job callback receiver'

    def handle_noargs(self, **options):
        cr = CallbackReceiver()
        try:
            cr.run_subscriber()
        except KeyboardInterrupt:
            pass

        with Connection(settings.BROKER_URL) as conn:
            try:
                worker = CallbackBrokerWorker(conn)
                worker.run()
            except KeyboardInterrupt:
                print('Terminating Callback Receiver')
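
A standalone illustration of the dispatch rule in process_task() above: the same message UUID always maps to the same worker queue, and messages without a usable UUID fall back to a running-counter round-robin (the worker count here is illustrative; the real value comes from settings.JOB_EVENT_WORKERS):

from uuid import UUID

JOB_EVENT_WORKERS = 4  # illustrative

def pick_queue(body, total_messages):
    # Mirror of the routing arithmetic in process_task().
    if body.get('uuid'):
        try:
            return UUID(body['uuid']).int % JOB_EVENT_WORKERS
        except ValueError:
            pass
    return total_messages % JOB_EVENT_WORKERS

u = '01234567-89ab-cdef-0123-456789abcdef'
assert pick_queue({'uuid': u}, 0) == pick_queue({'uuid': u}, 99)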
Some files were not shown because too many files have changed in this diff.